Dataset schema (29 columns):
- hexsha: string, length 40
- size: int64, 4 to 1.02M
- ext: string, 8 classes
- lang: string, 1 class
- max_stars_repo_path: string, length 4 to 209
- max_stars_repo_name: string, length 5 to 121
- max_stars_repo_head_hexsha: string, length 40
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k, nullable
- max_stars_repo_stars_event_min_datetime: string, length 24, nullable
- max_stars_repo_stars_event_max_datetime: string, length 24, nullable
- max_issues_repo_path: string, length 4 to 209
- max_issues_repo_name: string, length 5 to 121
- max_issues_repo_head_hexsha: string, length 40
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 67k, nullable
- max_issues_repo_issues_event_min_datetime: string, length 24, nullable
- max_issues_repo_issues_event_max_datetime: string, length 24, nullable
- max_forks_repo_path: string, length 4 to 209
- max_forks_repo_name: string, length 5 to 121
- max_forks_repo_head_hexsha: string, length 40
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k, nullable
- max_forks_repo_forks_event_min_datetime: string, length 24, nullable
- max_forks_repo_forks_event_max_datetime: string, length 24, nullable
- content: string, length 4 to 1.02M
- avg_line_length: float64, 1.07 to 66.1k
- max_line_length: int64, 4 to 266k
- alphanum_fraction: float64, 0.01 to 1
hexsha: abbbab78ac2efd12cd47b899f708bfccff53c88b | size: 2,823 | ext: py | lang: Python
path: __init__.py | repo: magicaltrevor/myvis | head: 77d0f597095208f2e64544d8cc0d3b912bbd6716 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# TODO: Add an appropriate license to your skill before publishing. See
# the LICENSE file for more information.
# Below is the list of outside modules you'll be using in your skill.
# They might be built-in to Python, from mycroft-core or from external
# libraries. If you use an external library, be sure to include it
# in the requirements.txt file so the library is installed properly
# when the skill gets installed later by a user.
from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill, intent_handler
from mycroft.util.log import LOG
# Each skill is contained within its own class, which inherits base methods
# from the MycroftSkill class. You extend this class as shown below.
# TODO: Change "Template" to a unique name for your skill
class MyVisSkill(MycroftSkill):
# The constructor of the skill, which calls MycroftSkill's constructor
def __init__(self):
super(MyVisSkill, self).__init__(name="MyVisSkill")
# Initialize working variables used within the skill.
self.count = 0
# The "handle_xxxx_intent" function is triggered by Mycroft when the
# skill's intent is matched. The intent is defined by the IntentBuilder()
# pieces, and is triggered when the user's utterance matches the pattern
# defined by the keywords. In this case, the match occurs when one word
# is found from each of the files:
# vocab/en-us/Hello.voc
#    vocab/en-us/Lifeform.voc
# In this example that means it would match on utterances like:
# 'Hello world'
# 'Howdy you great big world'
# 'Greetings planet earth'
@intent_handler(IntentBuilder("").require("Hello").require("Lifeform"))
def handle_hello_lifeform_intent(self, message):
# In this case, respond by simply speaking a canned response.
# Mycroft will randomly speak one of the lines from the file
# dialogs/en-us/hello.world.dialog
self.speak_dialog("hello.world")
@intent_handler(IntentBuilder("").require("Count").require("Dir"))
def handle_count_intent(self, message):
if message.data["Dir"] == "up":
self.count += 1
else: # assume "down"
self.count -= 1
self.speak_dialog("count.is.now", data={"count": self.count})
# The "stop" method defines what Mycroft does when told to stop during
# the skill's execution. In this case, since the skill's functionality
# is extremely simple, there is no need to override it. If you DO
# need to implement stop, you should return True to indicate you handled
# it.
#
# def stop(self):
# return False
# The "create_skill()" method is used to create an instance of the skill.
# Note that it's outside the class itself.
def create_skill():
return MyVisSkill()
avg_line_length: 42.772727 | max_line_length: 78 | alphanum_fraction: 0.69961
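The skill above (__init__.py from magicaltrevor/myvis) wires its handlers to Adapt vocabulary and dialog resources that are not part of this row. A minimal sketch of the assumed layout follows; the file names are taken from the .require(...) and speak_dialog(...) calls, but every entry is an illustrative assumption, not content from the repository:

```python
# Assumed skill resources (illustrative only):
#
#   vocab/en-us/Hello.voc             hello / howdy / greetings
#   vocab/en-us/Lifeform.voc          world / planet / earth
#   vocab/en-us/Count.voc             count
#   vocab/en-us/Dir.voc               up / down
#   dialog/en-us/hello.world.dialog   "Hello world!"
#   dialog/en-us/count.is.now.dialog  "The count is now {{count}}"
#
# With these in place, an utterance like "count up" matches handle_count_intent,
# increments self.count, and speaks the rendered count.is.now line.
```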
hexsha: 1fdabe5e78207ead75184f8f55f254e6a6fc5d18 | size: 4,845 | ext: py | lang: Python
path: FINN/test/test_hwgq_sfc_w1a2.py | repo: HyunwooKim2/FINN | head: fac4ff40aaba1c14aa416738e3d22802649a94f8 | licenses: ["BSD-3-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2021-03-18T01:43:29.000Z to 2021-03-18T01:43:29.000Z)
content:
#!/usr/bin/env python
# Copyright (c) 2018, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from FINN.core.config import FINN_ROOT
import FINN.core.nn as nn
from FINN.frontend.caffeloader import CaffeLoader
from FINN.core.coverification import testOnMNIST
import FINN.transforms.transformations as transform
import FINN.backend.fpga.backend_fpga as fpga_backend
import copy
import numpy as np
import sys
import tempfile
import shutil
class TestHWGQSFCw1a2(unittest.TestCase):
"""Test HWGQ network import and streamlining using a small binarized FC net."""
def setUp(self):
nname = "sfc-w1a2"
proto = FINN_ROOT +"/inputs/%s.prototxt" % nname
weights = FINN_ROOT + "/inputs/%s.caffemodel" % nname
l = CaffeLoader(weights, proto)
self.net = nn.NN(l)
self.streamlined_net = copy.deepcopy(self.net)
self.streamlined_net.layers = transform.makeCromulent(self.streamlined_net.layers)
# use the first numImagesToTest of the test set for verification
self.numImagesToTest = 1000
# expected number of successful predictions
self.ok_golden = 967
# expected number of unsuccessful predictions
self.nok_golden = 33
def test_num_matrix_layers(self):
self.assertIs(4, self.net.count_matrix_layers())
def test_import_correctness(self):
(ok, nok) = testOnMNIST(self.net, self.numImagesToTest)
self.assertTrue(ok == self.ok_golden and nok == self.nok_golden)
def test_streamline_correctness(self):
(ok, nok) = testOnMNIST(self.streamlined_net, self.numImagesToTest)
self.assertTrue(ok == self.ok_golden and nok == self.nok_golden)
def test_fpgabackend_rawhls(self):
# resource allocation function to set number of PE/SIMD per layer
# the allocation is statically determined for this test case.
def res_alloc_predetermined(pipeline):
ret_pipeline = copy.deepcopy(pipeline)
layer_simd = [16, 64, 64, 64]
layer_pe = [64, 64, 64, 10]
for i in range(4):
ret_pipeline[i].simd = layer_simd[i]
ret_pipeline[i].pe = layer_pe[i]
return ret_pipeline
# make a temp dir for generated HLS
dirpath = tempfile.mkdtemp()
# pick all layers except first (input quantization) and last
# (final batchnorm) of the streamlined network
hlslayers = self.streamlined_net.layers[1:-1]
# call the FPGA backend to generate HLS and compile raw HLS sim
ret = fpga_backend.synthesize(hlslayers, res_alloc_predetermined, dirpath, "sfcall-")
hlspipeline = ret.getSimLayer()
# build a "mixed pipeline", where the first and last layers are in
# device-neutral simulation, and everything in the middle is handled
# by the HLS sim executable
mixed_pipeline = [self.streamlined_net.layers[0]] + hlspipeline + [self.streamlined_net.layers[-1]]
# test on MNIST
(ok, nok) = testOnMNIST(nn.NN(layers=mixed_pipeline), self.numImagesToTest)
# remove temp dir
shutil.rmtree(dirpath)
self.assertTrue(ok == self.ok_golden and nok == self.nok_golden)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestHWGQSFCw1a2)
unittest.TextTestRunner(verbosity=2).run(suite)
avg_line_length: 47.970297 | max_line_length: 107 | alphanum_fraction: 0.714138
hexsha: 53babcbb8017fd5b00a9c7086eafe5559e67df41 | size: 17,163 | ext: py | lang: Python
path: src/ts_analysis/imaging/multivariate.py | repo: tedchengf/ts_analysis | head: b1ed127b5392d177c51bd136107aa0fec4a1759c | licenses: ["MIT"]
max_stars_count: 1 (2022-01-11T00:19:26.000Z to 2022-01-11T00:19:26.000Z) | max_issues_count: null | max_forks_count: null
content:
# multivariate.py
import numpy as np
from numba import njit, jit, prange
from scipy import stats
from tqdm import tqdm
import warnings
import matplotlib
import matplotlib.pyplot as plt
from ts_analysis.dataframes import rdm
from ts_analysis.utilities import aux
from ts_analysis.utilities import matop
from ts_analysis.dataframes.dframe import DFrame
###############################################################################
# tsRSA class #
###############################################################################
class tsRSA:
def __init__(self, target_tsRDMs, candidate_RDMs, name, identifier):
self.name = None
self.target_tsRDMs = None
self.candidate_RDMs = None
self.identifier = None
self.trial_distribution = None
self.__target_dict = None
self.__candidate_dict = None
self.__identifier_dict = None
self.__initialize_instance(target_tsRDMs, candidate_RDMs, name, identifier)
def RSA(self, target_names = None, candidate_names = None, interpolation = "subject", show_progress = False):
if show_progress == True:
print ("\nPerforming RSA")
tar_tsRDMs, cand_RDMs = self.__assemble_data(target_names, candidate_names)
results = []
pbar = tqdm(total = len(cand_RDMs), disable = not show_progress)
c_names = []
for cand in cand_RDMs:
c_names.append(cand.name)
if interpolation == "subject":
sum_tsRDM, count_RDM = self.__average_tsRDM(tar_tsRDMs)
tri_mask = count_RDM.tri > 0
averaged_tstri = np.divide(sum_tsRDM.ts_tri[:, tri_mask], count_RDM.tri[tri_mask])
curr_result = np.empty(averaged_tstri.shape[0])
quick_pearsonr_tstri_b(averaged_tstri, cand.tri[tri_mask], curr_result)
results.append([curr_result])
else:
target_results = []
for tsRDM in tar_tsRDMs:
target_tri = tsRDM.ts_tri
candidate_tri = cand.slice(tsRDM.identifier, ktype = "identity")
curr_result = np.empty(tsRDM.ts_tri.shape[0])
quick_pearsonr_tstri_b(target_tri, candidate_tri, curr_result)
target_results.append(curr_result)
results.append(np.array(target_results))
pbar.update(1)
if interpolation == "subject": t_names = ["averaged targets"]
else:
t_names = []
for ts_RDM in tar_tsRDMs: t_names.append(ts_RDM.name)
results = np.array(results)
# print(results.shape)
# return DFrame(results, ["candidates", "targets", "time points"], [c_names, t_names, np.arange(results.shape[2])])
return RSA_results(results, t_names, c_names)
def estimate_upper_lower_bounds(self, TFunc, target_names = None, show_progress = False):
if show_progress == True:
print ("\nEstimating Bounds")
assert self.identifier is not None, "The identifier is undefined"
# Step 1: Apply transformation to the target RDMs
tar_tsRDMs = self.__assemble_data(target_names, None)[0]
pbar = tqdm(total = 3, disable = not show_progress)
transformed_tsRDMs = []
for ts_RDM in tar_tsRDMs:
transformed_RDM = ts_RDM.transform(TFunc, return_type = "instance")
transformed_tsRDMs.append(transformed_RDM)
pbar.update(1)
# Step 2: Obtain Average target RDM
sum_tsRDM, count_RDM = self.__average_tsRDM(transformed_tsRDMs)
pbar.update(1)
# Step 3: Estimate upper and lower bound
upperbound = np.zeros((sum_tsRDM.ts_tri.shape[0]))
lowerbound = np.zeros((sum_tsRDM.ts_tri.shape[0]))
for ts_RDM in transformed_tsRDMs:
temp_results = np.zeros((sum_tsRDM.ts_tri.shape[0]))
curr_tstri = sum_tsRDM.slice(ts_RDM.identifier, ktype = "identity")
curr_count = count_RDM.slice(ts_RDM.identifier, ktype = "identity")
# estimate upperbound
upperbound_tstri = np.divide(curr_tstri, curr_count)
quick_pearsonr_tstri(ts_RDM.ts_tri, upperbound_tstri, temp_results)
upperbound += temp_results
# estimate lowerbound
curr_tstri -= ts_RDM.ts_tri
curr_count -= 1
# A mask is needed because it is possible that the subject contains triangular values that are not found in the lowerbound_tstri
curr_count_mask = curr_count > 0
lowerbound_tstri = np.divide(curr_tstri[:, curr_count_mask], curr_count[curr_count_mask])
quick_pearsonr_tstri(ts_RDM.ts_tri[:, curr_count_mask], lowerbound_tstri, temp_results)
lowerbound += temp_results
pbar.update(1)
return np.divide(upperbound, len(transformed_tsRDMs)), np.divide(lowerbound, len(transformed_tsRDMs))
def check_distribution(self):
count_RDM = rdm.RDM(np.empty((self.identifier.shape[0])), "RDM overlap", tri = np.zeros((matop.find_tri_dim(self.identifier.shape[0])), dtype = int), identifier = self.identifier)
for ts_RDM in self.target_tsRDMs:
curr_trial_ind = aux.dict_arr_query(ts_RDM.identifier, self.__identifier_dict)[0]
curr_tri_ind = matop.extract_tri_ind(curr_trial_ind, len(self.identifier))
count_RDM.tri[curr_tri_ind] += 1
return self.trial_distribution.copy(), count_RDM
# Other Basic Functions #
def slice(self, trial_ind, ktype = "index", target_names = None, candidate_names = None):
assert ktype in ("index", "identity"), "The parameter ktype must be one from (index, identity)"
if ktype == "index":
extract_identifier = self.identifier[trial_ind]
else:
extract_identifier = trial_ind
tar_tsRDMs, cand_RDMs = self.__assemble_data(target_names, candidate_names)
new_target_tsRDMs = []
for ts_RDM in tar_tsRDMs:
new_target_tsRDMs.append(ts_RDM.slice(extract_identifier, ktype = "identity", return_type = "instance", silence_warning = True))
new_candidate_RDMs = []
for cRDM in cand_RDMs:
new_candidate_RDMs.append(cRDM.slice(extract_identifier, ktype = "identity", return_type = "instance"))
return tsRSA(new_target_tsRDMs, new_candidate_RDMs, self.name, extract_identifier)
def copy(self, name = None):
if name is None:
name = self.name
return tsRSA(self.target_tsRDMs.copy(), self.candidate_RDMs.copy(), name, self.identifier)
def __repr__(self):
type_str = "Type: tsRSA"
name_str = "Data Name: " + self.name
trial_str = "Trial: " + str(len(self.identifier))
target_str = "Target tsRDMs: " + str(len(self.target_tsRDMs))
candidate_str = "Candidate RDMs:"
for k in self.__candidate_dict.keys():
candidate_str += "\n - " + k
return type_str + "\n" + name_str + "\n" + trial_str + "\n" + target_str + "\n" + candidate_str
#------------------------------- Private Functions ---------------------------#
def __initialize_instance(self, target_tsRDMs, candidate_RDMs, name, identifier):
self.name = name
assert len(identifier) == candidate_RDMs[0].data.shape[0]
# Initialize trial identifiers
self.identifier = np.array(identifier)
assert len(identifier.shape) == 1, "The parameter identifier must be an instance of numpy.ndarray with exactly 1 dimensions"
self.__identifier_dict = dict(zip(identifier, np.arange(len(identifier))))
# Initialize tsRDMs and trial distribution
self.target_tsRDMs = np.array(target_tsRDMs)
self.trial_distribution = np.zeros((len(self.target_tsRDMs), len(self.identifier)), dtype = bool)
self.__target_dict = {}
for i, ts_RDM in enumerate(target_tsRDMs):
assert isinstance(ts_RDM, rdm.tsRDM), "The parameter target_tsRDM must be a list of tsRDM instances"
self.__target_dict.update({ts_RDM.name: i})
curr_dist, missing_keys = aux.dict_arr_query(ts_RDM.identifier, self.__identifier_dict)
assert len(missing_keys) == 0, "The target_tsRDMs contain identifiers uninitialized in the current instance: " + str(missing_keys)
self.trial_distribution[i][curr_dist] = True
self.candidate_RDMs = np.array(candidate_RDMs)
self.__candidate_dict = {}
for i, c_RDM in enumerate(candidate_RDMs):
assert isinstance(c_RDM, rdm.RDM), "The parameter candidate_RDMs must be a list of RDM instances"
assert candidate_RDMs[0].data.shape[0] == c_RDM.data.shape[0], "All RDM instances in the parameter candidate_RDMs should have the same trial dimension"
self.__candidate_dict.update({c_RDM.name: i})
def __assemble_data(self, target_names, candidate_names):
if target_names is None:
tar_tsRDMs = self.target_tsRDMs
else:
tar_ind, missing_keys = aux.dict_arr_query(target_names, self.__target_dict)
if len(missing_keys) != 0:
warnings.warn("The following target names are undefined in the current instance: " + str(missing_keys))
tar_tsRDMs = self.target_tsRDMs[tar_ind]
if candidate_names is None:
cand_RDMs = self.candidate_RDMs
else:
can_ind, missing_keys = aux.dict_arr_query(candidate_names, self.__candidate_dict)
if len(missing_keys) != 0:
warnings.warn("The following candidate names are undefined in the current instance: " + str(missing_keys))
cand_RDMs = self.candidate_RDMs[can_ind]
return tar_tsRDMs, cand_RDMs
def __average_tsRDM(self, ts_RDMs):
count_RDM = rdm.RDM(np.zeros((self.identifier.shape[0]), dtype = int), "RDM overlap", tri = np.zeros((matop.find_tri_dim(self.identifier.shape[0])), dtype = int), identifier = self.identifier)
sum_tsRDM = rdm.tsRDM(np.empty((self.identifier.shape[0], 1, 1)), "Sum RDM", ts_tri = np.zeros((ts_RDMs[0].ts_tri.shape[0],matop.find_tri_dim(self.identifier.shape[0]))), identifier = self.identifier)
for ts_RDM in ts_RDMs:
curr_trial_ind, missing_keys = aux.dict_arr_query(ts_RDM.identifier, self.__identifier_dict)
curr_tri_ind = matop.extract_tri_ind(curr_trial_ind, len(self.identifier))
count_RDM.data[curr_trial_ind] += 1
count_RDM.tri[curr_tri_ind] += 1
sum_tsRDM.ts_tri[:, curr_tri_ind] += ts_RDM.ts_tri
return sum_tsRDM, count_RDM
def __ts_corr(self, tar_tsRDMs, cand_RDMs, interpolation, show_progress = False):
results = []
pbar = tqdm(total = len(cand_RDMs), disable = not show_progress)
c_names = []
for cand in cand_RDMs:
c_names.append(cand.name)
if interpolation == "subject":
sum_tsRDM, count_RDM = self.__average_tsRDM(tar_tsRDMs)
tri_mask = count_RDM.tri > 0
averaged_tstri = np.divide(sum_tsRDM.ts_tri[:, tri_mask], count_RDM.tri[tri_mask])
curr_result = np.empty(averaged_tstri.shape[0])
quick_pearsonr_tstri_b(averaged_tstri, cand.tri[tri_mask], curr_result)
results.append([curr_result])
else:
target_results = []
for tsRDM in tar_tsRDMs:
target_tri = tsRDM.ts_tri
candidate_tri = cand.slice(tsRDM.identifier, ktype = "identity")
curr_result = np.empty(tsRDM.ts_tri.shape[0])
quick_pearsonr_tstri_b(target_tri, candidate_tri, curr_result)
target_results.append(curr_result)
results.append(np.array(target_results))
pbar.update(1)
if interpolation == "subject": t_names = ["averaged targets"]
else:
t_names = []
for ts_RDM in tar_tsRDMs: t_names.append(ts_RDM.name)
return RSA_results(results, t_names, c_names)
###############################################################################
# RSA_results class #
###############################################################################
def collapse_RSA_results(all_RSA_results, name, collapse_dimension = "target", target_names = None, candidate_names = None):
if collapse_dimension == "target":
target_names = all_RSA_results[0].target_names.copy()
base_results = all_RSA_results[0].slice(candidate_names = candidate_names)
for ind in range(1, len(all_RSA_results)):
if candidate_names is not None: assert all_RSA_results[0].candidate_names == all_RSA_results[ind].candidate_names, "Candidate names mismatch between instances of all_RSA_results"
target_names = np.append(target_names,all_RSA_results[ind].target_names)
curr_result = all_RSA_results[ind].slice(candidate_names = candidate_names)
base_results = np.append(base_results, curr_result, axis = 1)
if candidate_names is None: candidate_names = all_RSA_results[0].candidate_names
return RSA_results(base_results, target_names, candidate_names)
return None
class RSA_results:
def __init__(self, results, target_names, candidate_names):
self.name = None
self.results = None
self.target_names = None
self.candidate_names = None
self.upperbound = None
self.lowerbound = None
self.__target_dict = None
self.__candidate_dict = None
# Initialization
results = np.array(results)
self.results = results
self.target_names = np.array(target_names)
self.candidate_names = np.array(candidate_names)
assert len(results.shape) == 3, "The parameter results must have three dimensions"
assert results.shape[0] == len(candidate_names), "The parameter candidate_names must match the first dimension of the results"
assert results.shape[1] == len(target_names), "The parameter target_names must match the second dimension of the results"
self.__target_dict = dict(zip(target_names,np.arange(results.shape[1])))
self.__candidate_dict = dict(zip(candidate_names,np.arange(results.shape[0])))
# TODO: make the interpretation of None input more consistent
def plot(self, title = None, candidate_names = None, target_names = None, bounds = None, fig = None, start_end = None, interval = 100, axis = [None, None, None, None], colors = None, font_size = 6):
data_result = self.slice(candidate_names, target_names = target_names, return_type = "instance")
data = np.average(data_result.results, axis = 1)
matplotlib.rcParams.update({'font.size': font_size})
if fig is None:
fig = plt.figure(figsize = (6.4, 4.8))
ax = fig.gca()
ax.margins(x=0)
ax.axis(axis)
if title is not None:
ax.set_title(title)
if start_end is not None:
start = int(start_end[0])
end = int(start_end[1])
label = np.linspace(start, end, (end-start)//interval+1, dtype=int)
# x_range = data.shape[1]
x_range = self.results.shape[2]
step = int(round(x_range / (len(label) - 1)))
tick_num = len(np.arange(0, x_range, step = step, dtype = int))
ax.set_xticks(np.arange(0, x_range, step = step, dtype = int))
ax.set_xticklabels(label[:tick_num])
if bounds is not None:
assert bounds in ("all", "upper", "lower"), "If defined, the parameter bounds must be one from (all, upper, lower)"
if bounds == "all" or bounds == "upper":
assert self.upperbound is not None,"The upperbound is undefined"
ax.plot(self.upperbound, label = "upperbound", color = "black", linestyle = "-")
if bounds == "all" or bounds == "lower":
assert self.lowerbound is not None,"The lowerbound is undefined"
ax.plot(self.lowerbound, label = "lowerbound", color = "black", linestyle = ":")
if len(data_result.candidate_names) > 0:
for c_ind, c_name in enumerate(data_result.candidate_names):
if colors is not None:
ax.plot(data[c_ind], label = c_name, color = colors[c_ind])
else:
ax.plot(data[c_ind], label = c_name)
ax.legend()
plt.close()
return fig
def slice(self, candidate_names = None, target_names = None, return_type = "arr"):
assert return_type in ("arr", "instance"), "The parameter return_type must be one from (arr, instance)"
if candidate_names is None:
cand_ind = np.arange(self.results.shape[0])
else:
cand_ind, missing_keys = aux.dict_arr_query(candidate_names, self.__candidate_dict)
if len(missing_keys) > 0:
warnings.warn("The following candidate names are undefined: " + str(missing_keys))
if target_names is None:
tar_ind = np.arange(self.results.shape[1])
else:
tar_ind, missing_keys = aux.dict_arr_query(target_names, self.__target_dict)
if len(missing_keys) > 0:
warnings.warn("The following target names are undefined: " + str(missing_keys))
new_results = self.results[np.ix_(cand_ind, tar_ind)].copy()
if return_type == "arr":
return self.results[np.ix_(cand_ind, tar_ind)].copy()
else:
return (RSA_results(new_results, self.target_names[tar_ind].copy(), self.candidate_names[cand_ind].copy()))
def __repr__(self):
type_str = "Type: results"
if self.name is None: name_str = "Data Name: Undefined"
else: name_str = "Data Name: " + self.name
results_str = "Result Dimension: " + str(self.results.shape)
candidate_str = "Candidate Names:"
for k in self.candidate_names:
candidate_str += "\n - " + k
return type_str + "\n" + name_str + "\n" + results_str + "\n" + candidate_str
###############################################################################
# Support functions #
###############################################################################
def corrcoef_z_transform(tri):
tri = np.subtract(1, tri)
results = np.empty(len(tri), dtype = tri.dtype)
quick_z_transform(tri, results)
return results
def invert_corrcoef(tri):
return np.subtract(1, tri)
def z_transform(tri):
results = np.empty(len(tri), dtype = tri.dtype)
quick_z_transform(tri, results)
return results
@njit(parallel = True)
def quick_z_transform(tri, results):
for i in prange(len(tri)):
results[i] = np.log((1+tri[i])/(1-tri[i]))/2
# @njit(parallel = True)
# def quick_pearsonr_tstri(ts_a, ts_b, result_ts):
# for t_ind in prange(ts_a.shape[0]):
# result_ts[t_ind] = np.corrcoef(ts_a[t_ind,:], ts_b[t_ind,:])[0,1]
def quick_pearsonr_tstri(ts_a, ts_b, result_ts):
for t_ind in range(ts_a.shape[0]):
result_ts[t_ind] = np.corrcoef(ts_a[t_ind,:], ts_b[t_ind,:])[0,1]
@njit(parallel = True)
def quick_pearsonr_tstri_b(ts_a, b, result_ts):
for t_ind in prange(ts_a.shape[0]):
result_ts[t_ind] = np.corrcoef(ts_a[t_ind,:], b)[0,1]
avg_line_length: 45.646277 | max_line_length: 202 | alphanum_fraction: 0.710715
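Both RSA() and estimate_upper_lower_bounds() in multivariate.py above reduce to one Pearson correlation per time point between RDM triangle vectors (quick_pearsonr_tstri_b / quick_pearsonr_tstri). A minimal numpy-only sketch of that inner step, with made-up array sizes:

```python
import numpy as np

ts_a = np.random.rand(5, 10)   # 5 time points x 10 upper-triangle entries (illustrative sizes)
b = np.random.rand(10)         # one candidate RDM triangle
result = np.empty(ts_a.shape[0])
for t in range(ts_a.shape[0]):
    # same computation as quick_pearsonr_tstri_b, minus the numba parallel wrapper
    result[t] = np.corrcoef(ts_a[t, :], b)[0, 1]
print(result.shape)  # (5,) -> one correlation per time point
```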
hexsha: 961528ca23a51534ae6b0771ae256cb93750af23 | size: 1,202 | ext: py | lang: Python
path: kervi/core/utility/settings.py | repo: kervi/kervi-core | head: 3c1e3c8a17a7b4d085d8a28b99180ff2a96b0e23 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
#Copyright 2017 Tim Wentlau.
#Distributed under the MIT License. See LICENSE in root of project.
from kervi.spine import Spine
class Settings(object):
"""
Class that persists settings to the Kervi database.
:param settings_group:
To avoid name clashes with other settings in the Kervi application,
enter a name to group your settings under.
:type settings_group: ``str``
"""
def __init__(self, settings_group=None):
self.group = settings_group
self.spine = Spine()
def store_value(self, name, value):
"""Store a value to DB"""
self.spine.send_command("storeSetting", self.group, name, value)
def retrieve_value(self, name, default_value=None):
"""Retrieve a value from DB"""
value = self.spine.send_query("retrieveSetting", self.group, name, processes=["kervi-main"])
if value is None:
return default_value
elif isinstance(value, list) and len(value) == 0:
return default_value
elif isinstance(default_value, int):
return int(value)
elif isinstance(default_value, float):
return float(value)
else:
return value
avg_line_length: 30.05 | max_line_length: 100 | alphanum_fraction: 0.633111
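A hedged usage sketch of the Settings class above. The import path is inferred from the file location, the group name and keys are invented, and a running Kervi application is assumed so the Spine query can reach the kervi-main process:

```python
from kervi.core.utility.settings import Settings  # import path assumed from the file location

settings = Settings("camera")                    # group name is illustrative
settings.store_value("exposure", 0.25)           # issued as a "storeSetting" command
exposure = settings.retrieve_value("exposure", default_value=0.1)
# retrieve_value falls back to default_value and coerces stored values
# to the default's type (int or float), as the method above shows.
```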
hexsha: 4b78ea1de292f9bb4bd79dbfd2156263d473afb2 | size: 2,952 | ext: py | lang: Python
path: Packs/Cryptocurrency/Integrations/Cryptocurrency/Cryptocurrency.py | repo: diCagri/content | head: c532c50b213e6dddb8ae6a378d6d09198e08fc9f | licenses: ["MIT"]
max_stars_count: 799 (2016-08-02T06:43:14.000Z to 2022-03-31T11:10:11.000Z) | max_issues_count: 9,317 (2016-08-07T19:00:51.000Z to 2022-03-31T21:56:04.000Z) | max_forks_count: 1,297 (2016-08-04T13:59:00.000Z to 2022-03-31T23:43:06.000Z)
content:
import demistomock as demisto
from CommonServerPython import *
import urllib3
import traceback
from typing import Dict, List
# Disable insecure warnings
urllib3.disable_warnings()
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
BITCOIN = 'bitcoin'
INTEGRATION_NAME = 'Cryptocurrency'
SCORE = {
'None': 0,
'Good': 1,
'Suspicious': 2,
'Bad': 3,
}
def get_bitcoin_reputation(addresses, reliability, reputation, score) -> List[CommandResults]:
command_results: List[CommandResults] = []
for address in addresses:
dbot_score = Common.DBotScore(
indicator=address,
indicator_type=DBotScoreType.CRYPTOCURRENCY,
integration_name=INTEGRATION_NAME, # Vendor
score=score,
reliability=reliability,
)
crypto_context = Common.Cryptocurrency(
address=address,
address_type=BITCOIN,
dbot_score=dbot_score,
)
table_data = {
'Address': address,
'Cryptocurrency Address Type': BITCOIN,
'Reputation': reputation,
}
table_name = f'{INTEGRATION_NAME} reputation for {address}'
hr = tableToMarkdown(table_name, table_data)
command_results.append(CommandResults(
outputs_prefix='Cryptocurrency',
readable_output=hr,
outputs_key_field='Address',
indicator=crypto_context,
))
return command_results
def crypto_reputation_command(args: Dict[str, str], reliability: str, reputation: str):
crypto_addresses = argToList(args.get('crypto', ''))
# For cases where the command was executed by a playbook/user and the received addresses are verified:
# strip the `bitcoin:` prefix from the given addresses (if present), then add it back to match the convention.
if args.get('address_type') == BITCOIN:
bitcoin_addresses = [f'bitcoin:{address.lstrip("bitcoin:")}' for address in crypto_addresses]
else:
bitcoin_addresses = [address for address in crypto_addresses if BITCOIN in address]
score = SCORE[reputation]
result = get_bitcoin_reputation(bitcoin_addresses, reliability, reputation, score)
return result
def main():
params = demisto.params()
reliability = params['reliability']
reputation = params['reputation']
demisto.info(f'Command being called is {demisto.command()}')
try:
if demisto.command() == 'test-module':
return_results('ok')
elif demisto.command() == 'crypto':
return_results(crypto_reputation_command(demisto.args(), reliability, reputation))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
avg_line_length: 30.75 | max_line_length: 110 | alphanum_fraction: 0.661247
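A standalone illustration of the address normalization performed by crypto_reputation_command above; the address strings are made up and no XSOAR runtime is involved. Note that str.lstrip('bitcoin:') strips leading characters drawn from the set {b, i, t, c, o, n, :} rather than a literal prefix, which happens to work for the usual 'bitcoin:1...' form:

```python
addresses = ["bitcoin:1ExampleAddress", "1ExampleAddress"]  # made-up values
normalized = [f'bitcoin:{address.lstrip("bitcoin:")}' for address in addresses]
print(normalized)  # ['bitcoin:1ExampleAddress', 'bitcoin:1ExampleAddress']
```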
hexsha: feee4ca44443f4ce01a11caf299d8eb18f82919e | size: 2,532 | ext: py | lang: Python
path: codeowl/code.py | repo: FlorianLudwig/code-owl | head: be6518c89fb49ae600ee004504f9485f328e1090 | licenses: ["Apache-2.0"]
max_stars_count: 6 (2017-04-15T22:13:48.000Z to 2020-02-04T09:41:02.000Z) | max_issues_count: null | max_forks_count: null
content:
import pygments.token
import pygments.lexers
from . import score
class Token(object):
def __init__(self, pygments_token, line_no, line_pos):
self.type = pygments_token[0]
self.value = pygments_token[1]
self.source = pygments_token[1]
self.line_no = line_no
self.line_pos = line_pos
self.search_skip = False
def __getitem__(self, item):
if item == 0:
return self.type
elif item == 1:
return self.source
raise IndexError(item)
def match(self, other):
if self.type is not None and other.type is not None and self.type != other.type:
return -1
if self.value is not None and other.value is not None and self.value != other.value:
if self.value in other.value:
if self.type is pygments.token.Literal.String:
return score.IN_MATCH_IN_LITERAL_TOKEN
return score.IN_MATCH
return -1
return 0
def __str__(self):
return self.source
def __repr__(self):
return 'Token(({}, {}), {}, {})'.format(self.type, self.value, self.line_no, self.line_pos)
def parse(code, run_filter=True):
"""parse code from string or file-like object
:rtype: list[Token]"""
if not isinstance(code, basestring):
code = code.read()
lexer = pygments.lexers.get_lexer_for_filename('.py') # TODO support for other languages
# leading newlines are important as they change line numbering
lexer.stripall = False
lexer.stripnl = False
tokens = []
line_no = 1
line_pos = 1
for token in lexer.get_tokens(code):
tokens.append(Token(token, line_no, line_pos))
if token[1] == '\n':
line_no += 1
line_pos = 1
else:
line_pos += len(token[1])
if run_filter:
list(filter_tokens(tokens))
return tokens
def filter_tokens(tokens):
token_iter = iter(tokens)
while True:
token = token_iter.next()
if token.type == pygments.token.Literal.String:
token.search_skip = True
yield token
# we ignore the first and last token part of a string
yield token_iter.next()
token = token_iter.next()
token.search_skip = True
yield token
elif token.type == pygments.token.Comment:
token.value = token.value.strip('# \t') # TODO support for other languages
else:
yield token
avg_line_length: 30.878049 | max_line_length: 99 | alphanum_fraction: 0.598341
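A hedged usage sketch of parse() from codeowl/code.py above. The module is Python 2 code (basestring, iterator .next()), so a Python 2 interpreter with pygments installed is assumed, and the import path is inferred from the file location:

```python
from codeowl import code  # import path assumed from codeowl/code.py

tokens = code.parse("x = 'hello'  # greeting\n")
for token in tokens:
    # each Token carries its pygments type, source text, and position
    print token.line_no, token.line_pos, repr(token)
```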
hexsha: 7127bcda4cc3b303cb1967c2d719d10f0e01bd27 | size: 193 | ext: py | lang: Python
path: src/sage/__init__.py | repo: switzel/sage | head: 7eb8510dacf61b691664cd8f1d2e75e5d473e5a0 | licenses: ["BSL-1.0"]
max_stars_count: 2 (2018-06-30T01:37:35.000Z to 2018-06-30T01:37:39.000Z) | max_issues_count: null | max_forks_count: 1 (2020-07-24T12:20:37.000Z to 2020-07-24T12:20:37.000Z)
content:
__all__ = ['all']
# IPython calls this when starting up
def load_ipython_extension(*args):
import sage.repl.ipython_extension
sage.repl.ipython_extension.load_ipython_extension(*args)
avg_line_length: 27.571429 | max_line_length: 61 | alphanum_fraction: 0.782383
hexsha: 2b6ba5e87ab00ba722f0e171e9b5421eddc3ec87 | size: 4,324 | ext: py | lang: Python
path: word_embedding/pca.py | repo: junyaogz/pp4nlp | head: 9f403352dcce1874d32ba775a02cbacda0904966 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import numpy as np # Linear algebra library
import matplotlib.pyplot as plt # library for visualization
from sklearn.decomposition import PCA # PCA library
import pandas as pd # Data frame library
import math # Library for math functions
import random # Library for pseudo random numbers
n = 1 # The amount of the correlation
x = np.random.uniform(1,2,1000) # Generate 1000 samples from a uniform random variable
y = x.copy() * n # Make y = n * x
# PCA works better if the data is centered
x = x - np.mean(x) # Center x. Remove its mean
y = y - np.mean(y) # Center y. Remove its mean
data = pd.DataFrame({'x': x, 'y': y}) # Create a data frame with x and y
plt.scatter(data.x, data.y) # Plot the original correlated data in blue
pca = PCA(n_components=2) # Instantiate a PCA. Choose to get 2 output variables
# Create the transformation model for this data. Internally, it gets the rotation
# matrix and the explained variance
pcaTr = pca.fit(data)
rotatedData = pcaTr.transform(data) # Transform the data base on the rotation matrix of pcaTr
# # Create a data frame with the new variables. We call these new variables PC1 and PC2
dataPCA = pd.DataFrame(data = rotatedData, columns = ['PC1', 'PC2'])
# Plot the transformed data in orange
plt.scatter(dataPCA.PC1, dataPCA.PC2)
plt.show()
print('Eigenvectors or principal component: First row must be in the direction of [1, n]')
print(pcaTr.components_)
print()
print('Eigenvalues or explained variance')
print(pcaTr.explained_variance_)
import matplotlib.lines as mlines
import matplotlib.transforms as mtransforms
random.seed(100)
std1 = 1 # The desired standard deviation of our first random variable
std2 = 0.333 # The desired standard deviation of our second random variable
x = np.random.normal(0, std1, 1000) # Get 1000 samples from x ~ N(0, std1)
y = np.random.normal(0, std2, 1000) # Get 1000 samples from y ~ N(0, std2)
#y = y + np.random.normal(0,1,1000)*noiseLevel * np.sin(0.78)
# PCA works better if the data is centered
x = x - np.mean(x) # Center x
y = y - np.mean(y) # Center y
#Define a pair of dependent variables with a desired amount of covariance
n = 1 # Magnitude of covariance.
angle = np.arctan(1 / n) # Convert the covariance to an angle
print('angle: ', angle * 180 / math.pi)
# Create a rotation matrix using the given angle
rotationMatrix = np.array([[np.cos(angle), np.sin(angle)],
[-np.sin(angle), np.cos(angle)]])
print('rotationMatrix')
print(rotationMatrix)
xy = np.concatenate(([x] , [y]), axis=0).T # Create a matrix with columns x and y
# Transform the data using the rotation matrix. It correlates the two variables
data = np.dot(xy, rotationMatrix) # Return a nD array
# Print the rotated data
plt.scatter(data[:,0], data[:,1])
plt.show()
plt.scatter(data[:,0], data[:,1]) # Print the original data in blue
# Apply PCA. In theory, the Eigenvector matrix must be the
# inverse of the original rotationMatrix.
pca = PCA(n_components=2) # Instantiate a PCA. Choose to get 2 output variables
# Create the transformation model for this data. Internally it gets the rotation
# matrix and the explained variance
pcaTr = pca.fit(data)
# Create an array with the transformed data
dataPCA = pcaTr.transform(data)
print('Eigenvectors or principal component: First row must be in the direction of [1, n]')
print(pcaTr.components_)
print()
print('Eigenvalues or explained variance')
print(pcaTr.explained_variance_)
# Print the rotated data
plt.scatter(dataPCA[:,0], dataPCA[:,1])
# Plot the first component axe. Use the explained variance to scale the vector
plt.plot([0, rotationMatrix[0][0] * std1 * 3], [0, rotationMatrix[0][1] * std1 * 3], 'k-', color='red')
# Plot the second component axe. Use the explained variance to scale the vector
plt.plot([0, rotationMatrix[1][0] * std2 * 3], [0, rotationMatrix[1][1] * std2 * 3], 'k-', color='green')
plt.show()
nPoints = len(data)
# Plot the original data in blue
plt.scatter(data[:,0], data[:,1])
#Plot the projection along the first component in orange
plt.scatter(data[:,0], np.zeros(nPoints))
#Plot the projection along the second component in green
plt.scatter(np.zeros(nPoints), data[:,1])
plt.show()
avg_line_length: 35.735537 | max_line_length: 105 | alphanum_fraction: 0.703978
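A quick numeric check of the covariance-to-angle step in pca.py above: for n = 1, arctan(1/n) is 45 degrees, so the rows of the rotation matrix point along [1, 1] and [-1, 1] scaled by 1/sqrt(2):

```python
import numpy as np

n = 1
angle = np.arctan(1 / n)
print(np.degrees(angle))  # 45.0
rotationMatrix = np.array([[np.cos(angle), np.sin(angle)],
                           [-np.sin(angle), np.cos(angle)]])
print(np.round(rotationMatrix, 3))  # [[ 0.707  0.707] [-0.707  0.707]]
```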
hexsha: d0713bae3d6c8ddc76c329d0b60fc133ad409a1f | size: 2,281 | ext: py | lang: Python
path: bootstrap/pentagram/parse/statement.py | head: 5c4dc2fc516ec2844dc71ddb778ddadec036ce55 | licenses: ["MIT"]
stars repo: pentagram-lang/pentagram | max_stars_count: 2 (2018-07-28T22:41:12.000Z to 2018-08-11T02:17:02.000Z)
issues repo: tacit-lang/tacit | max_issues_count: 17 (2017-12-23T02:15:03.000Z to 2019-03-31T01:15:15.000Z)
forks repo: tacit-lang/tacit | max_forks_count: null
content:
from __future__ import annotations
from pentagram.parse.group import Group
from pentagram.parse.group import GroupComment
from pentagram.parse.group import GroupIdentifier
from pentagram.parse.group import GroupNumber
from pentagram.parse.group import GroupTerm
from pentagram.syntax import SyntaxAssignment
from pentagram.syntax import SyntaxBlock
from pentagram.syntax import SyntaxComment
from pentagram.syntax import SyntaxExpression
from pentagram.syntax import SyntaxIdentifier
from pentagram.syntax import SyntaxMethodDefinition
from pentagram.syntax import SyntaxNumber
from pentagram.syntax import SyntaxStatement
from pentagram.syntax import SyntaxTerm
def parse_statements_block(group: Group) -> SyntaxBlock:
return SyntaxBlock(
[
parse_one_statement(line.terms)
for line in group.lines
]
)
def parse_one_statement(
terms: list[GroupTerm],
) -> SyntaxStatement:
bindings: list[SyntaxIdentifier] = []
for term in terms:
if isinstance(term, GroupIdentifier):
if term.name == "=":
return SyntaxAssignment(
terms=parse_terms(
terms[len(bindings) + 1 :]
),
bindings=bindings,
)
elif term.name == "/=":
assert len(bindings) == 1
return SyntaxMethodDefinition(
binding=bindings[0],
definition=parse_one_statement(
terms[2:]
),
)
else:
bindings.append(SyntaxIdentifier(term.name))
else:
break
return SyntaxExpression(parse_terms(terms))
def parse_terms(
terms: list[GroupTerm],
) -> list[SyntaxTerm]:
return [parse_one_term(term) for term in terms]
def parse_one_term(term: GroupTerm) -> SyntaxTerm:
if isinstance(term, GroupNumber):
return SyntaxNumber(term.value)
elif isinstance(term, GroupIdentifier):
return SyntaxIdentifier(term.name)
elif isinstance(term, GroupComment):
return SyntaxComment(term.text)
elif isinstance(term, Group):
return parse_statements_block(term)
else:
raise AssertionError(term)
avg_line_length: 31.246575 | max_line_length: 60 | alphanum_fraction: 0.649277
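An illustrative summary of how parse_one_statement in statement.py above classifies a line of terms; the surface syntax shown is assumed from the parsing logic, not stated in the file:

```python
# Leading identifiers are collected as bindings until "=" or "/=" is seen:
#
#   x = f 1     -> SyntaxAssignment(bindings=[x], terms=parse_terms([f, 1]))
#   f /= g 1    -> SyntaxMethodDefinition(binding=f,
#                                         definition=parse_one_statement([g, 1]))
#   f 1 2       -> SyntaxExpression(parse_terms([f, 1, 2]))
#
# A non-identifier before any "=" stops the binding scan, so the whole line
# is parsed as a plain expression.
```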
hexsha: 1ca78bd231b8dc0df4a97582ac459c7c79bc8cb2 | size: 3,856 | ext: py | lang: Python
path: generated/nidcpower/nidcpower/_attributes.py | repo: kurtp-ni/nimi-python | head: 4f0bccce67a69ca9f46a8ab9b07dc26ca0049729 | licenses: ["MIT"]
max_stars_count: 88 (2017-08-03T18:07:27.000Z to 2022-01-28T13:55:06.000Z) | max_issues_count: 1,310 (2017-07-11T18:42:44.000Z to 2022-03-28T21:03:57.000Z) | max_forks_count: 70 (2017-07-25T14:52:53.000Z to 2022-03-31T14:14:23.000Z)
content:
# -*- coding: utf-8 -*-
# This file was generated
import nidcpower._converters as _converters
import hightime
class Attribute(object):
'''Base class for all typed attributes.'''
def __init__(self, attribute_id):
self._attribute_id = attribute_id
class AttributeViInt32(Attribute):
def __get__(self, session, session_type):
return session._get_attribute_vi_int32(self._attribute_id)
def __set__(self, session, value):
session._set_attribute_vi_int32(self._attribute_id, value)
class AttributeViInt32TimeDeltaMilliseconds(Attribute):
def __get__(self, session, session_type):
return hightime.timedelta(milliseconds=session._get_attribute_vi_int32(self._attribute_id))
def __set__(self, session, value):
session._set_attribute_vi_int32(self._attribute_id, _converters.convert_timedelta_to_milliseconds_int32(value).value)
class AttributeViInt64(Attribute):
def __get__(self, session, session_type):
return session._get_attribute_vi_int64(self._attribute_id)
def __set__(self, session, value):
session._set_attribute_vi_int64(self._attribute_id, value)
class AttributeViReal64(Attribute):
def __get__(self, session, session_type):
return session._get_attribute_vi_real64(self._attribute_id)
def __set__(self, session, value):
session._set_attribute_vi_real64(self._attribute_id, value)
class AttributeViReal64TimeDeltaSeconds(Attribute):
def __get__(self, session, session_type):
return hightime.timedelta(seconds=session._get_attribute_vi_real64(self._attribute_id))
def __set__(self, session, value):
session._set_attribute_vi_real64(self._attribute_id, _converters.convert_timedelta_to_seconds_real64(value).value)
class AttributeViString(Attribute):
def __get__(self, session, session_type):
return session._get_attribute_vi_string(self._attribute_id)
def __set__(self, session, value):
session._set_attribute_vi_string(self._attribute_id, value)
class AttributeViStringRepeatedCapability(Attribute):
def __get__(self, session, session_type):
return session._get_attribute_vi_string(self._attribute_id)
def __set__(self, session, value):
session._set_attribute_vi_string(self._attribute_id, _converters.convert_repeated_capabilities_without_prefix(value))
class AttributeViBoolean(Attribute):
def __get__(self, session, session_type):
return session._get_attribute_vi_boolean(self._attribute_id)
def __set__(self, session, value):
session._set_attribute_vi_boolean(self._attribute_id, value)
class AttributeEnum(object):
def __init__(self, underlying_attribute_meta_class, enum_meta_class, attribute_id):
self._underlying_attribute = underlying_attribute_meta_class(attribute_id)
self._attribute_type = enum_meta_class
self._attribute_id = attribute_id
def __get__(self, session, session_type):
return self._attribute_type(self._underlying_attribute.__get__(session, session_type))
def __set__(self, session, value):
if type(value) is not self._attribute_type:
raise TypeError('must be ' + str(self._attribute_type.__name__) + ' not ' + str(type(value).__name__))
return self._underlying_attribute.__set__(session, value.value)
# nitclk specific attribute type
class AttributeSessionReference(Attribute):
def __get__(self, session, session_type):
# Import here to avoid a circular dependency when initial import happens
from nidcpower.session import SessionReference
return SessionReference(session._get_attribute_vi_session(self._attribute_id))
def __set__(self, session, value):
session._set_attribute_vi_session(self._attribute_id, _converters.convert_to_nitclk_session_number(value))
avg_line_length: 33.241379 | max_line_length: 125 | alphanum_fraction: 0.764782
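A hedged sketch of how the descriptor classes above are wired into a session object. The fake session and the attribute id 1150001 are invented for illustration; run alongside the classes above, it only exercises the int32 get/set path:

```python
class _FakeSession(object):
    """Stands in for a driver session; stores attribute values in a dict."""
    def __init__(self):
        self._store = {}

    def _get_attribute_vi_int32(self, attribute_id):
        return self._store.get(attribute_id, 0)

    def _set_attribute_vi_int32(self, attribute_id, value):
        self._store[attribute_id] = value


class _DemoSession(_FakeSession):
    # AttributeViInt32 is the descriptor defined in the file above
    sample_count = AttributeViInt32(1150001)


session = _DemoSession()
session.sample_count = 5      # routed through _set_attribute_vi_int32
print(session.sample_count)   # 5, via _get_attribute_vi_int32
```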
hexsha: 3035f20451c7e5319434eaedd2b6c2251169e86c | size: 1,084 | ext: py | lang: Python
path: global_settings.py | repo: zlin7/deepsz | head: 2fef916108521682082e1797bd7ae26399b325c5 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import os
CUR_DIR = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = 'Z:/deepsz'
CNN_MODEL_OUTPUT_DIR = os.path.abspath(os.path.join(CUR_DIR, './CNN/deepsz1')) #'/media/zhen/Research/deepsz_pytorch_2'
FULL_DATA_PATH = os.path.join(DATA_PATH, 'maps/reso0.25_small')
FULL_DATA_LABEL_PATH = os.path.join(DATA_PATH, 'maps/reso0.25_small/z0.25_mvir2e+14_label.pkl')
VARYING_DIST_DATA_PATH = os.path.join(CUR_DIR, 'data/maps/varying_dist_to_center10x')
CACHING_DIR = os.path.abspath(os.path.join(CUR_DIR, "./data/cache"))
CACHE_MAPPED_HALOS = os.path.join(CACHING_DIR, 'halos_mapped_to_cutouts.pkl')
CACHE_FULLDF = os.path.join(CACHING_DIR, 'all_component_preds_w_MF.pkl')
CACHE_FULLDF_DIST2EDGE = os.path.join(CACHING_DIR, 'all_component_preds_w_MF_dist2edge.pkl')
CACHE_FULLDF_DIST2EDGE_CAL = os.path.join(CACHING_DIR, 'all_component_preds_w_MF_dist2edge_calibrated.pkl') #This is the calibrated CNN "probabilities"
CACHE_CNNFAILURE = os.path.join(CACHING_DIR, 'CNNFailures.pkl')
MF_OUTPUT_PATH = os.path.abspath(os.path.join(CACHING_DIR, "../10mJy-ptsrcs_catalog.npz.npz"))
avg_line_length: 51.619048 | max_line_length: 151 | alphanum_fraction: 0.800738
hexsha: 5f3619a90b089608b26fbc022ec11ae223471afb2 | size: 6,908 | ext: py | lang: Python
path: shared/OverwatchAPI.py | repo: hectorleiva/overwatch-serverless-telegram-bot | head: 36919ee2af6e331f70cf7b1c95a866f1c93023e4 | licenses: ["MIT"]
max_stars_count: 7 (2017-02-28T04:20:37.000Z to 2020-09-03T13:08:09.000Z) | max_issues_count: null | max_forks_count: 1 (2019-09-27T15:33:57.000Z to 2019-09-27T15:33:57.000Z)
content:
"""Class contacts that Overwatch API Server to return user information."""
class OverwatchAPI:
"""Contains all the methods and procedures to contact the Overwatch API."""
def __init__(self, logger, requests, BadgeGenerator):
"""Instatiate the necessary modules."""
self.logger = logger
self.OverwatchAPIDomain = 'https://owapi.net'
self.requests = requests
self.BadgeGenerator = BadgeGenerator(self.logger, self.requests)
self.regions = ['us', 'eu', 'any', 'kr'] # Supported regions from owapi.net
self.defaultRegion = 'us'
def prestigeFormatting(self, prestigeLevel, currentLevel):
"""Format prestige rating as stars after their current level."""
prestigeSign = str(currentLevel)
if prestigeLevel > 0:
for i in range(prestigeLevel):
prestigeSign += " *"
return prestigeSign
else:
return prestigeSign
# args is the array of text passed from the user for their battle info
def parseBattleInfoArgs(self, args):
"""Parse the user's Battle.net information.
The args being an array of text passed from the user.
"""
battleInfo = dict()
if len(args) == 1:
# Access the only argument given, which should be their battletag
info = args[0]
"""
The API can only take in '-' as the delimiter instead
of the pound-sign that is often used
"""
if "#" in info:
battleInfo['battleTagOrg'] = info
battleInfo['battleTag'] = info.replace('#', '-')
else:
battleInfo['battleTag'] = False
battleInfo['region'] = self.defaultRegion
elif len(args) > 1:
for index, info in enumerate(args):
self.logger.info('info: %s', info)
# Should be the username to search
if index == 0:
"""
The API can only take in '-' as the delimiter instead
of the pound-sign that is often used
"""
if "#" in info:
battleInfo['battleTagOrg'] = info
battleInfo['battleTag'] = info.replace('#', '-')
else:
battleInfo['battleTag'] = False
# If this exists, then the user is specifying a region value
elif index == 1:
if info in self.regions:
battleInfo['region'] = info
else:
battleInfo['region'] = self.defaultRegion
return battleInfo
def htmlFormatBattleInfo(self, title, val):
"""Format the passed title and value into html.
If there is no data returned for the specified stat information
"""
if not val:
return ""
else:
return "<i>{title}</i>: <strong>{val}</strong>\n".format(
title=title,
val=val)
def getUserStats(self, bot, update, args):
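"""Look up Overwatch stats for the battletag given in args and reply through the Telegram bot."""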
self.logger.info('update: %s', update)
self.logger.info('args: %s', args)
if not len(args):
msg = "Please enter your battletag, " \
"it should be something like `<your-name>#1234`\n"
msg += "The full command should be `/overwatch <your-name>#1234`."\
" You can also add your region"
msg += " like this: `/overwatch <your-name>#1234 us`"
return bot.send_message(chat_id=update.message.chat_id,
text=msg,
parse_mode='Markdown')
bot.send_message(chat_id=update.message.chat_id,
text="Ok, looking up the information, one moment...")
if len(args) > 2:
msg = "Sorry! I can only support at most 2 arguments. " \
"Your battletag `<your-name>#1234`"
msg += " and the region `us` or `eu`. the command should " \
"look like `<your-name>#1234"
msg += " or like `<your-name>#1234 us`."
return bot.send_message(chat_id=update.message.chat_id,
text=msg,
parse_mode='Markdown')
battleInfo = self.parseBattleInfoArgs(args)
self.logger.info('battleInfo: %s', battleInfo)
if battleInfo:
if not battleInfo['battleTag']:
msg = "Please enter your battletag, it should be " \
"something like `<your-name>#1234`\n"
msg += "The full command should be " \
"`/overwatch <your-name>#1234`"
return bot.send_message(chat_id=update.message.chat_id,
text=msg,
parse_mode='Markdown')
battleTagStr = str(battleInfo['battleTag'])
requestUrl = "{apiDomain}/api/v3/u/{battleTag}/stats".format(
apiDomain=self.OverwatchAPIDomain,
battleTag=battleTagStr
)
headers = {'user-agent': "{botname}/0.1".format(botname=bot.name)}
r = self.requests.get(requestUrl, headers=headers)
self.logger.info('the response: %s', r)
if r.status_code == 200:
response = r.json()
if battleInfo['region'] in response and response[battleInfo['region']] is not None:
gameStats = response[battleInfo['region']]['stats']
self.logger.info('Game Stats: %s', gameStats)
self.logger.info('attempting badge generator for {battleTag}'.format(
battleTag=battleTagStr
))
badge = self.BadgeGenerator.generateBadge(
gameStats,
battleTagStr
)
bot.send_photo(chat_id=update.message.chat_id,
photo=badge)
else:
bot.send_message(chat_id=update.message.chat_id,
text='Hmmm, the battletag does not exist. Battletags are case-sensitive and region specific. Please double-check that the battletag is correct!')
elif r.status_code == 500:
bot.send_message(chat_id=update.message.chat_id,
text='Seems like the API is not responding properly. Please try back later!')
else:
bot.send_message(chat_id=update.message.chat_id,
text='Hmmm, the battletag passed might not exist. Battletags are case-sensitive and region specific. Please double-check that the battletag is correct!')
avg_line_length: 42.641975 | max_line_length: 177 | alphanum_fraction: 0.525622
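An illustrative trace of parseBattleInfoArgs from OverwatchAPI.py above on typical argument lists; the battletag is made up:

```python
#   ["SomePlayer#1234"]        -> {'battleTagOrg': 'SomePlayer#1234',
#                                  'battleTag': 'SomePlayer-1234',
#                                  'region': 'us'}
#   ["SomePlayer#1234", "eu"]  -> same battletag fields, with 'region': 'eu'
#   ["SomePlayer1234"]         -> {'battleTag': False, 'region': 'us'}
```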
hexsha: 64bab03a038fdd45be2564a33181cac988ec0440 | size: 1,089 | ext: py | lang: Python
path: ironic_inspector/common/i18n.py | repo: NaohiroTamura/ironic-inspector | head: 7b7fba72de46806ce84d6d4758a2343b52b0c96d | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# Copyright 2015 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_i18n
_translators = oslo_i18n.TranslatorFactory(domain='ironic_inspector')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
avg_line_length: 34.03125 | max_line_length: 75 | alphanum_fraction: 0.770432
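A hedged usage sketch of the translation markers defined in i18n.py above, following the usual oslo.i18n pattern; the function and message text are illustrative:

```python
from ironic_inspector.common.i18n import _, _LW

def describe_node(node_uuid):
    # user-facing and log strings are wrapped where they are created
    if node_uuid is None:
        raise ValueError(_('A node UUID is required'))
    return _LW('Node %s is not in the cache') % node_uuid
```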
hexsha: 1ffb6260eb30589aa8b6c077c97c4a5e40952877 | size: 5,394 | ext: py | lang: Python
path: tests/user_tests.py | repo: Phantom-42/pywikibot | head: 9bf6568598377ec58215d841b51b52c6d9248348 | licenses: ["MIT"]
max_stars_count: 3 (2020-06-06T21:47:04.000Z to 2021-09-08T18:22:59.000Z) | max_issues_count: null | max_forks_count: 1 (2018-01-04T14:09:37.000Z to 2018-01-04T14:09:37.000Z)
content:
# -*- coding: utf-8 -*-
"""Tests for the User page."""
#
# (C) Pywikibot team, 2016-2018
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import pywikibot
from pywikibot.tools import suppress_warnings
from pywikibot import User
from tests.aspects import TestCase, unittest
class TestUserClass(TestCase):
"""Test User class."""
family = 'wikipedia'
code = 'de'
def test_registered_user(self):
"""Test registered user."""
user = User(self.site, 'Xqt')
with suppress_warnings('pywikibot.page.User.name', DeprecationWarning):
self.assertEqual(user.name(), user.username)
self.assertEqual(user.title(withNamespace=False), user.username)
self.assertTrue(user.isRegistered())
self.assertFalse(user.isAnonymous())
self.assertIsInstance(user.registration(), pywikibot.Timestamp)
self.assertGreater(user.editCount(), 0)
self.assertFalse(user.isBlocked())
self.assertTrue(user.isEmailable())
self.assertEqual(user.gender(), 'unknown')
self.assertIn('userid', user.getprops())
self.assertEqual(user.getprops()['userid'], 287832)
self.assertEqual(user.pageid, 6927779)
self.assertEqual(user.getUserPage(),
pywikibot.Page(self.site, 'Benutzer:Xqt'))
self.assertEqual(user.getUserPage(subpage='pwb'),
pywikibot.Page(self.site, 'Benutzer:Xqt/pwb'))
self.assertEqual(user.getUserTalkPage(),
pywikibot.Page(self.site, 'Benutzer Diskussion:Xqt'))
self.assertEqual(user.getUserTalkPage(subpage='pwb'),
pywikibot.Page(self.site,
'Benutzer Diskussion:Xqt/pwb'))
self.assertTrue(user.is_thankable)
contribs = user.contributions(total=10)
self.assertEqual(len(list(contribs)), 10)
self.assertTrue(all(isinstance(contrib, tuple)
for contrib in contribs))
self.assertTrue(all('user' in contrib
and contrib['user'] == user.username
for contrib in contribs))
self.assertIn('user', user.groups())
self.assertIn('edit', user.rights())
def test_registered_user_without_timestamp(self):
"""Test registered user when registration timestamp is None."""
user = User(self.site, 'Ulfb')
self.assertTrue(user.isRegistered())
self.assertFalse(user.isAnonymous())
self.assertIsNone(user.registration())
self.assertIsNone(user.getprops()['registration'])
self.assertGreater(user.editCount(), 0)
self.assertEqual(user.gender(), 'male')
self.assertIn('userid', user.getprops())
self.assertTrue(user.is_thankable)
def test_female_user(self):
"""Test female user."""
user = User(self.site, 'Alraunenstern')
self.assertTrue(user.isRegistered())
self.assertFalse(user.isAnonymous())
self.assertGreater(user.editCount(), 0)
self.assertEqual(user.gender(), 'female')
self.assertIn('userid', user.getprops())
self.assertTrue(user.is_thankable)
def test_anonymous_user(self):
"""Test registered user."""
user = User(self.site, '123.45.67.89')
with suppress_warnings('pywikibot.page.User.name', DeprecationWarning):
self.assertEqual(user.name(), user.username)
self.assertEqual(user.title(withNamespace=False), user.username)
self.assertFalse(user.isRegistered())
self.assertTrue(user.isAnonymous())
self.assertIsNone(user.registration())
self.assertFalse(user.isEmailable())
self.assertEqual(user.gender(), 'unknown')
self.assertIn('invalid', user.getprops())
self.assertFalse(user.is_thankable)
def test_unregistered_user(self):
"""Test unregistered user."""
user = User(self.site, 'This user name is not registered yet')
with suppress_warnings('pywikibot.page.User.name', DeprecationWarning):
self.assertEqual(user.name(), user.username)
self.assertEqual(user.title(withNamespace=False), user.username)
self.assertFalse(user.isRegistered())
self.assertFalse(user.isAnonymous())
self.assertIsNone(user.registration())
self.assertFalse(user.isEmailable())
self.assertEqual(user.gender(), 'unknown')
self.assertIn('missing', user.getprops())
self.assertFalse(user.is_thankable)
def test_invalid_user(self):
"""Test invalid user."""
user = User(self.site, 'Invalid char\x9f in Name')
with suppress_warnings('pywikibot.page.User.name', DeprecationWarning):
self.assertEqual(user.name(), user.username)
self.assertEqual(user.title(withNamespace=False), user.username)
self.assertFalse(user.isRegistered())
self.assertFalse(user.isAnonymous())
self.assertIsNone(user.registration())
self.assertFalse(user.isEmailable())
self.assertEqual(user.gender(), 'unknown')
self.assertIn('invalid', user.getprops())
self.assertFalse(user.is_thankable)
if __name__ == '__main__': # pragma: no cover
try:
unittest.main()
except SystemExit:
pass
| 41.175573
| 79
| 0.64238
|
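For orientation, a hedged standalone sketch of the User API exercised by the test above; it assumes a working pywikibot user-config pointed at de.wikipedia.org and network access:
# Illustrative only: requires a configured pywikibot installation.
import pywikibot
from pywikibot import User

site = pywikibot.Site('de', 'wikipedia')
user = User(site, 'Xqt')
if user.isRegistered() and not user.isBlocked():
    print(user.username, user.editCount(), user.registration())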
29c15c0d19ab60f3305562ed97bf21008777138e
| 1,570
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/topology_resource.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/topology_resource.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/topology_resource.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2017-01-20T18:25:46.000Z
|
2017-05-12T21:31:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TopologyResource(Model):
"""The network resource topology information for the given resource group.
:param name: Name of the resource.
:type name: str
:param id: ID of the resource.
:type id: str
:param location: Resource location.
:type location: str
:param associations: Holds the associations the resource has with other
resources in the resource group.
:type associations:
list[~azure.mgmt.network.v2017_08_01.models.TopologyAssociation]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'associations': {'key': 'associations', 'type': '[TopologyAssociation]'},
}
def __init__(self, **kwargs):
super(TopologyResource, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.id = kwargs.get('id', None)
self.location = kwargs.get('location', None)
self.associations = kwargs.get('associations', None)
| 36.511628
| 81
| 0.598726
|
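A minimal, hedged sketch of constructing the model above by hand; the field values are placeholders, and in practice the Network Watcher topology API populates them:
# Illustrative only: all values are placeholders.
resource = TopologyResource(
    name='my-vnet',
    id='/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network/virtualNetworks/my-vnet',
    location='westus',
    associations=[],
)
# msrest models generally expose serialize(); assuming that API is available:
print(resource.serialize())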
4c99e1cf9e20bd0e3534430b471a73cfdba0cba2
| 922
|
py
|
Python
|
prob/cointoss.py
|
ccorbell/gametheory
|
e1d25ab205634ae6638e3242ca8904e7cbea8bf4
|
[
"MIT"
] | null | null | null |
prob/cointoss.py
|
ccorbell/gametheory
|
e1d25ab205634ae6638e3242ca8904e7cbea8bf4
|
[
"MIT"
] | null | null | null |
prob/cointoss.py
|
ccorbell/gametheory
|
e1d25ab205634ae6638e3242ca8904e7cbea8bf4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 18 23:01:56 2022
@author: Christopher Corbell
"""
import random
class CoinToss:
HT = ['H', 'T']
def __init__(self):
self.headsP = 0.5
def toss(self):
if 0.5 == self.headsP:
return random.choice(CoinToss.HT)
else:
if random.uniform(0.0, 1.0) <= self.headsP:
return 'H'
else:
return 'T'
def run(self, trials):
"""
Run a trial of some number of coin tosses
Parameters
----------
trials : int
The number of coin tosses to run.
Returns
-------
The number of heads from the trial.
"""
headCount = 0
for n in range(0, trials):
if self.toss() == 'H':
headCount += 1
return headCount
| 20.488889
| 55
| 0.466377
|
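A quick usage sketch of the class above, comparing a fair coin with a biased one:
fair = CoinToss()
print(fair.run(1000), 'heads out of 1000 fair tosses')

biased = CoinToss()
biased.headsP = 0.7  # 70% chance of heads on each toss
print(biased.run(1000), 'heads out of 1000 biased tosses')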
ec9a30a965ff492b9ecde7d09eab67085558b0db
| 2,047
|
py
|
Python
|
misc/upload_release.py
|
victoriacity/taichi
|
e26d9c24b788539946f8f858aae487e5367e179c
|
[
"MIT"
] | 15
|
2020-01-29T19:07:19.000Z
|
2021-05-12T02:53:22.000Z
|
misc/upload_release.py
|
rexwangcc/taichi
|
13fb869cb7d86378c999a9a226d2742cd84cb78a
|
[
"MIT"
] | null | null | null |
misc/upload_release.py
|
rexwangcc/taichi
|
13fb869cb7d86378c999a9a226d2742cd84cb78a
|
[
"MIT"
] | 2
|
2020-01-31T20:10:35.000Z
|
2021-03-16T07:51:59.000Z
|
import os
import subprocess
import sys
import requests
def upload_taichi_version():
username = os.getenv('METADATA_USERNAME')
password = os.getenv('METADATA_PASSWORD')
url = os.getenv('METADATA_URL')
filename = os.listdir('./dist')[0]
filename = filename[:len(filename) - 4]
parts = filename.split('-')
payload = {'version': parts[1], 'platform': parts[4], 'python': parts[2]}
try:
response = requests.post(f'http://{url}/add_version/detail',
json=payload,
auth=(username, password),
timeout=5)
response.raise_for_status()
except requests.exceptions.ConnectionError as err:
sys.exit('Updating latest version failed: No internet, ' + str(err))
except requests.exceptions.HTTPError as err:
sys.exit('Updating latest version failed: Server error, ' + str(err))
except requests.exceptions.Timeout as err:
sys.exit(
'Updating latest version failed: Time out when connecting server, '
+ str(err))
except requests.exceptions.RequestException as err:
sys.exit('Updating latest version failed: ' + str(err))
response = response.json()
print(response['message'])
def upload_artifact(is_taichi):
pwd_env = 'PROD_PWD' if is_taichi else 'NIGHT_PWD'
twine_password = os.getenv(pwd_env)
if not twine_password:
sys.exit(f'Missing password env var {pwd_env}')
command = ["python3", "-m", "twine", "upload"]
if not is_taichi:
command.extend(['--repository', 'testpypi'])
command.extend(
['--verbose', '-u', '__token__', '-p', twine_password, 'dist/*'])
try:
subprocess.check_call(command)
except subprocess.CalledProcessError as e:
sys.exit(f"Twine upload returns error {e.returncode}")
if __name__ == '__main__':
is_taichi = os.getenv('PROJECT_NAME', 'taichi') == 'taichi'
upload_artifact(is_taichi)
if is_taichi:
upload_taichi_version()
| 35.912281
| 79
| 0.630191
|
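A worked example of the wheel-filename convention that the payload construction above depends on (the wheel name is hypothetical):
# Hypothetical wheel name found in ./dist
filename = 'taichi-1.0.0-cp39-cp39-manylinux_2_27_x86_64.whl'
filename = filename[:len(filename) - 4]   # strip '.whl'
parts = filename.split('-')
# parts[1] -> '1.0.0' (version), parts[2] -> 'cp39' (python tag),
# parts[4] -> 'manylinux_2_27_x86_64' (platform tag)
print({'version': parts[1], 'platform': parts[4], 'python': parts[2]})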
6bde6be7a51427f1be2a82219e9a34c96c314200
| 6,753
|
py
|
Python
|
google/cloud/aiplatform/datasets/time_series_dataset.py
|
kthytang/python-aiplatform
|
e82c1792293396045a1032df015a3700fc38609b
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform/datasets/time_series_dataset.py
|
kthytang/python-aiplatform
|
e82c1792293396045a1032df015a3700fc38609b
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform/datasets/time_series_dataset.py
|
kthytang/python-aiplatform
|
e82c1792293396045a1032df015a3700fc38609b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Dict, Optional, Sequence, Tuple, Union
from google.auth import credentials as auth_credentials
from google.cloud.aiplatform import datasets
from google.cloud.aiplatform.datasets import _datasources
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform import schema
from google.cloud.aiplatform import utils
class TimeSeriesDataset(datasets._ColumnNamesDataset):
"""Managed time series dataset resource for Vertex AI"""
_supported_metadata_schema_uris: Optional[Tuple[str]] = (
schema.dataset.metadata.time_series,
)
@classmethod
def create(
cls,
display_name: Optional[str] = None,
gcs_source: Optional[Union[str, Sequence[str]]] = None,
bq_source: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
request_metadata: Optional[Sequence[Tuple[str, str]]] = (),
labels: Optional[Dict[str, str]] = None,
encryption_spec_key_name: Optional[str] = None,
sync: bool = True,
create_request_timeout: Optional[float] = None,
) -> "TimeSeriesDataset":
"""Creates a new time series dataset.
Args:
display_name (str):
Optional. The user-defined name of the Dataset.
                The name can be up to 128 characters long and can consist
                of any UTF-8 characters.
gcs_source (Union[str, Sequence[str]]):
Google Cloud Storage URI(-s) to the
input file(s). May contain wildcards. For more
information on wildcards, see
https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
examples:
str: "gs://bucket/file.csv"
Sequence[str]: ["gs://bucket/file1.csv", "gs://bucket/file2.csv"]
bq_source (str):
BigQuery URI to the input table.
example:
"bq://project.dataset.table_name"
project (str):
Project to upload this model to. Overrides project set in
aiplatform.init.
location (str):
Location to upload this model to. Overrides location set in
aiplatform.init.
credentials (auth_credentials.Credentials):
Custom credentials to use to upload this model. Overrides
credentials set in aiplatform.init.
request_metadata (Sequence[Tuple[str, str]]):
Strings which should be sent along with the request as metadata.
labels (Dict[str, str]):
                Optional. Labels with user-defined metadata to organize your datasets.
                Label keys and values can be no longer than 64 characters
                (Unicode codepoints), can only contain lowercase letters, numeric
                characters, underscores and dashes. International characters are allowed.
                No more than 64 user labels can be associated with one Dataset
                (System labels are excluded).
See https://goo.gl/xmQnxf for more information and examples of labels.
System reserved label keys are prefixed with "aiplatform.googleapis.com/"
and are immutable.
encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the dataset. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this Dataset and all sub-resources of this Dataset will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
Returns:
time_series_dataset (TimeSeriesDataset):
Instantiated representation of the managed time series dataset resource.
"""
if not display_name:
display_name = cls._generate_display_name()
utils.validate_display_name(display_name)
if labels:
utils.validate_labels(labels)
api_client = cls._instantiate_client(location=location, credentials=credentials)
metadata_schema_uri = schema.dataset.metadata.time_series
datasource = _datasources.create_datasource(
metadata_schema_uri=metadata_schema_uri,
gcs_source=gcs_source,
bq_source=bq_source,
)
return cls._create_and_import(
api_client=api_client,
parent=initializer.global_config.common_location_path(
project=project, location=location
),
display_name=display_name,
metadata_schema_uri=metadata_schema_uri,
datasource=datasource,
project=project or initializer.global_config.project,
location=location or initializer.global_config.location,
credentials=credentials or initializer.global_config.credentials,
request_metadata=request_metadata,
labels=labels,
encryption_spec=initializer.global_config.get_encryption_spec(
encryption_spec_key_name=encryption_spec_key_name
),
sync=sync,
create_request_timeout=create_request_timeout,
)
def import_data(self):
raise NotImplementedError(
f"{self.__class__.__name__} class does not support 'import_data'"
)
| 43.850649
| 103
| 0.642381
|
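A hedged usage sketch of the create() classmethod documented above; the project, location and BigQuery table are placeholders:
# Illustrative only: requires Google Cloud credentials and real resources.
from google.cloud import aiplatform

aiplatform.init(project='my-project', location='us-central1')
ds = aiplatform.TimeSeriesDataset.create(
    display_name='sales-forecasting',
    bq_source='bq://my-project.sales.history',
)
print(ds.resource_name)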
ad96788ea9a67c1e7198c3965ab994645f9f741c
| 557
|
py
|
Python
|
pywebdata/services/googlemaps.py
|
pywebdata/pywebdata
|
74fb3d5adcdb549008ee04de5ae284066c0db362
|
[
"MIT"
] | null | null | null |
pywebdata/services/googlemaps.py
|
pywebdata/pywebdata
|
74fb3d5adcdb549008ee04de5ae284066c0db362
|
[
"MIT"
] | 8
|
2015-09-07T17:48:15.000Z
|
2016-01-31T19:56:28.000Z
|
pywebdata/services/googlemaps.py
|
pywebdata/pywebdata
|
74fb3d5adcdb549008ee04de5ae284066c0db362
|
[
"MIT"
] | null | null | null |
from pywebdata.baseservice import BaseService
class GoogleElevationAPI(BaseService):
name = 'google-elevation-api'
def __init__(self):
self.add_url('http://maps.googleapis.com/maps/api/elevation/json?locations=$latitude,$longitude')
self.add_input('latitude', iotype='float', required=True, min=-90., max=90., incr=1.)
self.add_input('longitude', iotype='float', required=True, min=-180., max=180., incr=1.)
self.add_output('elevation', iotype='float')
self.add_parser(staticmethod(lambda x:x['results']))
| 42.846154
| 105
| 0.692998
|
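The $latitude,$longitude placeholders in the URL above appear to follow string.Template-style syntax; a small sketch of that substitution (how BaseService actually issues the request is outside this snippet):
from string import Template

url = Template('http://maps.googleapis.com/maps/api/elevation/json?locations=$latitude,$longitude')
print(url.substitute(latitude=46.85, longitude=-121.76))
# -> http://maps.googleapis.com/maps/api/elevation/json?locations=46.85,-121.76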
157f2ec4a94e4f0c8a4626fd168ac34efd6a698a
| 20,645
|
py
|
Python
|
google/cloud/dialogflowcx_v3beta1/services/webhooks/async_client.py
|
wuyuexin/python-dialogflow-cx
|
80f36ad67c8bb6f27dc8c2c5271451b8fea48508
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/dialogflowcx_v3beta1/services/webhooks/async_client.py
|
wuyuexin/python-dialogflow-cx
|
80f36ad67c8bb6f27dc8c2c5271451b8fea48508
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/dialogflowcx_v3beta1/services/webhooks/async_client.py
|
wuyuexin/python-dialogflow-cx
|
80f36ad67c8bb6f27dc8c2c5271451b8fea48508
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.dialogflowcx_v3beta1.services.webhooks import pagers
from google.cloud.dialogflowcx_v3beta1.types import webhook
from google.cloud.dialogflowcx_v3beta1.types import webhook as gcdc_webhook
from google.protobuf import duration_pb2 as duration # type: ignore
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
from .transports.base import WebhooksTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import WebhooksGrpcAsyncIOTransport
from .client import WebhooksClient
class WebhooksAsyncClient:
"""Service for managing
[Webhooks][google.cloud.dialogflow.cx.v3beta1.Webhook].
"""
_client: WebhooksClient
DEFAULT_ENDPOINT = WebhooksClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = WebhooksClient.DEFAULT_MTLS_ENDPOINT
webhook_path = staticmethod(WebhooksClient.webhook_path)
from_service_account_file = WebhooksClient.from_service_account_file
from_service_account_json = from_service_account_file
get_transport_class = functools.partial(
type(WebhooksClient).get_transport_class, type(WebhooksClient)
)
def __init__(
self,
*,
credentials: credentials.Credentials = None,
transport: Union[str, WebhooksTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the webhooks client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.WebhooksTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint, this is the default value for
the environment variable) and "auto" (auto switch to the default
                mTLS endpoint if client SSL credentials are present). However,
the ``api_endpoint`` property takes precedence if provided.
(2) The ``client_cert_source`` property is used to provide client
SSL credentials for mutual TLS transport. If not provided, the
default SSL credentials will be used if present.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = WebhooksClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def list_webhooks(
self,
request: webhook.ListWebhooksRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListWebhooksAsyncPager:
r"""Returns the list of all webhooks in the specified
agent.
Args:
request (:class:`~.webhook.ListWebhooksRequest`):
The request object. The request message for
[Webhooks.ListWebhooks][google.cloud.dialogflow.cx.v3beta1.Webhooks.ListWebhooks].
parent (:class:`str`):
Required. The agent to list all webhooks for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.pagers.ListWebhooksAsyncPager:
The response message for
[Webhooks.ListWebhooks][google.cloud.dialogflow.cx.v3beta1.Webhooks.ListWebhooks].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([parent]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = webhook.ListWebhooksRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_webhooks,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListWebhooksAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def get_webhook(
self,
request: webhook.GetWebhookRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> webhook.Webhook:
r"""Retrieves the specified webhook.
Args:
request (:class:`~.webhook.GetWebhookRequest`):
The request object. The request message for
[Webhooks.GetWebhook][google.cloud.dialogflow.cx.v3beta1.Webhooks.GetWebhook].
name (:class:`str`):
Required. The name of the webhook. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/webhooks/<Webhook ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.webhook.Webhook:
Webhooks host the developer's
business logic. During a session,
webhooks allow the developer to use the
data extracted by Dialogflow's natural
language processing to generate dynamic
responses, validate collected data, or
trigger actions on the backend.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = webhook.GetWebhookRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_webhook,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def create_webhook(
self,
request: gcdc_webhook.CreateWebhookRequest = None,
*,
parent: str = None,
webhook: gcdc_webhook.Webhook = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcdc_webhook.Webhook:
r"""Creates a webhook in the specified agent.
Args:
request (:class:`~.gcdc_webhook.CreateWebhookRequest`):
The request object. The request message for
[Webhooks.CreateWebhook][google.cloud.dialogflow.cx.v3beta1.Webhooks.CreateWebhook].
parent (:class:`str`):
Required. The agent to create a webhook for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
webhook (:class:`~.gcdc_webhook.Webhook`):
Required. The webhook to create.
This corresponds to the ``webhook`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.gcdc_webhook.Webhook:
Webhooks host the developer's
business logic. During a session,
webhooks allow the developer to use the
data extracted by Dialogflow's natural
language processing to generate dynamic
responses, validate collected data, or
trigger actions on the backend.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([parent, webhook]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = gcdc_webhook.CreateWebhookRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if webhook is not None:
request.webhook = webhook
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_webhook,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def update_webhook(
self,
request: gcdc_webhook.UpdateWebhookRequest = None,
*,
webhook: gcdc_webhook.Webhook = None,
update_mask: field_mask.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcdc_webhook.Webhook:
r"""Updates the specified webhook.
Args:
request (:class:`~.gcdc_webhook.UpdateWebhookRequest`):
The request object. The request message for
[Webhooks.UpdateWebhook][google.cloud.dialogflow.cx.v3beta1.Webhooks.UpdateWebhook].
webhook (:class:`~.gcdc_webhook.Webhook`):
Required. The webhook to update.
This corresponds to the ``webhook`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`~.field_mask.FieldMask`):
The mask to control which fields get
updated. If the mask is not present, all
fields will be updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.gcdc_webhook.Webhook:
Webhooks host the developer's
business logic. During a session,
webhooks allow the developer to use the
data extracted by Dialogflow's natural
language processing to generate dynamic
responses, validate collected data, or
trigger actions on the backend.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([webhook, update_mask]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = gcdc_webhook.UpdateWebhookRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if webhook is not None:
request.webhook = webhook
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_webhook,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("webhook.name", request.webhook.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def delete_webhook(
self,
request: webhook.DeleteWebhookRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the specified webhook.
Args:
request (:class:`~.webhook.DeleteWebhookRequest`):
The request object. The request message for
[Webhooks.DeleteWebhook][google.cloud.dialogflow.cx.v3beta1.Webhooks.DeleteWebhook].
name (:class:`str`):
Required. The name of the webhook to delete. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/webhooks/<Webhook ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = webhook.DeleteWebhookRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_webhook,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflowcx",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("WebhooksAsyncClient",)
| 40.480392
| 106
| 0.619908
|
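A hedged usage sketch of the async client above; the project and agent IDs are placeholders and application-default credentials are assumed:
# Illustrative only.
import asyncio

async def main():
    client = WebhooksAsyncClient()
    parent = 'projects/my-project/locations/global/agents/my-agent-id'
    pager = await client.list_webhooks(parent=parent)
    async for hook in pager:   # the pager resolves additional pages automatically
        print(hook.name, hook.display_name)

asyncio.run(main())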
3781b076ca8e4b9dddaeb4fb9eedce2b0047e240
| 5,400
|
py
|
Python
|
AugmentText/augment_translate/translate_web/translate_google.py
|
liruifeng-01/nlp_xiaojiang
|
1bbc0209c58c13861157ce6d395f8995102708cd
|
[
"MIT"
] | 3
|
2021-01-30T14:24:59.000Z
|
2021-11-30T10:02:00.000Z
|
AugmentText/augment_translate/translate_web/translate_google.py
|
liruifeng-01/nlp_xiaojiang
|
1bbc0209c58c13861157ce6d395f8995102708cd
|
[
"MIT"
] | null | null | null |
AugmentText/augment_translate/translate_web/translate_google.py
|
liruifeng-01/nlp_xiaojiang
|
1bbc0209c58c13861157ce6d395f8995102708cd
|
[
"MIT"
] | 2
|
2021-01-18T10:07:20.000Z
|
2022-01-12T10:09:47.000Z
|
# -*- coding: UTF-8 -*-
# !/usr/bin/python
# @Time :2019/3/21 14:30
# @author :Mo
# @function : back-translation via Google Translate, simulating google token access
import logging as logger
import urllib.parse as parse
import execjs
import requests
from nlp_xiaojiang.conf.augment_constant import language_short_google
from nlp_xiaojiang.utils.text_tools import judge_translate_english
class GoogleToken:
def __init__(self):
self.ctx = execjs.compile("""
function TL(a) {
var k = "";
var b = 406644;
var b1 = 3293161072;
var jd = ".";
var $b = "+-a^+6";
var Zb = "+-3^+b+-f";
for (var e = [], f = 0, g = 0; g < a.length; g++) {
var m = a.charCodeAt(g);
128 > m ? e[f++] = m : (2048 > m ? e[f++] = m >> 6 | 192 : (55296 == (m & 64512) && g + 1 < a.length && 56320 == (a.charCodeAt(g + 1) & 64512) ? (m = 65536 + ((m & 1023) << 10) + (a.charCodeAt(++g) & 1023),
e[f++] = m >> 18 | 240,
e[f++] = m >> 12 & 63 | 128) : e[f++] = m >> 12 | 224,
e[f++] = m >> 6 & 63 | 128),
e[f++] = m & 63 | 128)
}
a = b;
for (f = 0; f < e.length; f++) a += e[f],
a = RL(a, $b);
a = RL(a, Zb);
a ^= b1 || 0;
0 > a && (a = (a & 2147483647) + 2147483648);
a %= 1E6;
return a.toString() + jd + (a ^ b)
};
function RL(a, b) {
var t = "a";
var Yb = "+";
for (var c = 0; c < b.length - 2; c += 3) {
var d = b.charAt(c + 2),
d = d >= t ? d.charCodeAt(0) - 87 : Number(d),
d = b.charAt(c + 1) == Yb ? a >>> d: a << d;
a = b.charAt(c) == Yb ? a + d & 4294967295 : a ^ d
}
return a
}
""")
def get_google_token(self, text):
"""
        Get the google access token for the given text
:param text: str, input sentence
:return:
"""
return self.ctx.call("TL", text)
def open_url(url):
"""
    Add a browser User-Agent header and request the url
    :param url: str, target url
    :return: requests.Response returned by the target url
"""
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}
req = requests.get(url=url, headers=headers)
# print('req.txt:')
# print(req.text.encode('gbk', 'ignore').decode('gbk'))
return req # .content.decode('utf-8')
def max_length(content):
"""
    Return the cut-off length if the content is too long to translate, else None
    :param content: str, text to translate
:return:
"""
if len(content) > 4891:
logger.info("翻译文本超过限制!")
return 4891
else:
return None
def translate_result(result):
"""
    Strip irrelevant parts and join the translated segments
:param result: str
:return: str
"""
result_last = ''
for res in result[0]:
if res[0]:
result_last += res[0]
return result_last
def any_to_any_translate(content, from_='zh-CN', to_='en'):
"""
    Translate between user-chosen languages
    :param content: str, user input, at most 4891 characters
:param from_: str, original language
:param to_: str, target language
:return: str, result of translate
"""
max_len = max_length(content)
if max_len:
content = content[0:max_len]
    # NOTE: relies on the module-level GoogleToken() instance `google_tokn`
    # created in the __main__ block below; construct one first when importing
    # these helpers from another module.
    tk = google_tokn.get_google_token(content)
content = parse.quote(content)
url = "http://translate.google.cn/translate_a/single?client=t&sl={0}&tl={1}" \
"&hl=zh-CN&dt=at&dt=bd&dt=ex&dt=ld&dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&" \
"ie=UTF-8&oe=UTF-8&source=btn&ssel=3&tsel=3&kc=0&tk={2}&q={3}".format(from_, to_, tk, content)
result = open_url(url)
result_json = result.json()
res = translate_result(result_json)
return res
def any_to_any_translate_back(content, from_='zh-CN', to_='en'):
"""
    Round-trip (back) translation, e.g. zh -> en -> zh
    :param content: str, user input, at most 4891 characters
:param from_: str, original language
:param to_: str, target language
:return: str, result of translate
"""
translate_content = any_to_any_translate(content, from_=from_, to_=to_)
result = any_to_any_translate(translate_content, from_=to_, to_=from_)
return result
if __name__ == '__main__':
google_tokn = GoogleToken()
while True:
# sen_org = "过路蜻蜓喜欢口袋巧克力,这是什么意思"
sen_org = "此外,李奇霖还认为,MLF期限是6个月,逆回购是7天,考虑到外汇占款流出的是长期限流动性," \
"因此,无论哪一种货币投放模式都无法替代降准,降准的期限理论上是“无穷期”的。" \
"从资金利率看,MLF资金利率在3.35%,比起降准释放的“无成本”流动性仍然偏高," \
"经济下行压力之下,实体能提供的高收益资产有限,较高的资金利率能否缓解外汇占款对信用派生的收缩作用,也是有疑虑的。" \
"“等汇率端稍见稳定后,我们能看到降准的出现,幅度约为100BP,时点预计在9月上旬。"
for language_short_google_one in language_short_google:
text_translate = any_to_any_translate_back(sen_org, from_='zh', to_=language_short_google_one)
judge = judge_translate_english(sen_org, text_translate)
if judge:
print(language_short_google_one + " " + "True")
print(text_translate.encode('gbk', 'ignore').decode('gbk'))
else:
print(language_short_google_one + " " + "False")
print(text_translate.encode('gbk', 'ignore').decode('gbk'))
    # test results
# en False
# 我喜欢口袋巧克力,这是什么意思?
# fr False
# 我喜欢口袋巧克力,这是什么意思?
# ru False
# 我喜欢口袋糖果,这是什么意思?
# de False
# 我喜欢袋巧克力,这是什么意思?
| 31.764706
| 219
| 0.533333
|
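A hedged round-trip sketch using the helpers above; it needs network access, the execjs and requests dependencies, and the module-level google_tokn instance noted inside any_to_any_translate:
# Illustrative only.
google_tokn = GoogleToken()
augmented = any_to_any_translate_back('The weather is nice today.', from_='en', to_='de')
print(augmented)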
749110ce1c8a7dce1970ca724dcd0a27d5b206d4
| 1,473
|
py
|
Python
|
caravel_acorn_prng/test_acorn_prng.py
|
ZhenleC/acorn_prng
|
5f7d3e5d0fcc9ffc3845dd7e97f55219ebd112ec
|
[
"Apache-2.0"
] | 1
|
2022-03-22T13:39:51.000Z
|
2022-03-22T13:39:51.000Z
|
caravel_acorn_prng/test_acorn_prng.py
|
ZhenleC/acorn_prng
|
5f7d3e5d0fcc9ffc3845dd7e97f55219ebd112ec
|
[
"Apache-2.0"
] | null | null | null |
caravel_acorn_prng/test_acorn_prng.py
|
ZhenleC/acorn_prng
|
5f7d3e5d0fcc9ffc3845dd7e97f55219ebd112ec
|
[
"Apache-2.0"
] | 1
|
2022-03-21T10:56:21.000Z
|
2022-03-21T10:56:21.000Z
|
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import Timer, RisingEdge, FallingEdge, ClockCycles, with_timeout
async def until_signal_has_value(clk, sig, value):
while True:
await RisingEdge(clk)
if sig.value == value:
return
@cocotb.test()
async def test_acorn_prng(dut):
"""Try accessing the design."""
clock = Clock(dut.clk, 25, units="ns") # 40M
cocotb.fork(clock.start())
dut.RSTB <= 0
    dut.power1 <= 0
    dut.power2 <= 0
    dut.power3 <= 0
    dut.power4 <= 0
    await ClockCycles(dut.clk, 8)
    dut.power1 <= 1
    await ClockCycles(dut.clk, 8)
    dut.power2 <= 1
    await ClockCycles(dut.clk, 8)
    dut.power3 <= 1
    await ClockCycles(dut.clk, 8)
    dut.power4 <= 1
await ClockCycles(dut.clk, 80)
dut.RSTB <= 1
await RisingEdge(dut.uut.mprj.wrapped_acorn_prng_9.acorn_prng0.reset_out)
await FallingEdge(dut.uut.mprj.wrapped_acorn_prng_9.acorn_prng0.reset_out)
for cycle in range(1):
dut.load.value = 1
dut.select.value = 0
dut.clk.value = 0
await Timer(1, units="ns")
dut.clk.value = 1
await Timer(1, units="ns")
for cycle in range(300):
dut.load.value = 0
dut.clk.value = 0
await Timer(1, units="ns")
dut.clk.value = 1
await Timer(1, units="ns")
| 21.661765
| 85
| 0.572301
|
441ea87fa2e538a2cf98dd706bc2d3560bb8d8fb
| 8,950
|
py
|
Python
|
backend/core/users/userManager.py
|
konglomerat/roseguarden
|
781d2ec1c3af3791a0694c17de14bf01a1a20337
|
[
"MIT"
] | null | null | null |
backend/core/users/userManager.py
|
konglomerat/roseguarden
|
781d2ec1c3af3791a0694c17de14bf01a1a20337
|
[
"MIT"
] | 50
|
2021-03-28T03:06:19.000Z
|
2021-10-18T12:36:16.000Z
|
backend/core/users/userManager.py
|
konglomerat/roseguarden
|
781d2ec1c3af3791a0694c17de14bf01a1a20337
|
[
"MIT"
] | 1
|
2021-07-30T07:12:46.000Z
|
2021-07-30T07:12:46.000Z
|
"""
The roseguarden project
Copyright (C) 2018-2020 Marcus Drobisch,
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
__authors__ = ["Marcus Drobisch"]
__contact__ = "roseguarden@fabba.space"
__credits__ = []
__license__ = "GPLv3"
import secrets
import datetime
import hashlib
import arrow
from flask_jwt_extended import create_access_token, create_refresh_token
from core.logs import logManager
from core.common.checksum import crc8
from core.workspaces.workspaceHooks import WorkspaceHooks
class UserManager(object):
""" The UserManager ...
"""
    def __init__(self):
        # preparation to instantiate
        pass
def init_manager(self, app, db, workspaceManager, config):
self.config = config
self.app = app
self.db = db
self.workspaceManager = workspaceManager
self.pinAttemptLimit = 6
logManager.info("UserManager initialized")
from core.users.models import User, Authenticator
self.user = User
self.authenticator_request = Authenticator
self.user_authenticator_cache = {}
def removeUser(self, email):
u = self.user.query.filter_by(email=email).first()
if u is not None:
self.workspaceManager.triggerWorkspaceHooks(WorkspaceHooks.REMOVEUSER, user=u)
self.db.session.delete(u)
self.db.session.commit()
def registerUser(self, userdata):
if self.checkUserExist(userdata['email']):
return None
else:
u = self.user(email=userdata['email'].strip().lower(), password=userdata['password'], isAdmin=False)
if 'firstname' in userdata:
u.firstname = userdata['firstname']
if 'lastname' in userdata:
u.lastname = userdata['lastname']
if 'organization' in userdata:
u.organization = userdata['organization']
self.workspaceManager.triggerWorkspaceHooks(WorkspaceHooks.CREATEUSER, user=u)
self.db.session.add(u)
self.db.session.commit()
return u
def updateUserPassword(self, user, newpassword):
user.password = newpassword
def updateAccessToken(self, username):
session_expiration_minutes = self.config['SYSTEM'].get('session_expiration_minutes', 15)
exp_delta = datetime.timedelta(minutes=session_expiration_minutes)
access_token = create_access_token(identity=username, expires_delta=exp_delta)
create_refresh_token(identity=username)
return access_token
def getAuthenticatorPublicKeyOrDefault(self, authenticator_private_key, authenticator_public_key):
# for empty public keys use the default scenario to create one out of the private key
if authenticator_public_key is None or authenticator_public_key == "":
key = "SERVERGENERATED:CRC8:" + str(crc8(bytearray(authenticator_private_key.encode())))
return key
else:
return authenticator_public_key
def createUserAuthenticatorRequest(self,
authenticator_private_key,
authenticator_public_key,
authenticator_type,
validity_type,
code_send_by,
code_send_to,
expire_days=3):
token = secrets.token_hex(6)
code = ':'.join(a + b for a, b in zip(token[::2], token[1::2])).upper()
a = self.authenticator_request()
a.authenticator_type = authenticator_type
a.validity_type = validity_type
a.expire_date = arrow.utcnow().shift(days=expire_days)
a.created_date = arrow.utcnow()
a.code = code
a.code_send_by = code_send_by
a.code_send_to = code_send_to
a.authenticator = authenticator_private_key
a.authenticator_public_key = self.getAuthenticatorPublicKeyOrDefault(authenticator_private_key,
authenticator_public_key)
self.db.session.add(a)
self.db.session.commit()
return code
def get_user_by_authenticator(self, authenticator_private_key, authenticator_public_key):
# the hash are stored sha512-encrypted in the volatile cache (stored in the volatile memory / RAM)
h = hashlib.sha512(authenticator_private_key.encode("utf8"))
secret_hash = str(h.hexdigest())
# check if the hash is in the volatile volatile cache
if secret_hash in self.user_authenticator_cache:
user_mail = self.user_authenticator_cache[secret_hash]
u = self.user.query.filter_by(email=user_mail).first()
if u is not None:
if u.checkAuthenticator(authenticator_private_key) is True:
return u
# get the public key from the private key. This will generate a public key
# with a default algorithm (setuped) if needed.
public_key = self.getAuthenticatorPublicKeyOrDefault(authenticator_private_key, authenticator_public_key)
# get all users with the corresponding public key
user_list = self.user.query.filter(self.user.authenticator_public_key == public_key).all()
# if no user with the given public key found,
# get all users with no or empty public key
if len(user_list) == 0:
            user_list = self.user.query.filter((self.user.authenticator_public_key == "")
                                               | (self.user.authenticator_public_key.is_(None))).all()
# iterate through the users list, contains one of the following:
# - a list of all users with the corresponding public key
# - (if not found) a list of all users without / empty public key
for u in user_list:
# save the time consuming authenticator check for users in volatile cache
if u.email in self.user_authenticator_cache.values():
continue
# check the private key against the users authenticator
if u.checkAuthenticator(authenticator_private_key) is True:
# if found store the key in the volatile cache
self.user_authenticator_cache[secret_hash] = u.email
# if the public key is empty set a default public key out of the private key
if u.authenticator_public_key == "" or u.authenticator_public_key is None:
u.authenticator_public_key = self.getAuthenticatorPublicKeyOrDefault(
authenticator_private_key, u.authenticator_public_key)
return u
# no user found for the given private key
return None
def checkUserAuthenticatorExists(self, authenticator_private_key, authenticator_public_key):
user = self.get_user_by_authenticator(authenticator_private_key, authenticator_public_key)
if user is None:
return False
else:
return True
def getUser(self, email):
if email is None:
return None
return self.user.query.filter_by(email=email.strip().lower()).first()
def checkUserExist(self, email):
user = self.user.query.filter_by(email=email.strip().lower()).first()
if user is None:
return False
else:
return True
def getUserRemainingPinAttempts(self, email):
user = self.user.query.filter_by(email=email).first()
remaining = self.pinAttemptLimit - user.failedPinAttempts
if remaining > 0:
return remaining
else:
return 0
def checkUserPin(self, email, plaintext_pin):
user = self.user.query.filter_by(email=email).first()
if user.pinIsLocked is True:
return False
if user.checkPin(plaintext_pin) is True:
user.failedPinAttempts = 0
self.db.session.commit()
return True
else:
user.failedPinAttempts = user.failedPinAttempts + 1
if user.failedPinAttempts >= self.pinAttemptLimit:
user.pinIsLocked = True
self.db.session.commit()
return False
def checkUserPassword(self, username, password):
pass
| 42.619048
| 113
| 0.644581
|
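A hedged sketch of registering a user through the manager above; the userManager instance and its init_manager() wiring (app, db, workspaceManager, config) are assumed to exist elsewhere in roseguarden:
# Illustrative only: `userManager` is assumed to be an initialized UserManager.
new_user = userManager.registerUser({
    'email': 'jane.doe@example.org',
    'password': 'a-strong-password',
    'firstname': 'Jane',
    'lastname': 'Doe',
})
if new_user is None:
    print('a user with that e-mail address already exists')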
737b534efd34159081204e522f3fa6229beb65ff
| 3,315
|
py
|
Python
|
rummage/lib/gui/controls/collapsible_pane.py
|
facelessuser/Rummage
|
74f0ce1b078eef40c3ba683dbc4638112f3b9bb7
|
[
"MIT"
] | 55
|
2015-02-15T08:17:55.000Z
|
2022-03-11T11:55:39.000Z
|
rummage/lib/gui/controls/collapsible_pane.py
|
facelessuser/Rummage
|
74f0ce1b078eef40c3ba683dbc4638112f3b9bb7
|
[
"MIT"
] | 264
|
2015-01-29T20:27:40.000Z
|
2022-03-03T04:08:48.000Z
|
rummage/lib/gui/controls/collapsible_pane.py
|
facelessuser/Rummage
|
74f0ce1b078eef40c3ba683dbc4638112f3b9bb7
|
[
"MIT"
] | 12
|
2017-08-30T22:54:20.000Z
|
2022-03-21T01:05:50.000Z
|
"""Custom collapsible pane."""
import wx
import wx.lib.agw.pycollapsiblepane as pycollapse
import wx.lib.buttons as buttons
from .. import data
class CollapsiblePane(pycollapse.PyCollapsiblePane):
"""Custom collapsible pane."""
def __init__(
self, parent, id=wx.ID_ANY, label="", pos=wx.DefaultPosition, # noqa: A002
size=wx.DefaultSize, agwStyle=wx.CP_DEFAULT_STYLE
):
"""Initialize."""
super().__init__(
parent, id, label, pos, size, 0, agwStyle
)
btn = CollapseButton(self, label)
self.SetButton(btn)
btn.Bind(wx.EVT_CHAR_HOOK, self.on_tab)
def SetBackgroundColour(self, color):
"""Set background color."""
super().SetBackgroundColour(color)
self._pButton.SetBackgroundColour(color)
def AcceptsFocus(self):
"""
Check if we should accept focus.
We should never accept focus.
"""
return False
def on_focus(self, event):
"""Focus."""
self._pButton.SetFocus()
def on_tab(self, event):
"""Handle tab."""
if event.GetUnicodeKey() == wx.WXK_TAB:
if event.ShiftDown():
self.Navigate(False)
else:
self.NavigateIn()
def workaround(self):
"""Apply workaround for macOS."""
self.GetPane().AcceptsFocus = self.AcceptsFocus
self.GetPane().GetSizer().GetItem(0).GetWindow().Bind(
wx.EVT_SET_FOCUS, self.on_focus
)
def GetBtnLabel(self):
"""Returns the button label."""
return self.GetLabel()
def Collapse(self, collapse=True):
"""Collapse."""
self._pButton.SetToggle(collapse)
super().Collapse(collapse)
class CollapseButton(buttons.GenBitmapTextToggleButton):
"""Custom button."""
labelDelta = 0 # noqa: N815
def __init__(self, parent, label):
"""Initialization."""
super().__init__(
parent, -1, bitmap=data.get_bitmap('arrow_down.png'), label=label,
style=wx.BORDER_NONE | wx.BU_EXACTFIT | wx.TAB_TRAVERSAL
)
self.Bind(wx.EVT_SYS_COLOUR_CHANGED, self.on_color_change)
self.set_colors()
self.SetUseFocusIndicator(True)
def on_color_change(self, event):
"""On color change."""
self.set_colors()
if event:
event.Skip()
def set_colors(self):
"""On color change."""
self.SetBackgroundColour(self.GetParent().GetBackgroundColour())
self.init_collapse_arrow()
def init_collapse_arrow(self):
"""Initialize collapse arrow."""
color = data.RGBA(self.GetForegroundColour().Get()[:3])
self.SetBitmapLabel(data.get_bitmap('arrow_down.png', tint=color, alpha=0.5))
self.SetBitmapSelected(data.get_bitmap('arrow_right.png', tint=color, alpha=0.5))
def SetForegroundColour(self, color):
"""Set foreground color."""
        super().SetForegroundColour(color)
self.init_collapse_arrow()
def InitColours(self):
"""Calculate a new set of highlight and shadow colours."""
face = self.GetBackgroundColour()
self.faceDnClr = face
self.shadowPenClr = face
self.highlightPenClr = face
self.focusClr = face
| 26.733871
| 89
| 0.61086
|
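A hedged sketch of hosting the pane above in a plain frame; it assumes the rummage data module (arrow bitmaps) is importable, so treat it as illustrative only:
# Illustrative only.
app = wx.App(False)
frame = wx.Frame(None, title='collapsible pane demo')
pane = CollapsiblePane(frame, label='Advanced options')
wx.StaticText(pane.GetPane(), label='pane content goes here')
pane.Collapse(False)
frame.Show()
app.MainLoop()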
e00d7204a793bcd3010f51fa44d01b4bdc4b6647
| 26,456
|
py
|
Python
|
homeassistant/config_entries.py
|
pancho-villa/home-assistant
|
ab17b22239452671f14067571f22aadb9688a3de
|
[
"Apache-2.0"
] | 1
|
2019-03-21T14:59:31.000Z
|
2019-03-21T14:59:31.000Z
|
homeassistant/config_entries.py
|
pancho-villa/home-assistant
|
ab17b22239452671f14067571f22aadb9688a3de
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/config_entries.py
|
pancho-villa/home-assistant
|
ab17b22239452671f14067571f22aadb9688a3de
|
[
"Apache-2.0"
] | 1
|
2022-02-20T07:41:14.000Z
|
2022-02-20T07:41:14.000Z
|
"""The Config Manager is responsible for managing configuration for components.
The Config Manager allows for creating config entries to be consumed by
components. Each entry is created via a Config Flow Handler, as defined by each
component.
During startup, Home Assistant will set up the entries during the normal setup
of a component. It will first call the normal setup and then call the method
`async_setup_entry(hass, entry)` for each entry. The same method is called when
Home Assistant is running while a config entry is created. If the version of
the config entry does not match that of the flow handler, setup will
call the method `async_migrate_entry(hass, entry)` with the expectation that
the entry be brought to the current version. Return `True` to indicate
migration was successful, otherwise `False`.
## Config Flows
A component needs to define a Config Handler to allow the user to create config
entries for that component. A config flow will manage the creation of entries
from user input, discovery or other sources (like hassio).
When a config flow is started for a domain, the handler will be instantiated
and receives a unique id. The instance of this handler will be reused for every
interaction of the user with this flow. This makes it possible to store
instance variables on the handler.
Before instantiating the handler, Home Assistant will make sure to load all
dependencies and install the requirements of the component.
At a minimum, each config flow will have to define a version number and the
'user' step.
@config_entries.HANDLERS.register(DOMAIN)
class ExampleConfigFlow(config_entries.ConfigFlow):
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
async def async_step_user(self, user_input=None):
…
The 'user' step is the first step of a flow and is called when a user
starts a new flow. Each step has three different possible results: "Show Form",
"Abort" and "Create Entry".
> Note: prior to 0.76, the default step was the 'init' step; some config flows
still keep the 'init' step to avoid breaking localization. All new config flows
should use the 'user' step.
### Show Form
This will show a form to the user to fill in. You define the current step,
a title, a description and the schema of the data that needs to be returned.
async def async_step_init(self, user_input=None):
# Use OrderedDict to guarantee order of the form shown to the user
data_schema = OrderedDict()
data_schema[vol.Required('username')] = str
data_schema[vol.Required('password')] = str
return self.async_show_form(
step_id='user',
title='Account Info',
data_schema=vol.Schema(data_schema)
)
After the user has filled in the form, the step method will be called again and
the user input is passed in. If the validation of the user input fails, you
can return a dictionary with errors. Each key in the dictionary refers to a
field name that contains the error. Use the key 'base' if you want to show a
generic error.
async def async_step_init(self, user_input=None):
errors = None
if user_input is not None:
# Validate user input
if valid:
return self.create_entry(…)
errors['base'] = 'Unable to reach authentication server.'
return self.async_show_form(…)
If the user input passes validation, you can again return one of the three
return values. If you want to navigate the user to the next step, return the
return value of that step:
return await self.async_step_account()
### Abort
When the result is "Abort", a message will be shown to the user and the
configuration flow is finished.
return self.async_abort(
reason='This device is not supported by Home Assistant.'
)
### Create Entry
When the result is "Create Entry", an entry will be created and stored in Home
Assistant, a success message is shown to the user and the flow is finished.
## Initializing a config flow from an external source
You might want to initialize a config flow programmatically. For example, if
we discover a device on the network that requires user interaction to finish
setup. To do so, pass a source parameter and optional user input to the init
method:
await hass.config_entries.flow.async_init(
'hue', context={'source': 'discovery'}, data=discovery_info)
The config flow handler will need to add a step to support the source. The step
should follow the same return values as a normal step.
async def async_step_discovery(info):
If the result of the step is to show a form, the user will be able to continue
the flow from the config panel.
"""
import asyncio
import logging
import functools
import uuid
from typing import Callable, Dict, List, Optional, Set # noqa pylint: disable=unused-import
import weakref
from homeassistant import data_entry_flow
from homeassistant.core import callback, HomeAssistant
from homeassistant.exceptions import HomeAssistantError, ConfigEntryNotReady
from homeassistant.setup import async_setup_component, async_process_deps_reqs
from homeassistant.util.decorator import Registry
_LOGGER = logging.getLogger(__name__)
_UNDEF = object()
SOURCE_USER = 'user'
SOURCE_DISCOVERY = 'discovery'
SOURCE_IMPORT = 'import'
HANDLERS = Registry()
# Components that have config flows. In future we will auto-generate this list.
FLOWS = [
'ambient_station',
'cast',
'daikin',
'deconz',
'dialogflow',
'esphome',
'emulated_roku',
'geofency',
'gpslogger',
'hangouts',
'homematicip_cloud',
'hue',
'ifttt',
'ios',
'ipma',
'lifx',
'locative',
'luftdaten',
'mailgun',
'mobile_app',
'mqtt',
'nest',
'openuv',
'owntracks',
'point',
'ps4',
'rainmachine',
'simplisafe',
'smartthings',
'smhi',
'sonos',
'tellduslive',
'toon',
'tplink',
'tradfri',
'twilio',
'unifi',
'upnp',
'zha',
'zone',
'zwave',
]
STORAGE_KEY = 'core.config_entries'
STORAGE_VERSION = 1
# Deprecated since 0.73
PATH_CONFIG = '.config_entries.json'
SAVE_DELAY = 1
# The config entry has been set up successfully
ENTRY_STATE_LOADED = 'loaded'
# There was an error while trying to set up this config entry
ENTRY_STATE_SETUP_ERROR = 'setup_error'
# There was an error while trying to migrate the config entry to a new version
ENTRY_STATE_MIGRATION_ERROR = 'migration_error'
# The config entry was not ready to be set up yet, but might be later
ENTRY_STATE_SETUP_RETRY = 'setup_retry'
# The config entry has not been loaded
ENTRY_STATE_NOT_LOADED = 'not_loaded'
# An error occurred when trying to unload the entry
ENTRY_STATE_FAILED_UNLOAD = 'failed_unload'
UNRECOVERABLE_STATES = (
ENTRY_STATE_MIGRATION_ERROR,
ENTRY_STATE_FAILED_UNLOAD,
)
DISCOVERY_NOTIFICATION_ID = 'config_entry_discovery'
DISCOVERY_SOURCES = (
SOURCE_DISCOVERY,
SOURCE_IMPORT,
)
EVENT_FLOW_DISCOVERED = 'config_entry_discovered'
CONN_CLASS_CLOUD_PUSH = 'cloud_push'
CONN_CLASS_CLOUD_POLL = 'cloud_poll'
CONN_CLASS_LOCAL_PUSH = 'local_push'
CONN_CLASS_LOCAL_POLL = 'local_poll'
CONN_CLASS_ASSUMED = 'assumed'
CONN_CLASS_UNKNOWN = 'unknown'
class ConfigError(HomeAssistantError):
"""Error while configuring an account."""
class UnknownEntry(ConfigError):
"""Unknown entry specified."""
class OperationNotAllowed(ConfigError):
"""Raised when a config entry operation is not allowed."""
class ConfigEntry:
"""Hold a configuration entry."""
__slots__ = ('entry_id', 'version', 'domain', 'title', 'data', 'options',
'source', 'connection_class', 'state', '_setup_lock',
'update_listeners', '_async_cancel_retry_setup')
def __init__(self, version: int, domain: str, title: str, data: dict,
source: str, connection_class: str,
options: Optional[dict] = None,
entry_id: Optional[str] = None,
state: str = ENTRY_STATE_NOT_LOADED) -> None:
"""Initialize a config entry."""
# Unique id of the config entry
self.entry_id = entry_id or uuid.uuid4().hex
# Version of the configuration.
self.version = version
# Domain the configuration belongs to
self.domain = domain
# Title of the configuration
self.title = title
# Config data
self.data = data
# Entry options
self.options = options or {}
# Source of the configuration (user, discovery, cloud)
self.source = source
# Connection class
self.connection_class = connection_class
# State of the entry (LOADED, NOT_LOADED)
self.state = state
# Listeners to call on update
self.update_listeners = [] # type: list
# Function to cancel a scheduled retry
self._async_cancel_retry_setup = None
async def async_setup(
self, hass: HomeAssistant, *, component=None, tries=0) -> None:
"""Set up an entry."""
if component is None:
component = getattr(hass.components, self.domain)
# Perform migration
if component.DOMAIN == self.domain:
if not await self.async_migrate(hass):
self.state = ENTRY_STATE_MIGRATION_ERROR
return
try:
result = await component.async_setup_entry(hass, self)
if not isinstance(result, bool):
_LOGGER.error('%s.async_setup_entry did not return boolean',
component.DOMAIN)
result = False
except ConfigEntryNotReady:
self.state = ENTRY_STATE_SETUP_RETRY
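            # Exponential backoff: wait 5, 10, 20, 40 and then a capped 80
            # seconds between successive retries (2 ** min(tries, 4) * 5).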
wait_time = 2**min(tries, 4) * 5
tries += 1
_LOGGER.warning(
'Config entry for %s not ready yet. Retrying in %d seconds.',
self.domain, wait_time)
async def setup_again(now):
"""Run setup again."""
self._async_cancel_retry_setup = None
await self.async_setup(hass, component=component, tries=tries)
self._async_cancel_retry_setup = \
hass.helpers.event.async_call_later(wait_time, setup_again)
return
except Exception: # pylint: disable=broad-except
_LOGGER.exception('Error setting up entry %s for %s',
self.title, component.DOMAIN)
result = False
# Only store setup result as state if it was not forwarded.
if self.domain != component.DOMAIN:
return
if result:
self.state = ENTRY_STATE_LOADED
else:
self.state = ENTRY_STATE_SETUP_ERROR
async def async_unload(self, hass, *, component=None) -> bool:
"""Unload an entry.
        Return True if unloading was possible and successful.
"""
if component is None:
component = getattr(hass.components, self.domain)
if component.DOMAIN == self.domain:
if self.state in UNRECOVERABLE_STATES:
return False
if self.state != ENTRY_STATE_LOADED:
if self._async_cancel_retry_setup is not None:
self._async_cancel_retry_setup()
self._async_cancel_retry_setup = None
self.state = ENTRY_STATE_NOT_LOADED
return True
supports_unload = hasattr(component, 'async_unload_entry')
if not supports_unload:
if component.DOMAIN == self.domain:
self.state = ENTRY_STATE_FAILED_UNLOAD
return False
try:
result = await component.async_unload_entry(hass, self)
assert isinstance(result, bool)
# Only adjust state if we unloaded the component
if result and component.DOMAIN == self.domain:
self.state = ENTRY_STATE_NOT_LOADED
return result
except Exception: # pylint: disable=broad-except
_LOGGER.exception('Error unloading entry %s for %s',
self.title, component.DOMAIN)
if component.DOMAIN == self.domain:
self.state = ENTRY_STATE_FAILED_UNLOAD
return False
async def async_remove(self, hass: HomeAssistant) -> None:
"""Invoke remove callback on component."""
component = getattr(hass.components, self.domain)
if not hasattr(component, 'async_remove_entry'):
return
try:
await component.async_remove_entry(hass, self)
except Exception: # pylint: disable=broad-except
_LOGGER.exception('Error calling entry remove callback %s for %s',
self.title, component.DOMAIN)
async def async_migrate(self, hass: HomeAssistant) -> bool:
"""Migrate an entry.
Returns True if config entry is up-to-date or has been migrated.
"""
handler = HANDLERS.get(self.domain)
if handler is None:
_LOGGER.error("Flow handler not found for entry %s for %s",
self.title, self.domain)
return False
# Handler may be a partial
while isinstance(handler, functools.partial):
handler = handler.func
if self.version == handler.VERSION:
return True
component = getattr(hass.components, self.domain)
supports_migrate = hasattr(component, 'async_migrate_entry')
if not supports_migrate:
_LOGGER.error("Migration handler not found for entry %s for %s",
self.title, self.domain)
return False
try:
result = await component.async_migrate_entry(hass, self)
if not isinstance(result, bool):
_LOGGER.error('%s.async_migrate_entry did not return boolean',
self.domain)
return False
if result:
# pylint: disable=protected-access
hass.config_entries._async_schedule_save() # type: ignore
return result
except Exception: # pylint: disable=broad-except
_LOGGER.exception('Error migrating entry %s for %s',
self.title, component.DOMAIN)
return False
def add_update_listener(self, listener: Callable) -> Callable:
"""Listen for when entry is updated.
Listener: Callback function(hass, entry)
Returns function to unlisten.
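        Illustrative usage (the callback name is an assumption):
            unsub = entry.add_update_listener(async_config_entry_updated)
            unsub()  # stop listening again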
"""
weak_listener = weakref.ref(listener)
self.update_listeners.append(weak_listener)
return lambda: self.update_listeners.remove(weak_listener)
def as_dict(self):
"""Return dictionary version of this entry."""
return {
'entry_id': self.entry_id,
'version': self.version,
'domain': self.domain,
'title': self.title,
'data': self.data,
'options': self.options,
'source': self.source,
'connection_class': self.connection_class,
}
class ConfigEntries:
"""Manage the configuration entries.
An instance of this object is available via `hass.config_entries`.
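    Illustrative lookups (the 'hue' domain is just an example):
        entries = hass.config_entries.async_entries('hue')
        entry = hass.config_entries.async_get_entry(entry_id)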
"""
def __init__(self, hass: HomeAssistant, hass_config: dict) -> None:
"""Initialize the entry manager."""
self.hass = hass
self.flow = data_entry_flow.FlowManager(
hass, self._async_create_flow, self._async_finish_flow)
self.options = OptionsFlowManager(hass)
self._hass_config = hass_config
self._entries = [] # type: List[ConfigEntry]
self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
@callback
def async_domains(self) -> List[str]:
"""Return domains for which we have entries."""
seen = set() # type: Set[str]
result = []
for entry in self._entries:
if entry.domain not in seen:
seen.add(entry.domain)
result.append(entry.domain)
return result
@callback
def async_get_entry(self, entry_id: str) -> Optional[ConfigEntry]:
"""Return entry with matching entry_id."""
for entry in self._entries:
if entry_id == entry.entry_id:
return entry
return None
@callback
def async_entries(self, domain: Optional[str] = None) -> List[ConfigEntry]:
"""Return all entries or entries for a specific domain."""
if domain is None:
return list(self._entries)
return [entry for entry in self._entries if entry.domain == domain]
async def async_remove(self, entry_id):
"""Remove an entry."""
entry = self.async_get_entry(entry_id)
if entry is None:
raise UnknownEntry
if entry.state in UNRECOVERABLE_STATES:
unload_success = entry.state != ENTRY_STATE_FAILED_UNLOAD
else:
unload_success = await self.async_unload(entry_id)
await entry.async_remove(self.hass)
self._entries.remove(entry)
self._async_schedule_save()
dev_reg, ent_reg = await asyncio.gather(
self.hass.helpers.device_registry.async_get_registry(),
self.hass.helpers.entity_registry.async_get_registry(),
)
dev_reg.async_clear_config_entry(entry_id)
ent_reg.async_clear_config_entry(entry_id)
return {
'require_restart': not unload_success
}
async def async_initialize(self) -> None:
"""Initialize config entry config."""
# Migrating for config entries stored before 0.73
config = await self.hass.helpers.storage.async_migrator(
self.hass.config.path(PATH_CONFIG), self._store,
old_conf_migrate_func=_old_conf_migrator
)
if config is None:
self._entries = []
return
self._entries = [
ConfigEntry(
version=entry['version'],
domain=entry['domain'],
entry_id=entry['entry_id'],
data=entry['data'],
source=entry['source'],
title=entry['title'],
# New in 0.79
connection_class=entry.get('connection_class',
CONN_CLASS_UNKNOWN),
# New in 0.89
options=entry.get('options'))
for entry in config['entries']]
async def async_setup(self, entry_id: str) -> bool:
"""Set up a config entry.
Return True if entry has been successfully loaded.
"""
entry = self.async_get_entry(entry_id)
if entry is None:
raise UnknownEntry
if entry.state != ENTRY_STATE_NOT_LOADED:
raise OperationNotAllowed
# Setup Component if not set up yet
if entry.domain in self.hass.config.components:
await entry.async_setup(self.hass)
else:
# Setting up the component will set up all its config entries
result = await async_setup_component(
self.hass, entry.domain, self._hass_config)
if not result:
return result
return entry.state == ENTRY_STATE_LOADED
async def async_unload(self, entry_id: str) -> bool:
"""Unload a config entry."""
entry = self.async_get_entry(entry_id)
if entry is None:
raise UnknownEntry
if entry.state in UNRECOVERABLE_STATES:
raise OperationNotAllowed
return await entry.async_unload(self.hass)
async def async_reload(self, entry_id: str) -> bool:
"""Reload an entry.
        If the entry was not loaded, it will just be loaded.
"""
unload_result = await self.async_unload(entry_id)
if not unload_result:
return unload_result
return await self.async_setup(entry_id)
@callback
def async_update_entry(self, entry, *, data=_UNDEF, options=_UNDEF):
"""Update a config entry."""
if data is not _UNDEF:
entry.data = data
if options is not _UNDEF:
entry.options = options
if data is not _UNDEF or options is not _UNDEF:
for listener_ref in entry.update_listeners:
listener = listener_ref()
self.hass.async_create_task(listener(self.hass, entry))
self._async_schedule_save()
async def async_forward_entry_setup(self, entry, component):
"""Forward the setup of an entry to a different component.
        By default an entry is set up with the component it belongs to. If that
        component also has related platforms, the component will have to
        forward the entry so it can be set up by those components as well.
You don't want to await this coroutine if it is called as part of the
setup of a component, because it can cause a deadlock.
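        An illustrative, non-blocking way to forward from a component's setup
        (the 'light' platform name is an assumption):
            hass.async_create_task(
                hass.config_entries.async_forward_entry_setup(entry, 'light'))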
"""
# Setup Component if not set up yet
if component not in self.hass.config.components:
result = await async_setup_component(
self.hass, component, self._hass_config)
if not result:
return False
await entry.async_setup(
self.hass, component=getattr(self.hass.components, component))
async def async_forward_entry_unload(self, entry, component):
"""Forward the unloading of an entry to a different component."""
# It was never loaded.
if component not in self.hass.config.components:
return True
return await entry.async_unload(
self.hass, component=getattr(self.hass.components, component))
async def _async_finish_flow(self, flow, result):
"""Finish a config flow and add an entry."""
# Remove notification if no other discovery config entries in progress
if not any(ent['context']['source'] in DISCOVERY_SOURCES for ent
in self.hass.config_entries.flow.async_progress()
if ent['flow_id'] != flow.flow_id):
self.hass.components.persistent_notification.async_dismiss(
DISCOVERY_NOTIFICATION_ID)
if result['type'] != data_entry_flow.RESULT_TYPE_CREATE_ENTRY:
return result
entry = ConfigEntry(
version=result['version'],
domain=result['handler'],
title=result['title'],
data=result['data'],
options={},
source=flow.context['source'],
connection_class=flow.CONNECTION_CLASS,
)
self._entries.append(entry)
self._async_schedule_save()
await self.async_setup(entry.entry_id)
result['result'] = entry
return result
async def _async_create_flow(self, handler_key, *, context, data):
"""Create a flow for specified handler.
Handler key is the domain of the component that we want to set up.
"""
component = getattr(self.hass.components, handler_key)
handler = HANDLERS.get(handler_key)
if handler is None:
raise data_entry_flow.UnknownHandler
source = context['source']
# Make sure requirements and dependencies of component are resolved
await async_process_deps_reqs(
self.hass, self._hass_config, handler, component)
# Create notification.
if source in DISCOVERY_SOURCES:
self.hass.bus.async_fire(EVENT_FLOW_DISCOVERED)
self.hass.components.persistent_notification.async_create(
title='New devices discovered',
message=("We have discovered new devices on your network. "
"[Check it out](/config/integrations)"),
notification_id=DISCOVERY_NOTIFICATION_ID
)
flow = handler()
flow.init_step = source
return flow
def _async_schedule_save(self) -> None:
"""Save the entity registry to a file."""
self._store.async_delay_save(self._data_to_save, SAVE_DELAY)
@callback
def _data_to_save(self):
"""Return data to save."""
return {
'entries': [entry.as_dict() for entry in self._entries]
}
async def _old_conf_migrator(old_config):
"""Migrate the pre-0.73 config format to the latest version."""
return {'entries': old_config}
class ConfigFlow(data_entry_flow.FlowHandler):
"""Base class for config flows with some helpers."""
CONNECTION_CLASS = CONN_CLASS_UNKNOWN
@callback
def _async_current_entries(self):
"""Return current entries."""
return self.hass.config_entries.async_entries(self.handler)
@callback
def _async_in_progress(self):
"""Return other in progress flows for current domain."""
return [flw for flw in self.hass.config_entries.flow.async_progress()
if flw['handler'] == self.handler and
flw['flow_id'] != self.flow_id]
class OptionsFlowManager:
"""Flow to set options for a configuration entry."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize the options manager."""
self.hass = hass
self.flow = data_entry_flow.FlowManager(
hass, self._async_create_flow, self._async_finish_flow)
async def _async_create_flow(self, entry_id, *, context, data):
"""Create an options flow for a config entry.
        The entry_id and flow.handler are the same value, which maps the entry to its flow.
"""
entry = self.hass.config_entries.async_get_entry(entry_id)
if entry is None:
return
flow = HANDLERS[entry.domain].async_get_options_flow(
entry.data, entry.options)
return flow
async def _async_finish_flow(self, flow, result):
"""Finish an options flow and update options for configuration entry.
        The flow.handler and entry_id are the same value, which maps the flow to its entry.
"""
entry = self.hass.config_entries.async_get_entry(flow.handler)
if entry is None:
return
self.hass.config_entries.async_update_entry(
entry, options=result['data'])
result['result'] = True
return result
| 33.787995
| 92
| 0.639741
|
5064b00a7ad900dde849d387ba62aad633897e0d
| 2,975
|
py
|
Python
|
homeassistant/components/hlk_sw16/config_flow.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 6
|
2017-08-02T19:26:39.000Z
|
2020-03-14T22:47:41.000Z
|
homeassistant/components/hlk_sw16/config_flow.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 60
|
2020-07-06T15:10:30.000Z
|
2022-03-31T06:01:46.000Z
|
homeassistant/components/hlk_sw16/config_flow.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 14
|
2018-08-19T16:28:26.000Z
|
2021-09-02T18:26:53.000Z
|
"""Config flow for HLK-SW16."""
import asyncio
from hlk_sw16 import create_hlk_sw16_connection
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.core import HomeAssistant
from .const import (
CONNECTION_TIMEOUT,
DEFAULT_KEEP_ALIVE_INTERVAL,
DEFAULT_PORT,
DEFAULT_RECONNECT_INTERVAL,
DOMAIN,
)
from .errors import AlreadyConfigured, CannotConnect
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): str,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): vol.Coerce(int),
}
)
async def connect_client(hass, user_input):
"""Connect the HLK-SW16 client."""
client_aw = create_hlk_sw16_connection(
host=user_input[CONF_HOST],
port=user_input[CONF_PORT],
loop=hass.loop,
timeout=CONNECTION_TIMEOUT,
reconnect_interval=DEFAULT_RECONNECT_INTERVAL,
keep_alive_interval=DEFAULT_KEEP_ALIVE_INTERVAL,
)
return await asyncio.wait_for(client_aw, timeout=CONNECTION_TIMEOUT)
async def validate_input(hass: HomeAssistant, user_input):
"""Validate the user input allows us to connect."""
for entry in hass.config_entries.async_entries(DOMAIN):
if (
entry.data[CONF_HOST] == user_input[CONF_HOST]
and entry.data[CONF_PORT] == user_input[CONF_PORT]
):
raise AlreadyConfigured
try:
client = await connect_client(hass, user_input)
except asyncio.TimeoutError as err:
raise CannotConnect from err
try:
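        # If the device drops the connection while the status request below is
        # pending, fail that pending transaction with CannotConnect so the call
        # raises instead of hanging.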
def disconnect_callback():
if client.in_transaction:
client.active_transaction.set_exception(CannotConnect)
client.disconnect_callback = disconnect_callback
await client.status()
except CannotConnect:
client.disconnect_callback = None
client.stop()
raise
else:
client.disconnect_callback = None
client.stop()
class SW16FlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a HLK-SW16 config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
async def async_step_import(self, user_input):
"""Handle import."""
return await self.async_step_user(user_input)
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
try:
await validate_input(self.hass, user_input)
address = f"{user_input[CONF_HOST]}:{user_input[CONF_PORT]}"
return self.async_create_entry(title=address, data=user_input)
except AlreadyConfigured:
errors["base"] = "already_configured"
except CannotConnect:
errors["base"] = "cannot_connect"
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
| 30.670103
| 78
| 0.675294
|
bc29ed30255ded2e021cdce8189262296f72bcdb
| 1,238
|
py
|
Python
|
tests/core/test_io.py
|
mpaRD/snn_toolbox
|
faadbeb2db98ad04ab37707243d67bae28e8110c
|
[
"MIT"
] | 1
|
2021-05-13T15:39:19.000Z
|
2021-05-13T15:39:19.000Z
|
tests/core/test_io.py
|
mpaRD/snn_toolbox
|
faadbeb2db98ad04ab37707243d67bae28e8110c
|
[
"MIT"
] | null | null | null |
tests/core/test_io.py
|
mpaRD/snn_toolbox
|
faadbeb2db98ad04ab37707243d67bae28e8110c
|
[
"MIT"
] | 1
|
2020-01-14T21:22:18.000Z
|
2020-01-14T21:22:18.000Z
|
# coding=utf-8
import os
import numpy as np
from snntoolbox.datasets.utils import get_dataset
class TestGetDataset:
"""Test obtaining the dataset from disk in correct format."""
def test_get_dataset_from_npz(self, _config):
normset, testset = get_dataset(_config)
assert all([normset, testset])
def test_get_dataset_from_png(self, _config):
try:
import matplotlib.pyplot as plt
except ImportError:
return
datapath = _config.get('paths', 'dataset_path')
classpath = os.path.join(datapath, 'class_0')
os.mkdir(classpath)
data = np.random.random_sample((10, 10, 3))
plt.imsave(os.path.join(classpath, 'image_0.png'), data)
_config.read_dict({
'input': {'dataset_format': 'png',
'dataflow_kwargs': "{'target_size': (11, 12)}",
'datagen_kwargs': "{'rescale': 0.003922,"
" 'featurewise_center': True,"
" 'featurewise_std_normalization':"
" True}"}})
normset, testset = get_dataset(_config)
assert all([normset, testset])
| 32.578947
| 76
| 0.558158
|
309920c428ebe8d4ef33adc78b4e3ca418b2fba0
| 2,313
|
py
|
Python
|
src/drivers/escaper.py
|
iurii-iurii/clickhouse-sqlalchemy
|
bb3d1f9d1573566e7d0b6a14a6cf09369948cc15
|
[
"MIT"
] | null | null | null |
src/drivers/escaper.py
|
iurii-iurii/clickhouse-sqlalchemy
|
bb3d1f9d1573566e7d0b6a14a6cf09369948cc15
|
[
"MIT"
] | null | null | null |
src/drivers/escaper.py
|
iurii-iurii/clickhouse-sqlalchemy
|
bb3d1f9d1573566e7d0b6a14a6cf09369948cc15
|
[
"MIT"
] | null | null | null |
from datetime import date, datetime
from decimal import Decimal
import six
class Escaper(object):
escape_chars = {
"\b": "\\b",
"\f": "\\f",
"\r": "\\r",
"\n": "\\n",
"\t": "\\t",
"\0": "\\0",
"\\": "\\\\",
"'": "\\'"
}
def __init__(self, tz=None, escapers=None):
"""
:param tz: clickhouse server timezone
        :param escapers: functions dict to replace the standard
functions preparing values for ClickHouse
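        Illustrative example (values are placeholders):
            Escaper().escape([date(2019, 1, 1), 42])  # -> ["'2019-01-01'", 42]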
"""
self.tz = tz
self.escapers = [
(bool, self.escape_bool),
(six.integer_types + (float, ), self.escape_number),
(datetime, self.escape_datetime),
(date, self.escape_date),
(Decimal, self.escape_decimal),
(six.string_types, self.escape_string),
]
        if escapers:
            # self.escapers is a list of (type, function) pairs; prepend any
            # overrides so they take precedence in escape_item().
            self.escapers = list(escapers.items()) + self.escapers
def escape_string(self, value):
value = ''.join(self.escape_chars.get(c, c) for c in value)
return "'" + value + "'"
def escape(self, parameters):
if isinstance(parameters, dict):
return {k: self.escape_item(v) for k, v in parameters.items()}
elif isinstance(parameters, (list, tuple)):
return [self.escape_item(x) for x in parameters]
else:
raise Exception("Unsupported param format: {}".format(parameters))
def escape_number(self, item):
return item
def escape_date(self, item):
return self.escape_string(item.strftime('%Y-%m-%d'))
def escape_datetime(self, item):
if item.utcoffset() is not None and self.tz:
item = item.astimezone(self.tz)
return self.escape_string(item.strftime('%Y-%m-%d %H:%M:%S'))
def escape_decimal(self, item):
return float(item)
def escape_bool(self, item):
return '1' if item else '0'
def escape_item(self, item):
if item is None:
return 'NULL'
for _type, func in self.escapers:
if isinstance(item, _type):
return func(item)
if isinstance(item, (list, tuple)):
return [self.escape_item(x) for x in item]
else:
raise Exception("Unsupported object {}".format(item))
| 29.278481
| 78
| 0.548206
|
b29d4b907ad2a9d847a3487d177e58c264f8ac83
| 1,813
|
py
|
Python
|
riotctrl/tests/utils/application/ctrl.py
|
benpicco/riotctrl
|
a3d35354b2977d3f26fb59e65c03c73724c9abe9
|
[
"MIT"
] | 7
|
2020-07-03T15:38:14.000Z
|
2022-03-30T10:49:33.000Z
|
riotctrl/tests/utils/application/ctrl.py
|
benpicco/riotctrl
|
a3d35354b2977d3f26fb59e65c03c73724c9abe9
|
[
"MIT"
] | 26
|
2020-07-07T08:13:46.000Z
|
2021-12-08T08:48:12.000Z
|
riotctrl/tests/utils/application/ctrl.py
|
benpicco/riotctrl
|
a3d35354b2977d3f26fb59e65c03c73724c9abe9
|
[
"MIT"
] | 7
|
2020-07-07T07:12:25.000Z
|
2021-12-07T20:47:19.000Z
|
#! /usr/bin/env python3
"""Wrap an application to behave like a board firmware.
+ Start a command given as argument
+ Handle 'resetting' the firmware when receiving `SIGUSR1`
Ideas for extensions:
* resetting or not on reset
* See how to implement losing some of the output on first startup
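Illustrative invocation (the wrapped command is an assumption):
    python3 ctrl.py ./firmware_application --some-arg
Sending SIGUSR1 to this wrapper terminates and restarts the wrapped command,
emulating a board reset.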
"""
import sys
import signal
import threading
import argparse
import subprocess
PARSER = argparse.ArgumentParser()
PARSER.add_argument("argument", nargs="+", default=[])
# Signals sent by 'pexpect' + SIGTERM
FORWARDED_SIGNALS = (signal.SIGHUP, signal.SIGCONT, signal.SIGINT, signal.SIGTERM)
def forward_signal(signum, proc):
"""Forward signal to child."""
if not proc.poll():
proc.send_signal(signum)
def _run_cmd(args, termonsig=signal.SIGUSR1, **popenkwargs):
"""Run a subprocess of `args`.
It will be terminated on `termonsig` signal.
:param args: command arguments
:param termonsig: terminate the process on `termonsig` signal
:param **popenkwargs: Popen kwargs
:return: True if process should be restarted
"""
restart_process = threading.Event()
proc = subprocess.Popen(args, **popenkwargs)
# Forward cleanup processes to child
for sig in FORWARDED_SIGNALS:
signal.signal(sig, lambda signum, _: forward_signal(signum, proc))
# set 'termonsig' handler for reset
def _reset(*_):
"""Terminate process and set the 'restart_process' flag."""
restart_process.set()
proc.terminate()
signal.signal(termonsig, _reset)
proc.wait()
return restart_process.is_set()
def main():
"""Run an application in a loop.
On 'SIGUSR1' the application will be reset.
"""
args = PARSER.parse_args()
while _run_cmd(args.argument):
pass
if __name__ == "__main__":
sys.exit(main())
| 24.173333
| 82
| 0.692223
|
9c142ec58af59572bd2c6e3a2432fe8bdff6adb0
| 5,158
|
py
|
Python
|
python_modules/libraries/dagster-aws/dagster_aws_tests/emr_tests/test_pyspark.py
|
vatervonacht/dagster
|
595d78c883ef20618052ac1575fe46cde51fd541
|
[
"Apache-2.0"
] | 1
|
2022-02-07T18:07:36.000Z
|
2022-02-07T18:07:36.000Z
|
python_modules/libraries/dagster-aws/dagster_aws_tests/emr_tests/test_pyspark.py
|
vatervonacht/dagster
|
595d78c883ef20618052ac1575fe46cde51fd541
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-aws/dagster_aws_tests/emr_tests/test_pyspark.py
|
vatervonacht/dagster
|
595d78c883ef20618052ac1575fe46cde51fd541
|
[
"Apache-2.0"
] | null | null | null |
import os
import pytest
from dagster_aws.emr import EmrJobRunner, emr_pyspark_resource
from dagster_pyspark import pyspark_resource, pyspark_solid
from moto import mock_emr
from dagster import (
DagsterInvalidDefinitionError,
ModeDefinition,
RunConfig,
execute_pipeline,
pipeline,
)
from dagster.seven import mock
from dagster.utils.test import create_test_pipeline_execution_context
@pyspark_solid
def example_solid(context):
list_p = [('John', 19), ('Jennifer', 29), ('Adam', 35), ('Henry', 50)]
rdd = context.resources.pyspark.spark_context.parallelize(list_p)
res = rdd.take(2)
for name, age in res:
context.log.info('%s: %d' % (name, age))
@pyspark_solid(name='blah', description='this is a test', config={'foo': str, 'bar': int})
def other_example_solid(context):
list_p = [('John', 19), ('Jennifer', 29), ('Adam', 35), ('Henry', 50)]
rdd = context.resources.pyspark.spark_context.parallelize(list_p)
res = rdd.take(2)
for name, age in res:
context.log.info('%s: %d' % (name, age))
@pipeline(
mode_defs=[
ModeDefinition('prod', resource_defs={'pyspark': emr_pyspark_resource}),
ModeDefinition('local', resource_defs={'pyspark': pyspark_resource}),
]
)
def example_pipe():
example_solid()
other_example_solid()
def test_local():
result = execute_pipeline(
example_pipe,
environment_dict={'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}},},
run_config=RunConfig(mode='local'),
)
assert result.success
@mock_emr
@mock.patch('dagster_aws.emr.emr.EmrJobRunner.wait_for_steps_to_complete')
def test_pyspark_emr(mock_wait):
run_job_flow_args = dict(
Instances={
'InstanceCount': 1,
'KeepJobFlowAliveWhenNoSteps': True,
'MasterInstanceType': 'c3.medium',
'Placement': {'AvailabilityZone': 'us-west-1a'},
'SlaveInstanceType': 'c3.xlarge',
},
JobFlowRole='EMR_EC2_DefaultRole',
LogUri='s3://mybucket/log',
Name='cluster',
ServiceRole='EMR_DefaultRole',
VisibleToAllUsers=True,
)
# Doing cluster setup outside of a solid here, because run_job_flow is not yet plumbed through
# to the pyspark EMR resource.
job_runner = EmrJobRunner(region='us-west-1')
context = create_test_pipeline_execution_context()
cluster_id = job_runner.run_job_flow(context, run_job_flow_args)
result = execute_pipeline(
example_pipe,
environment_dict={
'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}},
'resources': {
'pyspark': {
'config': {
'pipeline_file': __file__,
'pipeline_fn_name': 'example_pipe',
'cluster_id': cluster_id,
'staging_bucket': 'dagster-scratch-80542c2',
'region_name': 'us-west-1',
}
}
},
},
run_config=RunConfig(mode='prod'),
)
assert result.success
    mock_wait.assert_called_once()
def test_bad_requirements_txt():
with pytest.raises(DagsterInvalidDefinitionError) as exc_info:
execute_pipeline(
example_pipe,
environment_dict={
'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}},
'resources': {
'pyspark': {
'config': {
'requirements_file_path': 'DOES_NOT_EXIST',
'pipeline_file': __file__,
'pipeline_fn_name': 'example_pipe',
'cluster_id': 'some_cluster_id',
'staging_bucket': 'dagster-scratch-80542c2',
'region_name': 'us-west-1',
}
}
},
},
run_config=RunConfig(mode='prod'),
)
assert 'The requirements.txt file that was specified does not exist' in str(exc_info.value)
# We have to manually stop the pyspark context here because we interrupted before resources
# were cleaned up, and so stop() was never called on the spark session.
from pyspark.sql import SparkSession
SparkSession.builder.getOrCreate().stop()
@pytest.mark.skip
def test_do_it_live_emr():
result = execute_pipeline(
example_pipe,
environment_dict={
'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}},
'resources': {
'pyspark': {
'config': {
'pipeline_file': __file__,
'pipeline_fn_name': 'example_pipe',
'cluster_id': os.environ.get('AWS_EMR_JOB_FLOW_ID'),
'staging_bucket': 'dagster-scratch-80542c2',
'region_name': 'us-west-1',
}
}
},
},
run_config=RunConfig(mode='prod'),
)
assert result.success
| 33.493506
| 98
| 0.567856
|
1eca6c0466c45c0dec10b43a60e06687592e02f8
| 5,143
|
py
|
Python
|
redash/query_runner/mssql.py
|
wings-xue/redash_chi
|
243df22b5d55f12702cff21b69fa48e478d9ca11
|
[
"BSD-2-Clause"
] | 65
|
2020-07-17T09:34:42.000Z
|
2022-03-25T09:33:32.000Z
|
redash/query_runner/mssql.py
|
wings-xue/redash_chi
|
243df22b5d55f12702cff21b69fa48e478d9ca11
|
[
"BSD-2-Clause"
] | null | null | null |
redash/query_runner/mssql.py
|
wings-xue/redash_chi
|
243df22b5d55f12702cff21b69fa48e478d9ca11
|
[
"BSD-2-Clause"
] | 29
|
2020-08-13T16:02:26.000Z
|
2022-02-17T01:31:05.000Z
|
import logging
import sys
import uuid
from redash.query_runner import *
from redash.utils import json_dumps, json_loads
logger = logging.getLogger(__name__)
try:
import pymssql
enabled = True
except ImportError:
enabled = False
# from _mssql.pyx ## DB-API type definitions & http://www.freetds.org/tds.html#types ##
types_map = {
1: TYPE_STRING,
2: TYPE_STRING,
    # Type #3 is supposed to be an integer, but in some cases decimals are returned
    # with this type. To be on the safe side, we mark it as float.
3: TYPE_FLOAT,
4: TYPE_DATETIME,
5: TYPE_FLOAT,
}
class SqlServer(BaseSQLQueryRunner):
should_annotate_query = False
noop_query = "SELECT 1"
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {
"user": {"type": "string", "title": "用户"},
"password": {"type": "string", "title": "密码"},
"server": {"type": "string", "title": "服务器", "default": "127.0.0.1"},
"port": {"type": "number", "title": "端口", "default": 1433},
"tds_version": {
"type": "string",
"default": "7.0",
"title": "TDS版本",
},
"charset": {
"type": "string",
"default": "UTF-8",
"title": "字符集",
},
"db": {"type": "string", "title": "数据库"},
},
"required": ["db"],
"secret": ["password"],
}
@classmethod
def enabled(cls):
return enabled
@classmethod
def name(cls):
return "Microsoft SQL Server"
@classmethod
def type(cls):
return "mssql"
def _get_tables(self, schema):
query = """
SELECT table_schema, table_name, column_name
FROM INFORMATION_SCHEMA.COLUMNS
WHERE table_schema NOT IN ('guest','INFORMATION_SCHEMA','sys','db_owner','db_accessadmin'
,'db_securityadmin','db_ddladmin','db_backupoperator','db_datareader'
,'db_datawriter','db_denydatareader','db_denydatawriter'
);
"""
results, error = self.run_query(query, None)
if error is not None:
raise Exception("Failed getting schema.")
results = json_loads(results)
for row in results["rows"]:
if row["table_schema"] != self.configuration["db"]:
table_name = "{}.{}".format(row["table_schema"], row["table_name"])
else:
table_name = row["table_name"]
if table_name not in schema:
schema[table_name] = {"name": table_name, "columns": []}
schema[table_name]["columns"].append(row["column_name"])
return list(schema.values())
def run_query(self, query, user):
connection = None
try:
server = self.configuration.get("server", "")
user = self.configuration.get("user", "")
password = self.configuration.get("password", "")
db = self.configuration["db"]
port = self.configuration.get("port", 1433)
tds_version = self.configuration.get("tds_version", "7.0")
charset = self.configuration.get("charset", "UTF-8")
if port != 1433:
server = server + ":" + str(port)
connection = pymssql.connect(
server=server,
user=user,
password=password,
database=db,
tds_version=tds_version,
charset=charset,
)
if isinstance(query, str):
query = query.encode(charset)
cursor = connection.cursor()
logger.debug("SqlServer running query: %s", query)
cursor.execute(query)
data = cursor.fetchall()
if cursor.description is not None:
columns = self.fetch_columns(
[(i[0], types_map.get(i[1], None)) for i in cursor.description]
)
rows = [
dict(zip((column["name"] for column in columns), row))
for row in data
]
data = {"columns": columns, "rows": rows}
json_data = json_dumps(data)
error = None
else:
error = "No data was returned."
json_data = None
cursor.close()
except pymssql.Error as e:
try:
# Query errors are at `args[1]`
error = e.args[1]
except IndexError:
# Connection errors are `args[0][1]`
error = e.args[0][1]
json_data = None
except (KeyboardInterrupt, JobTimeoutException):
connection.cancel()
raise
finally:
if connection:
connection.close()
return json_data, error
register(SqlServer)
| 30.431953
| 103
| 0.504569
|
dd315ca2abe75745a2566469ec874121e5f02769
| 726
|
py
|
Python
|
tests/unit/html/test_player_season_box_scores_table.py
|
tomkennedy22/basketball_reference_web_scraper
|
d4c3841769fa77400b479d6b5786d4ca1076326c
|
[
"MIT"
] | 1
|
2021-01-09T04:34:47.000Z
|
2021-01-09T04:34:47.000Z
|
tests/unit/html/test_player_season_box_scores_table.py
|
tomkennedy22/basketball_reference_web_scraper
|
d4c3841769fa77400b479d6b5786d4ca1076326c
|
[
"MIT"
] | null | null | null |
tests/unit/html/test_player_season_box_scores_table.py
|
tomkennedy22/basketball_reference_web_scraper
|
d4c3841769fa77400b479d6b5786d4ca1076326c
|
[
"MIT"
] | 1
|
2020-06-14T08:51:55.000Z
|
2020-06-14T08:51:55.000Z
|
from unittest import TestCase
from unittest.mock import MagicMock
from basketball_reference_web_scraper.html import PlayerSeasonBoxScoresTable
class TestPlayerSeasonBoxScoresTable(TestCase):
def setUp(self):
self.html = MagicMock()
def test_rows_query_raises_not_implemented_error(self):
table = PlayerSeasonBoxScoresTable(html=self.html)
self.assertRaises(
NotImplementedError,
lambda: table.rows_query,
)
def test_rows_raises_not_implemented_error_when_rows_query_is_not_overridden(self):
table = PlayerSeasonBoxScoresTable(html=self.html)
self.assertRaises(
NotImplementedError,
lambda: table.rows,
)
| 29.04
| 87
| 0.720386
|
0bb58b6c72274efe33b0c67cfc6a3a4ba90b83f5
| 521
|
py
|
Python
|
docs/astropy_sphinx/setup.py
|
jacobic/redpipes
|
8da07f79e5f93441fdf12e3346722cc56d021525
|
[
"MIT"
] | 1
|
2018-01-06T00:35:00.000Z
|
2018-01-06T00:35:00.000Z
|
docs/astropy_sphinx/setup.py
|
jacobic/redpipes
|
8da07f79e5f93441fdf12e3346722cc56d021525
|
[
"MIT"
] | null | null | null |
docs/astropy_sphinx/setup.py
|
jacobic/redpipes
|
8da07f79e5f93441fdf12e3346722cc56d021525
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
from astropy_sphinx_theme import __version__
setup(
name="astropy-sphinx-theme",
version=__version__,
use_2to3=False,
description="The sphinx theme for Astropy.",
long_description="The documentation theme for the Astropy project and affiliated packages.",
author="The Astropy Developers",
author_email="astropy@scipy.org",
install_requires=[
"setuptools",
],
packages=['astropy_sphinx_theme'],
include_package_data=True,
)
| 27.421053
| 96
| 0.731286
|
723dfe297914b5a67d0e0b46f43ea0b5cebabd2c
| 5,624
|
py
|
Python
|
programvaruprojekt/Trainingmaterial_app/file_handler.py
|
riksantikvarieambetet/Validation-of-object-type-with-machinelearning
|
8ad6fc37d2cfc276bdd598ad7d3f2888d7664303
|
[
"Apache-2.0"
] | null | null | null |
programvaruprojekt/Trainingmaterial_app/file_handler.py
|
riksantikvarieambetet/Validation-of-object-type-with-machinelearning
|
8ad6fc37d2cfc276bdd598ad7d3f2888d7664303
|
[
"Apache-2.0"
] | null | null | null |
programvaruprojekt/Trainingmaterial_app/file_handler.py
|
riksantikvarieambetet/Validation-of-object-type-with-machinelearning
|
8ad6fc37d2cfc276bdd598ad7d3f2888d7664303
|
[
"Apache-2.0"
] | 2
|
2019-06-05T07:53:35.000Z
|
2019-06-13T14:43:39.000Z
|
import os
import re
import csv
import time
import requests
import datetime
import urllib.request
from PIL import Image
from tqdm import tqdm
from pathlib import Path
from django.template.defaultfilters import slugify
"""[summary]
This module handles everything related to files
Author: Petter Gullin & Daniel Persson 2019-05-21
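Typical, illustrative usage (items_dict is assumed to be the list of records
fetched from K-samsok):
    path_csv, path_image = create_folders()
    save_all_files(items_dict, path_image)
    csvfile_from_dict(items_dict, path_csv)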
"""
#Path we want files saved in
SAVE_PATH = Path.cwd().parent / 'trainingdata'
PATH_IMG = SAVE_PATH / "trainingdata_images"
PATH_CSV = SAVE_PATH / "trainingdata_csv"
def create_folders():
"""[summary]
Creates folders for csv and images to save files in
"""
time = datetime.datetime.now().strftime(r"%Y%m%d_%H%M%S")
# Name on our main csv folder
path_csv = PATH_CSV / time
try:
os.makedirs(path_csv)
except:
pass
# Name on our main image folder
path_image = PATH_IMG / time
try:
os.makedirs(path_image)
except:
pass
return path_csv,path_image
def remove_corrupt_files(image_folder):
"""[summary]
    Removes corrupt image files, based on https://opensource.com/article/17/2/python-tricks-artists
    Args:
        image_folder ([STR]): [Name of the image folder to check for corrupt files]
"""
path_image = PATH_IMG / image_folder
for filename in os.listdir(path_image):
try:
img = Image.open(path_image / filename)
img.verify()
except (IOError, SyntaxError):
            os.remove(path_image / filename)
print('Removed corrupt file:',filename)
def save_all_files(items_dict,path_file):
"""[summary]
    Loops through the whole dictionary and saves every file from its URL (the thumbnail in this case)
    Args:
        items_dict ([DICT]): [All our items from K-samsok]
        path_file ([STR]): [The path where to save the files]
"""
#tqdm here to show progressbar
for record in tqdm(items_dict):
itemId_link = itemId_get_num(record.get("itemId"))
save_file(record.get("thumbnail"),itemId_link,path_file)
def itemId_get_num(itemId_link):
"""[summary]
    Extracts the itemId number from the full link returned by K-samsok
Args:
itemId_link ([String]): [The full itemId link from k-samsok]
"""
itemId_num = itemId_link.split("/")
return itemId_num.pop()
def list_data_csvfile(csv_folder_name):
"""[summary]
    Reads image data from a csv file and returns it as a list of dicts
Args:
csv_folder_name ([STR]): [The name of the folder where the csv file is located]
"""
thumbnail_list = []
#The path to the csv file
csv_path_folder = PATH_CSV / csv_folder_name
csv_path = csv_path_folder / "image_data.csv"
with open (csv_path,mode="r") as csv_file:
#Skips the header for our list
csv_reader = csv.reader(csv_file)
next(csv_reader)
for row in csv_reader:
thumbnail_dict = {
"itemId": row[0],
"serviceOrganization":row[1],
"thumbnail": row[2],
"itemType": row[3],
"kringlaLink": row[4]
}
thumbnail_list.append(thumbnail_dict)
csv_file.close()
return thumbnail_list
def downloaded_from_csv(csv_folder_name):
"""[summary]
    Downloads the files listed in a csv file
Args:
csv_folder_name ([STR]): [Name of folder where csv is located]
"""
#Name on our downloaded from csv folder
folder_name = csv_folder_name+"-downloaded_from_csv"
#Where we save the downloaded files
path_file = PATH_IMG / folder_name
#Create folder
os.makedirs(path_file)
save_all_files(list_data_csvfile(csv_folder_name),path_file)
def remove_data_csv(csv_folder_name):
"""[summary]
    Removes image data from the csv file if the corresponding image does not exist in the folder
    Args:
        csv_folder_name ([STR]): [Name of the folder where the csv file is located]
"""
csv_data = list_data_csvfile(csv_folder_name)
#The path to the csv file
csv_path = PATH_CSV / csv_folder_name
image_path = PATH_IMG / csv_folder_name
image_folder = os.listdir(image_path)
csv_data_final = []
for data in csv_data:
itemId = slugify(itemId_get_num(data.get('itemId')))+".jpeg"
if itemId in image_folder:
csv_data_final.append(data)
else:
print("removed",data["itemId"])
csvfile_from_dict(csv_data_final,csv_path)
def save_file(url,itemId,path):
"""[summary]
    Downloads a file to a specified folder
    Args:
        url ([STR]): [Link to the file that is downloaded]
        itemId ([STR]): [The itemId of the saved file]
        path ([STR]): [The path where we want the file saved]
"""
filename = slugify(itemId)+".jpeg"
file_path = path / filename
try:
urllib.request.urlretrieve(url, file_path)
except:
print("Error: Host time out, download what I can\n")
def csvfile_from_dict(dict_data,path):
"""[summary]
    Creates a csv file from the given dictionary data
    Args:
        dict_data ([DICT]): [Our data from k-samsok]
        path ([STR]): [The path where we want to save the csv file]
"""
csv_columns = ["itemId","serviceOrganization","thumbnail","itemType","kringlaLink"]
filename = path / "image_data.csv"
if("image_data.csv" in os.listdir(path)):
os.remove(filename)
with open(filename, 'w',newline='',encoding='utf-8') as csvFile:
writer = csv.DictWriter(csvFile,fieldnames=csv_columns)
writer.writeheader()
writer.writerows(dict_data)
csvFile.close()
| 33.082353
| 96
| 0.628912
|
47f90f2caf6513d0b7fd9a651df659e5cf42bb89
| 29,737
|
py
|
Python
|
python/ccxt/async/liqui.py
|
cbp123/ccxt
|
d2e7255a04bb27baaa7b3700d142ecc9a46281ad
|
[
"MIT"
] | null | null | null |
python/ccxt/async/liqui.py
|
cbp123/ccxt
|
d2e7255a04bb27baaa7b3700d142ecc9a46281ad
|
[
"MIT"
] | null | null | null |
python/ccxt/async/liqui.py
|
cbp123/ccxt
|
d2e7255a04bb27baaa7b3700d142ecc9a46281ad
|
[
"MIT"
] | 1
|
2021-07-20T10:37:02.000Z
|
2021-07-20T10:37:02.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import hashlib
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
class liqui (Exchange):
def describe(self):
return self.deep_extend(super(liqui, self).describe(), {
'id': 'liqui',
'name': 'Liqui',
'countries': 'UA',
'rateLimit': 3000,
'version': '3',
'userAgent': self.userAgents['chrome'],
'has': {
'CORS': False,
'createMarketOrder': False,
'fetchOrderBooks': True,
'fetchOrder': True,
'fetchOrders': 'emulated',
'fetchOpenOrders': True,
'fetchClosedOrders': 'emulated',
'fetchTickers': True,
'fetchMyTrades': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27982022-75aea828-63a0-11e7-9511-ca584a8edd74.jpg',
'api': {
'public': 'https://api.liqui.io/api',
'private': 'https://api.liqui.io/tapi',
'web': 'https://liqui.io',
'cacheapi': 'https://cacheapi.liqui.io/Market',
'webapi': 'https://webapi.liqui.io/Market',
'charts': 'https://charts.liqui.io/chart',
},
'www': 'https://liqui.io',
'doc': 'https://liqui.io/api',
'fees': 'https://liqui.io/fee',
},
'api': {
'public': {
'get': [
'info',
'ticker/{pair}',
'depth/{pair}',
'trades/{pair}',
],
},
'private': {
'post': [
'getInfo',
'Trade',
'ActiveOrders',
'OrderInfo',
'CancelOrder',
'TradeHistory',
'CoinDepositAddress',
'WithdrawCoin',
'CreateCoupon',
'RedeemCoupon',
],
},
'web': {
'get': [
'User/Balances',
],
'post': [
'User/Login/',
'User/Session/Activate/',
],
},
'cacheapi': {
'get': [
'Pairs',
'Currencies',
'depth', # ?id=228
'Tickers',
],
},
'webapi': {
'get': [
'Last', # ?id=228
'Info',
],
},
'charts': {
'get': [
'config',
'history', # ?symbol=228&resolution=15&from=1524002997&to=1524011997'
'symbols', # ?symbol=228
'time',
],
},
},
'fees': {
'trading': {
'maker': 0.001,
'taker': 0.0025,
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {},
'deposit': {},
},
},
'commonCurrencies': {
'DSH': 'DASH',
},
'exceptions': {
'803': InvalidOrder, # "Count could not be less than 0.001."(selling below minAmount)
'804': InvalidOrder, # "Count could not be more than 10000."(buying above maxAmount)
'805': InvalidOrder, # "price could not be less than X."(minPrice violation on buy & sell)
'806': InvalidOrder, # "price could not be more than X."(maxPrice violation on buy & sell)
'807': InvalidOrder, # "cost could not be less than X."(minCost violation on buy & sell)
'831': InsufficientFunds, # "Not enougth X to create buy order."(buying with balance.quote < order.cost)
'832': InsufficientFunds, # "Not enougth X to create sell order."(selling with balance.base < order.amount)
'833': OrderNotFound, # "Order with id X was not found."(cancelling non-existent, closed and cancelled order)
},
})
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
key = 'quote'
rate = market[takerOrMaker]
cost = float(self.cost_to_precision(symbol, amount * rate))
if side == 'sell':
cost *= price
else:
key = 'base'
return {
'type': takerOrMaker,
'currency': market[key],
'rate': rate,
'cost': cost,
}
def get_base_quote_from_market_id(self, id):
uppercase = id.upper()
base, quote = uppercase.split('_')
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
return [base, quote]
async def fetch_markets(self):
response = await self.publicGetInfo()
markets = response['pairs']
keys = list(markets.keys())
result = []
for p in range(0, len(keys)):
id = keys[p]
market = markets[id]
base, quote = self.get_base_quote_from_market_id(id)
symbol = base + '/' + quote
precision = {
'amount': self.safe_integer(market, 'decimal_places'),
'price': self.safe_integer(market, 'decimal_places'),
}
amountLimits = {
'min': self.safe_float(market, 'min_amount'),
'max': self.safe_float(market, 'max_amount'),
}
priceLimits = {
'min': self.safe_float(market, 'min_price'),
'max': self.safe_float(market, 'max_price'),
}
costLimits = {
'min': self.safe_float(market, 'min_total'),
}
limits = {
'amount': amountLimits,
'price': priceLimits,
'cost': costLimits,
}
hidden = self.safe_integer(market, 'hidden')
active = (hidden == 0)
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'active': active,
'taker': market['fee'] / 100,
'lot': amountLimits['min'],
'precision': precision,
'limits': limits,
'info': market,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privatePostGetInfo()
balances = response['return']
result = {'info': balances}
funds = balances['funds']
currencies = list(funds.keys())
for c in range(0, len(currencies)):
currency = currencies[c]
uppercase = currency.upper()
uppercase = self.common_currency_code(uppercase)
total = None
used = None
if balances['open_orders'] == 0:
total = funds[currency]
used = 0.0
account = {
'free': funds[currency],
'used': used,
'total': total,
}
result[uppercase] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
if limit is not None:
request['limit'] = limit # default = 150, max = 2000
response = await self.publicGetDepthPair(self.extend(request, params))
        market_id_in_response = (market['id'] in list(response.keys()))
        if not market_id_in_response:
raise ExchangeError(self.id + ' ' + market['symbol'] + ' order book is empty or not available')
orderbook = response[market['id']]
return self.parse_order_book(orderbook)
async def fetch_order_books(self, symbols=None, params={}):
await self.load_markets()
ids = None
if not symbols:
ids = '-'.join(self.ids)
# max URL length is 2083 symbols, including http schema, hostname, tld, etc...
if len(ids) > 2048:
numIds = len(self.ids)
raise ExchangeError(self.id + ' has ' + str(numIds) + ' symbols exceeding max URL length, you are required to specify a list of symbols in the first argument to fetchOrderBooks')
else:
ids = self.market_ids(symbols)
ids = '-'.join(ids)
response = await self.publicGetDepthPair(self.extend({
'pair': ids,
}, params))
result = {}
ids = list(response.keys())
for i in range(0, len(ids)):
id = ids[i]
symbol = id
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
result[symbol] = self.parse_order_book(response[id])
return result
def parse_ticker(self, ticker, market=None):
timestamp = ticker['updated'] * 1000
symbol = None
if market:
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': self.safe_float(ticker, 'avg'),
'baseVolume': self.safe_float(ticker, 'vol_cur'),
'quoteVolume': self.safe_float(ticker, 'vol'),
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
ids = None
if not symbols:
ids = '-'.join(self.ids)
# max URL length is 2083 symbols, including http schema, hostname, tld, etc...
if len(ids) > 2048:
numIds = len(self.ids)
raise ExchangeError(self.id + ' has ' + str(numIds) + ' symbols exceeding max URL length, you are required to specify a list of symbols in the first argument to fetchTickers')
else:
ids = self.market_ids(symbols)
ids = '-'.join(ids)
tickers = await self.publicGetTickerPair(self.extend({
'pair': ids,
}, params))
result = {}
keys = list(tickers.keys())
for k in range(0, len(keys)):
id = keys[k]
ticker = tickers[id]
symbol = id
market = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
tickers = await self.fetch_tickers([symbol], params)
return tickers[symbol]
def parse_trade(self, trade, market=None):
timestamp = int(trade['timestamp']) * 1000
side = trade['type']
if side == 'ask':
side = 'sell'
if side == 'bid':
side = 'buy'
price = self.safe_float(trade, 'price')
if 'rate' in trade:
price = self.safe_float(trade, 'rate')
id = self.safe_string(trade, 'tid')
if 'trade_id' in trade:
id = self.safe_string(trade, 'trade_id')
order = self.safe_string(trade, self.get_order_id_key())
if 'pair' in trade:
marketId = trade['pair']
market = self.markets_by_id[marketId]
symbol = None
if market:
symbol = market['symbol']
amount = trade['amount']
type = 'limit' # all trades are still limit trades
isYourOrder = self.safe_value(trade, 'is_your_order')
takerOrMaker = 'taker'
if isYourOrder is not None:
if isYourOrder:
takerOrMaker = 'maker'
fee = self.calculate_fee(symbol, type, side, amount, price, takerOrMaker)
return {
'id': id,
'order': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'fee': fee,
'info': trade,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
if limit is not None:
request['limit'] = limit
response = await self.publicGetTradesPair(self.extend(request, params))
return self.parse_trades(response[market['id']], market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type == 'market':
raise ExchangeError(self.id + ' allows limit orders only')
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'type': side,
'amount': self.amount_to_precision(symbol, amount),
'rate': self.price_to_precision(symbol, price),
}
response = await self.privatePostTrade(self.extend(request, params))
id = self.safe_string(response['return'], self.get_order_id_key())
timestamp = self.milliseconds()
price = float(price)
amount = float(amount)
status = 'open'
if id == '0':
id = self.safe_string(response['return'], 'init_order_id')
status = 'closed'
filled = self.safe_float(response['return'], 'received', 0.0)
remaining = self.safe_float(response['return'], 'remains', amount)
order = {
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': price * filled,
'amount': amount,
'remaining': remaining,
'filled': filled,
'fee': None,
# 'trades': self.parse_trades(order['trades'], market),
}
self.orders[id] = order
return self.extend({'info': response}, order)
def get_order_id_key(self):
return 'order_id'
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
response = None
request = {}
idKey = self.get_order_id_key()
request[idKey] = id
response = await self.privatePostCancelOrder(self.extend(request, params))
if id in self.orders:
self.orders[id]['status'] = 'canceled'
return response
def parse_order_status(self, status):
statuses = {
'0': 'open',
'1': 'closed',
'2': 'canceled',
'3': 'canceled', # or partially-filled and still open? https://github.com/ccxt/ccxt/issues/1594
}
if status in statuses:
return statuses[status]
return status
def parse_order(self, order, market=None):
id = str(order['id'])
status = self.safe_string(order, 'status')
if status != 'None':
status = self.parse_order_status(status)
timestamp = int(order['timestamp_created']) * 1000
symbol = None
if not market:
market = self.markets_by_id[order['pair']]
if market:
symbol = market['symbol']
remaining = None
amount = None
price = self.safe_float(order, 'rate')
filled = None
cost = None
if 'start_amount' in order:
amount = self.safe_float(order, 'start_amount')
remaining = self.safe_float(order, 'amount')
else:
remaining = self.safe_float(order, 'amount')
if id in self.orders:
amount = self.orders[id]['amount']
if amount is not None:
if remaining is not None:
filled = amount - remaining
cost = price * filled
fee = None
result = {
'info': order,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'type': 'limit',
'side': order['type'],
'price': price,
'cost': cost,
'amount': amount,
'remaining': remaining,
'filled': filled,
'status': status,
'fee': fee,
}
return result
def parse_orders(self, orders, market=None, since=None, limit=None):
ids = list(orders.keys())
result = []
for i in range(0, len(ids)):
id = ids[i]
order = orders[id]
extended = self.extend(order, {'id': id})
result.append(self.parse_order(extended, market))
return self.filter_by_since_limit(result, since, limit)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
response = await self.privatePostOrderInfo(self.extend({
'order_id': int(id),
}, params))
id = str(id)
newOrder = self.parse_order(self.extend({'id': id}, response['return'][id]))
oldOrder = self.orders[id] if (id in list(self.orders.keys())) else {}
self.orders[id] = self.extend(oldOrder, newOrder)
return self.orders[id]
def update_cached_orders(self, openOrders, symbol):
# update local cache with open orders
for j in range(0, len(openOrders)):
id = openOrders[j]['id']
self.orders[id] = openOrders[j]
openOrdersIndexedById = self.index_by(openOrders, 'id')
cachedOrderIds = list(self.orders.keys())
result = []
for k in range(0, len(cachedOrderIds)):
# match each cached order to an order in the open orders array
# possible reasons why a cached order may be missing in the open orders array:
# - order was closed or canceled -> update cache
# - symbol mismatch(e.g. cached BTC/USDT, fetched ETH/USDT) -> skip
id = cachedOrderIds[k]
order = self.orders[id]
result.append(order)
if not(id in list(openOrdersIndexedById.keys())):
# cached order is not in open orders array
# if we fetched orders by symbol and it doesn't match the cached order -> won't update the cached order
if symbol is not None and symbol != order['symbol']:
continue
# order is cached but not present in the list of open orders -> mark the cached order as closed
if order['status'] == 'open':
order = self.extend(order, {
'status': 'closed', # likewise it might have been canceled externally(unnoticed by "us")
'cost': None,
'filled': order['amount'],
'remaining': 0.0,
})
if order['cost'] is None:
if order['filled'] is not None:
order['cost'] = order['filled'] * order['price']
self.orders[id] = order
return result
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if 'fetchOrdersRequiresSymbol' in self.options:
if self.options['fetchOrdersRequiresSymbol']:
if symbol is None:
raise ExchangeError(self.id + ' fetchOrders requires a symbol argument')
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['pair'] = market['id']
response = await self.privatePostActiveOrders(self.extend(request, params))
# liqui etc can only return 'open' orders(i.e. no way to fetch 'closed' orders)
openOrders = []
if 'return' in response:
openOrders = self.parse_orders(response['return'], market)
allOrders = self.update_cached_orders(openOrders, symbol)
result = self.filter_by_symbol(allOrders, symbol)
return self.filter_by_since_limit(result, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
orders = await self.fetch_orders(symbol, since, limit, params)
return self.filter_by(orders, 'status', 'open')
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
orders = await self.fetch_orders(symbol, since, limit, params)
return self.filter_by(orders, 'status', 'closed')
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
request = {
# 'from': 123456789, # trade ID, from which the display starts numerical 0(test result: liqui ignores self field)
# 'count': 1000, # the number of trades for display numerical, default = 1000
# 'from_id': trade ID, from which the display starts numerical 0
# 'end_id': trade ID on which the display ends numerical ∞
# 'order': 'ASC', # sorting, default = DESC(test result: liqui ignores self field, most recent trade always goes last)
# 'since': 1234567890, # UTC start time, default = 0(test result: liqui ignores self field)
# 'end': 1234567890, # UTC end time, default = ∞(test result: liqui ignores self field)
# 'pair': 'eth_btc', # default = all markets
}
if symbol is not None:
market = self.market(symbol)
request['pair'] = market['id']
if limit is not None:
request['count'] = int(limit)
if since is not None:
request['since'] = int(since / 1000)
response = await self.privatePostTradeHistory(self.extend(request, params))
trades = []
if 'return' in response:
trades = response['return']
return self.parse_trades(trades, market, since, limit)
async def withdraw(self, currency, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
response = await self.privatePostWithdrawCoin(self.extend({
'coinName': currency,
'amount': float(amount),
'address': address,
}, params))
return {
'info': response,
'id': response['return']['tId'],
}
def sign_body_with_secret(self, body):
return self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512)
def get_version_string(self):
return '/' + self.version
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api]
query = self.omit(params, self.extract_params(path))
if api == 'private':
self.check_required_credentials()
nonce = self.nonce()
body = self.urlencode(self.extend({
'nonce': nonce,
'method': path,
}, query))
signature = self.sign_body_with_secret(body)
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Key': self.apiKey,
'Sign': signature,
}
elif api == 'public':
url += self.get_version_string() + '/' + self.implode_params(path, params)
if query:
url += '?' + self.urlencode(query)
else:
url += '/' + self.implode_params(path, params)
if method == 'GET':
if query:
url += '?' + self.urlencode(query)
else:
if query:
body = self.json(query)
headers = {
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
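    # Illustrative shape of a signed private request produced by sign() above
    # (all values below are placeholders, not real credentials):
    #   body    -> "nonce=123456&method=TradeHistory&pair=eth_btc"
    #   headers -> {"Content-Type": "application/x-www-form-urlencoded",
    #               "Key": "<apiKey>",
    #               "Sign": "<hex HMAC-SHA512 digest of body, keyed with the API secret>"}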
def handle_errors(self, httpCode, reason, url, method, headers, body):
if not isinstance(body, basestring):
return # fallback to default error handler
if len(body) < 2:
return # fallback to default error handler
if (body[0] == '{') or (body[0] == '['):
response = json.loads(body)
if 'success' in response:
#
# 1 - Liqui only returns the integer 'success' key from their private API
#
# {"success": 1, ...} httpCode == 200
# {"success": 0, ...} httpCode == 200
#
# 2 - However, exchanges derived from Liqui, can return non-integers
#
# It can be a numeric string
# {"sucesss": "1", ...}
# {"sucesss": "0", ...}, httpCode >= 200(can be 403, 502, etc)
#
# Or just a string
# {"success": "true", ...}
# {"success": "false", ...}, httpCode >= 200
#
# Or a boolean
# {"success": True, ...}
# {"success": False, ...}, httpCode >= 200
#
                # 3 - To keep this simple and portable, we avoid ==-comparisons between values of different types
#
# 4 - We do not want to copy-paste and duplicate the code of self handler to other exchanges derived from Liqui
#
# To cover points 1, 2, 3 and 4 combined self handler should work like self:
#
success = self.safe_value(response, 'success', False)
if isinstance(success, basestring):
if (success == 'true') or (success == '1'):
success = True
else:
success = False
if not success:
code = self.safe_string(response, 'code')
message = self.safe_string(response, 'error')
feedback = self.id + ' ' + self.json(response)
exceptions = self.exceptions
if code in exceptions:
raise exceptions[code](feedback)
# need a second error map for these messages, apparently...
# in fact, we can use the same .exceptions with string-keys to save some loc here
if message == 'invalid api key':
raise AuthenticationError(feedback)
elif message == 'api key dont have trade permission':
raise AuthenticationError(feedback)
elif message.find('invalid parameter') >= 0: # errorCode 0, returned on buy(symbol, 0, 0)
raise InvalidOrder(feedback)
elif message == 'invalid order':
raise InvalidOrder(feedback)
elif message == 'Requests too often':
raise DDoSProtection(feedback)
elif message == 'not available':
raise DDoSProtection(feedback)
elif message == 'data unavailable':
raise DDoSProtection(feedback)
elif message == 'external service unavailable':
raise DDoSProtection(feedback)
else:
raise ExchangeError(self.id + ' unknown "error" value: ' + self.json(response))
| 40.679891
| 194
| 0.50422
|
25da2cb373241b5fae29940a1b1a127ce8921ff8
| 4,919
|
py
|
Python
|
sdk/python/pulumi_aws/ec2/vpc_endpoint_service.py
|
pulumi-bot/pulumi-aws
|
756c60135851e015232043c8206567101b8ebd85
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ec2/vpc_endpoint_service.py
|
pulumi-bot/pulumi-aws
|
756c60135851e015232043c8206567101b8ebd85
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ec2/vpc_endpoint_service.py
|
pulumi-bot/pulumi-aws
|
756c60135851e015232043c8206567101b8ebd85
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
class VpcEndpointService(pulumi.CustomResource):
"""
Provides a VPC Endpoint Service resource.
Service consumers can create an _Interface_ [VPC Endpoint](vpc_endpoint.html) to connect to the service.
~> **NOTE on VPC Endpoint Services and VPC Endpoint Service Allowed Principals:** Terraform provides
both a standalone [VPC Endpoint Service Allowed Principal](vpc_endpoint_service_allowed_principal.html) resource
and a VPC Endpoint Service resource with an `allowed_principals` attribute. Do not use the same principal ARN in both
a VPC Endpoint Service resource and a VPC Endpoint Service Allowed Principal resource. Doing so will cause a conflict
and will overwrite the association.
"""
def __init__(__self__, __name__, __opts__=None, acceptance_required=None, allowed_principals=None, network_load_balancer_arns=None):
"""Create a VpcEndpointService resource with the given unique name, props, and options."""
if not __name__:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(__name__, basestring):
raise TypeError('Expected resource name to be a string')
if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if not acceptance_required:
raise TypeError('Missing required property acceptance_required')
elif not isinstance(acceptance_required, bool):
raise TypeError('Expected property acceptance_required to be a bool')
__self__.acceptance_required = acceptance_required
"""
Whether or not VPC endpoint connection requests to the service must be accepted by the service owner - `true` or `false`.
"""
__props__['acceptanceRequired'] = acceptance_required
if allowed_principals and not isinstance(allowed_principals, list):
raise TypeError('Expected property allowed_principals to be a list')
__self__.allowed_principals = allowed_principals
"""
The ARNs of one or more principals allowed to discover the endpoint service.
"""
__props__['allowedPrincipals'] = allowed_principals
if not network_load_balancer_arns:
raise TypeError('Missing required property network_load_balancer_arns')
elif not isinstance(network_load_balancer_arns, list):
raise TypeError('Expected property network_load_balancer_arns to be a list')
__self__.network_load_balancer_arns = network_load_balancer_arns
"""
The ARNs of one or more Network Load Balancers for the endpoint service.
"""
__props__['networkLoadBalancerArns'] = network_load_balancer_arns
__self__.availability_zones = pulumi.runtime.UNKNOWN
"""
The Availability Zones in which the service is available.
"""
__self__.base_endpoint_dns_names = pulumi.runtime.UNKNOWN
"""
The DNS names for the service.
"""
__self__.private_dns_name = pulumi.runtime.UNKNOWN
"""
The private DNS name for the service.
"""
__self__.service_name = pulumi.runtime.UNKNOWN
"""
The service name.
"""
__self__.service_type = pulumi.runtime.UNKNOWN
"""
The service type, `Gateway` or `Interface`.
"""
__self__.state = pulumi.runtime.UNKNOWN
"""
The state of the VPC endpoint service.
"""
super(VpcEndpointService, __self__).__init__(
'aws:ec2/vpcEndpointService:VpcEndpointService',
__name__,
__props__,
__opts__)
def set_outputs(self, outs):
if 'acceptanceRequired' in outs:
self.acceptance_required = outs['acceptanceRequired']
if 'allowedPrincipals' in outs:
self.allowed_principals = outs['allowedPrincipals']
if 'availabilityZones' in outs:
self.availability_zones = outs['availabilityZones']
if 'baseEndpointDnsNames' in outs:
self.base_endpoint_dns_names = outs['baseEndpointDnsNames']
if 'networkLoadBalancerArns' in outs:
self.network_load_balancer_arns = outs['networkLoadBalancerArns']
if 'privateDnsName' in outs:
self.private_dns_name = outs['privateDnsName']
if 'serviceName' in outs:
self.service_name = outs['serviceName']
if 'serviceType' in outs:
self.service_type = outs['serviceType']
if 'state' in outs:
self.state = outs['state']
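# Illustrative usage of VpcEndpointService (a sketch; the ARN below is a placeholder,
# and acceptance_required/network_load_balancer_arns are the required properties):
#   svc = VpcEndpointService(
#       "exampleEndpointService",
#       acceptance_required=True,
#       network_load_balancer_arns=[
#           "arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/net/example/abc123"])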
| 45.546296
| 136
| 0.67656
|
609066249d10ee3b9532bf148c76aab7562f06f6
| 7,653
|
py
|
Python
|
setup.py
|
FDecaYed/apex
|
789afd89fe2c5a3e772f557055a9cf0f5e9d1241
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
FDecaYed/apex
|
789afd89fe2c5a3e772f557055a9cf0f5e9d1241
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
FDecaYed/apex
|
789afd89fe2c5a3e772f557055a9cf0f5e9d1241
|
[
"BSD-3-Clause"
] | null | null | null |
import re
import sys
import os
import shutil
import inspect
import distutils
import distutils.spawn
from distutils.command.clean import clean
from setuptools import setup, Extension, find_packages
from setuptools.command.install import install
import subprocess
import ctypes.util
import torch
def find(path, regex_func, collect=False):
"""
Recursively searches through a directory with regex_func and
either collects all instances or returns the first instance.
Args:
path: Directory to search through
        regex_func: A function run on each filename to decide whether it should be returned/collected
        collect (default False): If True, collect all matches; otherwise return the first match only (or None if nothing matches)
"""
collection = [] if collect else None
for root, dirs, files in os.walk(path):
for file in files:
if regex_func(file):
if collect:
collection.append(os.path.join(root, file))
else:
return os.path.join(root, file)
    return list(set(collection)) if collect else None
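# Illustrative usage of find() (paths here are hypothetical):
#   find('/usr/local/cuda/bin', re.compile('nvcc$').search)   # -> '/usr/local/cuda/bin/nvcc' or None
#   find('.', lambda f: f.endswith('.cu'), collect=True)      # -> list of all .cu files under '.'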
def findcuda():
"""
    Based on the PyTorch build process. Looks for nvcc for compilation.
    Uses the CUDA_HOME environment variable if set, otherwise searches for
    nvcc. Returns the NVCC executable, the CUDA major version and the CUDA home directory.
"""
cuda_path = None
CUDA_HOME = None
CUDA_HOME = os.getenv('CUDA_HOME', '/usr/local/cuda')
if not os.path.exists(CUDA_HOME):
# We use nvcc path on Linux and cudart path on macOS
cudart_path = ctypes.util.find_library('cudart')
if cudart_path is not None:
cuda_path = os.path.dirname(cudart_path)
if cuda_path is not None:
CUDA_HOME = os.path.dirname(cuda_path)
if not cuda_path and not CUDA_HOME:
nvcc_path = find('/usr/local/', re.compile("nvcc").search, False)
if nvcc_path:
CUDA_HOME = os.path.dirname(nvcc_path)
if CUDA_HOME:
                # nvcc lives in <CUDA_HOME>/bin, so go up one more level
                CUDA_HOME = os.path.dirname(CUDA_HOME)
if (not os.path.exists(CUDA_HOME+os.sep+"lib64")
or not os.path.exists(CUDA_HOME+os.sep+"include") ):
raise RuntimeError("Error: found NVCC at ", nvcc_path ," but could not locate CUDA libraries"+
" or include directories.")
raise RuntimeError("Error: Could not find cuda on this system."+
" Please set your CUDA_HOME enviornment variable to the CUDA base directory.")
NVCC = find(CUDA_HOME+os.sep+"bin",
re.compile('nvcc$').search)
print("Found NVCC = ", NVCC)
# Parse output of nvcc to get cuda major version
nvcc_output = subprocess.check_output([NVCC, '--version']).decode("utf-8")
    CUDA_LIB = re.compile(r', V[0-9]+\.[0-9]+\.[0-9]+').search(nvcc_output).group(0).split('V')[1]
print("Found CUDA_LIB = ", CUDA_LIB)
if CUDA_LIB:
try:
CUDA_VERSION = int(CUDA_LIB.split('.')[0])
except (ValueError, TypeError):
CUDA_VERSION = 9
else:
CUDA_VERSION = 9
if CUDA_VERSION < 8:
raise RuntimeError("Error: APEx requires CUDA 8 or newer")
return NVCC, CUDA_VERSION, CUDA_HOME
#Get some important paths
curdir = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
buildir = curdir+os.sep+"build"
if not os.path.exists(buildir):
os.makedirs(buildir)
torch_dir = os.path.split(torch.__file__)[0] + os.sep + "lib"
cuda_files = find(curdir, lambda file: file.endswith(".cu"), True)
cuda_headers = find(curdir, lambda file: file.endswith(".cuh"), True)
headers = find(curdir, lambda file: file.endswith(".h"), True)
libaten = find(torch_dir, re.compile("libaten", re.IGNORECASE).search, False)
aten_h = find(torch_dir, re.compile("aten.h", re.IGNORECASE).search, False)
include_dirs = [os.path.dirname(os.path.dirname(aten_h))]
library_dirs = []
for file in cuda_headers+headers:
dir = os.path.dirname(file)
if dir not in include_dirs:
include_dirs.append(dir)
assert libaten, "Could not find PyTorch's libATen."
assert aten_h, "Could not find PyTorch's ATen header."
library_dirs.append(os.path.dirname(libaten))
#create some places to collect important things
object_files = []
extra_link_args=[]
main_libraries = []
main_libraries += ['cudart', 'ATen']
extra_compile_args = ["--std=c++11",]
#findcuda returns root dir of CUDA
#include cuda/include and cuda/lib64 for python module build.
NVCC, CUDA_VERSION, CUDA_HOME=findcuda()
library_dirs.append(os.path.join(CUDA_HOME, "lib64"))
include_dirs.append(os.path.join(CUDA_HOME, 'include'))
class RMBuild(clean):
def run(self):
#BE VERY CAUTIOUS WHEN USING RMTREE!!!
#These are some carefully written/crafted directories
if os.path.exists(buildir):
shutil.rmtree(buildir)
distdir = curdir+os.sep+"dist"
if os.path.exists(distdir):
shutil.rmtree(distdir)
eggdir = curdir+os.sep+"apex.egg-info"
if os.path.exists(eggdir):
shutil.rmtree(eggdir)
clean.run(self)
def CompileCudaFiles(NVCC, CUDA_VERSION):
print()
print("Compiling cuda modules with nvcc:")
gencodes = ['-gencode', 'arch=compute_52,code=sm_52',
'-gencode', 'arch=compute_60,code=sm_60',
'-gencode', 'arch=compute_61,code=sm_61',]
if CUDA_VERSION > 8:
gencodes += ['-gencode', 'arch=compute_70,code=sm_70',
'-gencode', 'arch=compute_70,code=compute_70',]
#Need arches to compile for. Compiles for 70 which requires CUDA9
nvcc_cmd = [NVCC,
'-Xcompiler',
'-fPIC'
] + gencodes + [
'--std=c++11',
'-O3',
]
for dir in include_dirs:
nvcc_cmd.append("-I"+dir)
for file in cuda_files:
object_name = os.path.basename(
os.path.splitext(file)[0]+".o"
)
object_file = os.path.join(buildir, object_name)
object_files.append(object_file)
file_opts = ['-c', file, '-o', object_file]
print(' '.join(nvcc_cmd+file_opts))
subprocess.check_call(nvcc_cmd+file_opts)
for object_file in object_files:
extra_link_args.append(object_file)
if 'clean' not in sys.argv:
print()
print("Arguments used to build CUDA extension:")
print("extra_compile_args :", extra_compile_args)
print("include_dirs: ", include_dirs)
print("extra_link_args: ", extra_link_args)
print("library_dirs: ", library_dirs)
print("libraries: ", main_libraries)
print()
CompileCudaFiles(NVCC, CUDA_VERSION)
print("Building CUDA extension.")
cuda_ext = Extension('apex._C',
[os.path.join('csrc', 'Module.cpp')],
extra_compile_args = extra_compile_args,
include_dirs=include_dirs,
extra_link_args=extra_link_args,
library_dirs=library_dirs,
runtime_library_dirs = library_dirs,
libraries=main_libraries
)
if 'clean' not in sys.argv:
print("Building module.")
setup(
name='apex', version='0.1',
cmdclass={
'clean' : RMBuild,
},
ext_modules=[cuda_ext,],
description='PyTorch Extensions written by NVIDIA',
packages=find_packages(exclude=("build", "csrc", "include", "tests")),
)
| 33.713656
| 111
| 0.62028
|
9f492e72abc7cd283c36800399a83d316feffcd4
| 1,788
|
py
|
Python
|
wpa_project/student_app/urls.py
|
s-amundson/wpa_2p1
|
43deb859123e5ef2eab3652e403c8d2f53d43b77
|
[
"MIT"
] | 1
|
2022-01-03T02:46:34.000Z
|
2022-01-03T02:46:34.000Z
|
wpa_project/student_app/urls.py
|
s-amundson/wpa_2p1
|
43deb859123e5ef2eab3652e403c8d2f53d43b77
|
[
"MIT"
] | 31
|
2021-12-29T17:43:06.000Z
|
2022-03-25T01:03:17.000Z
|
wpa_project/student_app/urls.py
|
s-amundson/wpa_2p1
|
43deb859123e5ef2eab3652e403c8d2f53d43b77
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import *
app_name = 'registration'
urlpatterns = [
path('', IndexView.as_view(), name='index'),
path('add_student/<int:student_id>/', AddStudentView.as_view(), name='add_student'),
path('add_student/', AddStudentView.as_view(), name='add_student'),
path('help/', HelpView.as_view(), name='help'),
path('instructor_update/', InstructorUpdateView.as_view(), name='instructor_update'),
path('pdf/<int:student_id>/', PdfGetView.as_view(), name='pdf'),
path('policy/<str:policy>/', PolicyView.as_view(), name='policy'),
path('profile/', ProfileView.as_view(), name='profile'),
path('search/', SearchView.as_view(), name='search'),
path('search_result/<int:student_family>', SearchResultView.as_view(), name='search_result'),
path('student_api/<int:student_id>/', StudentApiView.as_view(), name='student_api'),
path('student_api/', StudentApiView.as_view(), name='student_api'),
path('student_family_api/<int:family_id>/', StudentFamilyApiView.as_view(), name='student_family_api'),
path('student_family_api/', StudentFamilyApiView.as_view(), name='student_family_api'),
path('student_list/', StudentList.as_view(), name='student_list'),
path('student_register/<int:family_id>/', StudentFamilyRegisterView.as_view(), name='student_register'),
path('student_register/', StudentFamilyRegisterView.as_view(), name='student_register'),
path('student_table/', StudentTableView.as_view(), name='student_table'),
path('terms/', TermsView.as_view(), name='terms'),
path('theme/', ThemeView.as_view(), name='theme'),
path('update_user/<int:user_id>/', UserView.as_view(), name='update_user')
]
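# Illustrative reversals of the named routes above (assuming this URLconf is
# included at the project root), e.g. with django.urls.reverse:
#   reverse('registration:add_student', kwargs={'student_id': 3})  # -> '/add_student/3/'
#   reverse('registration:profile')                                # -> '/profile/'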
| 66.222222
| 112
| 0.676734
|
d4f99662e958ad17305a76038523214e82f2f10a
| 8,706
|
py
|
Python
|
qa/pull-tester/rpc-tests.py
|
pazzaBec/UlordChain
|
6dad822b688271712f9509ceefa301b42f705ec5
|
[
"MIT"
] | 174
|
2018-01-16T13:26:52.000Z
|
2022-02-16T15:12:10.000Z
|
qa/pull-tester/rpc-tests.py
|
pazzaBec/UlordChain
|
6dad822b688271712f9509ceefa301b42f705ec5
|
[
"MIT"
] | 32
|
2018-01-25T03:42:01.000Z
|
2020-07-31T17:37:52.000Z
|
qa/pull-tester/rpc-tests.py
|
pazzaBec/UlordChain
|
6dad822b688271712f9509ceefa301b42f705ec5
|
[
"MIT"
] | 55
|
2018-01-29T06:37:28.000Z
|
2020-03-20T03:34:30.000Z
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passOn string
opts = set()
passOn = ""
p = re.compile("^--")
bold = ("","")
if (os.name == 'posix'):
bold = ('\033[0m', '\033[1m')
for arg in sys.argv[1:]:
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif (p.match(arg) or arg == "-h"):
passOn += " " + arg
else:
opts.add(arg)
#Set env vars
buildDir = BUILDDIR
if "UCD" not in os.environ:
os.environ["UCD"] = buildDir + '/src/ulordd' + EXEEXT
if "UCCLI" not in os.environ:
os.environ["UCCLI"] = buildDir + '/src/ulord-cli' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print "Win tests currently disabled by default. Use -win option to enable"
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
print "No rpc tests to run. Wallet, utils, and bitcoind must all be enabled"
sys.exit(0)
# python-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError as e:
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or " \
"to run zmq tests, see dependency info in /qa/README.md.")
raise e
#Tests
testScripts = [
'bip68-112-113-p2p.py',
'wallet.py',
'listtransactions.py',
'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rawtransactions.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_limit.py',
'httpbasics.py',
'multi_rpc.py',
'zapwallettxes.py',
'proxy_test.py',
'merkle_blocks.py',
'fundrawtransaction.py',
'signrawtransactions.py',
'walletbackup.py',
'nodehandling.py',
'reindex.py',
'addressindex.py',
'timestampindex.py',
'spentindex.py',
'decodescript.py',
'p2p-fullblocktest.py', # NOTE: needs ulord_hash to pass
'blockchain.py',
'disablewallet.py',
'sendheaders.py', # NOTE: needs ulord_hash to pass
'keypool.py',
'prioritise_transaction.py',
'invalidblockrequest.py', # NOTE: needs ulord_hash to pass
'invalidtxrequest.py', # NOTE: needs ulord_hash to pass
'abandonconflict.py',
'p2p-versionbits-warning.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
'bip9-softforks.py',
'bip65-cltv.py',
'bip65-cltv-p2p.py', # NOTE: needs ulord_hash to pass
'bip68-sequence.py',
'bipdersig-p2p.py', # NOTE: needs ulord_hash to pass
'bipdersig.py',
'getblocktemplate_longpoll.py', # FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
# 'pruning.py', # Prune mode is incompatible with -txindex.
'forknotify.py',
'invalidateblock.py',
# 'rpcbind_test.py', #temporary, bug in libevent, see #6655
'smartfees.py',
'maxblocksinflight.py',
'p2p-acceptblock.py', # NOTE: needs ulord_hash to pass
'mempool_packages.py',
'maxuploadtarget.py',
# 'replace-by-fee.py', # RBF is disabled in Ulord Core
]
def runtests():
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
rpcTestDir = buildDir + '/qa/rpc-tests/'
run_extended = '-extended' in opts
cov_flag = coverage.flag if coverage else ''
flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)
#Run Tests
for i in range(len(testScripts)):
if (len(opts) == 0
or (len(opts) == 1 and "-win" in opts )
or run_extended
or testScripts[i] in opts
or re.sub(".py$", "", testScripts[i]) in opts ):
print("Running testscript %s%s%s ..." % (bold[1], testScripts[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScripts[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
# exit if help is called so we print just one set of
# instructions
p = re.compile(" -h| --help")
if p.match(passOn):
sys.exit(0)
# Run Extended Tests
for i in range(len(testScriptsExt)):
if (run_extended or testScriptsExt[i] in opts
or re.sub(".py$", "", testScriptsExt[i]) in opts):
print(
"Running 2nd level testscript "
+ "%s%s%s ..." % (bold[1], testScriptsExt[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScriptsExt[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir %s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test-framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
| 31.543478
| 163
| 0.636687
|
237e62b1d7b9f3e8caf51387d71f2c25dc7118fa
| 2,397
|
py
|
Python
|
data/p4VQE/R4/benchmark/startQiskit_Class162.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startQiskit_Class162.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startQiskit_Class162.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=3
# total number=10
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.swap(input_qubit[1],input_qubit[0]) # number=7
prog.x(input_qubit[2]) # number=8
prog.x(input_qubit[2]) # number=9
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
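    # The block below grid-searches the analytic cost landscape F1 over
    # (gamma, beta) and uses the maximizing pair as the circuit angles.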
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit_Class162.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog, FakeYorktown())
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 27.238636
| 118
| 0.634543
|
b12e6c4856a6690b1ed9a1828ae2002e47c8841d
| 6,670
|
py
|
Python
|
tests/test_generators/test_sqltablegen.py
|
StroemPhi/linkml
|
a211fd1430a6d97efddbd637f5109b3c7e075e63
|
[
"CC0-1.0"
] | null | null | null |
tests/test_generators/test_sqltablegen.py
|
StroemPhi/linkml
|
a211fd1430a6d97efddbd637f5109b3c7e075e63
|
[
"CC0-1.0"
] | null | null | null |
tests/test_generators/test_sqltablegen.py
|
StroemPhi/linkml
|
a211fd1430a6d97efddbd637f5109b3c7e075e63
|
[
"CC0-1.0"
] | null | null | null |
from enum import Enum
import os
import re
import sqlite3
from typing import List
import unittest
import tempfile
from linkml_runtime.dumpers import yaml_dumper
from linkml_runtime.linkml_model.meta import SlotDefinition
from sqlalchemy.sql.sqltypes import Text, Enum
from linkml.utils.schemaloader import SchemaLoader
from linkml_runtime.utils.schemaview import SchemaView
from linkml.generators.yamlgen import YAMLGenerator
from linkml.generators.sqltablegen import SQLTableGenerator, SqlNamingPolicy
from linkml.transformers.relmodel_transformer import RelationalModelTransformer
from tests.test_generators.environment import env
SCHEMA = env.input_path('personinfo.yaml')
OUT_PATH = env.expected_path('personinfo.relational.yaml')
RSCHEMA_EXPANDED = env.expected_path('personinfo.relational.expanded.yaml')
OUT_DDL = env.expected_path('personinfo.ddl.sql')
SQLDDLLOG = env.expected_path('personinfo.sql.log')
DB = env.expected_path('personinfo.db')
class SQLTableGeneratorTestCase(unittest.TestCase):
"""
Tests the (new) SQLTableGenerator
"""
def test_generate_ddl(self):
"""Generate contents of DDL file as a string."""
gen = SQLTableGenerator(SCHEMA)
ddl = gen.generate_ddl()
new_file, filename = tempfile.mkstemp()
temp_ddl_filepath = filename + ".sql.ddl"
with open(temp_ddl_filepath, 'w') as stream:
stream.write(ddl)
py_file_list = []
with open(temp_ddl_filepath) as file:
lines = file.readlines()
py_file_list = [line.rstrip() for line in lines]
tbl_list = []
for item in py_file_list:
res = re.search(r"\"(.*?)\"", item)
if res:
tbl_list.append(res.group(1))
self.assertTrue(all(x in tbl_list for x in ["NamedThing",
"Place",
"Address",
"Event",
"Concept",
"DiagnosisConcept",
"ProcedureConcept",
"Relationship",
"Container",
"Person",
"Address",
"Organization"]),
f"Expected classes from {SCHEMA} not written to {temp_ddl_filepath}")
def test_get_sql_range(self):
"""Test case for the get_sql_range() method."""
gen = SQLTableGenerator(SCHEMA)
# loader = SchemaLoader(data=SCHEMA)
# schema_def_str = loader.resolve()
case_1_slot = SlotDefinition(name="id",
definition_uri="https://w3id.org/linkml/examples/personinfo/id",
mappings=['schema:identifier'],
from_schema="https://w3id.org/linkml/examples/personinfo",
range="string",
slot_uri='schema:identifier',
owner="Place",
domain_of=["NamedThing", "Place"])
case_2_slot = SlotDefinition(name='FamilialRelationship_type',
from_schema='https://w3id.org/linkml/examples/personinfo',
is_a='type',
domain='FamilialRelationship',
range='FamilialRelationshipType',
slot_uri='personinfo:type',
alias='type',
owner='FamilialRelationship',
domain_of=['FamilialRelationship'],
usage_slot_name='type')
case_3_slot = SlotDefinition(name='NonExistentSlot',
range='NonExistentRange')
# Slot range in list of schema classes
actual_1_output = gen.get_sql_range(case_1_slot)
# Slot range in list of schema enums
actual_2_output = gen.get_sql_range(case_2_slot)
# Slot not present in schema
actual_3_output = gen.get_sql_range(case_3_slot)
self.assertIsInstance(actual_1_output, Text)
self.assertIsInstance(actual_2_output, Enum)
self.assertIsInstance(actual_3_output, Text)
def test_get_foreign_key(self):
"""Test case for the get_foreign_key() method."""
gen = SQLTableGenerator(SCHEMA)
sv = SchemaView(schema=SCHEMA)
fk_value = gen.get_foreign_key("Person", sv)
self.assertEqual(fk_value, "Person.id")
def test_sqlddl_basic(self):
#sv = SchemaView(SCHEMA)
#sqltr = RelationalModelTransformer(sv)
gen = SQLTableGenerator(SCHEMA)
#ddl = gen.generate_ddl(naming_policy=SqlNamingPolicy.underscore)
ddl = gen.generate_ddl()
with open(OUT_DDL, 'w') as stream:
stream.write(ddl)
with open(SQLDDLLOG, 'w') as log:
# with open(DDL_PATH, 'w') as stream:
# stream.write(ddl)
#print(ddl)
try:
os.remove(DB)
except OSError:
pass
con = sqlite3.connect(DB)
cur = con.cursor()
cur.executescript(ddl)
NAME = 'fred'
cur.execute("INSERT INTO Person (id, name, age_in_years) VALUES (?,?,?)", ('P1', NAME, 33))
cur.execute("INSERT INTO Person_alias (Person_id, alias) VALUES (?,?)", ('P1', 'wibble'))
cur.execute("INSERT INTO FamilialRelationship (Person_id, type, related_to) VALUES (?,?,?)", ('P1', 'P2', 'BROTHER_OF'))
cur.execute("select * from Person where name=:name", {"name": NAME})
rows = cur.fetchall()
log.write(f"{rows}\n")
assert len(rows) == 1
con.commit()
with self.assertRaises(Exception):
# PK violation
cur.execute("INSERT INTO Person (id, name, age_in_years) VALUES (?,?,?)", ('P1', 'other person', 22))
with self.assertRaises(Exception):
cur.execute("INSERT INTO Person_alias (Person_id, alias) VALUES (?,?)", ('P1', 'wibble'))
con.close()
if __name__ == '__main__':
unittest.main()
| 40.424242
| 132
| 0.534783
|
86176b214e7c78af38f34f4872cc6d6f9f6f0381
| 1,365
|
py
|
Python
|
webopencv/flsk.py
|
alvinwan/webopencv
|
599ffb89d0c0c9d4e4686ecd2d89d325a3e70e23
|
[
"BSD-2-Clause"
] | 4
|
2021-09-15T08:22:31.000Z
|
2021-09-16T02:26:33.000Z
|
webopencv/flsk.py
|
alvinwan/webopencv
|
599ffb89d0c0c9d4e4686ecd2d89d325a3e70e23
|
[
"BSD-2-Clause"
] | 1
|
2021-09-15T11:26:54.000Z
|
2021-09-15T11:26:54.000Z
|
webopencv/flsk.py
|
alvinwan/webopencv
|
599ffb89d0c0c9d4e4686ecd2d89d325a3e70e23
|
[
"BSD-2-Clause"
] | 1
|
2021-09-17T10:16:54.000Z
|
2021-09-17T10:16:54.000Z
|
"""Flask-powered web application backend
TODO(alvin): ICE connection state does not reach the completed stage
"""
from flask import Flask, render_template, request, jsonify
from .app import on_offer, on_shutdown
from .utils import ROOT
import atexit
import os
DEFAULT_TEMPLATE_FOLDER = os.path.join(ROOT, "templates")
DEFAULT_STATIC_FOLDER = os.path.join(ROOT, "static")
def index():
return render_template("index.html")
async def offer():
params = request.json
pc = await on_offer(params, sender="null")
return jsonify({
"sdp": pc.localDescription.sdp,
"type": pc.localDescription.type
})
class App(Flask):
def __init__(
self, *args,
template_folder=DEFAULT_TEMPLATE_FOLDER,
static_folder=DEFAULT_STATIC_FOLDER,
static_url_path="",
use_default_homepage=True,
**kwargs
):
super().__init__(
*args,
template_folder=template_folder,
static_folder=static_folder,
static_url_path=static_url_path,
**kwargs
)
self.add_url_rule("/offer", view_func=offer, methods=["POST"])
# atexit.register(on_shutdown) # TODO: doesn't actually work, because atexit doesn't await
if use_default_homepage:
self.add_url_rule("/", view_func=index)
| 26.25
| 99
| 0.647619
|
0a3b3313eefd0c72407ff7dcdc444d654140ee2b
| 4,069
|
py
|
Python
|
micawber_bs4_classes/test_utils.py
|
Gurbert/micawber
|
1e355cc3cf56cc57bf4d8b51cc8d5511e2eb4e66
|
[
"MIT"
] | 1
|
2020-07-18T06:46:28.000Z
|
2020-07-18T06:46:28.000Z
|
micawber_bs4_classes/test_utils.py
|
Gurbert/micawber
|
1e355cc3cf56cc57bf4d8b51cc8d5511e2eb4e66
|
[
"MIT"
] | null | null | null |
micawber_bs4_classes/test_utils.py
|
Gurbert/micawber
|
1e355cc3cf56cc57bf4d8b51cc8d5511e2eb4e66
|
[
"MIT"
] | null | null | null |
import unittest
try:
import simplejson as json
except ImportError:
import json
from micawber_bs4_classes import *
from micawber_bs4_classes.parsers import BeautifulSoup, bs_kwargs
from micawber_bs4_classes.providers import make_key
class TestProvider(Provider):
test_data = {
# link
'link?format=json&url=http%3A%2F%2Flink-test1': {'title': 'test1', 'type': 'link'},
'link?format=json&url=http%3A%2F%2Flink-test2': {'title': 'test2', 'type': 'link'},
# photo
'photo?format=json&url=http%3A%2F%2Fphoto-test1': {'title': 'ptest1', 'url': 'test1.jpg', 'type': 'photo'},
'photo?format=json&url=http%3A%2F%2Fphoto-test2': {'title': 'ptest2', 'url': 'test2.jpg', 'type': 'photo'},
# video
'video?format=json&url=http%3A%2F%2Fvideo-test1': {'title': 'vtest1', 'html': '<test1>video</test1>', 'type': 'video'},
'video?format=json&url=http%3A%2F%2Fvideo-test2': {'title': 'vtest2', 'html': '<test2>video</test2>', 'type': 'video'},
# rich
'rich?format=json&url=http%3A%2F%2Frich-test1': {'title': 'rtest1', 'html': '<test1>rich</test1>', 'type': 'rich'},
'rich?format=json&url=http%3A%2F%2Frich-test2': {'title': 'rtest2', 'html': '<test2>rich</test2>', 'type': 'rich'},
# with param
'link?format=json&url=http%3A%2F%2Flink-test1&width=100': {'title': 'test1', 'type': 'link', 'width': 99},
# no title
'photo?format=json&url=http%3A%2F%2Fphoto-notitle': {'url': 'notitle.jpg', 'type': 'photo'},
}
def fetch(self, url):
if url in self.test_data:
return json.dumps(self.test_data[url])
return False
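# Illustrative behaviour of TestProvider.fetch() above (the test_data keys are the
# only endpoint URLs it knows; key order in the JSON string may vary):
#   TestProvider('link').fetch('link?format=json&url=http%3A%2F%2Flink-test1')
#       -> '{"title": "test1", "type": "link"}'
#   TestProvider('link').fetch('http://unknown')  -> False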
test_pr = ProviderRegistry()
test_cache = Cache()
test_pr_cache = ProviderRegistry(test_cache)
for pr in (test_pr, test_pr_cache):
    pr.register(r'http://link\S*', TestProvider('link'))
    pr.register(r'http://photo\S*', TestProvider('photo'))
    pr.register(r'http://video\S*', TestProvider('video'))
    pr.register(r'http://rich\S*', TestProvider('rich'))
class BaseTestCase(unittest.TestCase):
def setUp(self):
test_cache._cache = {}
self.full_pairs = {
'http://link-test1': '<a href="http://link-test1" title="test1">test1</a>',
'http://photo-test2': '<a href="test2.jpg" title="ptest2"><img alt="ptest2" src="test2.jpg" /></a>',
'http://video-test1': '<test1>video</test1>',
'http://rich-test2': '<test2>rich</test2>',
'http://photo-notitle': '<a href="notitle.jpg" title="notitle.jpg"><img alt="notitle.jpg" src="notitle.jpg" /></a>',
}
self.inline_pairs = {
'http://link-test1': '<a href="http://link-test1" title="test1">test1</a>',
'http://photo-test2': '<a href="test2.jpg" title="ptest2">ptest2</a>',
'http://video-test1': '<a href="http://video-test1" title="vtest1">vtest1</a>',
'http://rich-test2': '<a href="http://rich-test2" title="rtest2">rtest2</a>',
'http://photo-notitle': '<a href="notitle.jpg" title="notitle.jpg">notitle.jpg</a>',
}
self.data_pairs = {
'http://link-test1': {'title': 'test1', 'type': 'link'},
'http://photo-test2': {'title': 'ptest2', 'url': 'test2.jpg', 'type': 'photo'},
'http://video-test1': {'title': 'vtest1', 'html': '<test1>video</test1>', 'type': 'video'},
'http://rich-test2': {'title': 'rtest2', 'html': '<test2>rich</test2>', 'type': 'rich'},
'http://photo-notitle': {'url': 'notitle.jpg', 'type': 'photo'},
}
def assertCached(self, url, data, **params):
key = make_key(url, params)
self.assertTrue(key in test_cache._cache)
self.assertEqual(test_cache._cache[key], data)
def assertHTMLEqual(self, first, second, msg=None):
first = BeautifulSoup(first, **bs_kwargs)
second = BeautifulSoup(second, **bs_kwargs)
self.assertEqual(first, second, msg)
| 44.228261
| 128
| 0.588105
|
c1e6a886ce06dee970c944e37aa2ab257884456d
| 6,583
|
py
|
Python
|
objective_functions/hole_reaching/planar_reaching_objective.py
|
ottofabian/rl_stochastic_search
|
0e96cdcb5d7a09e789c94f989192ae437d440861
|
[
"MIT"
] | null | null | null |
objective_functions/hole_reaching/planar_reaching_objective.py
|
ottofabian/rl_stochastic_search
|
0e96cdcb5d7a09e789c94f989192ae437d440861
|
[
"MIT"
] | 7
|
2020-07-03T14:36:46.000Z
|
2022-03-12T00:38:31.000Z
|
objective_functions/hole_reaching/planar_reaching_objective.py
|
ottofabian/rl_stochastic_search
|
0e96cdcb5d7a09e789c94f989192ae437d440861
|
[
"MIT"
] | null | null | null |
from objective_functions.hole_reaching.mp_lib import ExpDecayPhaseGenerator
from objective_functions.hole_reaching.mp_lib import DMPBasisGenerator
from objective_functions.hole_reaching.mp_lib import dmps
from experiments.robotics import planar_forward_kinematics as pfk
import numpy as np
import matplotlib.pyplot as plt
def ccw(A, B, C):
return (C[1]-A[1]) * (B[0]-A[0]) > (B[1]-A[1]) * (C[0]-A[0])
# Return true if line segments AB and CD intersect
def intersect(A, B, C, D):
return ccw(A,C,D) != ccw(B,C,D) and ccw(A,B,C) != ccw(A,B,D)
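# Illustrative checks (not from the original file):
#   intersect((0, 0), (1, 1), (0, 1), (1, 0))  # -> True, the two diagonals cross
#   intersect((0, 0), (1, 0), (0, 1), (1, 1))  # -> False, parallel segments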
class ReachingTask:
def __init__(self, num_links, via_points=()):
self.num_links = num_links
self.via_points = via_points
self.goal_point = np.array((num_links, 0))
self.pfk = pfk.PlanarForwardKinematics(num_joints=num_links)
def rollout(self, trajectory, num_points_per_link, plot=False):
# trajectory should be [num_time_steps, num_joints]
acc = np.sum(np.diff(trajectory, n=2, axis=0) ** 2)
total_number_of_points_collided = 0
self.end_effector_points = []
distance = 0
if plot:
fig, ax = plt.subplots()
plt.xlim(-self.num_links, self.num_links), plt.ylim(-self.num_links, self.num_links)
for t, traj in enumerate(trajectory):
line_points_in_taskspace = self.pfk.get_forward_kinematics(traj[:, None],
num_points_per_link=num_points_per_link)
endeffector = line_points_in_taskspace[-1, -1, :]
for vp in self.via_points:
if t == vp['t']:
distance += np.abs(np.linalg.norm(endeffector - np.array(vp["vp"]))) ** 2
self.end_effector_points.append(line_points_in_taskspace[-1, -1, :])
is_collided = self.check_collision(line_points_in_taskspace)
if plot:
ax.clear()
plt.xlim(-self.num_links, self.num_links), plt.ylim(-self.num_links, self.num_links)
ax.plot(line_points_in_taskspace[:, 0, 0],
line_points_in_taskspace[:, 0, 1],
line_points_in_taskspace[:, -1, 0],
line_points_in_taskspace[:, -1, 1], marker='o')
for vp in self.via_points:
ax.scatter(vp["vp"][0], vp["vp"][1], c="r", marker="x")
plt.pause(0.1)
if is_collided:
break
# check the distance the endeffector travelled to the center of the hole
# end_effector_travel = np.sum(
# np.sqrt(np.sum(np.diff(np.stack(end_effector_points), axis=0)[:, 4, :] ** 2, axis=1, keepdims=True))) ** 2
# end_effector_travel = np.sum(np.sqrt(np.sum(np.diff(np.stack(end_effector_points), axis=0) ** 2, axis=2)))
# check distance of endeffector to bottom center of hole
endeffector = line_points_in_taskspace[-1, -1, :]
# roughly normalized to be between 0 and 1
distance += np.abs(np.linalg.norm(endeffector - self.goal_point)) ** 2 # / (self.num_links + np.abs(self.hole_x))
# TODO: tune factors
# distance in [0, 1]
# |acc| in [0, 0.1]
out = 1 * distance \
+ 100 * np.abs(acc) \
+ is_collided * 100000
# + 0.1 * total_number_of_points_collided\
# + 0.01 * end_effector_travel ** 2
return np.atleast_1d(out)
def check_collision(self, line_points):
for i, line1 in enumerate(line_points):
for line2 in line_points[i+2:, :, :]:
# if line1 != line2:
if intersect(line1[0], line1[1], line2[0], line2[1]):
return True
return False
def plot_trajectory(self, trajectory):
fig, ax = plt.subplots()
plt.xlim(-self.num_links, self.num_links), plt.ylim(-1, self.num_links)
for t in trajectory:
fk = self.pfk.get_forward_kinematics(t, num_points_per_link=2)
# print(fk)
ax.plot(fk[:, 0, 0], fk[:, 0, 1], fk[:, 1, 0], fk[:, 1, 1], marker='o')
# Add the patch to the Axes
plt.pause(0.1)
ax.clear()
plt.xlim(-self.num_links, self.num_links), plt.ylim(-1, self.num_links)
class ReachingObjective:
def __init__(self, num_links=5, num_basis=5, via_points=None, dmp_weights=None):
self.num_links = num_links
self.d = num_links * num_basis
self.f_opt = 0
# create task
self.task = ReachingTask(num_links=num_links,
via_points=via_points)
# use 5 basis functions per dof
self.num_basis = num_basis
self.t = np.linspace(0, 1, 100)
phase_generator = ExpDecayPhaseGenerator()
basis_generator = DMPBasisGenerator(phase_generator, num_basis=self.num_basis)
self.dmp = dmps.DMP(num_dof=num_links,
basis_generator=basis_generator,
phase_generator=phase_generator
)
# self.dmp.dmp_beta_x = 0
self.dmp.dmp_start_pos = np.zeros((1, num_links))
self.dmp.dmp_start_pos[0, 0] = np.pi / 2
self.dmp.dmp_goal_pos = np.zeros((1, num_links))
self.dmp.dmp_weights = dmp_weights if dmp_weights is not None else np.random.normal(0.0, 10.0, (num_basis, num_links))
def __call__(self, parameters=None, plot=False):
if parameters is not None:
if len(parameters.shape) > 1:
assert parameters.shape[0] == 1
parameters = parameters.flatten()
weight_matrix = np.reshape(parameters, [self.num_basis, self.num_links])
self.dmp.dmp_weights = weight_matrix
ref_pos_learned, ref_vel_learned = self.dmp.reference_trajectory(self.t)
# FIXME: How to ensure goal velocity is reached?
return self.task.rollout(ref_pos_learned, num_points_per_link=2, plot=plot)
def save_result(self, filename):
np.save(filename + "_dmp_weights", self.dmp.dmp_weights)
def load_result(self, filename):
self.dmp.dmp_weights = np.load(filename + "_dmp_weights.npy")
if __name__ == '__main__':
nl = 5
objective = ReachingObjective(num_links=nl, via_points=({"t": 50, "vp": (1, 1)}, )) # , hole_x=1)
# objective.load_result("/tmp/sac")
x_start = 1 * np.random.randn(10, nl*5)
for i in range(1):
rew = objective(plot=True) # , parameters=x_start[i])
print(rew)
| 37.833333
| 126
| 0.594562
|
9c79a478e2537573de057c11301890b66c8d7f4e
| 2,431
|
py
|
Python
|
Toolkits/Discovery/meta/searx/tests/unit/engines/test_mixcloud.py
|
roscopecoltran/SniperKit-Core
|
4600dffe1cddff438b948b6c22f586d052971e04
|
[
"MIT"
] | 4
|
2018-09-07T15:35:24.000Z
|
2019-03-27T09:48:12.000Z
|
Toolkits/Discovery/meta/searx/tests/unit/engines/test_mixcloud.py
|
roscopecoltran/SniperKit-Core
|
4600dffe1cddff438b948b6c22f586d052971e04
|
[
"MIT"
] | 371
|
2020-03-04T21:51:56.000Z
|
2022-03-31T20:59:11.000Z
|
Toolkits/Discovery/meta/searx/tests/unit/engines/test_mixcloud.py
|
roscopecoltran/SniperKit-Core
|
4600dffe1cddff438b948b6c22f586d052971e04
|
[
"MIT"
] | 3
|
2019-06-18T19:57:17.000Z
|
2020-11-06T03:55:08.000Z
|
from collections import defaultdict
import mock
from searx.engines import mixcloud
from searx.testing import SearxTestCase
class TestMixcloudEngine(SearxTestCase):
def test_request(self):
query = 'test_query'
dicto = defaultdict(dict)
dicto['pageno'] = 0
params = mixcloud.request(query, dicto)
self.assertTrue('url' in params)
self.assertTrue(query in params['url'])
self.assertTrue('mixcloud.com' in params['url'])
def test_response(self):
self.assertRaises(AttributeError, mixcloud.response, None)
self.assertRaises(AttributeError, mixcloud.response, [])
self.assertRaises(AttributeError, mixcloud.response, '')
self.assertRaises(AttributeError, mixcloud.response, '[]')
response = mock.Mock(text='{}')
self.assertEqual(mixcloud.response(response), [])
response = mock.Mock(text='{"data": []}')
self.assertEqual(mixcloud.response(response), [])
json = """
{"data":[
{
"user": {
"url": "http://www.mixcloud.com/user/",
"username": "user",
"name": "User",
"key": "/user/"
},
"key": "/user/this-is-the-url/",
"created_time": "2014-11-14T13:30:02Z",
"audio_length": 3728,
"slug": "this-is-the-url",
"name": "Title of track",
"url": "http://www.mixcloud.com/user/this-is-the-url/",
"updated_time": "2014-11-14T13:14:10Z"
}
]}
"""
response = mock.Mock(text=json)
results = mixcloud.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['title'], 'Title of track')
self.assertEqual(results[0]['url'], 'http://www.mixcloud.com/user/this-is-the-url/')
self.assertEqual(results[0]['content'], 'User')
self.assertTrue('http://www.mixcloud.com/user/this-is-the-url/' in results[0]['embedded'])
json = r"""
{"toto":[
{"id":200,"name":"Artist Name",
"link":"http:\/\/www.mixcloud.com\/artist\/1217","type":"artist"}
]}
"""
response = mock.Mock(text=json)
results = mixcloud.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 0)
| 35.75
| 98
| 0.566434
|
35c90e90b09fb4e4cbd4a15064bcb503448f0f5e
| 593
|
py
|
Python
|
app/models/users.py
|
ZanMax/wrs
|
b62bcb50f305a83b7fe08f83f5e2d9f1c2cf1ec5
|
[
"MIT"
] | null | null | null |
app/models/users.py
|
ZanMax/wrs
|
b62bcb50f305a83b7fe08f83f5e2d9f1c2cf1ec5
|
[
"MIT"
] | null | null | null |
app/models/users.py
|
ZanMax/wrs
|
b62bcb50f305a83b7fe08f83f5e2d9f1c2cf1ec5
|
[
"MIT"
] | null | null | null |
from sqlalchemy import Column, Integer, String, Boolean
from app.db.base_class import Base
class Users(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True, index=True, autoincrement=True)
name = Column(String(255))
email = Column(String(32), unique=True, index=True, nullable=False)
hashed_password = Column(String(255), nullable=False)
ip_info = Column(String(1024))
id_group = Column(Integer)
is_group_admin = Column(Boolean, default=False)
is_super_admin = Column(Boolean, default=False)
is_active = Column(Boolean, default=True)
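# Illustrative usage (a sketch; session handling and password hashing live
# elsewhere in the application):
#   user = Users(name="Alice", email="alice@example.com",
#                hashed_password="<bcrypt-or-argon2 hash>", id_group=1)
#   session.add(user); session.commit()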
| 34.882353
| 74
| 0.726813
|
874be2b28735a418bd443c9be127f3fed13a5fd3
| 461
|
py
|
Python
|
Ex0028.py
|
r-1c4rd0/pythonExercicios
|
c78a12239e813f3ade95b05ecd1c83ccfa82bafc
|
[
"MIT"
] | null | null | null |
Ex0028.py
|
r-1c4rd0/pythonExercicios
|
c78a12239e813f3ade95b05ecd1c83ccfa82bafc
|
[
"MIT"
] | null | null | null |
Ex0028.py
|
r-1c4rd0/pythonExercicios
|
c78a12239e813f3ade95b05ecd1c83ccfa82bafc
|
[
"MIT"
] | null | null | null |
from random import randint
computador = randint(0, 5)
jogador = 0
print('-=-' * 8)
print('I am going to think of a number, try to guess it...')
print('-=-' * 8)
while jogador != computador:
    jogador = int(input('Which number did I think of? \n:'))
    if jogador == computador:
        print('CONGRATULATIONS! You managed to beat me')
    else:
        print('I thought of the number {} and not {}!'.format(computador, jogador))
print('End of game \n =-=-=- GAME OVER -=-=-= ')
| 32.928571
| 80
| 0.62256
|
3a0e4c29dd2dd4246a187871ef21b5707346f31c
| 3,403
|
py
|
Python
|
datmo/cli/main.py
|
dmh43/datmo
|
e97aad4e2417a72d8f136f7afd9bfac1bf24d9f9
|
[
"Apache-2.0"
] | null | null | null |
datmo/cli/main.py
|
dmh43/datmo
|
e97aad4e2417a72d8f136f7afd9bfac1bf24d9f9
|
[
"Apache-2.0"
] | null | null | null |
datmo/cli/main.py
|
dmh43/datmo
|
e97aad4e2417a72d8f136f7afd9bfac1bf24d9f9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import os
import sys
from datmo.cli.command.base import BaseCommand
from datmo.cli.driver.helper import Helper
from datmo.core.util.exceptions import CLIArgumentError
from datmo.core.util.i18n import get as __
from datmo.core.util.logger import DatmoLogger
from datmo.config import Config
#from datmo.core.util.misc_functions import get_logger, create_logger
def main():
cli_helper = Helper()
    # Config must run first so it can initialize/find the datmo home
    # directory (.datmo); logging needs it to know where to write the
    # user's log files.
config = Config()
config.set_home(os.getcwd())
log = DatmoLogger.get_logger(__name__)
log.info("handling command %s", config.home)
# parse_args defaults to [1:] for args, but you need to
# exclude the rest of the args too, or validation will fail
# args = parser.parse_args(sys.argv[1:2])
if len(sys.argv) > 1 and \
sys.argv[1] in cli_helper.get_command_choices():
command_name = sys.argv[1]
# commands in project.py
if command_name == "init":
command_name = "project"
elif command_name == "version" or \
command_name == "--version" or \
command_name == "-v":
command_name = "project"
sys.argv[1] = "version"
elif command_name == "status":
command_name = "project"
sys.argv[1] = "status"
elif command_name == "cleanup":
command_name = "project"
sys.argv[1] = "cleanup"
# commands in workspace.py
elif command_name in ["notebook", "jupyterlab", "terminal", "rstudio"]:
sys.argv[1] = command_name
command_name = "workspace"
# commands in run.py
elif command_name == "rerun":
command_name = "run"
sys.argv[1] = "rerun"
elif command_name == "run":
if len(sys.argv) == 2:
command_name = "run"
sys.argv.append("--help")
else:
command_name = "run"
elif command_name == "stop": # stop command in run.py
if len(sys.argv) == 2:
command_name = "run"
sys.argv.append("--help")
else:
command_name = "run"
elif command_name == "ls": # ls command in run.py
command_name = "run"
elif command_name == "delete": # delete command in run.py
command_name = "run"
command_class = cli_helper.get_command_class(command_name)
elif len(sys.argv) == 1:
command_name = "datmo_command"
command_class = cli_helper.get_command_class(command_name)
else:
command_class = BaseCommand
# instantiate the command class
try:
command_instance = command_class(cli_helper)
except TypeError as ex:
cli_helper.echo(__("error", "cli.general", "%s %s" %(type(ex), ex)))
return 1
# parse the command line arguments
try:
command_instance.parse(sys.argv[1:])
except CLIArgumentError as ex:
cli_helper.echo(__("error", "cli.general", "%s %s" %(type(ex), ex)))
return 1
try:
command_instance.execute()
return 0
except Exception as ex:
cli_helper.echo(__("error", "cli.general", "%s %s" %(type(ex), ex)))
return 1
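# Illustrative dispatch examples based on the mapping in main() above (not exhaustive):
#   "datmo init"      -> command class "project"
#   "datmo notebook"  -> command class "workspace"
#   "datmo rerun ..." -> command class "run", with sys.argv[1] rewritten to "rerun"
#   "datmo ls"        -> command class "run"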
| 34.373737
| 79
| 0.594475
|
39fb027d513c79402bacc5735de3dd80dbf4f2e5
| 22,419
|
py
|
Python
|
contrib/devtools/copyright_header.py
|
jsb4ch/dash
|
1e1d8c980bf4cd9cdba7c4c769686c9ebb3bb7d4
|
[
"MIT"
] | 1
|
2020-01-04T15:37:15.000Z
|
2020-01-04T15:37:15.000Z
|
contrib/devtools/copyright_header.py
|
jsb4ch/dash
|
1e1d8c980bf4cd9cdba7c4c769686c9ebb3bb7d4
|
[
"MIT"
] | null | null | null |
contrib/devtools/copyright_header.py
|
jsb4ch/dash
|
1e1d8c980bf4cd9cdba7c4c769686c9ebb3bb7d4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Copyright (c) 2019 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import re
import fnmatch
import sys
import subprocess
import datetime
import os
################################################################################
# file filtering
################################################################################
EXCLUDE = [
# libsecp256k1:
'src/secp256k1/include/secp256k1.h',
'src/secp256k1/include/secp256k1_ecdh.h',
'src/secp256k1/include/secp256k1_recovery.h',
'src/secp256k1/include/secp256k1_schnorr.h',
'src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c',
'src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h',
'src/secp256k1/src/java/org_bitcoin_Secp256k1Context.c',
'src/secp256k1/src/java/org_bitcoin_Secp256k1Context.h',
# auto generated:
'src/univalue/lib/univalue_escapes.h',
'src/qt/bitcoinstrings.cpp',
'src/chainparamsseeds.h',
# other external copyrights:
'src/tinyformat.h',
'src/leveldb/util/env_win.cc',
'src/crypto/ctaes/bench.c',
'qa/rpc-tests/test_framework/bignum.py',
# python init:
'*__init__.py',
]
EXCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in EXCLUDE]))
INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.py']
INCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in INCLUDE]))
def applies_to_file(filename):
return ((EXCLUDE_COMPILED.match(filename) is None) and
(INCLUDE_COMPILED.match(filename) is not None))
################################################################################
# obtain list of files in repo according to INCLUDE and EXCLUDE
################################################################################
GIT_LS_CMD = 'git ls-files'
def call_git_ls():
out = subprocess.check_output(GIT_LS_CMD.split(' '))
return [f for f in out.decode("utf-8").split('\n') if f != '']
def get_filenames_to_examine():
filenames = call_git_ls()
return sorted([filename for filename in filenames if
applies_to_file(filename)])
################################################################################
# define and compile regexes for the patterns we are looking for
################################################################################
COPYRIGHT_WITH_C = 'Copyright \(c\)'
COPYRIGHT_WITHOUT_C = 'Copyright'
ANY_COPYRIGHT_STYLE = '(%s|%s)' % (COPYRIGHT_WITH_C, COPYRIGHT_WITHOUT_C)
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
YEAR_LIST = '(%s)(, %s)+' % (YEAR, YEAR)
ANY_YEAR_STYLE = '(%s|%s)' % (YEAR_RANGE, YEAR_LIST)
ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE = ("%s %s" % (ANY_COPYRIGHT_STYLE,
ANY_YEAR_STYLE))
ANY_COPYRIGHT_COMPILED = re.compile(ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE)
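# Illustrative strings matched by ANY_COPYRIGHT_COMPILED (examples only, not taken
# from the repository):
#   "Copyright (c) 2009-2016 The Example developers"  -> matched
#   "Copyright 2011, 2012 Example Author"             -> matched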
def compile_copyright_regex(copyright_style, year_style, name):
return re.compile('%s %s %s' % (copyright_style, year_style, name))
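# Editor's sketch (not part of the upstream script): the patterns above match
# header lines such as the one below; _copyright_regex_example is a hypothetical
# name and the function is never called by the tool.
def _copyright_regex_example():
    line = 'Copyright (c) 2014-2016 The Bitcoin Core developers\n'
    assert ANY_COPYRIGHT_COMPILED.search(line) is not None
    assert compile_copyright_regex(
        COPYRIGHT_WITH_C, YEAR_RANGE, 'The Bitcoin Core developers\n'
    ).search(line) is not None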
EXPECTED_HOLDER_NAMES = [
"Satoshi Nakamoto\n",
"The Bitcoin Core developers\n",
"The Bitcoin Core developers \n",
"Bitcoin Core Developers\n",
"the Bitcoin Core developers\n",
"The Bitcoin developers\n",
"The LevelDB Authors\. All rights reserved\.\n",
"BitPay Inc\.\n",
"BitPay, Inc\.\n",
"University of Illinois at Urbana-Champaign\.\n",
"MarcoFalke\n",
"Pieter Wuille\n",
"Pieter Wuille +\*\n",
"Pieter Wuille, Gregory Maxwell +\*\n",
"Pieter Wuille, Andrew Poelstra +\*\n",
"Andrew Poelstra +\*\n",
"Wladimir J. van der Laan\n",
"Jeff Garzik\n",
"Diederik Huys, Pieter Wuille +\*\n",
"Thomas Daede, Cory Fields +\*\n",
"Jan-Klaas Kollhof\n",
"Sam Rushing\n",
"ArtForz -- public domain half-a-node\n",
"The Kash Core developers\n",
]
DOMINANT_STYLE_COMPILED = {}
YEAR_LIST_STYLE_COMPILED = {}
WITHOUT_C_STYLE_COMPILED = {}
for holder_name in EXPECTED_HOLDER_NAMES:
DOMINANT_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_RANGE, holder_name))
YEAR_LIST_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_LIST, holder_name))
WITHOUT_C_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITHOUT_C, ANY_YEAR_STYLE,
holder_name))
################################################################################
# search file contents for copyright message of particular category
################################################################################
def get_count_of_copyrights_of_any_style_any_holder(contents):
return len(ANY_COPYRIGHT_COMPILED.findall(contents))
def file_has_dominant_style_copyright_for_holder(contents, holder_name):
match = DOMINANT_STYLE_COMPILED[holder_name].search(contents)
return match is not None
def file_has_year_list_style_copyright_for_holder(contents, holder_name):
match = YEAR_LIST_STYLE_COMPILED[holder_name].search(contents)
return match is not None
def file_has_without_c_style_copyright_for_holder(contents, holder_name):
match = WITHOUT_C_STYLE_COMPILED[holder_name].search(contents)
return match is not None
################################################################################
# get file info
################################################################################
def read_file(filename):
    with open(os.path.abspath(filename), 'r') as f:
        return f.read()
def gather_file_info(filename):
info = {}
info['filename'] = filename
c = read_file(filename)
info['contents'] = c
info['all_copyrights'] = get_count_of_copyrights_of_any_style_any_holder(c)
info['classified_copyrights'] = 0
info['dominant_style'] = {}
info['year_list_style'] = {}
info['without_c_style'] = {}
for holder_name in EXPECTED_HOLDER_NAMES:
has_dominant_style = (
file_has_dominant_style_copyright_for_holder(c, holder_name))
has_year_list_style = (
file_has_year_list_style_copyright_for_holder(c, holder_name))
has_without_c_style = (
file_has_without_c_style_copyright_for_holder(c, holder_name))
info['dominant_style'][holder_name] = has_dominant_style
info['year_list_style'][holder_name] = has_year_list_style
info['without_c_style'][holder_name] = has_without_c_style
if has_dominant_style or has_year_list_style or has_without_c_style:
info['classified_copyrights'] = info['classified_copyrights'] + 1
return info
################################################################################
# report execution
################################################################################
SEPARATOR = '-' * 79
def print_filenames(filenames, verbose):
if not verbose:
return
for filename in filenames:
print("\t%s" % filename)
def print_report(file_infos, verbose):
print(SEPARATOR)
examined = [i['filename'] for i in file_infos]
print("%d files examined according to INCLUDE and EXCLUDE fnmatch rules" %
len(examined))
print_filenames(examined, verbose)
print(SEPARATOR)
print('')
zero_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] == 0]
print("%4d with zero copyrights" % len(zero_copyrights))
print_filenames(zero_copyrights, verbose)
one_copyright = [i['filename'] for i in file_infos if
i['all_copyrights'] == 1]
print("%4d with one copyright" % len(one_copyright))
print_filenames(one_copyright, verbose)
two_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] == 2]
print("%4d with two copyrights" % len(two_copyrights))
print_filenames(two_copyrights, verbose)
three_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] == 3]
print("%4d with three copyrights" % len(three_copyrights))
print_filenames(three_copyrights, verbose)
four_or_more_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] >= 4]
print("%4d with four or more copyrights" % len(four_or_more_copyrights))
print_filenames(four_or_more_copyrights, verbose)
print('')
print(SEPARATOR)
print('Copyrights with dominant style:\ne.g. "Copyright (c)" and '
'"<year>" or "<startYear>-<endYear>":\n')
for holder_name in EXPECTED_HOLDER_NAMES:
dominant_style = [i['filename'] for i in file_infos if
i['dominant_style'][holder_name]]
if len(dominant_style) > 0:
print("%4d with '%s'" % (len(dominant_style),
holder_name.replace('\n', '\\n')))
print_filenames(dominant_style, verbose)
print('')
print(SEPARATOR)
print('Copyrights with year list style:\ne.g. "Copyright (c)" and '
'"<year1>, <year2>, ...":\n')
for holder_name in EXPECTED_HOLDER_NAMES:
year_list_style = [i['filename'] for i in file_infos if
i['year_list_style'][holder_name]]
if len(year_list_style) > 0:
print("%4d with '%s'" % (len(year_list_style),
holder_name.replace('\n', '\\n')))
print_filenames(year_list_style, verbose)
print('')
print(SEPARATOR)
print('Copyrights with no "(c)" style:\ne.g. "Copyright" and "<year>" or '
'"<startYear>-<endYear>":\n')
for holder_name in EXPECTED_HOLDER_NAMES:
without_c_style = [i['filename'] for i in file_infos if
i['without_c_style'][holder_name]]
if len(without_c_style) > 0:
print("%4d with '%s'" % (len(without_c_style),
holder_name.replace('\n', '\\n')))
print_filenames(without_c_style, verbose)
print('')
print(SEPARATOR)
unclassified_copyrights = [i['filename'] for i in file_infos if
i['classified_copyrights'] < i['all_copyrights']]
print("%d with unexpected copyright holder names" %
len(unclassified_copyrights))
print_filenames(unclassified_copyrights, verbose)
print(SEPARATOR)
def exec_report(base_directory, verbose):
original_cwd = os.getcwd()
os.chdir(base_directory)
filenames = get_filenames_to_examine()
file_infos = [gather_file_info(f) for f in filenames]
print_report(file_infos, verbose)
os.chdir(original_cwd)
################################################################################
# report cmd
################################################################################
REPORT_USAGE = """
Produces a report of all copyright header notices found inside the source files
of a repository.
Usage:
$ ./copyright_header.py report <base_directory> [verbose]
Arguments:
<base_directory> - The base directory of a Kash Core source code repository.
[verbose] - Includes a list of every file of each subcategory in the report.
"""
def report_cmd(argv):
if len(argv) == 2:
sys.exit(REPORT_USAGE)
base_directory = argv[2]
if not os.path.exists(base_directory):
sys.exit("*** bad <base_directory>: %s" % base_directory)
if len(argv) == 3:
verbose = False
elif argv[3] == 'verbose':
verbose = True
else:
sys.exit("*** unknown argument: %s" % argv[2])
exec_report(base_directory, verbose)
################################################################################
# query git for year of last change
################################################################################
GIT_LOG_CMD = "git log --pretty=format:%%ai %s"
def call_git_log(filename):
out = subprocess.check_output((GIT_LOG_CMD % filename).split(' '))
return out.decode("utf-8").split('\n')
def get_git_change_years(filename):
git_log_lines = call_git_log(filename)
if len(git_log_lines) == 0:
return [datetime.date.today().year]
# timestamp is in ISO 8601 format. e.g. "2016-09-05 14:25:32 -0600"
return [line.split(' ')[0].split('-')[0] for line in git_log_lines]
def get_most_recent_git_change_year(filename):
return max(get_git_change_years(filename))
################################################################################
# read and write to file
################################################################################
def read_file_lines(filename):
f = open(os.path.abspath(filename), 'r')
file_lines = f.readlines()
f.close()
return file_lines
def write_file_lines(filename, file_lines):
f = open(os.path.abspath(filename), 'w')
f.write(''.join(file_lines))
f.close()
################################################################################
# update header years execution
################################################################################
COPYRIGHT = r'Copyright \(c\)'
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
HOLDER = 'The Kash Core developers'
UPDATEABLE_LINE_COMPILED = re.compile(' '.join([COPYRIGHT, YEAR_RANGE, HOLDER]))
def get_updatable_copyright_line(file_lines):
index = 0
for line in file_lines:
if UPDATEABLE_LINE_COMPILED.search(line) is not None:
return index, line
index = index + 1
return None, None
def parse_year_range(year_range):
year_split = year_range.split('-')
start_year = year_split[0]
if len(year_split) == 1:
return start_year, start_year
return start_year, year_split[1]
def year_range_to_str(start_year, end_year):
if start_year == end_year:
return start_year
return "%s-%s" % (start_year, end_year)
def create_updated_copyright_line(line, last_git_change_year):
copyright_splitter = 'Copyright (c) '
copyright_split = line.split(copyright_splitter)
# Preserve characters on line that are ahead of the start of the copyright
# notice - they are part of the comment block and vary from file-to-file.
before_copyright = copyright_split[0]
after_copyright = copyright_split[1]
space_split = after_copyright.split(' ')
year_range = space_split[0]
start_year, end_year = parse_year_range(year_range)
if end_year == last_git_change_year:
return line
return (before_copyright + copyright_splitter +
year_range_to_str(start_year, last_git_change_year) + ' ' +
' '.join(space_split[1:]))
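# Editor's sketch (not part of the upstream script): a tiny illustration of how
# the year helpers above compose; _year_helpers_example is a hypothetical name
# and the function is never called by the tool.
def _year_helpers_example():
    assert parse_year_range('2014-2016') == ('2014', '2016')
    assert parse_year_range('2016') == ('2016', '2016')
    assert year_range_to_str('2014', '2019') == '2014-2019'
    assert year_range_to_str('2016', '2016') == '2016'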
def update_updatable_copyright(filename):
file_lines = read_file_lines(filename)
index, line = get_updatable_copyright_line(file_lines)
if not line:
print_file_action_message(filename, "No updatable copyright.")
return
last_git_change_year = get_most_recent_git_change_year(filename)
new_line = create_updated_copyright_line(line, last_git_change_year)
if line == new_line:
print_file_action_message(filename, "Copyright up-to-date.")
return
file_lines[index] = new_line
write_file_lines(filename, file_lines)
print_file_action_message(filename,
"Copyright updated! -> %s" % last_git_change_year)
def exec_update_header_year(base_directory):
original_cwd = os.getcwd()
os.chdir(base_directory)
for filename in get_filenames_to_examine():
update_updatable_copyright(filename)
os.chdir(original_cwd)
################################################################################
# update cmd
################################################################################
UPDATE_USAGE = """
Updates all the copyright headers of "The Dash Core developers" which were
changed in a year more recent than is listed. For example:
// Copyright (c) <firstYear>-<lastYear> The Dash Core developers
will be updated to:
// Copyright (c) <firstYear>-<lastModifiedYear> The Dash Core developers
where <lastModifiedYear> is obtained from the 'git log' history.
This subcommand also handles copyright headers that have only a single year. In those cases:
// Copyright (c) <year> The Dash Core developers
will be updated to:
// Copyright (c) <year>-<lastModifiedYear> The Dash Core developers
where the update is appropriate.
Usage:
$ ./copyright_header.py update <base_directory>
Arguments:
<base_directory> - The base directory of a Kash Core source code repository.
"""
def print_file_action_message(filename, action):
print("%-52s %s" % (filename, action))
def update_cmd(argv):
if len(argv) != 3:
sys.exit(UPDATE_USAGE)
base_directory = argv[2]
if not os.path.exists(base_directory):
sys.exit("*** bad base_directory: %s" % base_directory)
exec_update_header_year(base_directory)
################################################################################
# inserted copyright header format
################################################################################
def get_header_lines(header, start_year, end_year):
lines = header.split('\n')[1:-1]
lines[0] = lines[0] % year_range_to_str(start_year, end_year)
return [line + '\n' for line in lines]
CPP_HEADER = '''
// Copyright (c) %s The Dash Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_cpp_header_lines_to_insert(start_year, end_year):
return reversed(get_header_lines(CPP_HEADER, start_year, end_year))
PYTHON_HEADER = '''
# Copyright (c) %s The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_python_header_lines_to_insert(start_year, end_year):
return reversed(get_header_lines(PYTHON_HEADER, start_year, end_year))
################################################################################
# query git for year of last change
################################################################################
def get_git_change_year_range(filename):
years = get_git_change_years(filename)
return min(years), max(years)
################################################################################
# check for existing core copyright
################################################################################
def file_already_has_core_copyright(file_lines):
index, _ = get_updatable_copyright_line(file_lines)
    return index is not None
################################################################################
# insert header execution
################################################################################
def file_has_hashbang(file_lines):
if len(file_lines) < 1:
return False
if len(file_lines[0]) <= 2:
return False
return file_lines[0][:2] == '#!'
def insert_python_header(filename, file_lines, start_year, end_year):
if file_has_hashbang(file_lines):
insert_idx = 1
else:
insert_idx = 0
header_lines = get_python_header_lines_to_insert(start_year, end_year)
for line in header_lines:
file_lines.insert(insert_idx, line)
write_file_lines(filename, file_lines)
def insert_cpp_header(filename, file_lines, start_year, end_year):
header_lines = get_cpp_header_lines_to_insert(start_year, end_year)
for line in header_lines:
file_lines.insert(0, line)
write_file_lines(filename, file_lines)
def exec_insert_header(filename, style):
file_lines = read_file_lines(filename)
if file_already_has_core_copyright(file_lines):
sys.exit('*** %s already has a copyright by The Dash Core developers'
% (filename))
start_year, end_year = get_git_change_year_range(filename)
if style == 'python':
insert_python_header(filename, file_lines, start_year, end_year)
else:
insert_cpp_header(filename, file_lines, start_year, end_year)
################################################################################
# insert cmd
################################################################################
INSERT_USAGE = """
Inserts a copyright header for "The Dash Core developers" at the top of the
file in either Python or C++ style as determined by the file extension. If the
file is a Python file and it has a '#!' starting the first line, the header is
inserted in the line below it.
The copyright dates will be set to be:
"<year_introduced>-<current_year>"
where <year_introduced> is according to the 'git log' history. If
<year_introduced> is equal to <current_year>, the date will be set to be:
"<current_year>"
If the file already has a copyright for "The Dash Core developers", the
script will exit.
Usage:
$ ./copyright_header.py insert <file>
Arguments:
<file> - A source file in the Kash Core repository.
"""
def insert_cmd(argv):
if len(argv) != 3:
sys.exit(INSERT_USAGE)
filename = argv[2]
if not os.path.isfile(filename):
sys.exit("*** bad filename: %s" % filename)
_, extension = os.path.splitext(filename)
if extension not in ['.h', '.cpp', '.cc', '.c', '.py']:
sys.exit("*** cannot insert for file extension %s" % extension)
if extension == '.py':
style = 'python'
else:
style = 'cpp'
exec_insert_header(filename, style)
################################################################################
# UI
################################################################################
USAGE = """
copyright_header.py - utilities for managing copyright headers of 'The Dash
Core developers' in repository source files.
Usage:
$ ./copyright_header <subcommand>
Subcommands:
report
update
insert
To see subcommand usage, run them without arguments.
"""
SUBCOMMANDS = ['report', 'update', 'insert']
if __name__ == "__main__":
if len(sys.argv) == 1:
sys.exit(USAGE)
subcommand = sys.argv[1]
if subcommand not in SUBCOMMANDS:
sys.exit(USAGE)
if subcommand == 'report':
report_cmd(sys.argv)
elif subcommand == 'update':
update_cmd(sys.argv)
elif subcommand == 'insert':
insert_cmd(sys.argv)
| 36.572594
| 92
| 0.600116
|
a36f6d2f766e4bb2fff54a1a843496a874256460
| 6,357
|
py
|
Python
|
plugins/trochoidal.py
|
linuxnico/bCNC
|
b7fc6380cff6d8baea6886a46f4f60615e063b2b
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
plugins/trochoidal.py
|
linuxnico/bCNC
|
b7fc6380cff6d8baea6886a46f4f60615e063b2b
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
plugins/trochoidal.py
|
linuxnico/bCNC
|
b7fc6380cff6d8baea6886a46f4f60615e063b2b
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
# -*- coding: ascii -*-
# Author: @harvie Tomas Mudrunka
# Date: 7 july 2018
__author__ = "@harvie Tomas Mudrunka"
#__email__ = ""
__name__ = _("Trochoidal")
__version__ = "0.0.2"
import math
import os.path
import re
from CNC import CNC,Block
from ToolsPage import Plugin
from math import pi, sqrt, sin, cos, asin, acos, atan2, hypot, degrees, radians, copysign, fmod
class Tool(Plugin):
__doc__ = _("""Trochoidal g-code postprocessor""") #<<< This comment will be show as tooltip for the ribbon button
def __init__(self, master):
Plugin.__init__(self, master,"Trochoidal")
        #Trochoidal: is the name of the plugin shown in the tool ribbon button
self.icon = "helical" #<<< This is the name of png file used as icon for the ribbon button. It will be search in the "icons" subfolder
self.group = "CAM" #<<< This is the name of group that plugin belongs
#Here we are creating the widgets presented to the user inside the plugin
#Name, Type , Default value, Description
self.variables = [ #<<< Define a list of components for the GUI
("name" , "db" , "", _("Name")), #used to store plugin settings in the internal database
("cw" , "bool" , True, _("Clockwise")),
("circ" , "bool" , False, _("Circular")),
("evenspacing" , "bool" , True, _("Even spacing across segment")),
("entry" , "bool" , False, _("Trochoid entry (prepare for helicut)")),
("rdoc" , "mm" , "0.2", _("Radial depth of cut (<= cutter D * 0.4)")),
("dia" , "mm" , "3", _("Trochoid diameter (<= cutter D)")),
("feed" , "mm" , "2000", _("Feedrate"))
]
self.buttons.append("exe") #<<< This is the button added at bottom to call the execute method below
# ----------------------------------------------------------------------
# This method is executed when user presses the plugin execute button
# ----------------------------------------------------------------------
def execute(self, app):
feed = self["feed"]
rdoc = self["rdoc"]
radius = self["dia"]/2
cw = self["cw"]
circ = self["circ"]
evenspacing = self["evenspacing"]
if cw: cwtext = 'cw'
else: cwtext = 'ccw'
if cw: arcg = 'g2'
else: arcg = 'g3'
#print("go!")
blocks = []
for bid in app.editor.getSelectedBlocks():
#print(blocks[bid])
path = app.gcode.toPath(bid)[0]
#print(path)
block = Block("trochoid "+cwtext+" "+str(radius*2)+"+"+str(rdoc))
block.append("F"+str(feed))
entry = self["entry"]
A=path[0].A
block.append("g0 x"+str(A[0])+" y"+str(A[1]))
block.append("G1 Z0")
for segment in path:
#print(segment.A)
#block.append("g0 x0 y0")
#block.append("g1 x10 y10")
#block.append("g1 x20 y10")
#block.append("g0 x0 y0")
if entry:
eblock = Block("trochoid-in")
eblock.append("G0 Z0")
eblock.append("G0 x"+str(segment.A[0])+" y"+str(segment.A[1]-radius))
eblock.append("G2 x"+str(segment.A[0])+" y"+str(segment.A[1]-radius)+" i"+str(0)+" j"+str(radius))
blocks.append(eblock)
entry = False
#Continuity BEGINING
block.append("g1 x"+str(segment.A[0])+" y"+str(segment.A[1]))
#block.append(arcg+" x"+str(segment.A[0])+" y"+str(segment.A[1])+" r"+str(radius/2))
phi = atan2(segment.B[1]-segment.A[1], segment.B[0]-segment.A[0])
#TODO: handle arc segments
#if segment.type == Segment.LINE:
#if segment.type in (Segment.CW, Segment.CCW):
#Compensate for uneven spacing
srdoc = rdoc
if evenspacing:
subsegs = segment.length()//rdoc
remainder = segment.length()%rdoc
if remainder != 0:
srdoc = segment.length()/(subsegs+1)
                #Loop over subsegments of segment
i=0
while i<(segment.length()+srdoc):
pos=min(segment.length(), i)
B = segment.distPoint(pos)
block.extend(self.trochoid(A,B,radius,cw,circ))
A = B
i+=srdoc
#Continuity END
#block.append("g1 x"+str(segment.B[0])+" y"+str(segment.B[1]))
block.append(arcg+" x"+str(segment.B[0])+" y"+str(segment.B[1])+" r"+str(radius/2))
blocks.append(block)
active = app.activeBlock()
app.gcode.insBlocks(active, blocks, "Trochoidal created") #<<< insert blocks over active block in the editor
app.refresh() #<<< refresh editor
app.setStatus(_("Generated: Trochoidal")) #<<< feed back result
#app.gcode.blocks.append(block)
#Convert polar to cartesian and add that to existing vector
def pol2car(self, r, phi, a=[0,0]):
return [round(a[0]+r*cos(phi),4),round(a[1]+r*sin(phi),4)]
#Generate single trochoidal element between two points
def trochoid(self, A, B, radius, cw=True, circular=False):
block = []
if cw:
u = 1
arc = "G2"
else:
u = -1
arc = "G3"
phi = atan2(B[1]-A[1], B[0]-A[0])
step = sqrt((A[0]-B[0])**2+(A[1]-B[1])**2)
l = self.pol2car(radius, phi+radians(90*u))
r = self.pol2car(radius, phi+radians(-90*u))
al = self.pol2car(radius, phi+radians(90*u), A)
ar = self.pol2car(radius, phi+radians(-90*u), A)
bl = self.pol2car(radius, phi+radians(90*u), B)
br = self.pol2car(radius, phi+radians(-90*u), B)
# This schematic drawing represents naming convention
# of points and vectors calculated in previous block
#
# <--L---
# ---R-->
#
# * *
# * *
# * *
# BL B BR
# * *
# * ^ *
# * | *
# * | *
# * *
# AL A AR
# * *
# * *
# * *
#TODO: improve strategies
if circular:
block.append("g1 x"+str(al[0])+" y"+str(al[1]))
block.append("g1 x"+str(bl[0])+" y"+str(bl[1]))
block.append(arc+" x"+str(bl[0])+" y"+str(bl[1])+" i"+str(r[0])+" j"+str(r[1]))
else:
#block.append(arc+" x"+str(al[0])+" y"+str(al[1])+" r"+str(radius/2))
block.append("g1 x"+str(al[0])+" y"+str(al[1]))
block.append("g1 x"+str(bl[0])+" y"+str(bl[1]))
block.append(arc+" x"+str(br[0])+" y"+str(br[1])+" i"+str(r[0])+" j"+str(r[1]))
block.append("g1 x"+str(ar[0])+" y"+str(ar[1]))
block.append(arc+" x"+str(al[0])+" y"+str(al[1])+" i"+str(l[0])+" j"+str(l[1]))
block.append("g1 x"+str(bl[0])+" y"+str(bl[1]))
return block
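# Editor's sketch (not part of the bCNC plugin): pol2car() above just offsets a
# point by a polar vector, using the math names imported at the top of this
# file. The check below is hypothetical and is never called by bCNC.
def _pol2car_example():
    # radius 1 at 90 degrees moves a point one unit along +Y
    assert [round(1.0 * cos(radians(90)), 4), round(1.0 * sin(radians(90)), 4)] == [0.0, 1.0]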
| 33.109375
| 138
| 0.560327
|
051931bdd920d79b639cd344dbb73660283db68d
| 738
|
py
|
Python
|
src/websocket/list_users.py
|
rafaeldarocha/serverless-websocket
|
f50bdd4a86bef41c4dd5e33f54979caaf3b534dc
|
[
"Apache-2.0"
] | 18
|
2020-11-30T14:05:41.000Z
|
2021-08-17T00:52:04.000Z
|
src/websocket/list_users.py
|
rafaeldarocha/serverless-websocket
|
f50bdd4a86bef41c4dd5e33f54979caaf3b534dc
|
[
"Apache-2.0"
] | 1
|
2020-12-22T21:17:02.000Z
|
2020-12-22T21:17:02.000Z
|
src/websocket/list_users.py
|
rafaeldarocha/serverless-websocket
|
f50bdd4a86bef41c4dd5e33f54979caaf3b534dc
|
[
"Apache-2.0"
] | 4
|
2020-12-08T20:43:21.000Z
|
2022-01-08T11:15:24.000Z
|
import json
import boto3
import os
from lib.scan_table import scan
def handler(event, context):
# Get the connection Id from the user
connectionId=event['requestContext'].get('connectionId')
# Get the list of users connected
connections_list = scan(os.getenv('TABLE_NAME'))
# Filter the users connected, removing the requester
connections_filtered = list(filter(lambda item: (item['connectionId'] != connectionId), connections_list))
users_list = list(map(lambda item: ({'userName': item['userName'], 'connectionId': item['connectionId']}), connections_filtered))
output = {'action': 'list', 'users': users_list}
# Returns the users connected
return {
'statusCode': 200,
'body': json.dumps(output)
}
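# Editor's sketch (hypothetical, not part of the service): a minimal event shape
# for exercising handler() locally. The connectionId value below is made up; in
# production the event comes from API Gateway's WebSocket integration and the
# connections are read from the DynamoDB table named by TABLE_NAME.
_example_event = {'requestContext': {'connectionId': 'abc123='}}
# handler(_example_event, None)  # would return every *other* stored connection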
| 29.52
| 131
| 0.720867
|
3eca2a98d37ce99d0e1a5675945edbfd5b8fa81f
| 4,146
|
py
|
Python
|
roboticstoolbox/models/list.py
|
Russ76/robotics-toolbox-python
|
4b3e82a6522757ffde1f83aef8d05b3ad475e9de
|
[
"MIT"
] | null | null | null |
roboticstoolbox/models/list.py
|
Russ76/robotics-toolbox-python
|
4b3e82a6522757ffde1f83aef8d05b3ad475e9de
|
[
"MIT"
] | null | null | null |
roboticstoolbox/models/list.py
|
Russ76/robotics-toolbox-python
|
4b3e82a6522757ffde1f83aef8d05b3ad475e9de
|
[
"MIT"
] | null | null | null |
from typing import Type
from roboticstoolbox.robot.Robot import Robot
from roboticstoolbox.tools import rtb_get_param
from roboticstoolbox.robot.ERobot import ERobot2
from ansitable import ANSITable, Column
import inspect
# import importlib
def list(keywords=None, dof=None, type=None, border="thin"):
"""
Display all robot models in summary form
:param keywords: keywords to filter on, defaults to None
:type keywords: tuple of str, optional
:param dof: number of DoF to filter on, defaults to None
:type dof: int, optional
:param type: model type "DH", "ETS", "URDF", defaults to all types
:type type: str, optional
- ``list()`` displays a list of all models provided by the Toolbox. It
lists the name, manufacturer, model type, number of DoF, and keywords.
- ``list(type=MT)`` as above, but only displays models of type ``MT``
where ``MT`` is one of "DH", "ETS" or "URDF".
- ``list(keywords=KW)`` as above, but only displays models that have a
keyword in the tuple ``KW``.
- ``list(dof=N)`` as above, but only display models that have ``N``
degrees of freedom.
The filters can be combined
- ``list(keywords=KW, dof=N)`` are those models that have a keyword in
``KW`` and have ``N`` degrees of freedom.
"""
import roboticstoolbox.models as models
# module = importlib.import_module(
# '.' + os.path.splitext(file)[0], package='bdsim.blocks')
unicode = rtb_get_param("unicode")
if not unicode:
border = "ascii"
def make_table(border=None):
table = ANSITable(
Column("class", headalign="^", colalign="<"),
Column("name", headalign="^", colalign="<"),
Column("manufacturer", headalign="^", colalign="<"),
Column("type", headalign="^", colalign="<"),
Column("DoF", colalign="<"),
Column("dims", colalign="<"),
Column("structure", colalign="<"),
Column("dynamics", colalign="<"),
Column("geometry", colalign="<"),
Column("keywords", headalign="^", colalign="<"),
border=border,
)
if type is not None:
categories = [type]
else:
categories = ["DH", "URDF", "ETS"]
for category in categories:
# get all classes in this category
group = models.__dict__[category]
for cls in group.__dict__.values():
if inspect.isclass(cls) and issubclass(cls, Robot):
# we found a BaseRobot subclass, instantiate it
try:
robot = cls()
except TypeError:
print(f"failed to load {cls}")
try:
structure = robot.structure
except Exception: # pragma nocover
structure = ""
# apply filters
if keywords is not None:
if len(set(keywords) & set(robot.keywords)) == 0:
continue
if dof is not None and robot.n != dof:
continue # pragma nocover
dims = 0
if isinstance(robot, ERobot2):
dims = 2
else:
dims = 3
# add the row
table.row(
cls.__name__,
robot.name,
robot.manufacturer,
category,
robot.n,
f"{dims}d",
structure,
"Y" if robot._hasdynamics else "",
"Y" if robot._hasgeometry else "",
", ".join(robot.keywords),
)
table.print()
make_table(border=border)
if __name__ == "__main__": # pragma nocover
list(border='ascii')
list(keywords=("dynamics",), border='thin')
list(dof=6)
list(keywords=("dynamics",), dof=6)
| 34.840336
| 76
| 0.514713
|
33d7652f622e576bac098b637b43ff4815e5fbf4
| 239
|
py
|
Python
|
generate.py
|
orwell-int/proxy-robots-python
|
76e23eff0e3b2c55b4b865533a9db7e49e444f20
|
[
"BSD-3-Clause"
] | null | null | null |
generate.py
|
orwell-int/proxy-robots-python
|
76e23eff0e3b2c55b4b865533a9db7e49e444f20
|
[
"BSD-3-Clause"
] | 3
|
2020-08-13T17:46:07.000Z
|
2020-10-23T18:52:08.000Z
|
generate.py
|
orwell-int/proxy-robots-python
|
76e23eff0e3b2c55b4b865533a9db7e49e444f20
|
[
"BSD-3-Clause"
] | null | null | null |
from pathlib import Path
import importlib.util
spec = importlib.util.spec_from_file_location("messages", Path(".") / "messages" / "generate.py")
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo)
foo.main(Path("."))
| 29.875
| 97
| 0.757322
|
22432d1639accd93b6ce4201a5b2d31cf5072583
| 4,875
|
py
|
Python
|
Tools/scripts/pathfix.py
|
Hadron/python
|
73137f499ed658169f49273eee46845e3b53e800
|
[
"PSF-2.0"
] | 486
|
2016-05-28T18:51:54.000Z
|
2022-03-20T17:30:31.000Z
|
Tools/scripts/pathfix.py
|
Hadron/python
|
73137f499ed658169f49273eee46845e3b53e800
|
[
"PSF-2.0"
] | 40
|
2016-05-29T00:24:56.000Z
|
2020-07-13T11:56:58.000Z
|
Tools/scripts/pathfix.py
|
Hadron/python
|
73137f499ed658169f49273eee46845e3b53e800
|
[
"PSF-2.0"
] | 74
|
2015-05-29T17:18:53.000Z
|
2022-01-15T14:06:44.000Z
|
#!/usr/bin/env python3
# Change the #! line occurring in Python scripts. The new interpreter
# pathname must be given with a -i option.
#
# Command line arguments are files or directories to be processed.
# Directories are searched recursively for files whose name looks
# like a python module.
# Symbolic links are always ignored (except as explicit directory
# arguments). Of course, the original file is kept as a back-up
# (with a "~" attached to its name).
#
# Undoubtedly you can do this using find and sed or perl, but this is
# a nice example of Python code that recurses down a directory tree
# and uses regular expressions. Also note several subtleties like
# preserving the file's mode and avoiding to even write a temp file
# when no changes are needed for a file.
#
# NB: by changing only the function fixfile() you can turn this
# into a program for a different change to Python programs...
import sys
import re
import os
from stat import *
import getopt
err = sys.stderr.write
dbg = err
rep = sys.stdout.write
new_interpreter = None
preserve_timestamps = False
def main():
global new_interpreter
global preserve_timestamps
usage = ('usage: %s -i /interpreter -p file-or-directory ...\n' %
sys.argv[0])
try:
opts, args = getopt.getopt(sys.argv[1:], 'i:p')
except getopt.error as msg:
err(str(msg) + '\n')
err(usage)
sys.exit(2)
for o, a in opts:
if o == '-i':
new_interpreter = a.encode()
if o == '-p':
preserve_timestamps = True
if not new_interpreter or not new_interpreter.startswith(b'/') or \
not args:
err('-i option or file-or-directory missing\n')
err(usage)
sys.exit(2)
bad = 0
for arg in args:
if os.path.isdir(arg):
if recursedown(arg): bad = 1
elif os.path.islink(arg):
err(arg + ': will not process symbolic links\n')
bad = 1
else:
if fix(arg): bad = 1
sys.exit(bad)
ispythonprog = re.compile(r'^[a-zA-Z0-9_]+\.py$')
def ispython(name):
return bool(ispythonprog.match(name))
def recursedown(dirname):
dbg('recursedown(%r)\n' % (dirname,))
bad = 0
try:
names = os.listdir(dirname)
except OSError as msg:
err('%s: cannot list directory: %r\n' % (dirname, msg))
return 1
names.sort()
subdirs = []
for name in names:
if name in (os.curdir, os.pardir): continue
fullname = os.path.join(dirname, name)
if os.path.islink(fullname): pass
elif os.path.isdir(fullname):
subdirs.append(fullname)
elif ispython(name):
if fix(fullname): bad = 1
for fullname in subdirs:
if recursedown(fullname): bad = 1
return bad
def fix(filename):
## dbg('fix(%r)\n' % (filename,))
try:
f = open(filename, 'rb')
except IOError as msg:
err('%s: cannot open: %r\n' % (filename, msg))
return 1
line = f.readline()
fixed = fixline(line)
if line == fixed:
rep(filename+': no change\n')
f.close()
return
head, tail = os.path.split(filename)
tempname = os.path.join(head, '@' + tail)
try:
g = open(tempname, 'wb')
except IOError as msg:
f.close()
err('%s: cannot create: %r\n' % (tempname, msg))
return 1
rep(filename + ': updating\n')
g.write(fixed)
BUFSIZE = 8*1024
while 1:
buf = f.read(BUFSIZE)
if not buf: break
g.write(buf)
g.close()
f.close()
# Finishing touch -- move files
mtime = None
atime = None
# First copy the file's mode to the temp file
try:
statbuf = os.stat(filename)
mtime = statbuf.st_mtime
atime = statbuf.st_atime
os.chmod(tempname, statbuf[ST_MODE] & 0o7777)
except OSError as msg:
err('%s: warning: chmod failed (%r)\n' % (tempname, msg))
# Then make a backup of the original file as filename~
try:
os.rename(filename, filename + '~')
except OSError as msg:
err('%s: warning: backup failed (%r)\n' % (filename, msg))
# Now move the temp file to the original file
try:
os.rename(tempname, filename)
except OSError as msg:
err('%s: rename failed (%r)\n' % (filename, msg))
return 1
if preserve_timestamps:
if atime and mtime:
try:
os.utime(filename, (atime, mtime))
except OSError as msg:
err('%s: reset of timestamp failed (%r)\n' % (filename, msg))
return 1
# Return success
return 0
def fixline(line):
if not line.startswith(b'#!'):
return line
if b"python" not in line:
return line
return b'#! ' + new_interpreter + b'\n'
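# Editor's sketch (not part of the original tool): fixline() only rewrites
# hashbang lines that mention "python". The helper below is hypothetical and is
# never called by the script itself.
def _fixline_example():
    global new_interpreter
    new_interpreter = b'/usr/local/bin/python3'  # made-up interpreter path
    assert fixline(b'#!/usr/bin/python\n') == b'#! /usr/local/bin/python3\n'
    assert fixline(b'#!/bin/sh\n') == b'#!/bin/sh\n'    # no "python": untouched
    assert fixline(b'import os\n') == b'import os\n'    # not a hashbang: untouched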
if __name__ == '__main__':
main()
| 29.36747
| 77
| 0.595692
|
24a26d6fd774d8a39eeb5bd4d65eae810847b9bb
| 466
|
py
|
Python
|
tests/unit/conftest.py
|
knowsuchagency/composer
|
b422ed4048b4d421e5100ea1770cbed37c4fb158
|
[
"MIT"
] | 37
|
2021-05-24T22:34:59.000Z
|
2022-02-22T04:47:06.000Z
|
tests/unit/conftest.py
|
knowsuchagency/composer
|
b422ed4048b4d421e5100ea1770cbed37c4fb158
|
[
"MIT"
] | 21
|
2021-05-26T09:14:05.000Z
|
2021-06-15T08:08:55.000Z
|
tests/unit/conftest.py
|
knowsuchagency/composer
|
b422ed4048b4d421e5100ea1770cbed37c4fb158
|
[
"MIT"
] | 2
|
2021-06-22T09:51:39.000Z
|
2022-01-28T20:00:30.000Z
|
import pytest
from examples.hello_orkestra import Item
from orkestra import generic_context as _generic_context
@pytest.fixture(autouse=True)
def disable_powertools(monkeypatch):
monkeypatch.setenv("POWERTOOLS_TRACE_DISABLED", "1")
monkeypatch.setenv("POWERTOOLS_LOG_DEDUPLICATION_DISABLED", "1")
@pytest.fixture
def generic_event():
return Item.random().dict()
@pytest.fixture(scope="session")
def generic_context():
return _generic_context
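# Editor's sketch (hypothetical, not collected by pytest): a test module in this
# suite would consume the fixtures above simply by naming them as parameters,
# for example:
#
#   def test_generic_event_is_a_dict(generic_event):
#       assert isinstance(generic_event, dict)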
| 21.181818
| 68
| 0.787554
|
34fd17fbd8910f31b920966a33d6a8f75f4d3a62
| 794
|
py
|
Python
|
chrome/common/extensions/docs/server2/patcher.py
|
pozdnyakov/chromium-crosswalk
|
0fb25c7278bf1d93e53a3b0bcb75aa8b99d4b26e
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2019-02-03T05:19:48.000Z
|
2021-11-15T15:07:21.000Z
|
chrome/common/extensions/docs/server2/patcher.py
|
pozdnyakov/chromium-crosswalk
|
0fb25c7278bf1d93e53a3b0bcb75aa8b99d4b26e
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
chrome/common/extensions/docs/server2/patcher.py
|
pozdnyakov/chromium-crosswalk
|
0fb25c7278bf1d93e53a3b0bcb75aa8b99d4b26e
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class Patcher(object):
def GetPatchedFiles(self, version=None):
    ''' Returns patched files as (added_files, deleted_files, modified_files)
from the patchset specified by |version|.
'''
raise NotImplementedError()
def GetVersion(self):
''' Returns patch version. Returns None when nothing is patched by the
patcher.
'''
raise NotImplementedError()
def Apply(self, paths, file_system, binary, version=None):
''' Apply the patch to added/modified files. Returns Future with patched
data. Throws FileNotFoundError if |paths| contains deleted files.
'''
raise NotImplementedError()
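# Editor's sketch (hypothetical, not part of Chromium): a do-nothing subclass
# illustrating the shapes the interface above expects from implementors.
class _NullPatcher(Patcher):
  def GetPatchedFiles(self, version=None):
    return ([], [], [])  # no added, deleted or modified files

  def GetVersion(self):
    return None  # nothing is patched

  def Apply(self, paths, file_system, binary, version=None):
    # a real patcher would return a Future with the patched data
    raise NotImplementedError()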
| 34.521739
| 76
| 0.724181
|
f7f74eef92c28adef66c4f1ec70ea2a5ac127046
| 20,940
|
py
|
Python
|
src/ADDA/Networks.py
|
fol21/domain-adaptation-in-deforestation
|
ae1c37b1634f54230f1d2217c209dabd6780568a
|
[
"MIT"
] | null | null | null |
src/ADDA/Networks.py
|
fol21/domain-adaptation-in-deforestation
|
ae1c37b1634f54230f1d2217c209dabd6780568a
|
[
"MIT"
] | null | null | null |
src/ADDA/Networks.py
|
fol21/domain-adaptation-in-deforestation
|
ae1c37b1634f54230f1d2217c209dabd6780568a
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import tensorflow as tf
class Networks():
def __init__(self, args):
super(Networks, self).__init__()
self.args = args
# Wittich design
def VNET_16L(self, I, is_train, reuse_unet=False, reuse_ada=False, adaption_net=False):
def encoder_conf(name, X, filter, f_size, scale, norm, reuse, is_train, dropout=0.0, stddev=-1.0, slope=0.00,
use_bias=True):
with tf.variable_scope(name) as scope:
if scale > 1:
X = self.conv(name + '_downsample', X, filter, scale, scale, (not norm) and use_bias, "VALID", stddev)
else:
X = self.conv(name + '_conf', X, filter, f_size, 1, (not norm) and use_bias, "VALID", stddev)
if norm == 'I':
X = tf.contrib.layers.instance_norm(X, scope=scope, reuse=reuse)
elif norm == 'B':
X = tf.layers.batch_normalization(X, reuse=reuse, training=is_train, name=name)
elif norm == 'G':
X = tf.contrib.layers.group_norm(X, groups=16, scope=scope, reuse=reuse)
if dropout > 0.0:
X = tf.layers.dropout(X, dropout, training=is_train)
if slope < 1.0:
X = tf.nn.leaky_relu(X, slope) if slope > 0.0 else tf.nn.relu(X)
return X
def decoder_conf(name, X, filter, f_size, scale, norm, reuse, is_train, dropout=0.0, stddev=-1.0, slope=0.00,
use_bias=True):
with tf.variable_scope(name) as scope:
if scale > 1:
X = self.t_conv(name + '_upsample', X, filter, scale, scale, (not norm) and use_bias, "VALID", stddev)
else:
X = self.t_conv(name + '_deconf', X, filter, f_size, 1, (not norm) and use_bias, "VALID", stddev)
if norm == 'I':
X = tf.contrib.layers.instance_norm(X, scope=scope, reuse=reuse)
elif norm == 'B':
X = tf.layers.batch_normalization(X, reuse=reuse, training=is_train, name=name)
elif norm == 'G':
X = tf.contrib.layers.group_norm(X, groups=16, scope=scope, reuse=reuse)
if dropout > 0.0:
X = tf.layers.dropout(X, dropout, training=is_train)
if slope < 1.0:
X = tf.nn.leaky_relu(X, slope) if slope > 0.0 else tf.nn.relu(X)
return X
F = 3
norm = self.args.norm
# print('norm', norm)
# print('skip cons', self.args.skip_connections)
# print('VNET In:', I.get_shape().as_list())
if adaption_net:
# print('ada scope T/R', is_train, reuse_ada)
encoderscope = 'ada_enc'
decoderscope = 'ada_dec'
reuse_encoder = reuse_ada
reuse_decoder = reuse_ada
else:
# print('vnet scope T/R', is_train, reuse_unet)
encoderscope = 'unet_enc'
decoderscope = 'unet_dec'
reuse_encoder = reuse_unet
reuse_decoder = reuse_unet
print([encoderscope, ' ', decoderscope])
# ===============================================================================ENCODER
with tf.variable_scope(encoderscope) as scope:
if reuse_encoder: scope.reuse_variables()
with tf.variable_scope('color_encoder'):
X = encoder_conf('eI', I[:, :, :, :-1], 96, 5, 1, norm, reuse_encoder, is_train, self.args.dropout) # 128 > 124
X0 = encoder_conf('d0', X, 96, 2, 2, norm, reuse_encoder, is_train, self.args.dropout) # 124 > 62 @2
X = encoder_conf('e1', X0, 128, 3, 1, norm, reuse_encoder, is_train, self.args.dropout) # 62 > 60
X_EARLY = X
X1 = encoder_conf('d1', X, 128, 2, 2, norm, reuse_encoder, is_train, self.args.dropout) # 60 > 30 @4
X = encoder_conf('e2', X1, 256, 3, 1, norm, reuse_encoder, is_train, self.args.dropout) # 30 > 28
X2 = encoder_conf('d2', X, 256, 2, 2, norm, reuse_encoder, is_train, self.args.dropout) # 28 > 14 @8
X = encoder_conf('e3', X2, 512, 3, 1, norm, reuse_encoder, is_train, self.args.dropout) # 14 > 12
X_MIDDLE = X
# ===============================================================================DECODER
with tf.variable_scope(decoderscope) as scope:
if reuse_decoder: scope.reuse_variables()
# print('vnet scope', is_train, reuse_unet)
# print('VNET Latent:', X.get_shape().as_list())
with tf.variable_scope('decoder'):
X = decoder_conf('d3', X, 512, F, 1, norm, reuse_decoder, is_train, self.args.dropout) # 12 > 14
if self.args.skip_connections: X = tf.concat((X, X2), axis=-1)
X = decoder_conf('u4', X, 256, F, 2, norm, reuse_decoder, is_train, self.args.dropout) # 14 > 28
X = decoder_conf('d4', X, 256, F, 1, norm, reuse_decoder, is_train, self.args.dropout) # 28 > 30
if self.args.skip_connections: X = tf.concat((X, X1), axis=-1)
X = decoder_conf('u5', X, 128, F, 2, norm, reuse_decoder, is_train, self.args.dropout) # 30 > 60
X_LATE = X
X = decoder_conf('d5', X, 128, F, 1, norm, reuse_decoder, is_train, self.args.dropout) # 60 > 62
if self.args.skip_connections: X = tf.concat((X, X0), axis=-1)
X = decoder_conf('u6', X, 64, F, 2, norm, reuse_decoder, is_train, self.args.dropout) # 62 > 124
X = decoder_conf('d6', X, 64, 5, 1, norm, reuse_decoder, is_train, self.args.dropout) # 124 > 128
X = decoder_conf('out', X, self.args.num_classes, 1, 1, '', reuse_decoder, is_train, slope=1.0, stddev=0.02,
use_bias=False)
prediction = tf.nn.softmax(X, name = 'softmax')
# ============================================================================OUT
# print('VNET Out:', X.get_shape().as_list())
# if self.args.mode == 'adapt':
return X, X_EARLY, X_MIDDLE, X_LATE, prediction
# else:
# return X, prediction
def D_4(self, X, reuse):
def discrim_conv(name, X, out_channels, filtersize, stride=1, norm='', nonlin=True, init_stddev=-1):
with tf.variable_scope(name) as scope:
if init_stddev <= 0.0:
init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32)
else:
init = tf.truncated_normal_initializer(stddev=init_stddev)
X = tf.layers.conv2d(X, out_channels, kernel_size=filtersize, strides=(stride, stride), padding="valid",
kernel_initializer=init)
if norm == 'I':
X = tf.contrib.layers.instance_norm(X, scope=scope, reuse=reuse, epsilon=0.001)
elif norm == 'B':
X = tf.layers.batch_normalization(X, reuse=reuse, training=True)
elif norm == 'G':
X = tf.contrib.layers.group_norm(X, groups=16, scope=scope, reuse=reuse)
if nonlin:
X = tf.nn.leaky_relu(X, 0.2)
return X
with tf.variable_scope('discriminator') as scope:
if reuse:
scope.reuse_variables()
print('D in:', X.get_shape().as_list())
X = self.conv('DZ1', X, 512, 1, 1)
X = tf.nn.leaky_relu(X, 0.2)
X = self.conv('DZ2', X, 512, 1, 1)
X = tf.nn.leaky_relu(X, 0.2)
X = self.conv('DZ3', X, 512, 1, 1)
X = tf.nn.leaky_relu(X, 0.2)
X = self.conv('DZ4', X, 512, 1, 1)
X = tf.nn.leaky_relu(X, 0.2)
X = discrim_conv('d_out', X, 1, 1, norm=False, nonlin=False, init_stddev=0.02)
print('D out:', X.get_shape().as_list())
return X
def atrous_discriminator(self, X, reuse):
def atrous_convs(net, scope, rate=None, depth=256, reuse=None):
"""
ASPP layer 1×1 convolution and three 3×3 atrous convolutions
"""
with tf.variable_scope(scope, reuse=reuse):
pyram_1x1_0 = self.conv('_1x1', net, depth, size=1, stride=1, padding="SAME")
pyram_3x3_1 = self.conv('_3x3', net, depth, size=3, stride=1, padding="SAME")
pyram_3x3_2 = self.conv('_atr_3x3_1', net, depth, size=3, stride=1, padding="SAME", dilation=rate[0])
pyram_3x3_3 = self.conv('_atr_3x3_2', net, depth, size=3, stride=1, padding="SAME", dilation=rate[1])
# pyram_3x3_4 = self.z_conv('_atr_3x3_3', net, depth/2, size=3, stride=1, padding="SAME", dilation=rate[2])
net = tf.concat((pyram_1x1_0, pyram_3x3_1, pyram_3x3_2, pyram_3x3_3), axis=3, name="concat")
net = self.conv('_1x1_output', net, depth, size=1, stride=1, padding="SAME")
# pyram_1x1_0 = self.conv('_1x1', net, depth, size=1, stride=1, padding="SAME")
# pyram_3x3_1 = self.conv('_3x3', net, depth/2, size=3, stride=1, padding="SAME")
# pyram_3x3_2 = self.conv('_atr_3x3_1', net, depth/2, size=3, stride=1, padding="SAME", dilation=rate[0])
# pyram_3x3_3 = self.conv('_atr_3x3_2', net, depth/2, size=3, stride=1, padding="SAME", dilation=rate[1])
# # pyram_3x3_4 = self.conv('_atr_3x3_3', net, depth/2, size=3, stride=1, padding="SAME", dilation=rate[2])
# net = tf.concat((pyram_1x1_0, pyram_3x3_1, pyram_3x3_2, pyram_3x3_3), axis=3, name="concat")
# net = self.conv('_1x1_output', net, depth, size=1, stride=1, padding="SAME")
return net
with tf.variable_scope('discriminator') as scope:
if reuse:
scope.reuse_variables()
print('D in:', X.get_shape().as_list())
rate = [2, 3, 4]
X = atrous_convs(X, "d_atrous_0", rate = rate, depth=256, reuse=reuse)
X = tf.nn.leaky_relu(X, 0.2)
X = self.conv('d_1', X, 512, size=1, stride=1, padding="SAME")
X = tf.nn.leaky_relu(X, 0.2)
X = self.conv('d_2', X, 512, size=1, stride=1, padding="SAME")
X = tf.nn.leaky_relu(X, 0.2)
X = self.conv('d_3', X, 512, size=1, stride=1, padding="SAME")
X = tf.nn.leaky_relu(X, 0.2)
X = self.conv('d_out', X, 1, size=1, stride=1, padding="SAME")
print('D out:', X.get_shape().as_list())
return X
def conv(self, id, input, channels, size=3, stride=1, use_bias=True, padding="SAME", init_stddev=-1.0, dilation=1):
assert padding in ["SAME", "VALID", "REFLECT", "PARTIAL"], 'valid paddings: "SAME", "VALID", "REFLECT", "PARTIAL"'
if type(size) == int: size = [size, size]
if init_stddev <= 0.0:
init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32)
else:
init = tf.truncated_normal_initializer(stddev=init_stddev)
if padding == "PARTIAL":
with tf.variable_scope('mask'):
_, h, w, _ = input.get_shape().as_list()
slide_window = size[0] * size[1]
mask = tf.ones(shape=[1, h, w, 1])
update_mask = tf.layers.conv2d(mask, filters=1, dilation_rate=(dilation, dilation), name='mask' + id,
kernel_size=size, kernel_initializer=tf.constant_initializer(1.0),
strides=stride, padding="SAME", use_bias=False, trainable=False)
mask_ratio = slide_window / (update_mask + 1e-8)
update_mask = tf.clip_by_value(update_mask, 0.0, 1.0)
mask_ratio = mask_ratio * update_mask
with tf.variable_scope('parconv'):
x = tf.layers.conv2d(input, filters=channels, name='conv' + id, kernel_size=size, kernel_initializer=init,
strides=stride, padding="SAME", use_bias=False)
x = x * mask_ratio
if use_bias:
bias = tf.get_variable("bias" + id, [channels], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
return x * update_mask
if padding == "REFLECT":
assert size[0] % 2 == 1 and size[1] % 2 == 1, "REFLECTION PAD ONLY WORKING FOR ODD FILTER SIZE.. " + str(size)
pad_x = size[0] // 2
pad_y = size[1] // 2
input = tf.pad(input, [[0, 0], [pad_x, pad_x], [pad_y, pad_y], [0, 0]], "REFLECT")
padding = "VALID"
return tf.layers.conv2d(input, channels, kernel_size=size, strides=[stride, stride],
padding=padding, kernel_initializer=init, name='conv' + id,
use_bias=use_bias, dilation_rate=(dilation, dilation))
def z_conv(self, id, input, channels, size, stride=1, padding="SAME", use_bias=False, dilation=1):
# zero mean conv
if type(size) == int: size = [size, size]
in_ch = input.get_shape().as_list()[-1]
# init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32)
init = tf.truncated_normal_initializer(mean=0.0, stddev=0.02)
filters = tf.get_variable('zero_conv_weights' + id, initializer=init, shape=[size[0], size[1], in_ch, channels])
filters = filters - tf.reduce_mean(filters, axis=[0, 1, 2], keepdims=True)
if padding == "PARTIAL":
with tf.variable_scope('mask'):
_, h, w, _ = input.get_shape().as_list()
slide_window = size[0] * size[1]
mask = tf.ones(shape=[1, h, w, 1])
update_mask = tf.layers.conv2d(mask, filters=1, name='mask' + id,
kernel_size=size, kernel_initializer=tf.constant_initializer(1.0),
strides=stride, padding="SAME", use_bias=False, trainable=False,
dilation_rate=(dilation, dilation))
mask_ratio = slide_window / (update_mask + 1e-8)
update_mask = tf.clip_by_value(update_mask, 0.0, 1.0)
mask_ratio = mask_ratio * update_mask
with tf.variable_scope('parconv'):
x = tf.nn.conv2d(input, filters, strides=[1, stride, stride, 1], padding="SAME", name='zero-conv_' + id,
dilations=(1, dilation, dilation, 1))
x = x * mask_ratio
if use_bias:
bias = tf.get_variable("bias" + id, [channels], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
return x * update_mask
x = tf.nn.conv2d(input, filters, strides=[1, stride, stride, 1], padding=padding, name='zero-conv_' + id,
dilations=(1, dilation, dilation, 1))
if use_bias:
bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
return x
def t_conv(self, id, input, channels, size=3, stride=1, use_bias=True, padding="SAME", init_stddev=-1.0):
# good old t-conv. I love it!
assert padding in ["SAME", "VALID"], 'valid paddings are "SAME", "VALID"'
if type(size) == int:
size = [size, size]
if init_stddev <= 0.0:
init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32)
else:
init = tf.truncated_normal_initializer(stddev=init_stddev)
return tf.layers.conv2d_transpose(input, channels, kernel_size=size, strides=[stride, stride],
padding=padding, kernel_initializer=init, name='tr_conv' + id, use_bias=use_bias)
# Traditional U-Net
def build_Unet_Arch(self, input_data, name="Unet_Arch"):
self.base_number_of_features = 32
with tf.variable_scope(name):
# Encoder definition
o_c1 = self.general_conv2d(input_data, self.base_number_of_features, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_1')
o_mp1 = tf.layers.max_pooling2d(o_c1, 2, 2, name = name + '_maxpooling_1')
o_c2 = self.general_conv2d(o_mp1, self.base_number_of_features * 2, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_2')
o_mp2 = tf.layers.max_pooling2d(o_c2, 2, 2, name = name + '_maxpooling_2')
o_c3 = self.general_conv2d(o_mp2, self.base_number_of_features * 4, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_3')
o_mp3 = tf.layers.max_pooling2d(o_c3, 2, 2, name = name + '_maxpooling_3')
o_c4 = self.general_conv2d(o_mp3, self.base_number_of_features * 8, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_4')
o_mp4 = tf.layers.max_pooling2d(o_c4, 2, 2, name = name + '_maxpooling_4')
o_c5 = self.general_conv2d(o_mp4, self.base_number_of_features * 16, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_5')
# Decoder definition
o_d1 = self.general_deconv2d(o_c5, self.base_number_of_features * 8, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_1')
o_me1 = tf.concat([o_d1, o_c4], 3) # Skip connection
o_d2 = self.general_deconv2d(o_me1, self.base_number_of_features * 4, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_2')
o_me2 = tf.concat([o_d2, o_c3], 3) # Skip connection
o_d3 = self.general_deconv2d(o_me2, self.base_number_of_features * 2, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_3')
o_me3 = tf.concat([o_d3, o_c2], 3) # Skip connection
o_d4 = self.general_deconv2d(o_me3, self.base_number_of_features, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_4')
o_me4 = tf.concat([o_d4, o_c1], 3) # Skip connection
logits = tf.layers.conv2d(o_me4, self.args.num_classes, 1, 1, 'SAME', activation = None)
prediction = tf.nn.softmax(logits, name = name + '_softmax')
return logits, prediction
def general_conv2d(self, input_data, filters = 64, kernel_size = 7, stride = 1, stddev = 0.02, activation_function = "relu", padding = "VALID", do_norm=True, relu_factor = 0, name="conv2d"):
with tf.variable_scope(name):
conv = tf.layers.conv2d(input_data, filters, kernel_size, stride, padding, activation=None)
if do_norm:
conv = tf.layers.batch_normalization(conv, momentum=0.9)
if activation_function == "relu":
conv = tf.nn.relu(conv, name = 'relu')
if activation_function == "leakyrelu":
conv = tf.nn.leaky_relu(conv, alpha=relu_factor)
if activation_function == "elu":
conv = tf.nn.elu(conv, name = 'elu')
return conv
def general_deconv2d(self, input_data, filters = 64, kernel_size = 7, stride = 1, stddev = 0.02, activation_function = "relu", padding = "VALID", do_norm = True, relu_factor = 0, name="deconv2d"):
with tf.variable_scope(name):
deconv = tf.layers.conv2d_transpose(input_data, filters, kernel_size, (stride, stride), padding, activation = None)
if do_norm:
deconv = tf.layers.batch_normalization(deconv, momentum = 0.9)
if activation_function == "relu":
deconv = tf.nn.relu(deconv, name = 'relu')
if activation_function == "leakyrelu":
deconv = tf.nn.leaky_relu(deconv, alpha=relu_factor)
if activation_function == "elu":
deconv = tf.nn.elu(deconv, name = 'elu')
return deconv
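# Editor's sketch (hypothetical, not part of the project): Networks.__init__
# above only stores `args`; VNET_16L later reads at least args.norm,
# args.dropout, args.skip_connections and args.num_classes. The namespace and
# values below are made up purely to illustrate the expected shape of `args`.
def _networks_args_sketch():
    from types import SimpleNamespace
    args = SimpleNamespace(norm='B', dropout=0.5, skip_connections=True,
                           num_classes=2)
    return Networks(args)  # no TF graph is built until VNET_16L() is called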
| 56.747967
| 200
| 0.545463
|
ef54021103f1b88a2ef5913e7def0622a8a1c5e0
| 13,923
|
py
|
Python
|
model_deprecated.py
|
OpenSUM/ChNewsLongSum
|
fed7a24f40f8a8218918e0261ff5d5dd359ac56b
|
[
"Apache-2.0"
] | null | null | null |
model_deprecated.py
|
OpenSUM/ChNewsLongSum
|
fed7a24f40f8a8218918e0261ff5d5dd359ac56b
|
[
"Apache-2.0"
] | null | null | null |
model_deprecated.py
|
OpenSUM/ChNewsLongSum
|
fed7a24f40f8a8218918e0261ff5d5dd359ac56b
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import tensorflow as tf
from data.lcsts_loader import load
from utils import load_config, Config
from constants import *
# config = load_config('./lcsts.yaml')
# data = load(config, train=False)
# print(data)
class Model:
def __init__(self, config, data):
self.config = config
self.data = data
self.param = Config()
self.input_src = None
self.input_dst = None
self.dropout = None
self.V_U_g = tf.get_variable(name='V_U_g', shape=[2 * self.config.gru_hidden_dim],
dtype=tf.float32, initializer=tf.random_normal_initializer()) # todo: here.
self.V_U_w = tf.get_variable(name='V_U_w', dtype=tf.float32,
shape=[4 * self.config.gru_hidden_dim],
initializer=tf.random_normal_initializer())
self.W_Era = tf.get_variable(name='W_Era',
shape=[2 * self.config.gru_hidden_dim, 2 * self.config.gru_hidden_dim],
dtype=tf.float32, initializer=tf.random_normal_initializer())
self.W_Add = tf.get_variable(name='W_Add',
shape=[2 * self.config.gru_hidden_dim, 2 * self.config.gru_hidden_dim],
dtype=tf.float32, initializer=tf.random_normal_initializer())
self.W_Att = tf.get_variable(name='W_Att',
shape=[2 * self.config.gru_hidden_dim, 2 * self.config.gru_hidden_dim],
dtype=tf.float32, initializer=tf.random_normal_initializer())
self.W_c = tf.get_variable(name='W_c',
shape=[2*self.config.gru_hidden_dim], dtype=tf.float32, initializer=tf.random_normal_initializer())
self.W_s = tf.get_variable(name='W_s',
shape=[2*self.config.gru_hidden_dim], dtype=tf.float32, initializer=tf.random_normal_initializer())
self.W_y = tf.get_variable(name='W_y',
shape=[self.config.embedding_size], dtype=tf.float32, initializer=tf.random_normal_initializer())
self.b_ptr = tf.get_variable(name='b_ptr',
shape=[1], dtype=tf.float32, initializer=tf.random_normal_initializer())
self.MS = tf.Variable(name='MS', dtype=tf.float32, trainable=False,
initial_value=tf.random_normal(shape=[self.config.sequence_length,
self.config.batch_size,
2 * self.config.gru_hidden_dim]))
self.MU = None
self.w_u = None
self.all1 = tf.constant(np.zeros(shape=(2 * self.config.gru_hidden_dim), dtype=np.float32))
self.build()
# @staticmethod
def lstm_cell(self, hidden_dim, cudnn=True):
if cudnn:
return tf.contrib.rnn.LSTMBlockCell(hidden_dim)
# return tf.contrib.cudnn_rnn.CudnnCompatibleLSTMCell(self.config.gru_hidden_dim)
else:
return tf.nn.rnn_cell.BasicLSTMCell(hidden_dim, state_is_tuple=True)
def gru_cell(self, hidden_dim, cudnn=True):
if cudnn:
return tf.contrib.rnn.GRUBlockCellV2(hidden_dim)
# return tf.contrib.cudnn_rnn.CudnnCompatibleGRUCell(self.config.gru_hidden_dim)
else:
return tf.nn.rnn_cell.GRUCell(hidden_dim)
def transformer_cell(self, cudnn=False):
return None
def cell(self, double=False):
hidden_dim = self.config.gru_hidden_dim if not double else (2 * self.config.gru_hidden_dim)
assert self.config.cell in ['gru', 'lstm']
if self.config.cell == 'lstm':
cell = self.lstm_cell(hidden_dim)
elif self.config.cell == 'gru':
cell = self.gru_cell(hidden_dim)
else:
cell = self.transformer_cell()
return cell
def dropout_cell(self, cell, keep_prob=None):
if keep_prob is None:
keep_prob = self.config.dropout
return tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=keep_prob)
def encoder_basic(self, inputs):
# encoder definitions
fcells = [self.cell() for _ in range(self.config.gru_hidden_layers)]
fstacked_rnn = tf.contrib.rnn.MultiRNNCell(fcells, state_is_tuple=True)
bcells = [self.cell() for _ in range(self.config.gru_hidden_layers)]
bstacked_rnn = tf.nn.rnn_cell.MultiRNNCell(bcells, state_is_tuple=True)
# encode operation
outputs, states = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(fstacked_rnn, bstacked_rnn, inputs,
dtype=tf.float32)
return outputs, states
def encode(self, inputs):
"""
        encode the inputs using a CUDNN GRU
        :param inputs: shape=(batch_size, time_len, input_size (embedding_dim))
        :return: outputs, states
"""
        # todo: the inputs argument of the CudnnLSTM/CudnnGRU call has shape=[time_len, batch_size, input_size], so the dimensions are transposed here
ip = tf.transpose(inputs, perm=[1, 0, 2])
encoder = tf.contrib.cudnn_rnn.CudnnGRU(
num_layers=self.config.gru_hidden_layers,
num_units=self.config.gru_hidden_dim,
input_mode='linear_input',
direction='bidirectional',
dropout=0.0,
seed=None,
dtype=tf.float32,
kernel_initializer=None,
bias_initializer=None,
name=None)
        output, state = encoder(ip)  # note: the order of c and h here is the reverse of BasicLSTMCell
return output, state
def decoder_RNN(self):
cells = self.cell(double=True)
def read(self, SS):
"""
read content from MU using SS.
MU: shape=(time_step, batch_size, hidden_dim)
:param SS:shape=(batch_size, hidden_dim)
:return: context c(shape=(batch_size, hidden_dim)), weight w.
"""
if self.config.cell == 'lstm':
SS = SS[1]
SS_tiled = tf.tile(tf.expand_dims(SS, 0), multiples=[self.config.sequence_length, 1, 1])
t = tf.tensordot(tf.concat([self.MU, SS_tiled], axis=2), self.V_U_w, axes=[2, 0])
w_ = tf.nn.softmax(t, axis=1)
g = tf.nn.sigmoid(tf.tensordot(SS, self.V_U_g, axes=[1, 0]))
if self.w_u is None:
self.w_u = (1 - g) * w_
else:
self.w_u = g * self.w_u + (1 - g) * w_
c_u = tf.reduce_sum(self.MU * tf.expand_dims(self.w_u, -1), 0)
return c_u, self.w_u
def write(self, SS):
"""
modify content of MU using SS.
MU: shape=(time_step, batch_size, hidden_dim)
        uEra/uAdd: shape=(batch_size, hidden_dim)
        all1: shape=(hidden_dim)
        self.w_u: shape=(time_step, batch_size)
:param SS:shape=(batch_size, hidden_dim)
:return:
"""
if self.config.cell == 'lstm':
SS = SS[1]
uEra = tf.sigmoid(tf.tensordot(SS, self.W_Era, axes=[-1, 0]))
uAdd = tf.sigmoid(tf.tensordot(SS, self.W_Add, axes=[-1, 0]))
self.MU = self.MU * (1 - tf.expand_dims(self.w_u, -1) * tf.expand_dims(uEra, 0)) + \
tf.expand_dims(self.w_u, -1) * tf.expand_dims(uAdd, 0)
pass
def read_MS(self, state):
"""
read content of MS using state of SU.
MS: shape=(time_step, batch_size, hidden_dim)
:param state: shape=(batch_size, hidden_dim)
:return: context c
"""
if self.config.cell == 'lstm':
state = state[1]
e = tf.tensordot(self.MS, self.W_Att, axes=[-1, 0]) # shape=(time_step, batch_size, hidden_dim)
e = tf.reduce_sum(tf.expand_dims(state, 0) * e, axis=2) # shape=(time_step, batch_size)
a = tf.nn.softmax(e, axis=0) # shape=(time_step, batch_size)
c = tf.reduce_sum(tf.expand_dims(a, -1) * self.MS, axis=0)
# c = self.MS * a # shape=(time_step, batch_size, hidden_dim)
return c, a
def predict(self, y_, SS, cs, alpha, pointer=True):
"""
calc prediction probabilities of every word
:param y_: the last output word, shape=(batch_size, embedding_size)
:param SS: shape=(batch_size, hidden_dim)
:param cs: shape=(batch_size, hidden_dim)
:param pointer: whether use pointer-generator network
:param alpha:
        :return: logits over the prediction vocabulary for the current batch, shape=(batch_size, predict_vocab_size); currently None when pointer=True because the pointer-generator combination is not implemented yet
"""
if self.config.cell == 'lstm':
SS = SS[1]
if pointer:
p_gen = tf.nn.sigmoid(tf.tensordot(cs, self.W_c, axes=[1, 0]) +
tf.tensordot(SS, self.W_s, axes=[1, 0]) +
tf.tensordot(y_, self.W_y, axes=[1, 0]) +
self.b_ptr)
p_vocab = tf.nn.softmax(tf.layers.dense(tf.layers.dense(tf.concat([SS, cs], axis=1), self.config.gru_hidden_dim), self.config.predict_vocab_size), axis=1)
logits = None
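            # Note (sketch, not part of the original model): a standard pointer-generator head
            # would combine the two distributions as
            #   p_final = p_gen * p_vocab + (1 - p_gen) * copy_dist
            # where copy_dist scatters the attention weights `alpha` onto the source token ids
            # (e.g. via tf.one_hot / tf.scatter_nd over self.input_src). That combination is
            # still a todo in this deprecated model, which is why logits stays None here.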
else:
logits = tf.layers.dense(tf.concat([y_, SS, cs], axis=1), self.config.predict_vocab_size)
# pred = tf.argmax(logits, axis=1)
return logits
def decode(self, inputs, init_state, eval=True):
# decoder definitions
cellU = self.cell(double=True)
cellS = self.cell(double=True)
# decode operation
if self.config.cell == 'lstm':
            # tf.random_normal needs a concrete shape, so use the configured batch size instead of -1
            stateU = (tf.random_normal(shape=(self.config.batch_size, 2 * self.config.gru_hidden_dim), dtype=tf.float32),
                      tf.random_normal(shape=(self.config.batch_size, 2 * self.config.gru_hidden_dim), dtype=tf.float32))
            stateS = (tf.random_normal(shape=(self.config.batch_size, 2 * self.config.gru_hidden_dim), dtype=tf.float32),
                      tf.random_normal(shape=(self.config.batch_size, 2 * self.config.gru_hidden_dim), dtype=tf.float32))
        elif self.config.cell == 'gru':
            stateU = tf.random_normal(shape=(self.config.batch_size, 2 * self.config.gru_hidden_dim), dtype=tf.float32)
            stateS = tf.random_normal(shape=(self.config.batch_size, 2 * self.config.gru_hidden_dim), dtype=tf.float32)
        else:
            # todo: transformer block
            stateU = tf.random_normal(shape=(self.config.batch_size, 2 * self.config.gru_hidden_dim), dtype=tf.float32)
            stateS = tf.random_normal(shape=(self.config.batch_size, 2 * self.config.gru_hidden_dim), dtype=tf.float32)
yt_ = tf.nn.embedding_lookup(self.data.word_embeddings, [0]*self.config.batch_size) # y_t-1
for i in range(self.config.sequence_length):
# prepare data
x = inputs[:, i, :]
# RNN part:
cu, _ = self.read(stateS)
ip = tf.concat([x, cu], axis=1)
outputsU, stateU = cellU(inputs=ip, state=stateU)
cs, alpha = self.read_MS(stateU)
outputsS, stateS = cellS(inputs=tf.concat([x, cs], axis=1), state=stateS)
self.write(stateS)
# Prediction part
            # todo: the code below still needs to be filled in and corrected.
logits = self.predict(yt_, stateS, cs, alpha)
stepwise_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=tf.one_hot(self.input_dst, depth=self.config.predict_vocab_size, dtype=tf.float32),
logits=logits,
)
loss = tf.reduce_mean(stepwise_cross_entropy)
train_op = tf.train.AdamOptimizer().minimize(loss)
pass
def build(self):
self.input_src = tf.placeholder(tf.int32, [None, self.config.sequence_length], name='input_src')
self.input_dst = tf.placeholder(tf.int32, [None, self.config.sequence_length], name='input_dst')
self.dropout = tf.placeholder(tf.float32, [None], name='keep_prob')
if self.config.use_pretrain_embeddings == PRETRAIN_WORD_EMBEDDING:
embedding_matrix = tf.Variable(self.data.word_embeddings, name='pretrain_embeddings', trainable=False)
# elif self.config.use_pretrain_embeddings == TRAINABLE_WORD_EMBEDDING:
else:
embedding_matrix = tf.get_variable(name='trainable_embeddings',
shape=[self.config.vocab_size, self.config.embedding_size])
embedding_inputs = tf.nn.embedding_lookup(embedding_matrix, self.input_src)
embedding_outputs = tf.nn.embedding_lookup(embedding_matrix, self.input_dst)
encoder_outputs, encoder_states = self.encode(embedding_inputs)
self.MU = encoder_outputs
        # A dense layer with a non-linearity is added here to transform the encoder hidden state into the decoder's initial hidden state
decoder_states = tf.layers.dense(encoder_states[0][-1, :, :], 2 * self.config.gru_hidden_dim, activation='relu')
        with tf.control_dependencies([tf.assign(self.MS, self.MU)]):  # todo: not sure this is the right approach; left as-is to be revisited later
self.decode(embedding_outputs, init_state=decoder_states)
pass
print('kms')
pass
if __name__ == '__main__':
cfg = Config(dict(train_src='./data/lcsts/output/eval.src',
train_dst='./data/lcsts/output/eval.dst',
eval_src='./data/lcsts/output/test.src',
eval_dst='./data/lcsts/output/test.dst',
use_pretrain_embeddings=True,
embedding_file='./word_embeddings/sgns.merge.word',
vocab_file='./data/lcsts/output/vocab',
embedding_size=300,
sequence_length=5,
dropout=1.0,
gru_hidden_dim=256,
gru_hidden_layers=1,
cell='gru',
batch_size=10,
predict_vocab_size=50000,
))
data = load(cfg, train=False, eval=False, test=False)
model = Model(cfg, data)
print('kms')
| 46.878788
| 166
| 0.587517
|
d25af9ef32c107c08ccce5e714121e2f75eb97ed
| 835
|
py
|
Python
|
export_to_csv/s3upload.py
|
Sensorica/Sensor-Network
|
872be3aad2f31666a88d8544cabe0066cae2f9c8
|
[
"CC0-1.0"
] | 4
|
2016-02-21T22:53:24.000Z
|
2020-05-23T19:57:49.000Z
|
export_to_csv/s3upload.py
|
Sensorica/Sensor-Network
|
872be3aad2f31666a88d8544cabe0066cae2f9c8
|
[
"CC0-1.0"
] | 1
|
2016-06-27T17:47:47.000Z
|
2016-06-27T17:47:47.000Z
|
export_to_csv/s3upload.py
|
Sensorica/Sensor-Network
|
872be3aad2f31666a88d8544cabe0066cae2f9c8
|
[
"CC0-1.0"
] | 5
|
2016-02-11T17:46:23.000Z
|
2020-05-23T19:58:06.000Z
|
import os
import boto.s3
import config_file  # This is a file that stores our API keys and will not be pushed to git for obvious reasons
conn = boto.connect_s3(aws_access_key_id=config_file.AWS_ACCESS_KEY,
aws_secret_access_key=config_file.AWS_SECRET_KEY)
def percent_cb(complete,total):
print ('.')
def upload_to_s3_bucket_path(bucketname,path,filename):
mybucket = conn.get_bucket(bucketname)
fullkeyname=os.path.join(path,filename)
key = mybucket.new_key(fullkeyname)
key.set_contents_from_filename(filename,cb=percent_cb,num_cb=10)
def upload_to_s3_bucket_root(bucketname,filename):
mybucket = conn.get_bucket(bucketname)
key = mybucket.new_key(filename)
key.set_contents_from_filename(filename,cb=percent_cb,num_cb=10)
upload_to_s3_bucket_root('mltest12345','banking-batch.csv')
| 33.4
| 109
| 0.77485
|
d332de9dab19576772d0ca58ae752d1ce005d2c1
| 21,984
|
py
|
Python
|
lib/junno/nnet/history.py
|
LIV4D/JuNNo
|
7358f8344a7c125088e53aa1de0072c4699a9f07
|
[
"BSD-3-Clause"
] | null | null | null |
lib/junno/nnet/history.py
|
LIV4D/JuNNo
|
7358f8344a7c125088e53aa1de0072c4699a9f07
|
[
"BSD-3-Clause"
] | 1
|
2019-03-04T09:18:54.000Z
|
2019-03-05T06:15:06.000Z
|
lib/junno/nnet/history.py
|
LIV4D/JuNNo
|
7358f8344a7c125088e53aa1de0072c4699a9f07
|
[
"BSD-3-Clause"
] | null | null | null |
import pandas
import scipy.interpolate
import numpy as np
from ..j_utils.string import str2time, time2str
from ..j_utils.path import format_filepath
from collections import OrderedDict
class History:
"""
Store dataseries by iteration and epoch.
Data are index through timestamp: the number of iteration since the first iteration of the first epoch.
"""
def __init__(self):
self._timeline_series = OrderedDict()
self._timestamps = pandas.DataFrame(columns=['date', 'time'])
self._events = []
self._nb_iterations_by_epoch = [0]
self._current_epoch = 1
self._current_epoch_iteration = -1
def save(self, path):
path = format_filepath(path)
df = self.export_dataframe()
def load(self, path):
path = format_filepath(path)
# --- Current Iteration ---
@property
def epoch(self):
return self._current_epoch
@property
def iteration(self):
return self._current_epoch_iteration
@property
def last_timeid(self):
return sum(self._nb_iterations_by_epoch)
def __len__(self):
return self.last_timeid + 1
def next_iteration(self, time, date=None):
self._current_epoch_iteration += 1
self._nb_iterations_by_epoch[-1] = self._current_epoch_iteration
self._update_timestamp(time, date)
def next_epoch(self, time, date=None):
self._current_epoch += 1
self._current_epoch_iteration = 0
self._nb_iterations_by_epoch.append(0)
self._update_timestamp(time, date)
def _update_timestamp(self, time, date):
if date is None:
date = pandas.Timestamp.now()
date = pandas.to_datetime(date)
df = pandas.DataFrame([[time, date]], index=[self.last_timeid], columns=['time', 'date'])
self._timestamps = self._timestamps.append(df)
def __setitem__(self, key, value):
if not isinstance(key, str):
raise KeyError('History key should be a serie name not (%s, type:%s).'
% (str(key), type(key)))
if key not in self._timeline_series:
serie = pandas.Series(data=[value], index=[self.last_timeid], name=key)
self._timeline_series[key] = serie
else:
self._timeline_series[key][self.last_timeid] = value
# --- Store/Read Data ---
def keys(self):
return self._timeline_series.keys()
def series(self, only_number=False):
keys = list(self.keys())
if only_number:
return [k for k in keys if self._timeline_series[k].dtype != 'O']
return keys
def __getitem__(self, item):
if isinstance(item, str):
if item not in self.keys():
raise KeyError('%s is an unknown serie name.' % item)
return self._timeline_series[item].iloc[-1]
elif isinstance(item, tuple):
if len(item) != 2 or item[0] not in self.keys():
raise KeyError("Invalid history index: %s\n"
"Index should follow the form: ['series name', time_index]" % repr(item))
series = item[0]
timeid = item[1]
if isinstance(timeid, slice):
df = self.read(series=series, start=timeid.start, stop=timeid.stop, step=timeid.step,
interpolation='previous', averaged=True, std=False)
return df[series].values
else:
return self.get(series=series, timeid=timeid, interpolation='previous')
raise IndexError('Invalid index: unable to read from history series')
def get(self, series, timeid=-1, interpolation='previous', default='raise exception'):
try:
t = self.interpret_timeid(timeid)
if series not in self.keys():
raise KeyError('%s is an unknown serie name.' % series)
except LookupError as e:
if default != 'raise exception':
return default
raise e from None
serie = self._timeline_series[series]
if interpolation is None:
try:
return serie.loc[t]
except KeyError:
if default != 'raise exception':
return default
raise IndexError("Serie %s doesn't store any data at time: %s.\n"
"The interpolation parameter may be use to remove this exception."
% (series, repr(timeid)))
else:
serie = scipy.interpolate.interp1d(x=serie.index, y=serie.values,
kind=interpolation, fill_value='extrapolate',
assume_sorted=True, copy=False)
return serie(timeid)
def read(self, series=None, start=0, stop=0, step=1, timestamp=None,
interpolation='previous', smooth=None, averaged=True, std=False):
"""
        Read the stored series between two time indexes, optionally down-sampling, averaging, interpolating and smoothing them.
:param series: Keys of the variables to read
:type series: str or tuple or set
:param start: timestamp from which data should be read
:type start: int, TimeStamp, ...
:param stop: timestamp until which data should be read
:type stop: int, TimeStamp, ...
:param step: Interval between to sample
:type step: int, TimeStamp, ...
:param timestamp: Additional timestamp related columns. Acceptable values are:
- epoch
- iteration
- time
- date
:param interpolation: Specify which number serie should be interpolated and how.
NaN in number series can automatically be replaced by interpolated values using pandas interpolation algorithms.
            This parameter must be one of the following:
- True: All numbers series are interpolated linearly
- False: No interpolation is applied (NaN are not replaced)
- List of series name: The specified series are interpolated linearly
- Dictionary associating an interpolation method to a series name.
:param smooth: Specify which number series should be smoothed and how much.
            Specified series are smoothed with a Savitzky-Golay filter of order 3. The window size may be chosen (default is 15).
:param averaged: Names of the time series whose values should be averaged along each step
instead of being naively down-sampled. Can only be applied on number series.
            True means that all number series are averaged and False means none are.
:param std: Names of the averaged time series whose standard deviation should be computed.
            A new column is created for each of these time series with the name 'STD columnName'.
:return: time series
:rtype: pandas.DataFrame
"""
if stop is None:
stop = len(self)
indexes = np.array(list(self.timeid_iterator(start=start, stop=stop, step=step)), dtype=np.uint32)
intervals = np.stack((indexes, np.concatenate((indexes[1:], [stop]))), axis=1)
series_name = self.interpret_series_name(series)
if isinstance(averaged, bool):
averaged = self.series(only_number=True) if averaged else []
else:
averaged = self.interpret_series_name(averaged, only_number=True)
if isinstance(std, bool):
std = averaged if std else []
else:
if isinstance(std, str):
std = [std]
not_averaged_series = set(std).difference(averaged)
if not_averaged_series:
raise ValueError("Can't compute standard deviation of: %s.\n"
"Those series are not averaged." % repr(not_averaged_series))
if not interpolation:
interpolation = {}
elif isinstance(interpolation, bool):
interpolation = {_: 'linear' for _ in self.series(only_number=True)}
elif isinstance(interpolation, str):
if interpolation in self.series(only_number=True):
interpolation = {interpolation: 'linear'}
else:
interpolation = {_: interpolation for _ in self.series(only_number=True)}
elif isinstance(interpolation, (dict, OrderedDict)):
unknown_keys = set(interpolation.keys()).difference(self.series(only_number=True))
if unknown_keys:
raise ValueError("Can't interpolate series: %s.\n"
"Those series are either unknown or don't contain numbers!" % repr(unknown_keys))
else:
unknown_keys = set(interpolation).difference(self.series(only_number=True))
if unknown_keys:
raise ValueError("Can't interpolate series: %s.\n"
"Those series are either unknown or don't contain numbers!" % repr(unknown_keys))
interpolation = {_: 'linear' for _ in interpolation}
if not smooth:
smooth = {}
elif isinstance(smooth, bool):
smooth = {_: 15 for _ in self.series(only_number=True)}
elif isinstance(smooth, str):
if smooth not in self.series(only_number=True):
raise ValueError("Can't smooth series %s. It is either unknown or doesn't contain number!"
% smooth)
smooth = {smooth: 15}
elif isinstance(smooth, (dict, OrderedDict)):
unknown_keys = set(smooth.keys()).difference(self.series(only_number=True))
if unknown_keys:
raise ValueError("Can't smooth series: %s.\n"
"Those series are either unknown or don't contain numbers!" % repr(unknown_keys))
else:
unknown_keys = set(smooth).difference(self.series(only_number=True))
if unknown_keys:
raise ValueError("Can't smooth series: %s.\n"
"Those series are either unknown or don't contain numbers!" % repr(unknown_keys))
smooth = {_: 15 for _ in smooth}
if smooth:
import scipy.signal
df = []
for k in series_name:
series = self._timeline_series[k]
std_series = None
# Sample
if k in self.series(only_number=True):
if k not in averaged:
series = series.reindex(indexes, copy=False)
else:
mean_series = np.zeros(shape=(intervals.shape[0],))
std_series = np.zeros(shape=(intervals.shape[0],)) if k in std else None
for i, (start_id, end_id) in enumerate(intervals):
s = series.loc[start_id:end_id-1]
mean_series[i] = np.nanmean(s) if len(s) else np.nan
if std_series is not None:
std_series[i] = np.nanvar(s) if len(s) else np.nan
series = pandas.Series(index=indexes, data=mean_series, name=series.name)
if std_series is not None:
std_series = pandas.Series(index=indexes, data=std_series, name='STD '+series.name)
# Interpolate
if k in interpolation:
if interpolation[k] == 'previous':
series.fillna(method='pad', inplace=True)
if std_series is not None:
std_series.fillna(method='pad', inplace=True)
else:
series.interpolate(method=interpolation[k], inplace=True)
if std_series is not None:
std_series.interpolate(method=interpolation[k], inplace=True)
# Smooth
if k in smooth:
s = series.values
s = scipy.signal.savgol_filter(s, smooth[k], 3, mode='constant')
series = pandas.Series(index=indexes, data=s, dtype=series.dtype, name=series.name)
else:
series = series.reindex(indexes, copy=False, method='pad')
# Store
df.append(series)
if std_series is not None:
df.append(std_series)
if timestamp:
df = self.timestamp_dataframe(timestamp, indexes, series_list=True) + df
return pandas.DataFrame(df).transpose()
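    # Example call of read() (sketch; assumes 'loss' is a logged number series):
    #   df = history.read(series='loss', start=0, stop=None, step=10,
    #                     timestamp=('epoch', 'iteration'),
    #                     interpolation=True, averaged=True, std='loss')
    #   # -> DataFrame with columns epoch, iteration, loss and 'STD loss',
    #   #    one row per 10-iteration step, 'loss' averaged over each step.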
# --- Export ---
def export_dataframe(self, series=None, start=0, stop=0, timestamp=None):
"""
Export time series as a pandas DataFrame
:param series: Name of the series to export. None means all series.
:param start: Minimum time index of exported data
:param stop: Maximum time index of exported data
:param timestamp: Additional exported columns. Acceptable values are:
- epoch
- iteration
- time
- date
:rtype: pandas.DataFrame
"""
start = self.interpret_timeid(start)
stop = self.interpret_timeid(stop, stop_index=True)
series_name = self.interpret_series_name(series)
series = []
for k in series_name:
serie = self._timeline_series[k].loc[start:stop]
series.append(serie)
df = pandas.DataFrame(series).transpose()
if timestamp:
timestamp_df = self.timestamp_dataframe(timestamp, df.index)
df = pandas.concat([timestamp_df, df], axis=1)
return df
def export_csv(self, path, series=None, start=0, stop=0, timestamp=('epoch', 'iteration')):
df = self.export_dataframe(series=series, start=start, stop=stop, timestamp=timestamp)
df.to_csv(path_or_buf=path)
def export_CURView(self, path, series=None, start=0, stop=0):
def minibatch_count(e):
return self._nb_iterations_by_epoch[e-1]
df = self.export_dataframe(series=series, start=start, stop=stop, timestamp=['epoch', 'iteration'])
mini_count = df['epoch'].map(minibatch_count)
mini_count.name = 'number_of_minibatches'
df = pandas.concat((df, mini_count), axis=1, copy=False)
df.rename({'iteration': 'minibatch'}, axis='columns', inplace=True)
df.to_csv(path_or_buf=path)
# --- Timestamp Conversion ---
def epoch_to_timeid(self, epoch, iteration=1):
# Check
if epoch > self.epoch:
raise IndexError('Invalid time stamp: %ie%i. (Current iteration is %ie%i)'
% (epoch, iteration, self.epoch, self.last_timeid))
if iteration > self._nb_iterations_by_epoch[epoch]:
raise IndexError('Invalid time stamp: %ie%i. (Epoch %i only has %i iterations)'
% (epoch, iteration, epoch, self._nb_iterations_by_epoch[epoch]))
# Sum
return iteration + sum(nb_it for e, nb_it in enumerate(self._nb_iterations_by_epoch) if e + 1 < epoch) - 1
def timeid_to_timestamp(self, time_id):
if not 0 <= time_id < len(self):
raise ValueError('%i is not a valid timestamp (min:0, max:%i)' % (time_id, len(self)-1))
e = 1
epoch_iteration = self._nb_iterations_by_epoch[0]
while e <= self.epoch and time_id > epoch_iteration:
epoch_iteration += self._nb_iterations_by_epoch[e]
e += 1
i = time_id-epoch_iteration
time = self._timestamps['time'][time_id]
date = self._timestamps['date'][time_id]
return TimeStamp(epoch=e, iteration=i, time=time, date=date)
def interpret_timeid(self, timestamp, stop_index=False):
if isinstance(timestamp, TimeStamp):
return self.epoch_to_timeid(epoch=timestamp.epoch,
iteration=timestamp.iteration)
if isinstance(timestamp, int):
length = len(self)
if not -length < timestamp < length+(1 if stop_index else 0):
raise IndexError('%i is not a valid timestamp (min:-%i, max:%i)' % (timestamp, length, length+(1 if stop_index else 0)))
if timestamp < 0:
timestamp += length
if timestamp == 0 and stop_index:
timestamp = length
return timestamp
else:
timestamp = TimeStamp.interpret(timestamp)
return self.epoch_to_timeid(epoch=timestamp.epoch, iteration=timestamp.iteration)
def interpret_timestamp(self, timestamp):
return self.timeid_to_timestamp(self.interpret_timeid(timestamp))
def timeid_iterator(self, start=0, stop=0, step=1, last=False):
start = 0 if start is None else self.interpret_timeid(start)
stop = len(self) if stop is None else self.interpret_timeid(stop, stop_index=True)
if step is None:
step = 1
if isinstance(step, int):
i = start
for i in range(start, stop, step):
yield i
if last and i+step < len(self):
yield i+step
return
start_timestamp = self.timeid_to_timestamp(start)
step = TimeStamp.interpret(step)
i = start
e = start_timestamp.epoch; e_i = start_timestamp.iteration
while i < stop:
yield i
e += step.epoch
e_i += step.iteration
while e < self.epoch and e_i > self._nb_iterations_by_epoch[e]:
e_i -= self._nb_iterations_by_epoch[e]
e += 1
i = self.epoch_to_timeid(e, e_i)
if i < len(self) and last:
yield i
def interpret_series_name(self, series, only_number=False):
if series is None:
return self.series(only_number=only_number)
if isinstance(series, str):
series = [series]
elif not isinstance(series, list):
series = list(series)
unknown_keys = set(series).difference(self.series(only_number=only_number))
if unknown_keys:
            raise KeyError('%s are not known or valid serie names.' % repr(unknown_keys))
if only_number:
not_number_keys = set(series).difference(self.series(only_number=True))
if not_number_keys:
raise KeyError('%s are not number series.' % repr(not_number_keys))
return series
def timestamp_dataframe(self, timestamp=('date', 'time', 'epoch', 'iteration'), indexes=None, series_list=False):
if isinstance(timestamp, str):
timestamp = (timestamp,)
if indexes is None:
indexes = self._timestamps.index
from bisect import bisect_left
cumul_epoch = np.concatenate(([0], np.cumsum(self._nb_iterations_by_epoch)))
df = []
for k in timestamp:
if k == 'epoch':
series = pandas.Series(index=indexes, name='epoch',
data=indexes.map(lambda timeid: bisect_left(cumul_epoch, timeid+1)))
df.append(series)
elif k == 'iteration':
series = pandas.Series(index=indexes, name='iteration',
data=indexes.map(
lambda timeid: timeid - cumul_epoch[bisect_left(cumul_epoch, timeid+1)-1]))
df.append(series)
elif k == 'time':
df.append(self._timestamps['time'].reindex(indexes, copy=False))
elif k == 'date':
df.append(self._timestamps['date'].reindex(indexes, copy=False))
if series_list:
return df
return pandas.DataFrame(df).transpose()
class TimeStamp:
def __init__(self, epoch, iteration, time=None, date=None):
self._epoch = epoch
self._iteration = iteration
self._time = time
self._date = date
@property
def epoch(self):
return self._epoch
@property
def iteration(self):
return self._iteration
@property
def time(self):
return self._time
@property
def date(self):
return self._date
def __str__(self):
return '%ie%i' % (self.epoch, self.iteration)
def __repr__(self):
r = 'E%i I%i' % (self.epoch, self.iteration)
if self.time is not None:
r += ' (%s)' % time2str(self.time)
return r
@staticmethod
def interpret(timestamp):
if isinstance(timestamp, TimeStamp):
return timestamp
elif isinstance(timestamp, tuple):
if len(timestamp) != 2:
                raise ValueError('%s is not a valid timestamp\n'
                                 'tuple size should be 2 to be interpreted as TimeStamp'
                                 % (repr(timestamp)))
            if not (isinstance(timestamp[0], int) and isinstance(timestamp[1], int)):
raise ValueError('%s is not a valid timestamp' % (repr(timestamp)))
return TimeStamp(epoch=timestamp[0], iteration=timestamp[1])
elif isinstance(timestamp, str):
error = ValueError('%s is not a valid timestamp\n'
'timestamp string should be formatted: #Ee#I where #E is the epoch and #I the iteration'
% timestamp)
try:
timestamp = [int(_) for _ in timestamp.split('e')]
            except (TypeError, ValueError):
raise error
if len(timestamp) not in (1,2):
raise error
if len(timestamp) == 1:
return TimeStamp(epoch=timestamp[0], iteration=0)
else:
return TimeStamp(epoch=timestamp[0], iteration=timestamp[1])
raise TypeError('%s is not a valid timestamp.\n Invalid timestamp type: %s'
% (repr(timestamp), type(timestamp)))
| 43.105882
| 136
| 0.580786
|
d2f2edca4e3c89252c13ba2741590398b222ab03
| 108
|
py
|
Python
|
doc/samples/my_module_with_tasks.py
|
m4ta1l/doit
|
d1a1b7b3abc7641d977d3b78b580d97aea4e27ea
|
[
"MIT"
] | 1,390
|
2015-01-01T21:11:47.000Z
|
2022-03-31T11:35:44.000Z
|
doc/samples/my_module_with_tasks.py
|
m4ta1l/doit
|
d1a1b7b3abc7641d977d3b78b580d97aea4e27ea
|
[
"MIT"
] | 393
|
2015-01-05T11:18:29.000Z
|
2022-03-20T11:46:46.000Z
|
doc/samples/my_module_with_tasks.py
|
m4ta1l/doit
|
d1a1b7b3abc7641d977d3b78b580d97aea4e27ea
|
[
"MIT"
] | 176
|
2015-01-07T16:58:56.000Z
|
2022-03-28T12:12:11.000Z
|
def task_sample():
return {'actions': ['echo hello from module loader'],
'verbosity': 2,}
| 18
| 57
| 0.574074
|
19eed1d41a60bb16ea57b215ee9ad07315009da9
| 1,355
|
py
|
Python
|
quotespage/views/index_view.py
|
Cornell-CIS-Slack/cs-quotes
|
a4451ff0703acebb762641cbc236cc0e51e2d2fd
|
[
"BSD-3-Clause"
] | 1
|
2017-10-04T16:16:22.000Z
|
2017-10-04T16:16:22.000Z
|
quotespage/views/index_view.py
|
Cornell-CIS-Slack/cs-quotes
|
a4451ff0703acebb762641cbc236cc0e51e2d2fd
|
[
"BSD-3-Clause"
] | null | null | null |
quotespage/views/index_view.py
|
Cornell-CIS-Slack/cs-quotes
|
a4451ff0703acebb762641cbc236cc0e51e2d2fd
|
[
"BSD-3-Clause"
] | null | null | null |
from django.shortcuts import render
from django.views.decorators.csrf import ensure_csrf_cookie
from quotespage.models import Quote
from quotespage.forms import SearchForm
import datetime
import random
@ensure_csrf_cookie
def index(request, pagenum="0"):
"""Renders a page of the "home" view, listing quotes in descending date order."""
QUOTES_PER_PAGE=12
ipagenum = int(pagenum) #because Django insists on storing numbers as strings
allquotes = Quote.objects.filter(approved=True).order_by('-date')
begin_index = ipagenum*QUOTES_PER_PAGE
end_index = (ipagenum+1)*QUOTES_PER_PAGE
quotes_on_page = allquotes[begin_index:end_index]
if allquotes[end_index:]:
morepages = True
else:
morepages = False
search_form = SearchForm()
context = {'search_form' : search_form,
'quotes_on_page' : quotes_on_page,
'pagenum' : ipagenum,
'morepages' : morepages}
#On the first page, display a random quote of the day
if ipagenum == 0:
todays_quotes = Quote.objects.filter(
approved=True
).filter(
date__month=datetime.date.today().month
).filter(
date__day=datetime.date.today().day
)
if todays_quotes.count() > 0:
rand_index = random.randint(0,todays_quotes.count()-1)
qotd = todays_quotes[rand_index]
context["qotd"] = qotd
return render(request, 'quotespage/index.html', context)
| 31.511628
| 82
| 0.737269
|
17501caa1553ef309f0d443a5b7513cc11b50d02
| 17,040
|
py
|
Python
|
tests/functional/transactions/test_read_consist_statement_delete_undone_01.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2022-02-05T11:37:13.000Z
|
2022-02-05T11:37:13.000Z
|
tests/functional/transactions/test_read_consist_statement_delete_undone_01.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2021-09-03T11:47:00.000Z
|
2021-09-03T12:42:10.000Z
|
tests/functional/transactions/test_read_consist_statement_delete_undone_01.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2021-06-30T14:14:16.000Z
|
2021-06-30T14:14:16.000Z
|
#coding:utf-8
#
# id: functional.transactions.read_consist_statement_delete_undone_01
# title: READ CONSISTENCY. Changes produced by DELETE statement must be UNDONE when cursor resultset becomes empty after this statement start. Test-01
# decription:
# Initial article for reading:
# https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852
# Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here
# to: LOCKER-1, WORKER and LOCKER-2 respectively.
# See also: doc\\README.read_consistency.md
#
# **********************************************
#
# ::: NB :::
#                  This test uses script %FBT_REPO%\\files\\read-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests.
# Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual
# results against table TEST. These triggers launched AUTONOMOUS transactions in order to have ability to see results in any
# outcome of test.
#
# ###############
#                  The following scenario is executed here:
# * five rows are inserted into the table TEST, with IDs: 1...5.
#
# * session 'locker-1' ("BLOCKER" in Tom Kyte's article ):
# update test set id = id where id=1;
#
# * session 'worker' ("LONG" in TK article) has mission:
# delete from test where not exists(select * from test where id >= 10) order by id desc; // using TIL = read committed read consistency
#
# // Execution will have PLAN ORDER <DESCENDING_INDEX>.
# // It will delete rows starting with ID = 5 and down to ID = 2, but hang on row with ID = 1 because of locker-1;
#
# * session 'locker-2' ("FIRSTLAST" in TK article):
# (1) insert into test(id) values(6);
# (2) commit;
# (3) update test set id=id where id = 6;
#
#                  // session-'worker' remains waiting at this point because row with ID = 1 is still occupied by locker-1
# // but worker must further see record with (new) id = 6 because its TIL was changed to RC NO RECORD_VERSION.
#
# * session 'locker-1': commit (and allows lead session-worker to delete row with ID = 1).
# (1) commit;
# (2) insert into test(id) values(7);
# (3) commit;
# (4) update test set id=id where id = 7;
#
# // This: '(1) commit' - will release record with ID = 1. Worker sees this record and put write-lock on it.
# // [DOC]: "b) engine put write lock on conflicted record"
# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot.
#                  // Worker resumes its search for rows, taking into account the required order of its DML (i.e. 'ORDER BY ID DESC').
# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too"
#                  // Worker starts to search records which must be involved in its DML and *finds* the first such row: it has ID = 7.
#                  // Then it goes on and stops on ID=6 because it is occupied by locker-2.
# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE.
# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since
# // top-level statement execution starts and preserve already taken write locks
# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
# // creates new statement-level snapshot and restart execution of top-level statement."
#
#
# * session 'locker-2':
# (1) commit;
# (2) insert into test(id) values(8);
# (3) commit;
# (4) update test set id=id where id = 8;
#
# // This: '(1) commit' - will release record with ID = 6. Worker sees this record and put write-lock on it.
# // [DOC]: "b) engine put write lock on conflicted record"
# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot.
#                  // Worker resumes its search for rows, taking into account the required order of its DML (i.e. 'ORDER BY ID DESC')
# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too"
#                  // Worker starts to search records which must be involved in its DML and *finds* the first such row: it has ID = 8.
#                  // Then it goes on and stops on ID=7 because it is occupied by locker-1.
# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE.
# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since
# // top-level statement execution starts and preserve already taken write locks
# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
# // creates new statement-level snapshot and restart execution of top-level statement."
#
# * session 'locker-1': commit (this allows session-worker to delete row with ID = 7).
# (1) commit;
# (2) insert into test(id) values(9);
# (3) commit;
# (4) update test set id=id where id = 9;
#
# // Comments here are similar to previous one: STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE.
#
# * session 'locker-2': commit (this allows session-worker to delete row with ID = 6).
# (1) commit;
# (2) insert into test(id) values(10);
# (3) commit;
# (4) update test set id=id where id = 10;
#
# // This will made this row visible to session-worker when it will resume its DML.
#                  // NOTE: this record will cause session-worker to immediately UNDO all changes that it performed before - see "WHERE NOT EXISTS(...)" in its DML expression.
#
#
# Expected result:
# * session-'worker' must be cancelled. No rows must be deleted, PLUS new rows must remain (with ID = 6 ... 10).
# * we must NOT see statement-level restart because no rows actually were affected by session-worker statement.
# Column TLOG_DONE.SNAP_NO must contain only one unique value that relates to start of DELETE statement.
#
# ################
#
# Additional comments for this case - see letter from Vlad, 05-aug-2020 00:51.
#
# Checked on 4.0.0.2151 SS/CS
#
# tracker_id:
# min_versions: ['4.0']
# versions: 4.0
# qmid:
import pytest
from firebird.qa import db_factory, python_act, Action
# version: 4.0
# resources: None
substitutions_1 = [('=', ''), ('[ \t]+', ' ')]
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
# import os
# import sys
# import subprocess
# from subprocess import Popen
# import re
# import difflib
# from fdb import services
# import time
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
# db_conn.close()
#
# #--------------------------------------------
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
# global os
# for f in f_names_list:
# if type(f) == file:
# del_name = f.name
# elif type(f) == str:
# del_name = f
# else:
# print('Unrecognized type of element:', f, ' - can not be treated as file.')
# del_name = None
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
# #--------------------------------------------
#
#
# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql')
#
# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w')
# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w')
#
# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err )
# flush_and_close(f_init_log)
# flush_and_close(f_init_err)
#
# # add rows with ID = 1,2,3,4,5:
# sql_addi='''
# set term ^;
# execute block as
# begin
# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA');
# end
# ^
# set term ;^
# insert into test(id, x)
# select row_number()over(),row_number()over()
# from rdb$types rows 5;
# commit;
# '''
# runProgram('isql', [ dsn, '-q' ], sql_addi)
#
#
# con_lock_1 = fdb.connect( dsn = dsn )
# con_lock_2 = fdb.connect( dsn = dsn )
# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" )
# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" )
#
#
# #########################
# ### L O C K E R - 1 ###
# #########################
#
# con_lock_1.execute_immediate( 'update test set id=id where id = 1' )
#
# sql_text='''
# connect '%(dsn)s';
# set list on;
# set autoddl off;
# set term ^;
# execute block returns (whoami varchar(30)) as
# begin
# whoami = 'WORKER'; -- , ATT#' || current_connection;
# rdb$set_context('USER_SESSION','WHO', whoami);
# -- suspend;
# end
# ^
# set term ;^
# commit;
# SET KEEP_TRAN_PARAMS ON;
# set transaction read committed read consistency;
# --select current_connection, current_transaction from rdb$database;
# set list off;
# set wng off;
# --set plan on;
# set count on;
#
# delete from test where not exists(select * from test where id >= 10) order by id desc; -- THIS MUST BE LOCKED
#
# -- check results:
# -- ###############
#
# select id from test order by id; -- this will produce output only after all lockers do their commit/rollback
#
# select v.old_id, v.op, v.snap_no_rank
# from v_worker_log v
# where v.op = 'del';
#
# set width who 10;
# -- DO NOT check this! Values can differ here from one run to another!
# --select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id;
#
# rollback;
#
# ''' % dict(globals(), **locals())
#
# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_read_consist_statement_undone_delete_01.sql'), 'w')
# f_worker_sql.write(sql_text)
# flush_and_close(f_worker_sql)
#
#
# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w')
# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w')
#
# ############################################################################
# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ###
# ############################################################################
#
# p_worker = Popen( [ context['isql_path'], '-pag', '999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err)
# time.sleep(1)
#
#
# #########################
# ### L O C K E R - 2 ###
# #########################
# # Add record so that it **will* be included in the set of rows that must be affected by session-worker:
# con_lock_2.execute_immediate( 'insert into test(id, x) values(6, 6);' )
# con_lock_2.commit()
# con_lock_2.execute_immediate( 'update test set id = id where id = 6;' )
#
# #########################
# ### L O C K E R - 1 ###
# #########################
# con_lock_1.commit() # releases record with ID=1 (allow it to be deleted by session-worker)
# # Add record so that it **will* be included in the set of rows that must be affected by session-worker:
# con_lock_1.execute_immediate( 'insert into test(id, x) values(7, 7);' )
# con_lock_1.commit()
# con_lock_1.execute_immediate( 'update test set id = id where id = 7;' )
#
# #########################
# ### L O C K E R - 2 ###
# #########################
# con_lock_2.commit() # releases record with ID = 6, but session-worker is waiting for record with ID = 7 (that was added by locker-1).
# con_lock_2.execute_immediate( 'insert into test(id, x) values(8, 8);' )
# con_lock_2.commit()
# con_lock_2.execute_immediate( 'update test set id = id where id = 8;' )
#
#
# #########################
# ### L O C K E R - 1 ###
# #########################
# con_lock_1.commit() # releases record with ID = 7, but session-worker is waiting for record with ID = 8 (that was added by locker-2).
# con_lock_1.execute_immediate( 'insert into test(id, x) values(9, 9);' )
# con_lock_1.commit()
# con_lock_1.execute_immediate( 'update test set id = id where id = 9;' )
#
#
# #########################
# ### L O C K E R - 2 ###
# #########################
# con_lock_2.commit() # releases record with ID = 8, but session-worker is waiting for record with ID = 9 (that was added by locker-1).
# con_lock_2.execute_immediate( 'insert into test(id, x) values(10, 10);' )
# con_lock_2.commit()
# con_lock_2.execute_immediate( 'update test set id = id where id = 10;' )
#
#
# #########################
# ### L O C K E R - 1 ###
# #########################
# con_lock_1.commit() # <<< THIS MUST CANCEL ALL PERFORMED DELETIONS OF SESSION-WORKER
#
# con_lock_2.commit()
#
# # Here we wait for ISQL complete its mission:
# p_worker.wait()
#
# flush_and_close(f_worker_log)
# flush_and_close(f_worker_err)
#
# # Close lockers:
# ################
# for c in (con_lock_1, con_lock_2):
# c.close()
#
#
# # CHECK RESULTS
# ###############
#
# for f in (f_init_err, f_worker_err):
# with open(f.name,'r') as g:
# for line in g:
# if line:
# print( 'UNEXPECTED STDERR IN ' + g.name + ':' + line)
#
# with open(f_worker_log.name,'r') as f:
# for line in f:
# print(line)
#
#
# # Cleanup.
# ##########
# time.sleep(1)
# cleanup( (f_init_log, f_init_err, f_worker_sql, f_worker_log, f_worker_err) )
#
#---
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """
Records affected: 0
ID
=======
1
2
3
4
5
6
7
8
9
10
Records affected: 10
OLD_ID OP SNAP_NO_RANK
======= ====== =====================
5 DEL 1
4 DEL 1
3 DEL 1
2 DEL 1
Records affected: 4
"""
@pytest.mark.version('>=4.0')
@pytest.mark.xfail
def test_1(act_1: Action):
pytest.fail("Test not IMPLEMENTED")
| 43.358779
| 180
| 0.52588
|
c3a08ac545c33348d7ec8f1974b8be6408b64593
| 25,218
|
py
|
Python
|
tensorforce/models/memory_model.py
|
petrosgk/tensorforce
|
dd04f904acac78fd185ea8ee2c3ce6bac8859c1d
|
[
"Apache-2.0"
] | 1
|
2019-04-06T10:04:00.000Z
|
2019-04-06T10:04:00.000Z
|
tensorforce/models/memory_model.py
|
petrosgk/tensorforce
|
dd04f904acac78fd185ea8ee2c3ce6bac8859c1d
|
[
"Apache-2.0"
] | null | null | null |
tensorforce/models/memory_model.py
|
petrosgk/tensorforce
|
dd04f904acac78fd185ea8ee2c3ce6bac8859c1d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
from tensorforce import util, TensorForceError
from tensorforce.core.memories import Memory
from tensorforce.core.optimizers import Optimizer
from tensorforce.models import Model
class MemoryModel(Model):
"""
    A memory model is a generic model that accumulates and samples data.
"""
def __init__(
self,
states,
actions,
scope,
device,
saver,
summarizer,
distributed,
batching_capacity,
variable_noise,
states_preprocessing,
actions_exploration,
reward_preprocessing,
update_mode,
memory,
optimizer,
discount
):
"""
Memory model.
Args:
states (spec): The state-space description dictionary.
actions (spec): The action-space description dictionary.
scope (str): The root scope str to use for tf variable scoping.
device (str): The name of the device to run the graph of this model on.
saver (spec): Dict specifying whether and how to save the model's parameters.
summarizer (spec): Dict specifying which tensorboard summaries should be created and added to the graph.
distributed (spec): Dict specifying whether and how to do distributed training on the model's graph.
batching_capacity (int): Batching capacity.
variable_noise (float): The stddev value of a Normal distribution used for adding random
noise to the model's output (for each batch, noise can be toggled and - if active - will be resampled).
Use None for not adding any noise.
states_preprocessing (spec / dict of specs): Dict specifying whether and how to preprocess state signals
(e.g. normalization, greyscale, etc..).
actions_exploration (spec / dict of specs): Dict specifying whether and how to add exploration to the model's
"action outputs" (e.g. epsilon-greedy).
reward_preprocessing (spec): Dict specifying whether and how to preprocess rewards coming
from the Environment (e.g. reward normalization).
update_mode (spec): Update mode.
memory (spec): Memory.
optimizer (spec): Dict specifying the tf optimizer to use for tuning the model's trainable parameters.
discount (float): The RL reward discount factor (gamma).
"""
self.update_mode = update_mode
self.memory_spec = memory
self.optimizer_spec = optimizer
# Discount
assert discount is None or discount >= 0.0
self.discount = discount
self.memory = None
self.optimizer = None
self.fn_discounted_cumulative_reward = None
self.fn_loss_per_instance = None
self.fn_regularization_losses = None
self.fn_loss = None
self.fn_optimization = None
super(MemoryModel, self).__init__(
states=states,
actions=actions,
scope=scope,
device=device,
saver=saver,
summarizer=summarizer,
distributed=distributed,
batching_capacity=batching_capacity,
variable_noise=variable_noise,
states_preprocessing=states_preprocessing,
actions_exploration=actions_exploration,
reward_preprocessing=reward_preprocessing
)
def as_local_model(self):
super(MemoryModel, self).as_local_model()
self.optimizer_spec = dict(
type='global_optimizer',
optimizer=self.optimizer_spec
)
def initialize(self, custom_getter):
super(MemoryModel, self).initialize(custom_getter)
# Memory
self.memory = Memory.from_spec(
spec=self.memory_spec,
kwargs=dict(
states=self.states_spec,
internals=self.internals_spec,
actions=self.actions_spec,
summary_labels=self.summary_labels
)
)
# Optimizer
self.optimizer = Optimizer.from_spec(
spec=self.optimizer_spec,
kwargs=dict(summary_labels=self.summary_labels)
)
# TensorFlow functions
self.fn_discounted_cumulative_reward = tf.make_template(
name_='discounted-cumulative-reward',
func_=self.tf_discounted_cumulative_reward,
custom_getter_=custom_getter
)
self.fn_reference = tf.make_template(
name_='reference',
func_=self.tf_reference,
custom_getter_=custom_getter
)
self.fn_loss_per_instance = tf.make_template(
name_='loss-per-instance',
func_=self.tf_loss_per_instance,
custom_getter_=custom_getter
)
self.fn_regularization_losses = tf.make_template(
name_='regularization-losses',
func_=self.tf_regularization_losses,
custom_getter_=custom_getter
)
self.fn_loss = tf.make_template(
name_='loss',
func_=self.tf_loss,
custom_getter_=custom_getter
)
self.fn_optimization = tf.make_template(
name_='optimization',
func_=self.tf_optimization,
custom_getter_=custom_getter
)
self.fn_import_experience = tf.make_template(
name_='import-experience',
func_=self.tf_import_experience,
custom_getter_=custom_getter
)
def tf_initialize(self):
super(MemoryModel, self).tf_initialize()
self.memory.initialize()
def tf_discounted_cumulative_reward(self, terminal, reward, discount, final_reward=0.0):
"""
Creates the TensorFlow operations for calculating the discounted cumulative rewards
for a given sequence of rewards.
Args:
terminal: Terminal boolean tensor.
reward: Reward tensor.
discount: Discount factor.
final_reward: Last reward value in the sequence.
Returns:
Discounted cumulative reward tensor.
"""
# TODO: n-step cumulative reward (particularly for envs without terminal)
def cumulate(cumulative, reward_and_terminal):
rew, term = reward_and_terminal
return tf.where(condition=term, x=rew, y=(rew + cumulative * discount))
# Reverse since reward cumulation is calculated right-to-left, but tf.scan only works left-to-right
reward = tf.reverse(tensor=reward, axis=(0,))
terminal = tf.reverse(tensor=terminal, axis=(0,))
reward = tf.scan(fn=cumulate, elems=(reward, terminal), initializer=tf.stop_gradient(input=final_reward))
return tf.reverse(tensor=reward, axis=(0,))
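    # Worked example for tf_discounted_cumulative_reward (hypothetical values, discount=0.9):
    #   reward   = [1.0, 0.0, 2.0]
    #   terminal = [False, False, True]
    #   scanning the reversed sequence: 2.0 ; 0.0 + 0.9*2.0 = 1.8 ; 1.0 + 0.9*1.8 = 2.62
    #   -> discounted cumulative reward = [2.62, 1.8, 2.0]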
# # TODO: this could be a utility helper function if we remove self.discount and only allow external discount-value input
# def tf_discounted_cumulative_reward(self, terminal, reward, discount=None, final_reward=0.0, horizon=0):
# """
# Creates and returns the TensorFlow operations for calculating the sequence of discounted cumulative rewards
# for a given sequence of single rewards.
# Example:
# single rewards = 2.0 1.0 0.0 0.5 1.0 -1.0
# terminal = False, False, False, False True False
# gamma = 0.95
# final_reward = 100.0 (only matters for last episode (r=-1.0) as this episode has no terminal signal)
# horizon=3
# output = 2.95 1.45 1.38 1.45 1.0 94.0
# Args:
# terminal: Tensor (bool) holding the is-terminal sequence. This sequence may contain more than one
# True value. If its very last element is False (not terminating), the given `final_reward` value
# is assumed to follow the last value in the single rewards sequence (see below).
# reward: Tensor (float) holding the sequence of single rewards. If the last element of `terminal` is False,
# an assumed last reward of the value of `final_reward` will be used.
# discount (float): The discount factor (gamma). By default, take the Model's discount factor.
# final_reward (float): Reward value to use if last episode in sequence does not terminate (terminal sequence
# ends with False). This value will be ignored if horizon == 1 or discount == 0.0.
# horizon (int): The length of the horizon (e.g. for n-step cumulative rewards in continuous tasks
# without terminal signals). Use 0 (default) for an infinite horizon. Note that horizon=1 leads to the
# exact same results as a discount factor of 0.0.
# Returns:
# Discounted cumulative reward tensor with the same shape as `reward`.
# """
# # By default -> take Model's gamma value
# if discount is None:
# discount = self.discount
# # Accumulates discounted (n-step) reward (start new if terminal)
# def cumulate(cumulative, reward_terminal_horizon_subtract):
# rew, is_terminal, is_over_horizon, sub = reward_terminal_horizon_subtract
# return tf.where(
# # If terminal, start new cumulation.
# condition=is_terminal,
# x=rew,
# y=tf.where(
# # If we are above the horizon length (H) -> subtract discounted value from H steps back.
# condition=is_over_horizon,
# x=(rew + cumulative * discount - sub),
# y=(rew + cumulative * discount)
# )
# )
# # Accumulates length of episodes (starts new if terminal)
# def len_(cumulative, term):
# return tf.where(
# condition=term,
# # Start counting from 1 after is-terminal signal
# x=tf.ones(shape=(), dtype=tf.int32),
# # Otherwise, increase length by 1
# y=cumulative + 1
# )
# # Reverse, since reward cumulation is calculated right-to-left, but tf.scan only works left-to-right.
# reward = tf.reverse(tensor=reward, axis=(0,))
# # e.g. -1.0 1.0 0.5 0.0 1.0 2.0
# terminal = tf.reverse(tensor=terminal, axis=(0,))
# # e.g. F T F F F F
# # Store the steps until end of the episode(s) determined by the input terminal signals (True starts new count).
# lengths = tf.scan(fn=len_, elems=terminal, initializer=0)
# # e.g. 1 1 2 3 4 5
# off_horizon = tf.greater(lengths, tf.fill(dims=tf.shape(lengths), value=horizon))
# # e.g. F F F F T T
# # Calculate the horizon-subtraction value for each step.
# if horizon > 0:
# horizon_subtractions = tf.map_fn(lambda x: (discount ** horizon) * x, reward, dtype=tf.float32)
# # Shift right by size of horizon (fill rest with 0.0).
# horizon_subtractions = tf.concat([np.zeros(shape=(horizon,)), horizon_subtractions], axis=0)
# horizon_subtractions = tf.slice(horizon_subtractions, begin=(0,), size=tf.shape(reward))
# # e.g. 0.0, 0.0, 0.0, -1.0*g^3, 1.0*g^3, 0.5*g^3
# # all 0.0 if infinite horizon (special case: horizon=0)
# else:
# horizon_subtractions = tf.zeros(shape=tf.shape(reward))
# # Now do the scan, each time summing up the previous step (discounted by gamma) and
# # subtracting the respective `horizon_subtraction`.
# reward = tf.scan(
# fn=cumulate,
# elems=(reward, terminal, off_horizon, horizon_subtractions),
# initializer=final_reward if horizon != 1 else 0.0
# )
# # Re-reverse again to match input sequences.
# return tf.reverse(tensor=reward, axis=(0,))
def tf_reference(self, states, internals, actions, terminal, reward, next_states, next_internals, update):
"""
Creates the TensorFlow operations for obtaining the reference tensor(s), in case of a
comparative loss.
Args:
states: Dict of state tensors.
internals: List of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
update: Boolean tensor indicating whether this call happens during an update.
Returns:
Reference tensor(s).
"""
return None
def tf_loss_per_instance(self, states, internals, actions, terminal, reward, next_states, next_internals, update, reference=None):
"""
Creates the TensorFlow operations for calculating the loss per batch instance.
Args:
states: Dict of state tensors.
internals: List of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
update: Boolean tensor indicating whether this call happens during an update.
reference: Optional reference tensor(s), in case of a comparative loss.
Returns:
Loss per instance tensor.
"""
raise NotImplementedError
def tf_regularization_losses(self, states, internals, update):
"""
Creates the TensorFlow operations for calculating the regularization losses for the given input states.
Args:
states: Dict of state tensors.
internals: List of prior internal state tensors.
update: Boolean tensor indicating whether this call happens during an update.
Returns:
Dict of regularization loss tensors.
"""
return dict()
def tf_loss(self, states, internals, actions, terminal, reward, next_states, next_internals, update, reference=None):
"""
Creates the TensorFlow operations for calculating the full loss of a batch.
Args:
states: Dict of state tensors.
internals: List of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
update: Boolean tensor indicating whether this call happens during an update.
reference: Optional reference tensor(s), in case of a comparative loss.
Returns:
Loss tensor.
"""
# Mean loss per instance
loss_per_instance = self.fn_loss_per_instance(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
next_states=next_states,
next_internals=next_internals,
update=update,
reference=reference
)
self.memory.update_batch(loss_per_instance=loss_per_instance)
loss = tf.reduce_mean(input_tensor=loss_per_instance, axis=0)
# Loss without regularization summary
if 'losses' in self.summary_labels:
summary = tf.summary.scalar(name='loss-without-regularization', tensor=loss)
self.summaries.append(summary)
# Regularization losses
losses = self.fn_regularization_losses(states=states, internals=internals, update=update)
if len(losses) > 0:
loss += tf.add_n(inputs=list(losses.values()))
if 'regularization' in self.summary_labels:
for name, loss_val in losses.items():
summary = tf.summary.scalar(name=('regularization/' + name), tensor=loss_val)
self.summaries.append(summary)
# Total loss summary
if 'losses' in self.summary_labels or 'total-loss' in self.summary_labels:
summary = tf.summary.scalar(name='total-loss', tensor=loss)
self.summaries.append(summary)
return loss
def optimizer_arguments(self, states, internals, actions, terminal, reward, next_states, next_internals):
"""
Returns the optimizer arguments including the time, the list of variables to optimize,
and various functions which the optimizer might require to perform an update step.
Args:
states: Dict of state tensors.
internals: List of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
Returns:
Optimizer arguments as dict.
"""
arguments = dict(
time=self.global_timestep,
variables=self.get_variables(),
arguments=dict(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
next_states=next_states,
next_internals=next_internals,
update=tf.constant(value=True)
),
fn_reference=self.fn_reference,
fn_loss=self.fn_loss
)
if self.global_model is not None:
arguments['global_variables'] = self.global_model.get_variables()
return arguments
def tf_optimization(self, states, internals, actions, terminal, reward, next_states=None, next_internals=None):
"""
Creates the TensorFlow operations for performing an optimization update step based
on the given input states and actions batch.
Args:
states: Dict of state tensors.
internals: List of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
Returns:
The optimization operation.
"""
arguments = self.optimizer_arguments(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
next_states=next_states,
next_internals=next_internals
)
return self.optimizer.minimize(**arguments)
def tf_observe_timestep(self, states, internals, actions, terminal, reward):
# Store timestep in memory
stored = self.memory.store(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
)
# Periodic optimization
with tf.control_dependencies(control_inputs=(stored,)):
unit = self.update_mode['unit']
batch_size = self.update_mode['batch_size']
frequency = self.update_mode.get('frequency', batch_size)
if unit == 'timesteps':
# Timestep-based batch
optimize = tf.logical_and(
x=tf.equal(x=(self.timestep % frequency), y=0),
y=tf.greater_equal(x=self.timestep, y=batch_size)
)
batch = self.memory.retrieve_timesteps(n=batch_size)
elif unit == 'episodes':
# Episode-based batch
optimize = tf.logical_and(
x=tf.equal(x=(self.episode % frequency), y=0),
y=tf.logical_and(
# Only update once per episode increment.
x=tf.greater(x=tf.count_nonzero(input_tensor=terminal), y=0),
y=tf.greater_equal(x=self.episode, y=batch_size)
)
)
batch = self.memory.retrieve_episodes(n=batch_size)
elif unit == 'sequences':
# Timestep-sequence-based batch
sequence_length = self.update_mode.get('length', 8)
optimize = tf.logical_and(
x=tf.equal(x=(self.timestep % frequency), y=0),
y=tf.greater_equal(x=self.timestep, y=(batch_size + sequence_length - 1))
)
batch = self.memory.retrieve_sequences(n=batch_size, sequence_length=sequence_length)
else:
raise TensorForceError("Invalid update unit: {}.".format(unit))
# Do not calculate gradients for memory-internal operations.
batch = util.map_tensors(
fn=(lambda tensor: tf.stop_gradient(input=tensor)),
tensors=batch
)
optimization = tf.cond(
pred=optimize,
true_fn=(lambda: self.fn_optimization(**batch)),
false_fn=tf.no_op
)
return optimization
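        # Hypothetical `update_mode` configurations matching the three branches above
        # (the key names `unit`, `batch_size`, `frequency` and `length` are taken from
        # this method; the numbers are illustrative only):
        #
        #   update_mode = dict(unit='timesteps', batch_size=64, frequency=4)
        #   update_mode = dict(unit='episodes', batch_size=10)   # frequency defaults to batch_size
        #   update_mode = dict(unit='sequences', batch_size=32, length=8, frequency=4)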
def tf_import_experience(self, states, internals, actions, terminal, reward):
"""
Imports experiences into the TensorFlow memory structure. Can be used to import
off-policy data.
:param states: Dict of state values to import with keys as state names and values as values to set.
:param internals: Internal values to set, can be fetched from agent via agent.current_internals
if no values available.
:param actions: Dict of action values to import with keys as action names and values as values to set.
:param terminal: Terminal value(s)
:param reward: Reward value(s)
"""
return self.memory.store(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
)
def create_operations(self, states, internals, actions, terminal, reward, deterministic):
# Import experience operation.
self.import_experience_output = self.fn_import_experience(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
)
super(MemoryModel, self).create_operations(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
deterministic=deterministic
)
def get_variables(self, include_submodules=False, include_nontrainable=False):
model_variables = super(MemoryModel, self).get_variables(
include_submodules=include_submodules,
include_nontrainable=include_nontrainable
)
if include_nontrainable:
memory_variables = self.memory.get_variables()
model_variables += memory_variables
optimizer_variables = self.optimizer.get_variables()
# For some reason, some optimizer variables are only registered in the model.
for variable in optimizer_variables:
if variable in model_variables:
model_variables.remove(variable)
model_variables += optimizer_variables
return model_variables
def get_summaries(self):
model_summaries = super(MemoryModel, self).get_summaries()
memory_summaries = self.memory.get_summaries()
optimizer_summaries = self.optimizer.get_summaries()
return model_summaries + memory_summaries + optimizer_summaries
def import_experience(self, states, internals, actions, terminal, reward):
"""
Stores experiences.
"""
fetches = self.import_experience_output
feed_dict = self.get_feed_dict(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
)
self.monitored_session.run(fetches=fetches, feed_dict=feed_dict)
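# A minimal usage sketch (not part of the original class): feeding off-policy
# data into the memory from outside the graph. `agent`, the state/action names
# and the batch arrays below are assumptions for illustration only.
#
#   model = agent.model          # any model deriving from MemoryModel
#   model.import_experience(
#       states=dict(state=batch_states),
#       internals=[],            # e.g. no internal (RNN) states
#       actions=dict(action=batch_actions),
#       terminal=batch_terminals,
#       reward=batch_rewards
#   )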
| 41.004878
| 134
| 0.613015
|
74b872674163701c0c8f1bf84a2bdc3ae236e341
| 2,175
|
py
|
Python
|
lxd_image_server/tools/mirror.py
|
zfuller/lxd-image-server
|
18e195ad1e77629ec04c3f3bd78a60cb070c70bc
|
[
"Apache-2.0"
] | 40
|
2019-04-12T12:07:52.000Z
|
2022-03-31T10:57:02.000Z
|
lxd_image_server/tools/mirror.py
|
zfuller/lxd-image-server
|
18e195ad1e77629ec04c3f3bd78a60cb070c70bc
|
[
"Apache-2.0"
] | 8
|
2019-07-10T13:12:12.000Z
|
2021-10-20T23:17:14.000Z
|
lxd_image_server/tools/mirror.py
|
zfuller/lxd-image-server
|
18e195ad1e77629ec04c3f3bd78a60cb070c70bc
|
[
"Apache-2.0"
] | 11
|
2019-04-18T21:51:58.000Z
|
2022-03-26T15:57:05.000Z
|
import re
import logging
import subprocess
from threading import Lock
from pathlib import Path
import attr
from lxd_image_server.tools.config import Config
logger = logging.getLogger(__name__)
@attr.s
class Mirror():
name = attr.ib()
user = attr.ib()
key_path = attr.ib()
url = attr.ib()
remote = attr.ib()
img_dir = attr.ib()
def __attrs_post_init__(self):
self.root = {}
def update(self):
self._sync_path(self.img_dir)
def _sync_path(self, op_path):
command = ['rsync', '-azh', '-e', '/usr/bin/ssh -i ' + self.key_path +
' -l ' + self.user, op_path,
self.servername + ':' + str(Path(op_path).parent),
'--delete']
logger.debug('running: %s', command)
try:
subprocess.run(command).check_returncode()
except subprocess.CalledProcessError as error:
logger.error('Failed to synchronize: %s', error)
else:
logger.info("Path %s synced for mirror %s", op_path, self.name)
@property
def servername(self):
if self.remote:
return self.remote
match = re.search(r'https://([\w\.]*):?\d*', self.url)
if match:
return match.group(1)
else:
logger.error('Server %s has no host', self.url)
class MirrorManager():
img_dir = '/var/www/simplestreams/images'
mirrors = {}
_lock = Lock()
@classmethod
def update(cls):
logger.info('Updating all mirrors')
mirrors = {}
with cls._lock:
mirrors = cls.mirrors.copy()
for _, mirror in mirrors.items():
mirror.update()
@classmethod
def update_mirror_list(cls):
with cls._lock:
for name, mirror in Config.get('mirrors', {}).items():
cls.mirrors[name] = Mirror(
name,
mirror['user'],
mirror['key_path'],
mirror['url'],
mirror.get('remote'),
cls.img_dir
)
logger.info('Mirror list updated')
cls.update()
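# Hypothetical shape of the `mirrors` section that update_mirror_list() expects
# from Config (key names are taken from the code above; all values are made up):
#
#   mirrors:
#     backup-eu:
#       user: lxdsync
#       key_path: /etc/lxd-image-server/ssh_key
#       url: https://images.example.org:8443
#       remote: images-backup.example.org   # optional; otherwise the host is parsed from url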
| 27.1875
| 78
| 0.537011
|
b795ca4916e999bdba52a7081cab52ed1f95c701
| 704
|
py
|
Python
|
ikats/__init__.py
|
IKATS/ikats_api
|
86f965e9ea83fde1fb64f187b294d383d267f77f
|
[
"Apache-2.0"
] | null | null | null |
ikats/__init__.py
|
IKATS/ikats_api
|
86f965e9ea83fde1fb64f187b294d383d267f77f
|
[
"Apache-2.0"
] | null | null | null |
ikats/__init__.py
|
IKATS/ikats_api
|
86f965e9ea83fde1fb64f187b294d383d267f77f
|
[
"Apache-2.0"
] | 1
|
2020-01-27T14:44:27.000Z
|
2020-01-27T14:44:27.000Z
|
# -*- coding: utf-8 -*-
"""
Copyright 2019 CS Systèmes d'Information
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ikats.api import IkatsAPI
# API Version
__version__ = '1.0.0'
__author__ = 'fabien.tortora@c-s.fr'
| 29.333333
| 72
| 0.764205
|
8c69465c08e8834389f8c2d177d6b2faa955a3a0
| 527
|
py
|
Python
|
day/010/days_in_month.py
|
Wanzaz/100-days-of-code
|
a450a5c64f04cf1155fcc76e59a73f5b509fd8de
|
[
"MIT"
] | null | null | null |
day/010/days_in_month.py
|
Wanzaz/100-days-of-code
|
a450a5c64f04cf1155fcc76e59a73f5b509fd8de
|
[
"MIT"
] | null | null | null |
day/010/days_in_month.py
|
Wanzaz/100-days-of-code
|
a450a5c64f04cf1155fcc76e59a73f5b509fd8de
|
[
"MIT"
] | null | null | null |
def is_leap(year):
if year % 4 == 0:
if year % 100 == 0:
if year % 400 == 0:
return True
else:
return False
else:
return True
else:
return False
def days_in_month(year, month):
if month < 1 or month > 12 or year < 1:
return "Invalid month"
month_days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
if is_leap(year) and month == 2:
return 29
return month_days[month - 1]
year = int(input("Enter a year: "))
month = int(input("Enter a month: "))
days = days_in_month(year, month)
print(days)
| 15.5
| 64
| 0.611006
|
ceca4a0c56a579920356ec9fd749f2f292697e5f
| 5,928
|
py
|
Python
|
Past_experiments/rE2C2.py
|
abogdanova/FedMed
|
72f238c31b6714c664e1b0e40204f9528f764182
|
[
"MIT"
] | 5
|
2019-07-23T14:49:46.000Z
|
2022-03-30T13:54:22.000Z
|
Past_experiments/rE2C2.py
|
abogdanova/FedMed
|
72f238c31b6714c664e1b0e40204f9528f764182
|
[
"MIT"
] | null | null | null |
Past_experiments/rE2C2.py
|
abogdanova/FedMed
|
72f238c31b6714c664e1b0e40204f9528f764182
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import collections
import numpy as np
from six.moves import range
import tensorflow as tf
import datetime
from tensorflow_federated import python as tff
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.keras import layers
tf.compat.v1.enable_v2_behavior()
EXP_CODE = 'rE1C2'
NUM_EXAMPLES_PER_USER = 2000
BATCH_SIZE = 32
USERS = 5
NUM_EPOCHS = 2
CLASSES = 10
WIDTH = 32
HEIGHT = 32
CHANNELS = 3
def mane():
""" Run program """
cifar_train, cifar_test = tf.keras.datasets.cifar10.load_data()
federated_train_data = [get_distributed(cifar_train, u, 'r') for u in range(USERS)]
federated_test_data = [get_distributed(cifar_test, u, 'r') for u in range(USERS)]
sample_batch = federated_train_data[1][-2]
def model_fn():
keras_model = create_compiled_keras_model()
return tff.learning.from_compiled_keras_model(keras_model, sample_batch)
iterative_process = tff.learning.build_federated_averaging_process(model_fn)
evaluation = tff.learning.build_federated_evaluation(model_fn)
state = iterative_process.initialize()
fd_test_accuracy = []
fd_test_loss = []
fd_train_loss = []
for round_num in range(50):
selected = np.random.choice(5, 2, replace=False)
state, metrics = iterative_process.next(state, list(np.array(federated_train_data)[selected]))
test_metrics = evaluation(state.model, federated_test_data)
fd_train_loss.append(metrics[1])
fd_test_loss.append(test_metrics.loss)
fd_test_accuracy.append(test_metrics.sparse_categorical_accuracy)
try:
with open('Log/Exp10/'+ EXP_CODE + '.txt', 'w') as log:
print(EXP_CODE + "Train = {}".format(fd_train_loss), file=log)
print(EXP_CODE + "Test = {}".format(fd_test_loss), file=log)
print(EXP_CODE + "Accuracy = {}".format(fd_test_accuracy), file=log)
except IOError:
print('File Error')
def get_indices_realistic(y, u):
# split dataset indices into fixed, unequal shares per user
all_indices = [i for i, d in enumerate(y)]
shares_arr = [4000, 2000, 2000, 1000, 1000]
user_indices = []
for u in range(USERS):
user_indices.append([all_indices.pop(0) for i in range(shares_arr[u])])
return user_indices
def get_indices_unbalanced(y):
# split dataset into arrays of each class label
indices_array = []
for c in range(CLASSES):
indices_array.append([i for i, d in enumerate(y) if d == c])
# each user will have 2 classes excluded from their data sets, thus 250 examples * remaining 8 classes
class_shares = 250
# store indices for future use
user_indices = []
# auxiliary index array used to pop out the pair of classes missing at each user
class_index = list(range(CLASSES))
for u in range(USERS):
columns_out = [class_index.pop(0) for i in range(2)]
selected_columns = set(range(CLASSES)) - set(columns_out)
starting_index = u*class_shares
user_indices.append(
np.array(indices_array)[list(selected_columns)].T[starting_index:starting_index + class_shares]
.flatten())
return user_indices
def get_indices_unbalanced_completely(y):
# split dataset into arrays of each class label
indices_array = []
for c in range(CLASSES):
indices_array.append([i for i, d in enumerate(y) if d == c])
class_shares = CLASSES // min(CLASSES, USERS)
user_indices = []
for u in range(USERS):
user_indices.append(
np.array(
[indices_array.pop(0)[:NUM_EXAMPLES_PER_USER//class_shares] for j in range(class_shares)])
.flatten())
return user_indices
def get_indices_even(y):
# split dataset into arrays of each class label
indices_array = []
for c in range(CLASSES):
indices_array.append([i for i, d in enumerate(y) if d == c])
user_indices = []
class_shares = NUM_EXAMPLES_PER_USER // CLASSES
# take even shares of each class for every user
for u in range(USERS):
starting_index = u*class_shares
user_indices.append(np.array(indices_array).T[starting_index:starting_index + class_shares].flatten())
return user_indices
def get_distributed(source, u, distribution):
if distribution == 'i':
indices = get_indices_even(source[1])[u]
elif distribution == 'n':
indices = get_indices_unbalanced(source[1])[u]
elif distribution == 'r':
indices = get_indices_realistic(source[1][:10000], u)[u]
else:
indices = get_indices_unbalanced_completely(source[1])[u]
output_sequence = []
for repeat in range(NUM_EPOCHS):
for i in range(0, len(indices), BATCH_SIZE):
batch_samples = indices[i:i + BATCH_SIZE]
output_sequence.append({
'x': np.array([source[0][b] / 255.0 for b in batch_samples], dtype=np.float32),
'y': np.array([source[1][b] for b in batch_samples], dtype=np.int32)})
return output_sequence
def create_compiled_keras_model():
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32,(3, 3),
activation="tanh",
padding="same",
input_shape=(WIDTH, HEIGHT, CHANNELS)),
tf.keras.layers.MaxPooling2D(pool_size=(2,2)),
tf.keras.layers.Conv2D(64, (3, 3), activation="tanh", padding="same"),
tf.keras.layers.MaxPooling2D(pool_size=(2,2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation="tanh"),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
def loss_fn(y_true, y_pred):
return tf.reduce_mean(tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred))
model.compile(loss=loss_fn, optimizer="adam", metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return model
if __name__ == "__main__":
mane()
| 35.497006
| 113
| 0.680499
|
0e5c25305b9840c02a659aacce8df686557a103c
| 4,798
|
py
|
Python
|
source/mutableSources32/warps/resources/filter_bank.py
|
nodesetc/vb.mi-dev
|
461ef0031d41818ff94e3d05e3e4b96d2f7f30a0
|
[
"MIT"
] | 47
|
2020-05-11T09:45:44.000Z
|
2022-03-17T22:12:53.000Z
|
source/mutableSources32/warps/resources/filter_bank.py
|
robtherich/vb.mi-dev
|
4497b5917ed9680a170d3c9b87ac34e525e65978
|
[
"MIT"
] | 2
|
2021-04-07T09:14:37.000Z
|
2022-01-25T09:00:07.000Z
|
source/mutableSources32/warps/resources/filter_bank.py
|
robtherich/vb.mi-dev
|
4497b5917ed9680a170d3c9b87ac34e525e65978
|
[
"MIT"
] | 6
|
2020-08-06T11:09:18.000Z
|
2021-12-10T14:37:02.000Z
|
#!/usr/bin/python2.5
#
# Copyright 2014 Emilie Gillet.
#
# Author: Emilie Gillet (emilie.o.gillet@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# See http://creativecommons.org/licenses/MIT/ for more information.
#
# -----------------------------------------------------------------------------
#
# Lookup table definitions.
import numpy
import pylab
import scipy.signal
def pole_pair_to_f_fq(pole_pair):
fq = 1 - pole_pair.prod()
f = -(2 - fq - (pole_pair.sum())) ** 0.5
return f.real, fq.real
def modified_chamberlin(f, fq, x, mode='bp'):
lp = 0.0
bp = 0.0
y = numpy.zeros(x.shape)
x_ = 0.0
coefficient = 1.0 if mode == 'bp' else 0.0
for i in xrange(len(y)):
lp += f * bp
bp += -fq * bp -f * lp + (x[i] + x_ * coefficient)
x_ = x[i]
if mode == 'bp':
y[i] = fq * bp
elif mode == 'lp':
y[i] = f * lp
elif mode == 'hp':
y[i] = x_ - lp * f - bp * fq
return y
SAMPLE_RATE = 96000
IR_SIZE = 2048
sample_rates = [SAMPLE_RATE / 12] * 13
sample_rates += [SAMPLE_RATE / 3] * 6
sample_rates += [SAMPLE_RATE] * 1
num_bands = len(sample_rates)
interval = 2 ** (1 / 3.0)
first_frequency = 110 / interval
frequencies = first_frequency * (interval ** numpy.arange(0, num_bands))
filters = []
responses = {}
reconstruction = {}
generate_figures = __name__ == '__main__'
for index, (frequency, sr) in enumerate(zip(frequencies, sample_rates)):
if not sr in reconstruction:
reconstruction[sr] = [0.0, 0.0]
responses[sr] = []
frequency = frequency / (sr * 0.5)
if index == 0:
w = frequency
z, p, k = scipy.signal.cheby1(4, 0.5, w, 'lowpass', output='zpk')
svf_mode = 'lp'
gain = 1.0
elif index == num_bands - 1:
w = frequency
z, p, k = scipy.signal.cheby1(4, 0.25, w, 'highpass', output='zpk')
svf_mode = 'hp'
gain = 21 * frequency
else:
w = [frequency / (interval ** 0.5), frequency * (interval ** 0.5)]
z, p, k = scipy.signal.butter(2, w, 'bandpass', output='zpk')
svf_mode = 'bp'
gain = 0.25
# Filter using direct form
out = numpy.eye(IR_SIZE, 1).ravel()
b, a = scipy.signal.zpk2tf(z, p, k)
out = scipy.signal.lfilter(b, a, out)
out = scipy.signal.lfilter(b, a, out)
reconstruction[sr][0] += out
responses[sr] += [out]
# Filter using modified Chamberlin filter
out = numpy.eye(IR_SIZE, 1).ravel() * gain
coefficients = [0, 0, 0]
for i in xrange(2):
f, fq = pole_pair_to_f_fq(p[i*2:i*2 + 2])
out = modified_chamberlin(f, fq, out, svf_mode)
out = modified_chamberlin(f, fq, out, svf_mode)
coefficients += [f, fq]
delay = (numpy.arange(len(out)) * out * out).sum() / (out * out).sum()
# Completely empirical fixes to the delay to maximize the flatness of the
# total impulse response.
if index == num_bands - 1:
delay += 4
coefficients[0] = SAMPLE_RATE / sr
coefficients[1] = numpy.floor(delay)
coefficients[2] = gain
filters += [('%3.0f_%d' % (frequency * 0.5 * sr, sr), coefficients)]
reconstruction[sr][1] += out
if generate_figures:
pylab.figure(figsize=(20,8))
n = len(responses.keys())
for row, sr in enumerate(sorted(responses.keys())):
f = numpy.arange(IR_SIZE / 2 + 1) / float(IR_SIZE) * sr
for column, plots in enumerate([reconstruction[sr], responses[sr]]):
pylab.subplot(2, n, column * n + row + 1)
for r in plots:
sy = numpy.log10(numpy.abs(numpy.fft.rfft(r)) + 1e-20) * 20.0
pylab.semilogx(f, sy)
pylab.xlim(80, sr / 2)
pylab.ylim(-36, 12)
pylab.xlabel('Frequency (Hz)')
pylab.ylabel('Gain (dB)')
if len(plots) == 2:
pylab.ylim(-4, 3)
#pylab.legend(['Direct form', 'Chamberlin'])
pylab.savefig('filter_bank.pdf')
# pylab.show()
pylab.close()
| 29.617284
| 79
| 0.634223
|
e3823bf5df3bd15e3bb4b7c3c3e7b2d7620b849f
| 447
|
py
|
Python
|
01 List/03_search_insert_position.py
|
kmanadkat/leetcoding-09-21
|
ef353edddc55727ff371ff421d560a0a9298d6b1
|
[
"MIT"
] | null | null | null |
01 List/03_search_insert_position.py
|
kmanadkat/leetcoding-09-21
|
ef353edddc55727ff371ff421d560a0a9298d6b1
|
[
"MIT"
] | null | null | null |
01 List/03_search_insert_position.py
|
kmanadkat/leetcoding-09-21
|
ef353edddc55727ff371ff421d560a0a9298d6b1
|
[
"MIT"
] | null | null | null |
from typing import List
######################################################################
# Time Complexity: O(N)
# Space Complexity: O(1)
######################################################################
def searchInsert(nums: List[int], target: int) -> int:
numsLength = len(nums)
if numsLength == 0:
return 0
for i in range(numsLength):
if nums[i] >= target:
return i
return numsLength
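# The scan above is linear; since `nums` is assumed sorted, the same insert
# position can be found in O(log N). A minimal alternative sketch using the
# standard library (the function name is ours, not part of the original file):
from bisect import bisect_left
def search_insert_binary(nums: List[int], target: int) -> int:
    # bisect_left returns the left-most index at which `target` could be
    # inserted while keeping `nums` sorted, which is exactly the answer.
    return bisect_left(nums, target)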
| 24.833333
| 70
| 0.41387
|
33102ea542f4e26bf1f2cbc4b52d2d81205d4c9e
| 49
|
py
|
Python
|
django_u2f/__init__.py
|
d4rker/django-u2f
|
949bb965468370219ee55482d11ca439bccd0f2a
|
[
"BSD-2-Clause"
] | 164
|
2015-01-24T19:50:33.000Z
|
2022-02-11T14:01:13.000Z
|
django_u2f/__init__.py
|
d4rker/django-u2f
|
949bb965468370219ee55482d11ca439bccd0f2a
|
[
"BSD-2-Clause"
] | 30
|
2015-01-01T03:47:31.000Z
|
2022-01-14T20:16:32.000Z
|
django_u2f/__init__.py
|
d4rker/django-u2f
|
949bb965468370219ee55482d11ca439bccd0f2a
|
[
"BSD-2-Clause"
] | 38
|
2015-07-29T12:36:55.000Z
|
2022-01-13T22:39:37.000Z
|
default_app_config = 'django_u2f.apps.U2FConfig'
| 24.5
| 48
| 0.836735
|
a034bc047341b4b912258cfd0cba6a045bdf3176
| 818
|
py
|
Python
|
devpi_remote_user/main.py
|
Polyconseil/devpi-remote_user
|
d033dfce899be44ac8f59688d6f625c48c700012
|
[
"MIT"
] | 1
|
2021-01-12T12:51:28.000Z
|
2021-01-12T12:51:28.000Z
|
devpi_remote_user/main.py
|
Polyconseil/devpi-remote_user
|
d033dfce899be44ac8f59688d6f625c48c700012
|
[
"MIT"
] | null | null | null |
devpi_remote_user/main.py
|
Polyconseil/devpi-remote_user
|
d033dfce899be44ac8f59688d6f625c48c700012
|
[
"MIT"
] | 1
|
2015-10-17T14:25:19.000Z
|
2015-10-17T14:25:19.000Z
|
from devpi_server.log import threadlog
def devpiserver_get_credentials(request):
"""Search request for X-Remote-User header.
Returns a tuple with (X-Remote-User, '') if credentials could be
extracted, or None if no credentials were found.
The first plugin to return credentials is used, the order of plugin
calls is undefined.
"""
if 'X-Remote-User' in request.headers:
remote_user = request.headers['X-Remote-User']
threadlog.info("Found X-Remote-User in request: %s", remote_user)
return remote_user, ''
def devpiserver_auth_user(userdict, username, password):
"""Since we accept all remote_user, no password checks are needed."""
threadlog.info("devpi-remoteuser accepting user: %s", username)
return {'status': 'ok', 'groups': ['remote_user']}
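# A minimal sketch (not part of the plugin) of what the credential hook returns
# when the header is present; the request object below is a stand-in for the
# real request and exists only for illustration:
#
#   class _FakeRequest:
#       headers = {'X-Remote-User': 'alice'}
#
#   devpiserver_get_credentials(_FakeRequest())   # -> ('alice', '')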
| 35.565217
| 73
| 0.702934
|
e90ad8f414d550fb9c26e87d35e91aa9546355a4
| 3,501
|
py
|
Python
|
azure-iot-hub/samples/iothub_registry_manager_edge_device_sample.py
|
nabeelmsft/azure-iot-sdk-python
|
51fa810907373fd2134af49bd03d3977ca7a9a8d
|
[
"MIT"
] | null | null | null |
azure-iot-hub/samples/iothub_registry_manager_edge_device_sample.py
|
nabeelmsft/azure-iot-sdk-python
|
51fa810907373fd2134af49bd03d3977ca7a9a8d
|
[
"MIT"
] | null | null | null |
azure-iot-hub/samples/iothub_registry_manager_edge_device_sample.py
|
nabeelmsft/azure-iot-sdk-python
|
51fa810907373fd2134af49bd03d3977ca7a9a8d
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import sys
import os
import msrest
import uuid
import base64
from azure.iot.hub import IoTHubRegistryManager
from azure.iot.hub.models import Twin, TwinProperties, DeviceCapabilities
iothub_connection_str = os.getenv("IOTHUB_CONNECTION_STRING")
device_id = os.getenv("IOTHUB_NEW_DEVICE_ID")
def print_device_info(title, iothub_device):
print(title + ":")
print("device_id = {0}".format(iothub_device.device_id))
print("authentication.type = {0}".format(iothub_device.authentication.type))
print("authentication.symmetric_key = {0}".format(iothub_device.authentication.symmetric_key))
print(
"authentication.x509_thumbprint = {0}".format(iothub_device.authentication.x509_thumbprint)
)
print("connection_state = {0}".format(iothub_device.connection_state))
print(
"connection_state_updated_tTime = {0}".format(iothub_device.connection_state_updated_time)
)
print(
"cloud_to_device_message_count = {0}".format(iothub_device.cloud_to_device_message_count)
)
print("device_scope = {0}".format(iothub_device.device_scope))
print("etag = {0}".format(iothub_device.etag))
print("generation_id = {0}".format(iothub_device.generation_id))
print("last_activity_time = {0}".format(iothub_device.last_activity_time))
print("status = {0}".format(iothub_device.status))
print("status_reason = {0}".format(iothub_device.status_reason))
print("status_updated_time = {0}".format(iothub_device.status_updated_time))
print("")
# This sample creates an IoT Edge device with SAS authentication
# For other authentication types use the appropriate create and update APIs:
# X509:
# new_device = iothub_registry_manager.create_device_with_x509(device_id, primary_thumbprint, secondary_thumbprint, status, iot_edge)
# Certificate authority:
# new_device = iothub_registry_manager.create_device_with_certificate_authority(device_id, status, iot_edge)
try:
# Create IoTHubRegistryManager
iothub_registry_manager = IoTHubRegistryManager.from_connection_string(iothub_connection_str)
# Create a device
primary_key = base64.b64encode(str(uuid.uuid4()).encode()).decode()
secondary_key = base64.b64encode(str(uuid.uuid4()).encode()).decode()
device_state = "enabled"
iot_edge = True
new_device = iothub_registry_manager.create_device_with_sas(
device_id, primary_key, secondary_key, device_state, iot_edge
)
print_device_info("create_device", new_device)
# Get device twin
twin = iothub_registry_manager.get_twin(device_id)
print(twin)
print(twin.capabilities)
print("")
# Delete the device
iothub_registry_manager.delete_device(device_id)
except msrest.exceptions.HttpOperationError as ex:
print("HttpOperationError error {0}".format(ex.response.text))
except Exception as ex:
print("Unexpected error {0}".format(ex))
except KeyboardInterrupt:
print("{} stopped".format(__file__))
finally:
print("{} finished".format(__file__))
| 43.222222
| 139
| 0.684947
|
f75a60f1a2fe0eed85c4f87e6c7ec5642e5cfb8e
| 378
|
py
|
Python
|
backup/ya_disk.py
|
frosthamster/Music-manager
|
c9dbe8c8830960456fa37e1618a36cdf705453d8
|
[
"Unlicense"
] | null | null | null |
backup/ya_disk.py
|
frosthamster/Music-manager
|
c9dbe8c8830960456fa37e1618a36cdf705453d8
|
[
"Unlicense"
] | null | null | null |
backup/ya_disk.py
|
frosthamster/Music-manager
|
c9dbe8c8830960456fa37e1618a36cdf705453d8
|
[
"Unlicense"
] | null | null | null |
from YaDiskClient.YaDiskClient import YaDisk, YaDiskException
from .chunk_partitioner import ChunkPartitioner
class YaDiskWithProgress(YaDisk):
def upload(self, file, path):
resp = self._sendRequest("PUT", path, data=ChunkPartitioner(file, 'Uploading library'))
if resp.status_code != 201:
raise YaDiskException(resp.status_code, resp.content)
| 37.8
| 95
| 0.743386
|
1ed0b68a49ac79a56b0d844904830641c7d32cf7
| 111
|
py
|
Python
|
programmers-lecture/2.sorting/2.reverse_sort.py
|
khh180cm/algorithm
|
f40990479b6a4ba466073defcd6e8ca771c0b886
|
[
"MIT"
] | null | null | null |
programmers-lecture/2.sorting/2.reverse_sort.py
|
khh180cm/algorithm
|
f40990479b6a4ba466073defcd6e8ca771c0b886
|
[
"MIT"
] | null | null | null |
programmers-lecture/2.sorting/2.reverse_sort.py
|
khh180cm/algorithm
|
f40990479b6a4ba466073defcd6e8ca771c0b886
|
[
"MIT"
] | null | null | null |
"""
내림차순 정렬
"""
L = [3, 8, 2, 7, 6, 10, 9]
L2 = sorted(L, reverse=True)
print(L2)
print(L)
L.sort()
print(L)
| 9.25
| 28
| 0.531532
|
e355a68a1204042c08bf0d67205b75fff1baa2f0
| 1,582
|
py
|
Python
|
examples/look/look/images.py
|
RioAtHome/falcon
|
edd9352e630dbbb6272370281fc5fa6d792df057
|
[
"Apache-2.0"
] | 8,217
|
2015-03-06T19:30:57.000Z
|
2022-03-30T14:54:36.000Z
|
examples/look/look/images.py
|
vestigegroup/falcon
|
d1a8bbb1465130b87531b0202131e7ac338eb35a
|
[
"Apache-2.0"
] | 1,637
|
2015-03-06T21:27:17.000Z
|
2022-03-31T06:27:19.000Z
|
examples/look/look/images.py
|
vestigegroup/falcon
|
d1a8bbb1465130b87531b0202131e7ac338eb35a
|
[
"Apache-2.0"
] | 1,064
|
2015-03-07T15:32:24.000Z
|
2022-03-25T17:23:40.000Z
|
import io
import mimetypes
import os
import uuid
import falcon
import msgpack
class Resource:
def __init__(self, image_store):
self._image_store = image_store
def on_get(self, req, resp):
doc = {
'images': [
{
'href': '/images/1eaf6ef1-7f2d-4ecc-a8d5-6e8adba7cc0e.png',
},
],
}
resp.data = msgpack.packb(doc, use_bin_type=True)
resp.content_type = 'application/msgpack'
resp.status = falcon.HTTP_200
def on_post(self, req, resp):
name = self._image_store.save(req.stream, req.content_type)
resp.status = falcon.HTTP_201
resp.location = '/images/' + name
class ImageStore:
_CHUNK_SIZE_BYTES = 4096
# Note the use of dependency injection for standard library
# methods. We'll use these later to avoid monkey-patching.
def __init__(self, storage_path, uuidgen=uuid.uuid4, fopen=io.open):
self._storage_path = storage_path
self._uuidgen = uuidgen
self._fopen = fopen
def save(self, image_stream, image_content_type):
ext = mimetypes.guess_extension(image_content_type)
name = '{uuid}{ext}'.format(uuid=self._uuidgen(), ext=ext)
image_path = os.path.join(self._storage_path, name)
with self._fopen(image_path, 'wb') as image_file:
while True:
chunk = image_stream.read(self._CHUNK_SIZE_BYTES)
if not chunk:
break
image_file.write(chunk)
return name
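# A minimal sketch (not part of the original module) showing why the injected
# `uuidgen` and `fopen` above are useful: in a test they can be swapped for
# fakes without monkey-patching. Every name below is illustrative only.
class _FakeImageFile(io.BytesIO):
    """In-memory stand-in for an opened file that survives the `with` block."""
    def __enter__(self):
        return self
    def __exit__(self, *args):
        # Deliberately do not close, so the buffer can be inspected afterwards.
        return False
def _example_image_store_test():
    fake_file = _FakeImageFile()
    store = ImageStore(
        '/tmp/images',                          # hypothetical storage path
        uuidgen=lambda: 'fixed-uuid',           # deterministic file name
        fopen=lambda path, mode: fake_file,     # no real filesystem access
    )
    name = store.save(io.BytesIO(b'fake-png-bytes'), 'image/png')
    assert name == 'fixed-uuid.png'
    assert fake_file.getvalue() == b'fake-png-bytes'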
| 27.275862
| 79
| 0.609987
|
19472c862215ab2bdd19d206cca87451bbab2afe
| 19,953
|
py
|
Python
|
kivy/network/urlrequest.py
|
RiiotLabs/kivy
|
71bd76b2d1eaf93d251f9bc9b2a5fe04306327e1
|
[
"MIT"
] | 2
|
2021-05-16T09:46:14.000Z
|
2021-11-17T11:23:15.000Z
|
kivy/network/urlrequest.py
|
RiiotLabs/kivy
|
71bd76b2d1eaf93d251f9bc9b2a5fe04306327e1
|
[
"MIT"
] | 1
|
2016-11-11T13:45:42.000Z
|
2016-11-11T13:45:42.000Z
|
kivy/network/urlrequest.py
|
RiiotLabs/kivy
|
71bd76b2d1eaf93d251f9bc9b2a5fe04306327e1
|
[
"MIT"
] | 2
|
2020-03-28T10:18:00.000Z
|
2021-02-13T06:34:14.000Z
|
'''
UrlRequest
==========
.. versionadded:: 1.0.8
You can use the :class:`UrlRequest` to make asynchronous requests on the
web and get the result when the request is completed. The spirit is the
same as the XHR object in Javascript.
The content is also decoded if the Content-Type is
application/json and the result automatically passed through json.loads.
The syntax to create a request::
from kivy.network.urlrequest import UrlRequest
req = UrlRequest(url, on_success, on_redirect, on_failure, on_error,
on_progress, req_body, req_headers, chunk_size,
timeout, method, decode, debug, file_path, ca_file,
verify)
Only the first argument is mandatory: the rest are optional.
By default, a "GET" request will be sent. If the :attr:`UrlRequest.req_body` is
not None, a "POST" request will be sent. It's up to you to adjust
:attr:`UrlRequest.req_headers` to suit your requirements and the response
to the request will be accessible as the parameter called "result" on
the callback function of the on_success event.
Example of fetching weather in Paris::
def got_weather(req, results):
for key, value in results['weather'][0].items():
print(key, ': ', value)
req = UrlRequest(
'http://api.openweathermap.org/data/2.5/weather?q=Paris,fr',
got_weather)
Example of Posting data (adapted from httplib example)::
import urllib
def bug_posted(req, result):
print('Our bug is posted !')
print(result)
params = urllib.urlencode({'@number': 12524, '@type': 'issue',
'@action': 'show'})
headers = {'Content-type': 'application/x-www-form-urlencoded',
'Accept': 'text/plain'}
req = UrlRequest('bugs.python.org', on_success=bug_posted, req_body=params,
req_headers=headers)
If you want a synchronous request, you can call the wait() method.
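For example, a minimal synchronous sketch (the url here is only illustrative)::
    req = UrlRequest('https://example.com/api/status')
    req.wait()
    print(req.result)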
'''
from collections import deque
from threading import Thread
from json import loads
from time import sleep
from kivy.compat import PY2
if PY2:
from httplib import HTTPConnection
from urlparse import urlparse, urlunparse
else:
from http.client import HTTPConnection
from urllib.parse import urlparse, urlunparse
try:
import ssl
HTTPSConnection = None
if PY2:
from httplib import HTTPSConnection
else:
from http.client import HTTPSConnection
except ImportError:
# Depending on the platform, if OpenSSL support wasn't compiled into Python,
# this class is not available.
pass
from kivy.clock import Clock
from kivy.weakmethod import WeakMethod
from kivy.logger import Logger
# list to save UrlRequest and prevent GC on un-referenced objects
g_requests = []
class UrlRequest(Thread):
'''A UrlRequest. See module documentation for usage.
.. versionchanged:: 1.5.1
Add `debug` parameter
.. versionchanged:: 1.0.10
Add `method` parameter
.. versionchanged:: 1.8.0
Parameter `decode` added.
Parameter `file_path` added.
Parameter `on_redirect` added.
Parameter `on_failure` added.
.. versionchanged:: 1.9.1
Parameter `ca_file` added.
Parameter `verify` added.
.. versionchanged:: 1.9.2
Parameters `proxy_host`, `proxy_port` and `proxy_headers` added.
:Parameters:
`url`: str
Complete url string to call.
`on_success`: callback(request, result)
Callback function to call when the result has been fetched.
`on_redirect`: callback(request, result)
Callback function to call if the server returns a Redirect.
`on_failure`: callback(request, result)
Callback function to call if the server returns a Client or
Server Error.
`on_error`: callback(request, error)
Callback function to call if an error occurs.
`on_progress`: callback(request, current_size, total_size)
Callback function that will be called to report progression of the
download. `total_size` might be -1 if no Content-Length has been
reported in the http response.
This callback will be called after each `chunk_size` is read.
`req_body`: str, defaults to None
Data to send in the request. If it's not None, a POST will be done
instead of a GET.
`req_headers`: dict, defaults to None
Custom headers to add to the request.
`chunk_size`: int, defaults to 8192
Size of each chunk to read, used only when `on_progress` callback
has been set. If you decrease it too much, a lot of on_progress
callbacks will be fired and will slow down your download. If you
want to have the maximum download speed, increase the chunk_size
or don't use ``on_progress``.
`timeout`: int, defaults to None
If set, blocking operations will time out after this many seconds.
`method`: str, defaults to 'GET' (or 'POST' if ``body`` is specified)
The HTTP method to use.
`decode`: bool, defaults to True
If False, skip decoding of the response.
`debug`: bool, defaults to False
If True, it will use the Logger.debug to print information
about url access/progression/errors.
`file_path`: str, defaults to None
If set, the result of the UrlRequest will be written to this path
instead of in memory.
`ca_file`: str, defaults to None
Indicates a SSL CA certificate file path to validate HTTPS
certificates against
`verify`: bool, defaults to True
If False, disables SSL CA certificate verification
`proxy_host`: str, defaults to None
If set, the proxy host to use for this connection.
`proxy_port`: int, defaults to None
If set, and `proxy_host` is also set, the port to use for
connecting to the proxy server.
`proxy_headers`: dict, defaults to None
If set, and `proxy_host` is also set, the headers to send to the
proxy server in the ``CONNECT`` request.
'''
def __init__(self, url, on_success=None, on_redirect=None,
on_failure=None, on_error=None, on_progress=None,
req_body=None, req_headers=None, chunk_size=8192,
timeout=None, method=None, decode=True, debug=False,
file_path=None, ca_file=None, verify=True, proxy_host=None,
proxy_port=None, proxy_headers=None):
super(UrlRequest, self).__init__()
self._queue = deque()
self._trigger_result = Clock.create_trigger(self._dispatch_result, 0)
self.daemon = True
self.on_success = WeakMethod(on_success) if on_success else None
self.on_redirect = WeakMethod(on_redirect) if on_redirect else None
self.on_failure = WeakMethod(on_failure) if on_failure else None
self.on_error = WeakMethod(on_error) if on_error else None
self.on_progress = WeakMethod(on_progress) if on_progress else None
self.decode = decode
self.file_path = file_path
self._debug = debug
self._result = None
self._error = None
self._is_finished = False
self._resp_status = None
self._resp_headers = None
self._resp_length = -1
self._chunk_size = chunk_size
self._timeout = timeout
self._method = method
self.ca_file = ca_file
self.verify = verify
self._proxy_host = proxy_host
self._proxy_port = proxy_port
self._proxy_headers = proxy_headers
#: Url of the request
self.url = url
#: Request body passed in __init__
self.req_body = req_body
#: Request headers passed in __init__
self.req_headers = req_headers
# save our request to prevent GC
g_requests.append(self)
self.start()
def run(self):
q = self._queue.appendleft
url = self.url
req_body = self.req_body
req_headers = self.req_headers
try:
result, resp = self._fetch_url(url, req_body, req_headers, q)
if self.decode:
result = self.decode_result(result, resp)
except Exception as e:
q(('error', None, e))
else:
q(('success', resp, result))
# using trigger can result in a missed on_success event
self._trigger_result()
# clean ourself when the queue is empty
while len(self._queue):
sleep(.1)
self._trigger_result()
# ok, authorize the GC to clean us.
if self in g_requests:
g_requests.remove(self)
def _fetch_url(self, url, body, headers, q):
# Parse and fetch the current url
trigger = self._trigger_result
chunk_size = self._chunk_size
report_progress = self.on_progress is not None
timeout = self._timeout
file_path = self.file_path
ca_file = self.ca_file
verify = self.verify
if self._debug:
Logger.debug('UrlRequest: {0} Fetch url <{1}>'.format(
id(self), url))
Logger.debug('UrlRequest: {0} - body: {1}'.format(
id(self), body))
Logger.debug('UrlRequest: {0} - headers: {1}'.format(
id(self), headers))
# parse url
parse = urlparse(url)
# translate scheme to connection class
cls = self.get_connection_for_scheme(parse.scheme)
# correctly determine host/port
port = None
host = parse.netloc.split(':')
if len(host) > 1:
port = int(host[1])
host = host[0]
# reconstruct path to pass on the request
path = parse.path
if parse.params:
path += ';' + parse.params
if parse.query:
path += '?' + parse.query
if parse.fragment:
path += '#' + parse.fragment
# create connection instance
args = {}
if timeout is not None:
args['timeout'] = timeout
if ca_file is not None and hasattr(ssl, 'create_default_context'):
ctx = ssl.create_default_context(cafile=ca_file)
ctx.verify_mode = ssl.CERT_REQUIRED
args['context'] = ctx
if not verify and parse.scheme == 'https' and (
hasattr(ssl, 'create_default_context')):
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
args['context'] = ctx
if self._proxy_host:
Logger.debug('UrlRequest: {0} - proxy via {1}:{2}'.format(
id(self), self._proxy_host, self._proxy_port
))
req = cls(self._proxy_host, self._proxy_port, **args)
if parse.scheme == 'https':
req.set_tunnel(host, port, self._proxy_headers)
else:
path = urlunparse(parse)
else:
req = cls(host, port, **args)
# send request
method = self._method
if method is None:
method = 'GET' if body is None else 'POST'
req.request(method, path, body, headers or {})
# read header
resp = req.getresponse()
# read content
if report_progress or file_path is not None:
try:
total_size = int(resp.getheader('content-length'))
except:
total_size = -1
# before starting the download, send an initial (zero) progress report
# so the user can initialize their UI
if report_progress:
q(('progress', resp, (0, total_size)))
def get_chunks(fd=None):
bytes_so_far = 0
result = b''
while 1:
chunk = resp.read(chunk_size)
if not chunk:
break
if fd:
fd.write(chunk)
else:
result += chunk
bytes_so_far += len(chunk)
# report progress to user
if report_progress:
q(('progress', resp, (bytes_so_far, total_size)))
trigger()
return bytes_so_far, result
if file_path is not None:
with open(file_path, 'wb') as fd:
bytes_so_far, result = get_chunks(fd)
else:
bytes_so_far, result = get_chunks()
# ensure that results are dispatched for the last chunk,
# avoid trigger
if report_progress:
q(('progress', resp, (bytes_so_far, total_size)))
trigger()
else:
result = resp.read()
try:
if isinstance(result, bytes):
result = result.decode('utf-8')
except UnicodeDecodeError:
# e.g. if the response is an image, utf-8 decoding would not work
pass
req.close()
# return everything
return result, resp
def get_connection_for_scheme(self, scheme):
'''Return the Connection class for a particular scheme.
This is an internal function that can be expanded to support custom
schemes.
Actual supported schemes: http, https.
'''
if scheme == 'http':
return HTTPConnection
elif scheme == 'https' and HTTPSConnection is not None:
return HTTPSConnection
else:
raise Exception('No class for scheme %s' % scheme)
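    # A minimal sketch of expanding this hook for a custom scheme, as suggested
    # in the docstring above (the 'unix' scheme and the UnixHTTPConnection class
    # are purely hypothetical):
    #
    #   class MyUrlRequest(UrlRequest):
    #       def get_connection_for_scheme(self, scheme):
    #           if scheme == 'unix':
    #               return UnixHTTPConnection
    #           return super(MyUrlRequest, self).get_connection_for_scheme(scheme)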
def decode_result(self, result, resp):
'''Decode the result fetched from url according to his Content-Type.
Currently supports only application/json.
'''
# Entry to decode url from the content type.
# For example, if the content type is a json, it will be automatically
# decoded.
content_type = resp.getheader('Content-Type', None)
if content_type is not None:
ct = content_type.split(';')[0]
if ct == 'application/json':
if isinstance(result, bytes):
result = result.decode('utf-8')
try:
return loads(result)
except:
return result
return result
def _dispatch_result(self, dt):
while True:
# Read the result pushed on the queue, and dispatch to the client
try:
result, resp, data = self._queue.pop()
except IndexError:
return
if resp:
# XXX usage of dict can be dangerous if multiple headers
# are set even if it's invalid. But it looks like it's ok
# ? http://stackoverflow.com/questions/2454494/..
# ..urllib2-multiple-set-cookie-headers-in-response
self._resp_headers = dict(resp.getheaders())
self._resp_status = resp.status
if result == 'success':
status_class = resp.status // 100
if status_class in (1, 2):
if self._debug:
Logger.debug('UrlRequest: {0} Download finished with'
' {1} datalen'.format(id(self),
len(data)))
self._is_finished = True
self._result = data
if self.on_success:
func = self.on_success()
if func:
func(self, data)
elif status_class == 3:
if self._debug:
Logger.debug('UrlRequest: {} Download '
'redirected'.format(id(self)))
self._is_finished = True
self._result = data
if self.on_redirect:
func = self.on_redirect()
if func:
func(self, data)
elif status_class in (4, 5):
if self._debug:
Logger.debug('UrlRequest: {} Download failed with '
'http error {}'.format(id(self),
resp.status))
self._is_finished = True
self._result = data
if self.on_failure:
func = self.on_failure()
if func:
func(self, data)
elif result == 'error':
if self._debug:
Logger.debug('UrlRequest: {0} Download error '
'<{1}>'.format(id(self), data))
self._is_finished = True
self._error = data
if self.on_error:
func = self.on_error()
if func:
func(self, data)
elif result == 'progress':
if self._debug:
Logger.debug('UrlRequest: {0} Download progress '
'{1}'.format(id(self), data))
if self.on_progress:
func = self.on_progress()
if func:
func(self, data[0], data[1])
else:
assert(0)
@property
def is_finished(self):
'''Return True if the request has finished, whether it's a
success or a failure.
'''
return self._is_finished
@property
def result(self):
'''Return the result of the request.
This value is not determined until the request is finished.
'''
return self._result
@property
def resp_headers(self):
'''If the request has been completed, return a dictionary containing
the headers of the response. Otherwise, it will return None.
'''
return self._resp_headers
@property
def resp_status(self):
'''Return the status code of the response if the request is complete,
otherwise return None.
'''
return self._resp_status
@property
def error(self):
'''Return the error of the request.
This value is not determined until the request is completed.
'''
return self._error
@property
def chunk_size(self):
'''Return the size of a chunk, used only in "progress" mode (when
on_progress callback is set.)
'''
return self._chunk_size
def wait(self, delay=0.5):
'''Wait for the request to finish (until :attr:`resp_status` is not
None)
.. note::
This method is intended to be used in the main thread, and the
callback will be dispatched from the same thread
from which you're calling.
.. versionadded:: 1.1.0
'''
while self.resp_status is None:
self._dispatch_result(delay)
sleep(delay)
if __name__ == '__main__':
from pprint import pprint
def on_success(req, result):
pprint('Got the result:')
pprint(result)
def on_error(req, error):
pprint('Got an error:')
pprint(error)
req = UrlRequest('https://en.wikipedia.org/w/api.php?format'
'=json&action=query&titles=Kivy&prop=revisions&rvprop=content',
on_success, on_error)
while not req.is_finished:
sleep(1)
Clock.tick()
print('result =', req.result)
print('error =', req.error)
| 34.943958
| 79
| 0.564176
|
6d7089fdd4c52573d2d02b0d4de69c3e649d169f
| 11,328
|
py
|
Python
|
diff_gpmp2/datasets/generate_optimal_paths_gpmp2.py
|
mhmukadam/dgpmp2
|
b55c10f2a2fcff2cbf0e3291e0fff97fefa86661
|
[
"BSD-3-Clause"
] | 31
|
2020-08-12T19:07:29.000Z
|
2022-02-22T10:52:53.000Z
|
diff_gpmp2/datasets/generate_optimal_paths_gpmp2.py
|
mhmukadam/dgpmp2
|
b55c10f2a2fcff2cbf0e3291e0fff97fefa86661
|
[
"BSD-3-Clause"
] | null | null | null |
diff_gpmp2/datasets/generate_optimal_paths_gpmp2.py
|
mhmukadam/dgpmp2
|
b55c10f2a2fcff2cbf0e3291e0fff97fefa86661
|
[
"BSD-3-Clause"
] | 7
|
2020-08-14T14:41:39.000Z
|
2021-11-03T08:17:09.000Z
|
#!/usr/bin/env python
import os, sys
sys.path.insert(0, "..")
import matplotlib.pyplot as plt
import numpy as np
import time
import torch
import yaml
from ompl import base as ob
from ompl import geometric as og
from diff_gpmp2.env.env_2d import Env2D
from diff_gpmp2.robot_models import PointRobot2D
from diff_gpmp2.gpmp2 import GPMP2Planner, DiffGPMP2Planner
from diff_gpmp2.ompl_rrtstar import RRTStar
from diff_gpmp2.utils.planner_utils import path_to_traj_avg_vel
use_cuda = False
np.set_printoptions(threshold=np.nan, linewidth=np.inf)
torch.set_default_tensor_type(torch.DoubleTensor)
use_cuda = torch.cuda.is_available() if use_cuda else False
device = torch.device('cuda') if use_cuda else torch.device('cpu')
def load_prob_params(param_file, robot_file, env_file):
with open(param_file, 'r') as fp:
planner_data = yaml.load(fp)
planner_params = planner_data['gpmp2']['planner_params']
gp_params = planner_data['gpmp2']['gp_params']
obs_params = planner_data['gpmp2']['obs_params']
optim_params = planner_data['gpmp2']['optim_params']
with open(env_file, 'r') as fp:
env_data = yaml.load(fp)
with open(robot_file, 'r') as fp:
robot_data = yaml.load(fp)
gp_params['Q_c_inv'] = torch.tensor(gp_params['Q_c_inv'])
gp_params['K_s'] = torch.tensor(gp_params['K_s'])
gp_params['K_g'] = torch.tensor(gp_params['K_g'])
obs_params['cost_sigma'] = torch.tensor(obs_params['cost_sigma'])
obs_params['epsilon_dist'] = torch.tensor(obs_params['epsilon_dist'])
robot_data['sphere_radius'] = torch.tensor(robot_data['sphere_radius'])
print planner_params
print gp_params['Q_c_inv']
print env_data
print optim_params
print obs_params
return env_data, planner_params, gp_params, obs_params, optim_params, robot_data
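# Hypothetical shapes of the three yaml files read above (key names are taken
# from this script; every value is only a placeholder):
#
#   gpmp2_params.yaml:
#     gpmp2:
#       planner_params: {dof: 2, state_dim: 4, total_time_sec: ..., total_time_step: ...}
#       gp_params:      {Q_c_inv: [[...]], K_s: [[...]], K_g: [[...]]}
#       obs_params:     {cost_sigma: ..., epsilon_dist: ...}
#       optim_params:   {...}
#   robot.yaml:      {type: point_robot, sphere_radius: ...}
#   env_params.yaml: {dim: 2, x_lims: [...], y_lims: [...]}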
def get_random_2d_confs(x_lims, y_lims, env, eps):
is_feas_start = False
is_feas_goal = False
min_dist_achieved = False
lbx = x_lims[0] + 0.5
lby = y_lims[0] + 0.5
ubx = x_lims[1] - 0.5
uby = y_lims[1] - 0.5
max_d = np.linalg.norm(np.array([ubx, uby]) - np.array([lbx, lby]))
while not is_feas_start:
start_x = lbx + torch.rand(1).item() * (ubx - lbx)
start_y = lby + torch.rand(1).item() * (uby - lby)
start_conf = torch.tensor([[start_x, start_y]], device=device)
is_feas_start = env.is_feasible(start_conf[0], eps)
num_tries = 0
while (not is_feas_goal) or (not min_dist_achieved):
goal_x = lbx + torch.rand(1).item() * (ubx - lbx)
goal_y = lby + torch.rand(1).item() * (uby - lby)
goal_conf = torch.tensor([[goal_x, goal_y ]], device=device)
is_feas_goal = env.is_feasible(goal_conf[0], eps)
if is_feas_goal:
dist = torch.norm(goal_conf - start_conf)
if dist.item() >= 0.6*max_d or num_tries > 15:
min_dist_achieved = True
else:
num_tries += 1
return start_conf, goal_conf
def straight_line_traj(start_conf, goal_conf, num_inter_states, state_dim):
start_vel = torch.tensor([[0., 0.]], device=device)
goal_vel = torch.tensor([[0., 0.]], device=device)
avg_vel = (goal_conf - start_conf)/num_inter_states
start = torch.cat((start_conf, start_vel), dim=1)
goal = torch.cat((goal_conf, goal_vel), dim=1)
th_init = torch.zeros((int(num_inter_states)+1, state_dim), device=device) #Straight line at constant velocity
for i in range(int(num_inter_states)+1):
th_init[i, 0:2] = start_conf*(num_inter_states - i)*1./num_inter_states*1. + goal_conf * i*1./num_inter_states*1. #+ np.array([0., 5.0])
th_init[i, 2:4] = avg_vel
return start, goal, th_init
def rrt_star_traj(start_conf, goal_conf, env_params, env, robot, planner_params, obs_params):
#RRTstar setup
space = ob.RealVectorStateSpace(2)
bounds = ob.RealVectorBounds(2)
bounds.setLow(env_params['x_lims'][0])
bounds.setHigh(env_params['x_lims'][1])
space.setBounds(bounds)
init_planner = RRTStar(space, bounds, env, robot, planner_params, obs_params)
init_path = init_planner.plan(start_conf, goal_conf, 4.0)
th_init = path_to_traj_avg_vel(init_path, planner_params['total_time_sec'], planner_params['dof'], device)
start_vel = torch.tensor([[0., 0.]], device=device)
goal_vel = torch.tensor([[0., 0.]], device=device)
start = torch.cat((start_conf, start_vel), dim=1)
goal = torch.cat((goal_conf, goal_vel), dim=1)
return start, goal, th_init
def is_feas_traj(th, env, eps):
feas_traj = True
for i in range(th.shape[0]):
if not env.is_feasible(th[i, 0:2], eps):
feas_traj=False
break
return feas_traj
def generate_start_goal(env_params, planner_params, ndims, num, env, robot, obs_params, rrt_star_init=False, fix_start_goal=False):
if ndims == 2:
x_lims = env_params['x_lims']
y_lims = env_params['y_lims']
eps = robot.get_sphere_radii()[0] + obs_params['epsilon_dist'] + 0.1
if num == 0 or fix_start_goal:
#Sample random diagonal problem
if fix_start_goal: rand_diag = 0
else: rand_diag = np.random.randint(0,4)
print rand_diag
start_noise = [0.0,0.0]#np.abs(np.random.normal(size=2))
goal_noise = [0.0,0.0]#np.abs(np.random.normal(size=2))
if rand_diag == 0:
start_conf = torch.tensor([[x_lims[0] + 0.2 + start_noise[0], y_lims[0] + 0.2+start_noise[1]]], device=device)
goal_conf = torch.tensor([[x_lims[1] - 0.2 - goal_noise[0], y_lims[1] - 0.2 - goal_noise[1]]], device=device)
elif rand_diag == 1:
start_conf = torch.tensor([[x_lims[1] - 0.2 - start_noise[0], y_lims[1] - 0.2 - start_noise[1]]], device=device)
goal_conf = torch.tensor([[x_lims[0] + 0.2 + goal_noise[0], y_lims[0] + 0.2 + goal_noise[1]]], device=device)
elif rand_diag == 2:
start_conf = torch.tensor([[x_lims[1] - 0.2 - start_noise[0], y_lims[0] + 0.2 + start_noise[1]]], device=device)
goal_conf = torch.tensor([[x_lims[0] + 0.2 + goal_noise[0], y_lims[1] - 0.2 - goal_noise[1]]], device=device)
elif rand_diag == 3:
start_conf = torch.tensor([[x_lims[0] + 0.2 + start_noise[0], y_lims[1] - 0.2 - start_noise[1]]], device=device)
goal_conf = torch.tensor([[x_lims[1] - 0.2 - goal_noise[0], y_lims[0] + 0.2 + goal_noise[1]]], device=device)
if not env.is_feasible(start_conf[0], eps) or not env.is_feasible(goal_conf[0], eps):
start_conf, goal_conf = get_random_2d_confs(x_lims, y_lims, env, eps.item())
else:
#Choose randomly
start_conf, goal_conf = get_random_2d_confs(x_lims, y_lims, env, eps.item())
#Generate initial trajectory once you have configuration
if not rrt_star_init:
start, goal, th_init = straight_line_traj(start_conf, goal_conf, planner_params['total_time_step'], planner_params['state_dim'])
else:
start, goal, th_init = rrt_star_traj(start_conf, goal_conf, env_params, env, robot, planner_params, obs_params)
return start, goal, th_init
def generate_trajs_and_save(folder, num_envs, probs_per_env, env_data, planner_params, gp_params, obs_params, optim_params, robot_data, out_folder_name, rrt_star_init=False, fix_start_goal=False):
for i in xrange(num_envs):
if env_data['dim'] == 2:
env_params = {'x_lims': env_data['x_lims'], 'y_lims': env_data['y_lims']}
env = Env2D(env_params)
if robot_data['type'] == 'point_robot':
robot = PointRobot2D(robot_data['sphere_radius'])
# print robot.get_sphere_radii()
im = plt.imread(folder + "/im_sdf/" + str(i) + "_im.png")
sdf = np.load(folder + "/im_sdf/" + str(i) + "_sdf.npy")
env.initialize_from_image(im, sdf)
imp = torch.tensor(im, device=device)
sdfp = torch.tensor(sdf, device=device)
for j in xrange(probs_per_env):
planner = DiffGPMP2Planner(gp_params, obs_params, planner_params, optim_params, env_params, robot, use_cuda=use_cuda)
start, goal, th_init = generate_start_goal(env_params, planner_params, env_data['dim'], j, env, robot, obs_params, rrt_star_init, fix_start_goal)
th_final,_, err_init, err_final, err_per_iter, err_ext_per_iter, k, time_taken = \
planner.forward(th_init.unsqueeze(0), start.unsqueeze(0), goal.unsqueeze(0), imp.unsqueeze(0).unsqueeze(0), sdfp.unsqueeze(0).unsqueeze(0))
print('Num iterations = %d, Time taken %f'%(k[0], time_taken[0]))
path_init = []
path_final = []
start_np = start.cpu().detach().numpy()[0]
goal_np = goal.cpu().detach().numpy()[0]
th_init_np = th_init.cpu().detach().numpy()
th_final_np = th_final[0].cpu().detach().numpy()
out_folder = os.path.join(folder, out_folder_name)
if not os.path.exists(out_folder):
os.makedirs(out_folder)
out_path = out_folder + "/" + "env_" + str(i) + "_prob_" + str(j)
np.savez(out_path, start=start_np, goal=goal_np, th_opt=th_final_np)
print('Saving meta data')
with open(os.path.join(folder, "meta.yaml"), 'w') as fp:
d = {'num_envs': num_envs,
'probs_per_env': probs_per_env,
'env_params': env_params,
'im_size': args.im_size}
yaml.dump(d, fp)
def generate_opt_trajs(args):
np.random.seed(args.seed_val)
torch.manual_seed(args.seed_val)
data_folder = os.path.abspath(args.data_folder)
param_file = os.path.join(data_folder, 'gpmp2_params.yaml')
robot_file = os.path.join(data_folder, "robot.yaml")
env_file = os.path.join(data_folder, "env_params.yaml")
train_folder = os.path.join(data_folder, "train")
test_folder = os.path.join(data_folder, "test")
out_folder_name = "opt_trajs_gpmp2"
env_data, planner_params, gp_params, obs_params, optim_params, robot_data = load_prob_params(param_file, robot_file, env_file)
#generate training environments
if args.train:
generate_trajs_and_save(train_folder, args.num_train_envs, args.probs_per_env, env_data, planner_params, gp_params, obs_params, optim_params, robot_data, out_folder_name, args.rrt_star_init, args.fix_start_goal)
if args.test:
generate_trajs_and_save(test_folder, args.num_test_envs, args.probs_per_env, env_data, planner_params, gp_params, obs_params, optim_params, robot_data, out_folder_name, args.rrt_star_init, args.fix_start_goal)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--data_folder', type=str, required=True, help="Relative path of output folder", default='.')
parser.add_argument('--num_train_envs', type=int, help='Number of environments.')
parser.add_argument('--num_test_envs', type=int, help='Number of environments.')
parser.add_argument('--im_size', type=int, required=True, help='Size of dataset images')
parser.add_argument('--probs_per_env', type=int, required=True, help='Number of planning problems per environment')
parser.add_argument('--seed_val', type=int, default=0, help='Random seed for generating dataset')
parser.add_argument('--rrt_star_init', action='store_true', help='Generate initial trajectory using rrtstar')
parser.add_argument('--train', action='store_true', help='Generate training data')
parser.add_argument('--test', action='store_true', help='Generate test data')
parser.add_argument('--fix_start_goal', action='store_true', help='Fix start and goal for all problems')
args = parser.parse_args()
generate_opt_trajs(args)
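# Example invocation (hypothetical script name and folder layout; the flags are the
# ones defined by the argparse setup above):
#   python generate_data_gpmp2.py --data_folder data/dataset_2d_1 --im_size 128 \
#       --num_train_envs 100 --probs_per_env 10 --train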
| 46.809917
| 215
| 0.697652
|
84f7829c0dbf61ab7e381afed3836b9833856264
| 6,568
|
py
|
Python
|
ironicclient/v1/chassis.py
|
NaohiroTamura/python-ironicclient
|
d23c02f64f977acc593e320d4cd5c65d327f19d4
|
[
"Apache-2.0"
] | null | null | null |
ironicclient/v1/chassis.py
|
NaohiroTamura/python-ironicclient
|
d23c02f64f977acc593e320d4cd5c65d327f19d4
|
[
"Apache-2.0"
] | null | null | null |
ironicclient/v1/chassis.py
|
NaohiroTamura/python-ironicclient
|
d23c02f64f977acc593e320d4cd5c65d327f19d4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright © 2013 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironicclient.common import base
from ironicclient.common.i18n import _
from ironicclient.common import utils
from ironicclient import exc
class Chassis(base.Resource):
def __repr__(self):
return "<Chassis %s>" % self._info
class ChassisManager(base.CreateManager):
resource_class = Chassis
_resource_name = 'chassis'
_creation_attributes = ['description', 'extra', 'uuid']
def list(self, marker=None, limit=None, sort_key=None,
sort_dir=None, detail=False, fields=None):
"""Retrieve a list of chassis.
:param marker: Optional, the UUID of a chassis, e.g. the last
chassis from a previous result set. Return
the next result set.
:param limit: The maximum number of results to return per
request, if:
1) limit > 0, the maximum number of chassis to return.
2) limit == 0, return the entire list of chassis.
3) limit param is NOT specified (None), the number of items
returned respects the maximum imposed by the Ironic API
(see Ironic's api.max_limit option).
:param sort_key: Optional, field used for sorting.
:param sort_dir: Optional, direction of sorting, either 'asc' (the
default) or 'desc'.
:param detail: Optional, boolean whether to return detailed information
about chassis.
:param fields: Optional, a list with a specified set of fields
of the resource to be returned. Cannot be used
when 'detail' is set.
:returns: A list of chassis.
"""
if limit is not None:
limit = int(limit)
if detail and fields:
raise exc.InvalidAttribute(_("Can't fetch a subset of fields "
"with 'detail' set"))
filters = utils.common_filters(marker, limit, sort_key, sort_dir,
fields)
path = ''
if detail:
path += 'detail'
if filters:
path += '?' + '&'.join(filters)
if limit is None:
return self._list(self._path(path), "chassis")
else:
return self._list_pagination(self._path(path), "chassis",
limit=limit)
def list_nodes(self, chassis_id, marker=None, limit=None,
sort_key=None, sort_dir=None, detail=False, fields=None,
associated=None, maintenance=None, provision_state=None):
"""List all the nodes for a given chassis.
:param chassis_id: The UUID of the chassis.
:param marker: Optional, the UUID of a node, e.g. the last
node from a previous result set. Return
the next result set.
:param limit: The maximum number of results to return per
request, if:
1) limit > 0, the maximum number of nodes to return.
2) limit == 0, return the entire list of nodes.
3) limit param is NOT specified (None), the number of items
returned respects the maximum imposed by the Ironic API
(see Ironic's api.max_limit option).
:param sort_key: Optional, field used for sorting.
:param sort_dir: Optional, direction of sorting, either 'asc' (the
default) or 'desc'.
:param detail: Optional, boolean whether to return detailed information
about nodes.
:param fields: Optional, a list with a specified set of fields
of the resource to be returned. Cannot be used
when 'detail' is set.
:param associated: Optional. Either a Boolean or a string
representation of a Boolean that indicates whether
to return a list of associated (True or "True") or
unassociated (False or "False") nodes.
:param maintenance: Optional. Either a Boolean or a string
representation of a Boolean that indicates whether
to return nodes in maintenance mode (True or
"True"), or not in maintenance mode (False or
"False").
:param provision_state: Optional. String value to get only nodes in
that provision state.
:returns: A list of nodes.
"""
if limit is not None:
limit = int(limit)
if detail and fields:
raise exc.InvalidAttribute(_("Can't fetch a subset of fields "
"with 'detail' set"))
filters = utils.common_filters(marker, limit, sort_key, sort_dir,
fields)
if associated is not None:
filters.append('associated=%s' % associated)
if maintenance is not None:
filters.append('maintenance=%s' % maintenance)
if provision_state is not None:
filters.append('provision_state=%s' % provision_state)
path = "%s/nodes" % chassis_id
if detail:
path += '/detail'
if filters:
path += '?' + '&'.join(filters)
if limit is None:
return self._list(self._path(path), "nodes")
else:
return self._list_pagination(self._path(path), "nodes",
limit=limit)
def get(self, chassis_id, fields=None):
return self._get(resource_id=chassis_id, fields=fields)
def delete(self, chassis_id):
return self._delete(resource_id=chassis_id)
def update(self, chassis_id, patch):
return self._update(resource_id=chassis_id, patch=patch)
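# Hypothetical usage sketch (not part of this module): given a ChassisManager
# instance ``cm``, page through all chassis 50 at a time using the marker/limit
# semantics documented in ``list`` above.
def _example_paginated_listing(cm, page_size=50):
    marker = None
    while True:
        page = cm.list(marker=marker, limit=page_size)
        if not page:
            return
        for chassis in page:
            yield chassis
        marker = page[-1].uuid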
| 38.409357
| 79
| 0.573843
|
b855f8c4c57afa45864c5aa6a988b54d94fe906c
| 30,683
|
py
|
Python
|
test/test_sparse_csr.py
|
pytorch/pytorch
|
cbd7bac914f8bb650291c1e1d8d240e01dba45c4
|
[
"Intel"
] | 60,067
|
2017-01-18T17:21:31.000Z
|
2022-03-31T21:37:45.000Z
|
test/test_sparse_csr.py
|
pytorch/pytorch
|
cbd7bac914f8bb650291c1e1d8d240e01dba45c4
|
[
"Intel"
] | 66,955
|
2017-01-18T17:21:38.000Z
|
2022-03-31T23:56:11.000Z
|
test/test_sparse_csr.py
|
pytorch/pytorch
|
cbd7bac914f8bb650291c1e1d8d240e01dba45c4
|
[
"Intel"
] | 19,210
|
2017-01-18T17:45:04.000Z
|
2022-03-31T23:51:56.000Z
|
import torch
import warnings
import unittest
import random
import itertools
from torch.testing import get_all_complex_dtypes, get_all_fp_dtypes, make_tensor
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(IS_MACOS, IS_WINDOWS, TestCase, run_tests, load_tests, coalescedonoff)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoCusparseGeneric,
precisionOverride)
from torch.testing._internal.common_dtype import floating_types, get_all_dtypes
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
class TestSparseCSRSampler(TestCase):
def test_make_crow_indices(self):
# Here we test the correctness of the crow_indices algorithm
# and testing it on CPU and with int32 dtype will be
# sufficient.
device = torch.device('cpu')
index_dtype = torch.int32
for n_rows in range(1, 10):
for n_cols in range(1, 10):
for nnz in range(0, n_rows * n_cols + 1):
crow_indices = self._make_crow_indices(
n_rows, n_cols, nnz,
device=device, dtype=index_dtype)
self.assertEqual(len(crow_indices), n_rows + 1)
counts = crow_indices[1:] - crow_indices[:-1]
self.assertEqual(counts.sum(), nnz)
self.assertGreaterEqual(counts.min(), 0)
self.assertLessEqual(counts.max(), n_cols)
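# Quick reference for the CSR layout exercised below: for the 2x2 matrix
# [[1, 2], [3, 4]], crow_indices = [0, 2, 4] stores the running count of stored
# elements per row (row i occupies positions crow_indices[i]:crow_indices[i + 1]),
# col_indices = [0, 1, 0, 1] gives the column of each stored element, and
# values = [1, 2, 3, 4] holds the elements themselves.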
class TestSparseCSR(TestCase):
@onlyCPU
def test_csr_layout(self):
self.assertEqual(str(torch.sparse_csr), 'torch.sparse_csr')
self.assertEqual(type(torch.sparse_csr), torch.layout)
@dtypes(*get_all_dtypes())
def test_sparse_csr_constructor_shape_inference(self, device, dtype):
crow_indices = [0, 2, 4]
col_indices = [0, 1, 0, 1]
values = [1, 2, 3, 4]
sparse = torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
torch.tensor(col_indices, dtype=torch.int64),
torch.tensor(values), dtype=dtype, device=device)
self.assertEqual(torch.tensor(crow_indices, dtype=torch.int64), sparse.crow_indices())
self.assertEqual((len(crow_indices) - 1, max(col_indices) + 1), sparse.shape)
self.assertEqual(dtype, sparse.dtype)
self.assertEqual(torch.device(device), sparse.device)
@dtypes(*get_all_dtypes())
def test_sparse_csr_constructor(self, device, dtype):
crow_indices = [0, 2, 4]
col_indices = [0, 1, 0, 1]
values = [1, 2, 3, 4]
for index_dtype in [torch.int32, torch.int64]:
sparse = torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=index_dtype),
torch.tensor(col_indices, dtype=index_dtype),
torch.tensor(values),
size=(2, 10),
dtype=dtype,
device=device)
self.assertEqual((2, 10), sparse.shape)
self.assertEqual(torch.tensor(crow_indices, dtype=index_dtype), sparse.crow_indices())
self.assertEqual(torch.tensor(col_indices, dtype=index_dtype), sparse.col_indices())
self.assertEqual(torch.tensor(values, dtype=dtype), sparse.values())
@dtypes(*get_all_dtypes())
def test_sparse_csr_constructor_from_lists(self, device, dtype):
# without size
sparse = torch.sparse_csr_tensor([0, 2, 4],
[0, 1, 0, 1],
[1, 2, 3, 4],
dtype=dtype,
device=device)
self.assertEqual((2, 2), sparse.shape)
self.assertEqual(4, sparse.numel())
self.assertEqual(torch.tensor([0, 2, 4], dtype=torch.int64, device=device), sparse.crow_indices())
self.assertEqual(torch.tensor([0, 1, 0, 1], dtype=torch.int64, device=device), sparse.col_indices())
self.assertEqual(torch.tensor([1, 2, 3, 4], dtype=dtype, device=device), sparse.values())
# with size
for sparse_csr_tensor in [torch.sparse_csr_tensor, torch._sparse_csr_tensor_unsafe]:
sparse = sparse_csr_tensor([0, 2, 4],
[0, 1, 0, 1],
[1, 2, 3, 4],
size=(2, 10),
dtype=dtype,
device=device)
self.assertEqual((2, 10), sparse.shape)
self.assertEqual(torch.tensor([0, 2, 4], dtype=torch.int64, device=device), sparse.crow_indices())
self.assertEqual(torch.tensor([0, 1, 0, 1], dtype=torch.int64, device=device), sparse.col_indices())
self.assertEqual(torch.tensor([1, 2, 3, 4], dtype=dtype, device=device), sparse.values())
def test_factory_type_invariants_check(self, device):
with self.assertRaisesRegex(RuntimeError, "both crow_indices and col_indices should have the same type."):
torch.sparse_csr_tensor(torch.tensor([0, 2, 4], dtype=torch.int64),
torch.tensor([0, 1, 0, 1], dtype=torch.int32),
torch.tensor([1, 2, 3, 4]),
device=device)
with self.assertRaisesRegex(RuntimeError, r"\"csr_construct_check\" not implemented for 'Short'"):
torch.sparse_csr_tensor(torch.tensor([0, 2, 4], dtype=torch.int16),
torch.tensor([0, 1, 0, 1], dtype=torch.int16),
torch.tensor([1, 2, 3, 4]),
device=device)
def test_factory_layout_invariants_check(self, device):
with self.assertRaisesRegex(RuntimeError, "expected values to be a strided and contiguous tensor"):
values = torch.tensor([1.], device=device).expand(4,)
torch.sparse_csr_tensor(torch.tensor([0, 2, 4], device=device),
torch.tensor([0, 1, 0, 1], device=device),
values)
with self.assertRaisesRegex(RuntimeError, "expected col_indices to be a strided and contiguous tensor"):
col_indices = torch.tensor([0], device=device).expand(4,)
torch.sparse_csr_tensor(torch.tensor([0, 2, 4]),
col_indices,
torch.tensor([1, 2, 3, 4]))
with self.assertRaisesRegex(RuntimeError, "expected crow_indices to be a strided and contiguous tensor"):
crow_indices = torch.arange(6, device=device)
torch.sparse_csr_tensor(crow_indices[::2],
torch.tensor([0, 1, 0, 1], device=device),
torch.tensor([1, 2, 3, 4]))
def test_factory_shape_invariants_check(self, device):
crow_indices = [0, 2, 4]
col_indices = [0, 1, 0, 1]
values = [1, 2, 3, 4]
size = (2, 10)
torch.sparse_csr_tensor(torch.tensor(crow_indices), torch.tensor(col_indices), torch.tensor(values), size,
device=device)
with self.assertRaisesRegex(RuntimeError, r"size of a CSR tensor must be of length 2, but got: 3"):
torch.sparse_csr_tensor(torch.tensor(crow_indices), torch.tensor(col_indices), torch.tensor(values),
size=(2, 10, 2),
device=device)
with self.assertRaisesRegex(RuntimeError, r"crow_indices must have dim\=1 but got crow_indices\.dim\(\)\=2"):
torch.sparse_csr_tensor(torch.tensor(crow_indices).repeat(2, 1),
torch.tensor(col_indices),
torch.tensor(values),
size,
device=device)
with self.assertRaisesRegex(RuntimeError, r"col_indices must have dim\=1 but got col_indices\.dim\(\)\=2"):
torch.sparse_csr_tensor(torch.tensor(crow_indices),
torch.tensor(col_indices).repeat(2, 1),
torch.tensor(values),
size,
device=device)
with self.assertRaisesRegex(RuntimeError, r"values must have dim\=1 but got values\.dim\(\)\=2"):
torch.sparse_csr_tensor(torch.tensor(crow_indices),
torch.tensor(col_indices),
torch.tensor(values).repeat(2, 1),
size,
device=device)
with self.assertRaisesRegex(RuntimeError,
r"crow_indices\.numel\(\) must be size\(0\) \+ 1, but got: 3"):
torch.sparse_csr_tensor(torch.tensor(crow_indices), torch.tensor(col_indices), torch.tensor(values), (1, 1),
device=device)
with self.assertRaisesRegex(RuntimeError,
r"col_indices and values must have equal sizes, " +
r"but got col_indices\.numel\(\): 3, values\.numel\(\): 4"):
torch.sparse_csr_tensor(torch.tensor(crow_indices), torch.tensor([0, 1, 0]), torch.tensor(values), size,
device=device)
def test_factory_indices_invariants_check(self, device):
crow_indices = [0, 2, 4]
col_indices = [0, 1, 0, 1]
values = [1, 2, 3, 4]
size = (2, 10)
with self.assertRaisesRegex(RuntimeError, "0th value of crow_indices must be 0."):
torch.sparse_csr_tensor(torch.tensor([-1, 0, 4]), torch.tensor(col_indices), torch.tensor(values), size,
device=device)
with self.assertRaisesRegex(RuntimeError,
"last value of crow_indices should be equal to the length of col_indices."):
torch.sparse_csr_tensor(torch.tensor([0, 2, 5]), torch.tensor(col_indices), torch.tensor(values), size,
device=device)
with self.assertRaisesRegex(RuntimeError,
r"at position i \= 2," +
r" this condition crow_indices\[i - 1\] <\= crow_indices\[i\] fails"):
torch.sparse_csr_tensor(torch.tensor([0, 5, 4]), torch.tensor(col_indices), torch.tensor(values), size,
device=device)
with self.assertRaisesRegex(RuntimeError, r"col_indices\.min\(\) should be greater or equal to zero"):
torch.sparse_csr_tensor(torch.tensor(crow_indices), torch.tensor([0, -1, 0, 1]), torch.tensor(values), size,
device=device)
with self.assertRaisesRegex(RuntimeError, r"size\(1\) should be greater than col_indices\.max\(\)"):
torch.sparse_csr_tensor(torch.tensor(crow_indices), torch.tensor([0, 11, 0, 1]), torch.tensor(values), size,
device=device)
@onlyCUDA
@dtypes(*get_all_dtypes())
def test_factory_device_type_inference(self, device, dtype):
cpu_cuda = ('cpu', 'cuda')
cpu_cuda_none = cpu_cuda + (None,)
for crow_indices_device, col_indices_device, values_device, device in itertools.product(cpu_cuda,
cpu_cuda,
cpu_cuda,
cpu_cuda_none):
for index_dtype in [torch.int32, torch.int64]:
crow_indices = torch.tensor([0, 2, 4], dtype=index_dtype, device=crow_indices_device)
col_indices = torch.tensor([0, 1, 0, 1], dtype=index_dtype, device=col_indices_device)
values = torch.tensor([1, 2, 3, 4], dtype=dtype, device=values_device)
if device is None and (crow_indices_device != col_indices_device or
crow_indices_device != values_device):
with self.assertRaises(RuntimeError):
torch.sparse_csr_tensor(crow_indices,
col_indices,
values,
size=(2, 10),
device=device)
else:
t = torch.sparse_csr_tensor(crow_indices,
col_indices,
values,
size=(2, 10),
device=device)
should_be_cuda = (device == 'cuda' or (device is None and values_device == 'cuda'))
self.assertEqual(should_be_cuda, t.is_cuda)
self.assertEqual(t.crow_indices().dtype, index_dtype)
self.assertEqual(t.col_indices().dtype, index_dtype)
self.assertEqual(t.values().dtype, dtype)
self.assertEqual(t.crow_indices().device, t.values().device)
self.assertEqual(t.col_indices().device, t.values().device)
def test_sparse_csr_print(self, device):
orig_maxDiff = self.maxDiff
self.maxDiff = None
shape_nnz = [
((10, 10), 10),
((100, 10), 10),
((1000, 10), 10)
]
printed = []
for shape, nnz in shape_nnz:
values_shape = torch.Size((nnz,))
col_indices_shape = torch.Size((nnz,))
crow_indices_shape = torch.Size((shape[0] + 1,))
printed.append("# shape: {}".format(torch.Size(shape)))
printed.append("# nnz: {}".format(nnz))
printed.append("# crow_indices shape: {}".format(crow_indices_shape))
printed.append("# col_indices shape: {}".format(col_indices_shape))
printed.append("# values_shape: {}".format(values_shape))
for index_dtype in [torch.int32, torch.int64]:
for dtype in floating_types():
printed.append("########## {}/{} ##########".format(dtype, index_dtype))
x = torch.sparse_csr_tensor(torch.tensor([0, 2, 4], dtype=index_dtype),
torch.tensor([0, 1, 0, 1], dtype=index_dtype),
torch.tensor([1, 2, 3, 4]), dtype=dtype, device=device)
printed.append("# sparse tensor")
printed.append(str(x))
printed.append("# _crow_indices")
printed.append(str(x.crow_indices()))
printed.append("# _col_indices")
printed.append(str(x.col_indices()))
printed.append("# _values")
printed.append(str(x.values()))
printed.append('')
printed.append('')
self.assertExpected('\n'.join(printed))
self.maxDiff = orig_maxDiff
@dtypes(*get_all_dtypes())
def test_sparse_csr_from_dense(self, device, dtype):
dense = torch.tensor([[4, 5, 0], [0, 0, 0], [1, 0, 0]], dtype=dtype, device=device)
sparse = dense.to_sparse_csr()
self.assertEqual(torch.tensor([0, 2, 2, 3], dtype=torch.int64), sparse.crow_indices())
self.assertEqual(torch.tensor([0, 1, 0], dtype=torch.int64), sparse.col_indices())
self.assertEqual(torch.tensor([4, 5, 1], dtype=dtype), sparse.values())
dense = torch.tensor([[0, 0, 0], [0, 0, 1], [1, 0, 0]], dtype=dtype, device=device)
sparse = dense.to_sparse_csr()
self.assertEqual(torch.tensor([0, 0, 1, 2], dtype=torch.int64), sparse.crow_indices())
self.assertEqual(torch.tensor([2, 0], dtype=torch.int64), sparse.col_indices())
self.assertEqual(torch.tensor([1, 1], dtype=dtype), sparse.values())
dense = torch.tensor([[2, 2, 2], [2, 2, 2], [2, 2, 2]], dtype=dtype, device=device)
sparse = dense.to_sparse_csr()
self.assertEqual(torch.tensor([0, 3, 6, 9], dtype=torch.int64), sparse.crow_indices())
self.assertEqual(torch.tensor([0, 1, 2] * 3, dtype=torch.int64), sparse.col_indices())
self.assertEqual(torch.tensor([2] * 9, dtype=dtype), sparse.values())
@dtypes(*get_all_dtypes())
def test_sparse_csr_to_dense(self, device, dtype):
mn = [5, 2, 0]
for (m, n) in itertools.product(mn, mn):
size = (m, n)
dense = make_tensor(size, dtype=dtype, device=device)
sparse = dense.to_sparse_csr()
self.assertEqual(sparse.to_dense(), dense)
crow_indices = torch.tensor([0, 3, 5])
col_indices = torch.tensor([0, 1, 2, 0, 1])
values = torch.tensor([1, 2, 1, 3, 4], dtype=dtype)
csr = torch.sparse_csr_tensor(crow_indices, col_indices,
values, dtype=dtype, device=device)
dense = torch.tensor([[1, 2, 1], [3, 4, 0]], dtype=dtype, device=device)
self.assertEqual(csr.to_dense(), dense)
@coalescedonoff
@dtypes(torch.double)
def test_coo_to_csr_convert(self, device, dtype, coalesced):
with self.assertRaisesRegex(RuntimeError, "Input is supposed to be a vector"):
torch._convert_indices_from_coo_to_csr(
torch.randint(100, (5, 5), device=device),
size=100)
size = (5, 5)
sparse_dim = 2
nnz = 10
sparse_coo, _, _ = self.genSparseTensor(size, sparse_dim, nnz, coalesced, device, dtype)
sparse_csr = sparse_coo.to_sparse_csr()
self.assertTrue(sparse_csr.is_sparse_csr)
self.assertEqual(sparse_csr.to_dense(), sparse_coo.to_dense())
vec = torch.randn((5, 1), dtype=dtype, device=device)
coo_product = sparse_coo.matmul(vec)
csr_product = sparse_csr.matmul(vec)
self.assertEqual(coo_product, csr_product)
vec = torch.randn((100, 1), dtype=dtype, device=device)
index = torch.tensor([
[1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
[92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
], dtype=torch.int32)
values = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype, device=device)
coo = torch.sparse_coo_tensor(index, values, torch.Size([100, 100]), dtype=dtype, device=device)
csr = coo.to_sparse_csr()
self.assertEqual(coo.matmul(vec), csr.matmul(vec))
col_indices = torch.tensor([
31, 92, 65, 50, 34, 62, 22, 56, 74, 89
], dtype=torch.int64, device=device)
self.assertEqual(csr.col_indices(), col_indices)
values = torch.tensor([2, 1, 6, 4, 10, 3, 5, 9, 8, 7], dtype=dtype, device=device)
self.assertEqual(csr.values(), values)
@onlyCPU
@unittest.skipIf(IS_MACOS or IS_WINDOWS, "MKL doesn't work on windows or mac")
@dtypes(torch.float, torch.double)
def test_mkl_matvec_warnings(self, device, dtype):
if torch.has_mkl:
for index_dtype in [torch.int32, torch.int64]:
sp = torch.sparse_csr_tensor(torch.tensor([0, 2, 4]),
torch.tensor([0, 1, 0, 1]),
torch.tensor([1, 2, 3, 4], dtype=dtype, device=device))
vec = torch.randn((2, 1), dtype=dtype, device=device)
with warnings.catch_warnings(record=True) as w:
sp.matmul(vec)
self.assertEqual(len(w), 2)
self.assertIn("Pytorch is compiled with MKL LP64 and will convert crow_indices to int32",
str(w[0].message))
self.assertIn("Pytorch is compiled with MKL LP64 and will convert col_indices to int32",
str(w[1].message))
@dtypes(*get_all_dtypes())
def test_sparse_csr_from_dense_convert_error(self, device, dtype):
size = (4, 2, 4)
dense = make_tensor(size, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "Only 2D"):
sparse = dense.to_sparse_csr()
# TODO: Support auto generation of device check for sparse tensors
# See: https://github.com/pytorch/pytorch/issues/59058
@onlyCUDA
@dtypes(torch.double)
def test_matmul_device_mismatch(self, device, dtype):
cpu = torch.rand((10, 10))
cuda = cpu.cuda()
for s, m1, m2 in itertools.product((cpu, cuda), repeat=3):
csr = m1.to_sparse()
if s.device == csr.device == m2.device:
torch.addmm(s, csr, m2)
else:
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.addmm(s, csr, m2)
@skipCUDAIfNoCusparseGeneric
@dtypes(*torch.testing.floating_types())
@dtypesIfCUDA(*get_all_complex_dtypes(),
*get_all_fp_dtypes(include_half=SM53OrLater, include_bfloat16=SM80OrLater))
def test_csr_matvec(self, device, dtype):
side = 100
for index_dtype in [torch.int32, torch.int64]:
csr = self.genSparseCSRTensor((side, side), 1000, device=device, dtype=dtype, index_dtype=index_dtype)
vec = torch.randn(side, dtype=dtype, device=device)
res = csr.matmul(vec)
expected = csr.to_dense().matmul(vec)
self.assertEqual(res, expected)
bad_vec = torch.randn(side + 10, dtype=dtype, device=device)
err_msg = "mv: expected"
# CUDA path now uses generic meta/structured implementation
# TODO: move CPU path to not use `mv_sparse` function
if self.device_type == 'cuda':
err_msg = "size mismatch, got"
with self.assertRaisesRegex(RuntimeError, err_msg):
csr.matmul(bad_vec)
@dtypes(torch.double)
def test_mm(self, device, dtype):
def test_shape(di, dj, dk, nnz):
for index_dtype in [torch.int32, torch.int64]:
x = self.genSparseCSRTensor((di, dj), nnz, device=device, dtype=dtype, index_dtype=index_dtype)
t = torch.randn(di, dk, dtype=dtype, device=device)
y = torch.randn(dj, dk, dtype=dtype, device=device)
alpha = random.random()
beta = random.random()
# res = beta * t + alpha * (x @ y)
res = torch.addmm(t, x, y, beta=beta, alpha=alpha)
expected = torch.addmm(t, x.to_dense(), y, beta=beta, alpha=alpha)
self.assertEqual(res, expected)
res = torch.addmm(t, x, y)
expected = torch.addmm(t, x.to_dense(), y)
self.assertEqual(res, expected)
res = torch.mm(x, y)
expected = torch.mm(x.to_dense(), y)
self.assertEqual(res, expected)
for i in range(2, 5):
for j in range(2, 8):
for k in range(2, 8):
test_shape(i, j, k, i * j // 2)
test_shape(4, 4, 4, 0)
@dtypes(*floating_types())
@dtypesIfCUDA(*get_all_complex_dtypes(),
*get_all_fp_dtypes(include_half=SM53OrLater and TEST_CUSPARSE_GENERIC,
include_bfloat16=SM80OrLater and TEST_CUSPARSE_GENERIC))
@precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2})
def test_sparse_mm(self, device, dtype):
def test_shape(d1, d2, d3, nnz, transposed, index_dtype):
if transposed:
D = torch.randn(d3, d2, dtype=dtype, device=device).t_()
else:
D = torch.randn(d2, d3, dtype=dtype, device=device)
S = self.genSparseCSRTensor((d1, d2), nnz, device=device, dtype=dtype, index_dtype=index_dtype)
S_dense = S.to_dense()
self.assertEqual(torch.sparse.mm(S, D), torch.mm(S_dense, D))
for index_dtype in [torch.int32, torch.int64]:
test_shape(7, 8, 9, 20, False, index_dtype)
test_shape(7, 8, 9, 20, True, index_dtype)
@dtypes(*floating_types())
@dtypesIfCUDA(*get_all_complex_dtypes(),
*get_all_fp_dtypes(include_half=SM53OrLater and TEST_CUSPARSE_GENERIC,
include_bfloat16=SM80OrLater and TEST_CUSPARSE_GENERIC))
@precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2})
def test_sparse_addmm(self, device, dtype):
def test_shape(m, n, p, nnz, broadcast, index_dtype, alpha_beta=None):
if alpha_beta is None:
alpha = random.random()
beta = random.random()
else:
alpha, beta = alpha_beta
if broadcast:
D1 = make_tensor((), dtype=dtype, device=device)
else:
D1 = make_tensor([n, p], dtype=dtype, device=device)
D2 = make_tensor([m, p], dtype=dtype, device=device)
S = self.genSparseCSRTensor([n, m], nnz, dtype=dtype, device=device, index_dtype=index_dtype)
S_dense = S.to_dense()
Y = torch.sparse.addmm(D1, S, D2, beta=beta, alpha=alpha)
Y_dense = torch.addmm(D1, S_dense, D2, beta=beta, alpha=alpha)
self.assertEqual(Y, Y_dense)
for index_dtype in [torch.int32, torch.int64]:
test_shape(7, 8, 9, 20, False, index_dtype, None)
test_shape(7, 8, 9, 20, True, index_dtype, None)
test_shape(7, 8, 9, 20, False, index_dtype, (1, 0))
test_shape(7, 8, 9, 20, True, index_dtype, (1, 0))
test_shape(7, 8, 9, 20, False, index_dtype, (1, 1))
test_shape(7, 8, 9, 20, True, index_dtype, (1, 1))
@onlyCUDA
@dtypes(torch.float)
def test_addmm_errors(self, device, dtype):
# test that the errors are the same for dense and sparse versions
import re
def test1(*, is_sparse):
# shapes must be compatible for matrix multiplication
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.addmm(a, a_sparse, a)
else:
return torch.addmm(a, a, a)
def test2(*, is_sparse):
# mat2 must be a matrix
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.addmm(a, a_sparse, a.unsqueeze(0))
else:
return torch.addmm(a, a, a.unsqueeze(0))
def test3(*, is_sparse):
# the first input needs to be 1D or 2D
a = make_tensor((3, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.addmm(a.unsqueeze(0), a_sparse, a)
else:
return torch.addmm(a.unsqueeze(0), a, a)
for test in (test1, test2, test3):
try:
test(is_sparse=False)
except RuntimeError as msg:
with self.assertRaisesRegex(RuntimeError, re.escape(str(msg))):
test(is_sparse=True)
@onlyCUDA
@dtypes(torch.float)
def test_mm_errors(self, device, dtype):
# test that the errors are the same for dense and sparse versions
import re
def test1(*, is_sparse):
# shapes must be compatible for matrix multiplication
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.mm(a_sparse, a)
else:
return torch.mm(a, a)
def test2(*, is_sparse):
# mat2 must be a matrix
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.mm(a_sparse, a.unsqueeze(0))
else:
return torch.mm(a, a.unsqueeze(0))
for test in (test1, test2):
try:
test(is_sparse=False)
except RuntimeError as msg:
with self.assertRaisesRegex(RuntimeError, re.escape(str(msg))):
test(is_sparse=True)
@dtypes(torch.float, torch.double)
def test_add(self, device, dtype):
def _test_spadd_shape(nnz, shape):
x = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
y = torch.randn(*shape, dtype=dtype, device=device)
r = random.random()
res = torch.add(y, x, alpha=r)
expected = y + r * x.to_dense()
self.assertEqual(res, expected)
# Non contiguous dense tensor
s = list(shape)
s[0] = shape[-1]
s[-1] = shape[0]
y = torch.randn(*s, dtype=torch.double, device=device)
y.transpose_(0, len(s) - 1)
r = random.random()
res = torch.add(y, x, alpha=r)
expected = y + r * x.to_dense()
self.assertEqual(res, expected)
_test_spadd_shape(10, [100, 100])
_test_spadd_shape(0, [100, 100])
_test_spadd_shape(10, [100, 1])
_test_spadd_shape(10, [1, 100])
@dtypes(*get_all_dtypes())
def test_coo_csr_conversion(self, device, dtype):
for m, n in itertools.product([5, 2, 0], [5, 2, 0]):
size = (m, n)
dense = make_tensor(size, dtype=dtype, device=device)
coo_sparse = dense.to_sparse()
csr_sparse = coo_sparse.to_sparse_csr()
self.assertEqual(csr_sparse.to_dense(), dense)
# e.g., TestSparseCSRCPU and TestSparseCSRCUDA
instantiate_device_type_tests(TestSparseCSR, globals())
if __name__ == '__main__':
run_tests()
| 49.171474
| 120
| 0.550011
|
0c985c0bc3b0ed3726f90c51b26a81c90854ed12
| 4,777
|
py
|
Python
|
Google/benchmarks/minigo/implementations/minigo-research-TF-tpu-v4-128/mask_flags.py
|
goswamig/training_results_v0.7
|
4278ce8a0f3d4db6b5e6054277724ca36278d7a3
|
[
"Apache-2.0"
] | 48
|
2020-07-29T18:09:23.000Z
|
2021-10-09T01:53:33.000Z
|
Google/benchmarks/minigo/implementations/minigo-research-TF-tpu-v4-128/mask_flags.py
|
goswamig/training_results_v0.7
|
4278ce8a0f3d4db6b5e6054277724ca36278d7a3
|
[
"Apache-2.0"
] | 9
|
2021-04-02T02:28:07.000Z
|
2022-03-26T18:23:59.000Z
|
Google/benchmarks/minigo/implementations/minigo-research-TF-tpu-v4-128/mask_flags.py
|
lablup/training_results_v0.7
|
f5bb59aa0f8b18b602763abe47d1d24d0d54b197
|
[
"Apache-2.0"
] | 42
|
2020-08-01T06:41:24.000Z
|
2022-01-20T10:33:08.000Z
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=g-doc-args,g-short-docstring-punctuation,subprocess-run-check,g-doc-return-or-yield
"""Filters flagfile to only pass in flags that are defined.
Having one big flagfile is great for seeing all the configuration at a glance.
However, absl.flags will throw an error if you pass an undefined flag.
To solve this problem, we filter the global flagfile by running
python3 some_module.py --helpfull
to generate a list of all flags that some_module.py accepts. Then, we pass in
only those flags that are accepted by some_module.py and run as a subprocess
Usage example:
import mask_flags
mask_flags.run(['python3', 'train.py', '--custom_flag', '--flagfile=flags'])
# will be transformed into
subprocess.run(['python3', 'train.py', '--custom_flag',
'--train_only_flag=...', '--more_train_only=...''])
Command line usage example:
python3 -m mask_flags train.py --custom_flag --flagfile=flags
"""
import re
import subprocess
import sys
import time
from absl import flags
# Matches both
# --some_flag: Flag description
# --[no]bool_flag: Flag description
FLAG_HELP_RE_PY = re.compile(r'--((\[no\])?)([\w_-]+):')
FLAG_HELP_RE_CC = re.compile(r'-((\[no\])?)([\w_-]+) \(')
FLAG_RE = re.compile(r'--[\w_-]+')
def extract_valid_flags(subprocess_cmd):
"""Extracts the valid flags from a command by running it with --helpfull.
Args:
subprocess_cmd: List[str], what would be passed into subprocess.call()
i.e. ['python', 'train.py', '--flagfile=flags']
Returns:
['--foo=blah', '--more_flags']
"""
help_cmd = subprocess_cmd + ['--helpfull']
help_output = subprocess.run(help_cmd, stdout=subprocess.PIPE).stdout
help_output = help_output.decode('ascii')
if 'python' in subprocess_cmd[0]:
valid_flags = parse_helpfull_output(help_output)
else:
valid_flags = parse_helpfull_output(help_output, regex=FLAG_HELP_RE_CC)
return valid_flags
def parse_helpfull_output(help_output, regex=FLAG_HELP_RE_PY):
"""Parses the output of --helpfull.
Args:
help_output: str, the full output of --helpfull.
regex: compiled pattern used to extract flag names from the help text
(defaults to the Python-style FLAG_HELP_RE_PY).
Returns:
A set of flags that are valid flags.
"""
valid_flags = set()
for _, no_prefix, flag_name in regex.findall(help_output):
valid_flags.add('--' + flag_name)
if no_prefix:
valid_flags.add('--no' + flag_name)
return valid_flags
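# For example, given this (hypothetical) --helpfull excerpt:
#   --learning_rate: Initial learning rate.
#   --[no]use_tpu: Whether to run on TPU.
# the Python regex yields {'--learning_rate', '--use_tpu', '--nouse_tpu'}, so both
# spellings of a boolean flag survive the filtering below.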
def filter_flags(parsed_flags, valid_flags):
"""Return the subset of `parsed_flags` that are found in the list `valid_flags`"""
def valid_argv(argv):
"""Figures out if a flag parsed from the flagfile matches a flag in
the command about to be run.
"""
flagname_match = FLAG_RE.match(argv)
if not flagname_match:
return True
flagname = flagname_match.group()
return flagname in valid_flags
return list(filter(valid_argv, parsed_flags))
def prepare_subprocess_cmd(subprocess_cmd):
"""Prepares a subprocess command by running --helpfull and masking flags.
Args:
subprocess_cmd: List[str], what would be passed into subprocess.call()
i.e. ['python', 'train.py', '--flagfile=flags']
Returns:
['python', 'train.py', '--train_flag=blah', '--more_flags']
"""
valid_flags = extract_valid_flags(subprocess_cmd)
parsed_flags = flags.FlagValues().read_flags_from_files(subprocess_cmd[1:])
filtered_flags = filter_flags(parsed_flags, valid_flags)
return [subprocess_cmd[0]] + filtered_flags
def run(cmd):
"""Prepare and run a subprocess cmd, returning a CompletedProcess."""
print('Preparing the following cmd:')
cmd = prepare_subprocess_cmd(cmd)
print('Running the following cmd:')
print('\n'.join(cmd))
return subprocess.run(cmd, stdout=sys.stdout, stderr=sys.stderr)
def checked_run(cmd):
"""Prepare and run a subprocess cmd, checking for successful completion."""
completed_process = run(cmd)
if completed_process.returncode > 0:
print('Command failed! Hanging around in case someone needs a '
'docker connection. (Ctrl-C to quit now)')
time.sleep(300)
raise RuntimeError
return completed_process
if __name__ == '__main__':
sys.argv.pop(0)
checked_run(sys.argv)
| 32.944828
| 101
| 0.710069
|
ef847a5fbd1f849e93a1a47816421b2c404cf544
| 4,788
|
py
|
Python
|
examples/finegraind.py
|
zju-vipa/KamalEngine
|
0276eb062595d52472090fbcbcedcd76db8cfd44
|
[
"Apache-2.0"
] | 79
|
2019-07-04T11:19:31.000Z
|
2022-03-24T13:32:29.000Z
|
examples/finegraind.py
|
zju-vipa/KamalEngine
|
0276eb062595d52472090fbcbcedcd76db8cfd44
|
[
"Apache-2.0"
] | 4
|
2019-09-08T13:20:52.000Z
|
2021-06-15T12:07:37.000Z
|
examples/finegraind.py
|
zju-vipa/KamalEngine
|
0276eb062595d52472090fbcbcedcd76db8cfd44
|
[
"Apache-2.0"
] | 17
|
2019-07-23T09:48:45.000Z
|
2022-03-14T03:19:40.000Z
|
# Copyright 2020 Zhejiang Lab. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================
from kamal import vision, engine, callbacks
from kamal.vision import sync_transforms as sT
import kamal
import torch, time
from torch.utils.tensorboard import SummaryWriter
import argparse
parser = argparse.ArgumentParser()
parser.add_argument( '--dataset', required=True )
parser.add_argument( '--lr', type=float, default=0.01)
parser.add_argument( '--epochs', type=int, default=200)
parser.add_argument( '--pretrained', default=False, action='store_true')
args = parser.parse_args()
def main():
# Pytorch Part
if args.dataset=='stanford_dogs':
num_classes=120
train_dst = vision.datasets.StanfordDogs( 'data/StanfordDogs', split='train')
val_dst = vision.datasets.StanfordDogs( 'data/StanfordDogs', split='test')
elif args.dataset=='cub200':
num_classes=200
train_dst = vision.datasets.CUB200( 'data/CUB200', split='train')
val_dst = vision.datasets.CUB200( 'data/CUB200', split='test')
elif args.dataset=='fgvc_aircraft':
num_classes=102
train_dst = vision.datasets.FGVCAircraft( 'data/FGVCAircraft/', split='trainval')
val_dst = vision.datasets.FGVCAircraft( 'data/FGVCAircraft/', split='test')
elif args.dataset=='stanford_cars':
num_classes=196
train_dst = vision.datasets.StanfordCars( 'data/StanfordCars/', split='train')
val_dst = vision.datasets.StanfordCars( 'data/StanfordCars/', split='test')
else:
raise NotImplementedError
model = vision.models.classification.resnet18( num_classes=num_classes, pretrained=args.pretrained )
train_dst.transform = sT.Compose( [
sT.RandomResizedCrop(224),
sT.RandomHorizontalFlip(),
sT.ToTensor(),
sT.Normalize( mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225] )
] )
val_dst.transform = sT.Compose( [
sT.Resize(256),
sT.CenterCrop(224),
sT.ToTensor(),
sT.Normalize( mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225] )
] )
train_loader = torch.utils.data.DataLoader( train_dst, batch_size=32, shuffle=True, num_workers=4 )
val_loader = torch.utils.data.DataLoader( val_dst, batch_size=32, num_workers=4 )
TOTAL_ITERS=len(train_loader) * args.epochs
device = torch.device( 'cuda' if torch.cuda.is_available() else 'cpu' )
optim = torch.optim.SGD( model.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4 )
sched = torch.optim.lr_scheduler.CosineAnnealingLR( optim, T_max=TOTAL_ITERS )
# KAE Part
# Predefined task & metrics
task = kamal.tasks.StandardTask.classification()
metric = kamal.tasks.StandardMetrics.classification()
# Evaluator and Trainer
evaluator = engine.evaluator.BasicEvaluator( val_loader, metric=metric, progress=True )
trainer = engine.trainer.BasicTrainer(
logger=kamal.utils.logger.get_logger(args.dataset),
tb_writer=SummaryWriter(log_dir='run/%s-%s'%(args.dataset, time.asctime().replace( ' ', '_' )) )
)
# setup trainer
trainer.setup( model=model,
task=task,
dataloader=train_loader,
optimizer=optim,
device=device )
trainer.add_callback(
engine.DefaultEvents.AFTER_STEP(every=10),
callbacks=callbacks.MetricsLogging(keys=('total_loss', 'lr')))
trainer.add_callback(
engine.DefaultEvents.AFTER_STEP,
callbacks=callbacks.LRSchedulerCallback(schedulers=[sched]))
ckpt_callback = trainer.add_callback(
engine.DefaultEvents.AFTER_EPOCH,
callbacks=callbacks.EvalAndCkpt(model=model, evaluator=evaluator, metric_name='acc', ckpt_prefix=args.dataset) )
trainer.run(start_iter=0, max_iter=TOTAL_ITERS)
ckpt_callback.callback.final_ckpt(ckpt_dir='pretrained', add_md5=True)
if __name__=='__main__':
main()
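# Example invocation (hypothetical data layout; --dataset must be one of
# stanford_dogs, cub200, fgvc_aircraft or stanford_cars):
#   python examples/finegraind.py --dataset cub200 --lr 0.01 --epochs 200 --pretrained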
| 46.485437
| 120
| 0.644319
|
48dc96203fa9c60cec5c273e2297766ffe9f7756
| 2,780
|
py
|
Python
|
firestore/setup.py
|
vlasenkoalexey/google-cloud-python
|
037a7619d9ce135047e0913d3b1daa0f1f8a48b8
|
[
"Apache-2.0"
] | null | null | null |
firestore/setup.py
|
vlasenkoalexey/google-cloud-python
|
037a7619d9ce135047e0913d3b1daa0f1f8a48b8
|
[
"Apache-2.0"
] | 3
|
2019-06-20T05:20:15.000Z
|
2019-06-27T05:01:16.000Z
|
firestore/setup.py
|
HemangChothani/google-cloud-python
|
df185b9129cf38e2a48927a62e9d2e255cc5f115
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import setuptools
# Package metadata.
name = "google-cloud-firestore"
description = "Google Cloud Firestore API client library"
version = "1.5.0"
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
# 'Development Status :: 5 - Production/Stable'
release_status = "Development Status :: 4 - Beta"
dependencies = [
"google-api-core[grpc] >= 1.14.0, < 2.0.0dev",
"google-cloud-core >= 1.0.3, < 2.0dev",
"pytz",
]
extras = {}
# Setup boilerplate below this line.
package_root = os.path.abspath(os.path.dirname(__file__))
readme_filename = os.path.join(package_root, "README.rst")
with io.open(readme_filename, encoding="utf-8") as readme_file:
readme = readme_file.read()
# Only include packages under the 'google' namespace. Do not include tests,
# benchmarks, etc.
packages = [
package for package in setuptools.find_packages() if package.startswith("google")
]
# Determine which namespaces are needed.
namespaces = ["google"]
if "google.cloud" in packages:
namespaces.append("google.cloud")
setuptools.setup(
name=name,
version=version,
description=description,
long_description=readme,
author="Google LLC",
author_email="googleapis-packages@google.com",
license="Apache 2.0",
url="https://github.com/GoogleCloudPlatform/google-cloud-python",
classifiers=[
release_status,
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent",
"Topic :: Internet",
],
platforms="Posix; MacOS X; Windows",
packages=packages,
namespace_packages=namespaces,
install_requires=dependencies,
extras_require=extras,
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
include_package_data=True,
zip_safe=False,
)
| 30.549451
| 85
| 0.684892
|
2820ff182cf9f798ce835ef12616b6d3b86c2c96
| 2,272
|
py
|
Python
|
GithubApiDemo/github_utils.py
|
eric-nord/githubApiDemo
|
ca9f05d60f099d6a35765f31cfdd1c6ad4618fde
|
[
"MIT"
] | null | null | null |
GithubApiDemo/github_utils.py
|
eric-nord/githubApiDemo
|
ca9f05d60f099d6a35765f31cfdd1c6ad4618fde
|
[
"MIT"
] | null | null | null |
GithubApiDemo/github_utils.py
|
eric-nord/githubApiDemo
|
ca9f05d60f099d6a35765f31cfdd1c6ad4618fde
|
[
"MIT"
] | null | null | null |
"""GitHub util module.
Functions for pulling data from GitHub APIs
"""
__version__ = "0.0.1"
__author__ = "Eric Nord"
import utils
def get_orgs():
""" Provides list of github organization urls based on authenticated user. """
url = "https://api.github.com/user/orgs"
org_urls = []
orgs = utils.get_json(url)
for org in orgs:
org_urls.append(org["url"])
return org_urls
def list_members(orgs):
"""Provides a list of Member urls per organizations.
param orgs either a list of urls pointing to organizations or a single org name
return list of member urls
"""
members = []
if isinstance(orgs, list):
# if given a list of org urls, fetch the member list for each org
for url in orgs:
# append /members to the url; the bare org url is not a valid members endpoint
url = url + "/members"
print("Checking " + url)
members_data = utils.get_json(url)
for member in members_data:
members.append(member["url"])
return members
else:
#build url from input org name and return member list
url = "https://api.github.com/orgs/" + orgs + "/members"
members_data = utils.get_json(url)
#check for invalid GitHub credentials or invalid github org name
try:
for member in members_data:
members.append(member["url"])
return members
except TypeError:
if(members_data["message"] == "Not Found"):
print("That organization doesn't exist try again\n")
raise SystemExit
elif(members_data["message"] == "Bad credentials"):
print("Please verify GitHub credentials are correct in config.py")
raise SystemExit
else:
print(members_data)
raise SystemExit
def check_for_null(attribute, memberUrls):
"""Provides a list of Member urls that have [attribute] is null.
param attribute to check for null value
params memberUrls List of member urls to check
return list of member urls with null [attribute] field
"""
attributeNotFound = []
for url in memberUrls:
member_data = utils.get_json(url)
if member_data[attribute] is None:
#TODO: TBD Could grab email here if speed was an issue
attributeNotFound.append(url)
return attributeNotFound
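# Example usage (hypothetical; requires valid GitHub credentials in config.py and
# network access):
#   org_urls = get_orgs()
#   member_urls = list_members(org_urls)        # or list_members("some-org-name")
#   missing_email = check_for_null("email", member_urls)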
| 26.418605
| 85
| 0.667694
|
979a13cdc20be84ee6e8e8d35412e6cfa80a1b89
| 3,879
|
py
|
Python
|
event_handler/sources.py
|
neilton-ciandt/fourkeys
|
a11b1f76a055de8aa31c3556bd15fc89ce8c61af
|
[
"Apache-2.0"
] | null | null | null |
event_handler/sources.py
|
neilton-ciandt/fourkeys
|
a11b1f76a055de8aa31c3556bd15fc89ce8c61af
|
[
"Apache-2.0"
] | null | null | null |
event_handler/sources.py
|
neilton-ciandt/fourkeys
|
a11b1f76a055de8aa31c3556bd15fc89ce8c61af
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hmac
from hashlib import sha1
import os
from google.cloud import secretmanager
PROJECT_NAME = os.environ.get("PROJECT_NAME")
class EventSource(object):
"""
A source of event data being delivered to the webhook
"""
def __init__(self, signature_header, verification_func):
self.signature = signature_header
self.verification = verification_func
def github_verification(signature, body):
"""
Verifies that the signature received from the github event is accurate
"""
if not signature:
raise Exception("Github signature is empty")
expected_signature = "sha1="
try:
# Get secret from Cloud Secret Manager
secret = get_secret(PROJECT_NAME, "event-handler", "latest")
# Compute the hashed signature
hashed = hmac.new(secret, body, sha1)
expected_signature += hashed.hexdigest()
except Exception as e:
print(e)
return hmac.compare_digest(signature, expected_signature)
def circleci_verification(signature, body):
"""
Verifies that the signature received from the circleci event is accurate
"""
if not signature:
raise Exception("CircleCI signature is empty")
expected_signature = "v1="
try:
# Get secret from Cloud Secret Manager
secret = get_secret(PROJECT_NAME, "event-handler", "latest")
# Compute the hashed signature
hashed = hmac.new(secret, body, 'sha256')
expected_signature += hashed.hexdigest()
except Exception as e:
print(e)
return hmac.compare_digest(signature, expected_signature)
def simple_token_verification(token, body):
"""
Verifies that the token received from the event is accurate
"""
if not token:
raise Exception("Token is empty")
secret = get_secret(PROJECT_NAME, "event-handler", "1")
return secret.decode() == token
def get_secret(project_name, secret_name, version_num):
"""
Returns secret payload from Cloud Secret Manager
"""
try:
client = secretmanager.SecretManagerServiceClient()
name = client.secret_version_path(
project_name, secret_name, version_num
)
secret = client.access_secret_version(name)
return secret.payload.data
except Exception as e:
print(e)
def get_source(headers):
"""
Gets the source from the User-Agent header
"""
if "X-Gitlab-Event" in headers:
return "gitlab"
if "tekton" in headers.get("Ce-Type", ""):
return "tekton"
if "GitHub-Hookshot" in headers.get("User-Agent", ""):
return "github"
if "Circleci-Event-Type" in headers:
return "circleci"
if "Kanbanize" in headers.get("User-Agent", ""):
return "kanbanize"
return headers.get("User-Agent")
AUTHORIZED_SOURCES = {
"github": EventSource(
"X-Hub-Signature", github_verification
),
"gitlab": EventSource(
"X-Gitlab-Token", simple_token_verification
),
"tekton": EventSource(
"tekton-secret", simple_token_verification
),
"circleci": EventSource(
"Circleci-Signature", circleci_verification
),
"kanbanize": EventSource(
"X-Kanbanize-Token", simple_token_verification
),
}
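# Illustration of the sender side of github_verification above (hypothetical
# helper, not used by the handler): GitHub signs the raw request body with the
# shared secret and sends "sha1=<hexdigest>" in the X-Hub-Signature header.
def _example_github_signature(secret, body):
    # secret and body are bytes; the result matches what github_verification expects.
    return "sha1=" + hmac.new(secret, body, sha1).hexdigest()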
| 27.510638
| 76
| 0.669502
|
ae74fd64c076c93d4599e9c629fb73f4fcfe78fb
| 512
|
py
|
Python
|
Usefulfunctions/TestDFShift.py
|
DinhLamPham/PredictingESN
|
f8e6b8f9c0a2d4c052c6178f4b4fe793055050ff
|
[
"Apache-2.0"
] | 2
|
2021-04-20T01:11:02.000Z
|
2021-07-18T05:36:05.000Z
|
Usefulfunctions/TestDFShift.py
|
DinhLamPham/PredictingESN
|
f8e6b8f9c0a2d4c052c6178f4b4fe793055050ff
|
[
"Apache-2.0"
] | null | null | null |
Usefulfunctions/TestDFShift.py
|
DinhLamPham/PredictingESN
|
f8e6b8f9c0a2d4c052c6178f4b4fe793055050ff
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
from pandas import DataFrame, concat
actList = ['ac1', 'ac2', 'ac3', 'ac4', 'ac5', 'ac6', 'ac7', 'ac8', 'ac9']
perList = ['per1', 'per2', 'per3', 'per4', 'per5', 'per6', 'per7', 'per8', 'per9']
n_in = 3
# [actList.append("END") for _ in range(n_in)]
dfAct = DataFrame(actList)
dfX = DataFrame()
for i in range(0, n_in):
currentVar = dfAct.shift(-i)
dfX = concat([dfX, currentVar], axis=1)
dfX.dropna(inplace=True)
[perList.pop(0) for _ in range(n_in-1)]
dfy = DataFrame(perList)
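# What this produces: concatenating shift(0), shift(-1) and shift(-2) column-wise
# and dropping NaN rows leaves 7 sliding windows of 3 consecutive activities,
# ('ac1','ac2','ac3') ... ('ac7','ac8','ac9'). Popping the first n_in-1 entries of
# perList leaves per3..per9, so dfy pairs each window with the performance value
# at the window's last activity.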
| 25.6
| 82
| 0.630859
|
5f577867e368ab91c52fad97ed04fc36d86beb98
| 4,987
|
py
|
Python
|
run_tests.py
|
ctw/line_profiler
|
5994c08bec2e4845a2b74d6c5f2d5c9b526f696f
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2016-06-24T16:03:49.000Z
|
2016-06-24T16:03:49.000Z
|
run_tests.py
|
ctw/line_profiler
|
5994c08bec2e4845a2b74d6c5f2d5c9b526f696f
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
run_tests.py
|
ctw/line_profiler
|
5994c08bec2e4845a2b74d6c5f2d5c9b526f696f
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from os.path import dirname, join, abspath
import sqlite3
import sys
import os
import re
def is_cibuildwheel():
"""Check if run with cibuildwheel."""
return 'CIBUILDWHEEL' in os.environ
def temp_rename_kernprof(repo_dir):
"""
Hacky workaround so kernprof.py doesn't get covered twice (installed and local).
This is needed to combine the .coverage files, since file paths need to be unique.
"""
original_path = repo_dir + '/kernprof.py'
tmp_path = original_path + '.tmp'
if os.path.isfile(original_path):
os.rename(original_path, tmp_path)
elif os.path.isfile(tmp_path):
os.rename(tmp_path, original_path)
def replace_docker_path(path, runner_project_dir):
"""Update path to a file installed in a temp venv to runner_project_dir."""
pattern = re.compile(r"\/tmp\/.+?\/site-packages")
return pattern.sub(runner_project_dir, path)
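# For example (hypothetical runner dir '/home/runner/work/proj'):
#   replace_docker_path('/tmp/tmp1234/venv/lib/python3.9/site-packages/line_profiler/x.py',
#                       '/home/runner/work/proj')
# returns '/home/runner/work/proj/line_profiler/x.py', because the non-greedy
# pattern consumes everything from '/tmp/' up to the first '/site-packages'.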
def update_coverage_file(coverage_path, runner_project_dir):
"""
Since the paths inside of docker vary from the runner paths,
the paths in the .coverage file need to be adjusted to combine them,
since 'coverage combine <folder>' checks if the file paths exist.
"""
sqliteConnection = None
try:
sqliteConnection = sqlite3.connect(coverage_path)
cursor = sqliteConnection.cursor()
print('Connected to Coverage SQLite')
read_file_query = 'SELECT id, path from file'
cursor.execute(read_file_query)
old_records = cursor.fetchall()
new_records = [(replace_docker_path(path, runner_project_dir), _id) for _id, path in old_records]
print('Updated coverage file paths:\n', new_records)
sql_update_query = 'Update file set path = ? where id = ?'
cursor.executemany(sql_update_query, new_records)
sqliteConnection.commit()
print('Coverage Updated successfully')
cursor.close()
except sqlite3.Error as error:
print('Failed to update coverage file: ', error)
finally:
if sqliteConnection:
sqliteConnection.close()
print('The sqlite connection is closed')
def copy_coverage_cibuildwheel_docker(runner_project_dir):
"""
When run with cibuildwheel under linux, the tests run in the folder /project
inside docker and the coverage files need to be copied to the output folder.
"""
coverage_path = '/project/tests/.coverage'
if os.path.isfile(coverage_path):
update_coverage_file(coverage_path, runner_project_dir)
env_hash = hash((sys.version, os.environ.get('AUDITWHEEL_PLAT', '')))
os.makedirs('/output', exist_ok=True)
os.rename(coverage_path, '/output/.coverage.{}'.format(env_hash))
if __name__ == '__main__':
cwd = os.getcwd()
repo_dir = abspath(dirname(__file__))
test_dir = join(repo_dir, 'tests')
print('cwd = {!r}'.format(cwd))
if is_cibuildwheel():
# rename kernprof.py to kernprof.py.tmp
temp_rename_kernprof(repo_dir)
import pytest
# Prefer testing the installed version, but fallback to testing the
# development version.
try:
import ubelt as ub
except ImportError:
print('running this test script requires ubelt')
raise
# Statically check if ``line_profiler`` is installed outside of the repo.
# To do this, we make a copy of PYTHONPATH, remove the repodir, and use
# ubelt to check to see if ``line_profiler`` can be resolved to a path.
temp_path = list(map(abspath, sys.path))
if repo_dir in temp_path:
temp_path.remove(repo_dir)
modpath = ub.modname_to_modpath('line_profiler', sys_path=temp_path)
if modpath is not None:
# If it does, then import it. This should cause the installed version
# to be used on further imports even if the repo_dir is in the path.
print('Using installed version of line_profiler')
module = ub.import_module_from_path(modpath, index=0)
print('Installed module = {!r}'.format(module))
else:
print('No installed version of line_profiler found')
try:
print('Changing dirs to test_dir={!r}'.format(test_dir))
os.chdir(test_dir)
package_name = 'line_profiler'
pytest_args = [
'--cov-config', '../pyproject.toml',
'--cov-report', 'html',
'--cov-report', 'term',
'--cov-report', 'xml',
'--cov=' + package_name,
'--cov=' + 'kernprof',
]
if is_cibuildwheel():
pytest_args.append('--cov-append')
pytest_args = pytest_args + sys.argv[1:]
sys.exit(pytest.main(pytest_args))
finally:
os.chdir(cwd)
if is_cibuildwheel():
# restore kernprof.py from kernprof.py.tmp
temp_rename_kernprof(repo_dir)
# for CIBW under linux
copy_coverage_cibuildwheel_docker('/home/runner/work/line_profiler/line_profiler')
print('Restoring cwd = {!r}'.format(cwd))
| 35.621429
| 105
| 0.658512
|
39727e4394b24834fc04e78fe0f36128af54b58c
| 609
|
py
|
Python
|
samples/advanced_client.py
|
DarkmatterVale/hurricane
|
94e9e56fcc6d73f5f76ad1fe9e3e4f248549fe7b
|
[
"MIT"
] | 1
|
2017-01-13T00:17:09.000Z
|
2017-01-13T00:17:09.000Z
|
samples/advanced_client.py
|
DarkmatterVale/hurricane
|
94e9e56fcc6d73f5f76ad1fe9e3e4f248549fe7b
|
[
"MIT"
] | null | null | null |
samples/advanced_client.py
|
DarkmatterVale/hurricane
|
94e9e56fcc6d73f5f76ad1fe9e3e4f248549fe7b
|
[
"MIT"
] | null | null | null |
import multiprocessing
from hurricane import SlaveNode
def run_node():
client = SlaveNode(debug=True, master_node='127.0.0.1')
client.initialize()
client.wait_for_initialize()
while True:
task_data = client.wait_for_task()
print("[*] Task name: " + str(task_data["name"]))
client.finish_task(generated_data={"completion_status" : "success"})
if __name__ == '__main__':
for node_num in range(multiprocessing.cpu_count()):
print("[*] Starting another node...")
node_process = multiprocessing.Process(target=run_node)
node_process.start()
| 29
| 76
| 0.676519
|
6a1c8b6c1ac42df10365ea42758d89ba0d92fac8
| 15,153
|
py
|
Python
|
src/bindings/python/tests/test_onnx/test_ops_reduction.py
|
artkuli/openvino
|
eb2fb5bf7df36ae55e3251816999b801ce053335
|
[
"Apache-2.0"
] | 1,127
|
2018-10-15T14:36:58.000Z
|
2020-04-20T09:29:44.000Z
|
src/bindings/python/tests/test_onnx/test_ops_reduction.py
|
artkuli/openvino
|
eb2fb5bf7df36ae55e3251816999b801ce053335
|
[
"Apache-2.0"
] | 439
|
2018-10-20T04:40:35.000Z
|
2020-04-19T05:56:25.000Z
|
src/bindings/python/tests/test_onnx/test_ops_reduction.py
|
tuxedcat/openvino
|
5939cb1b363ebb56b73c2ad95d8899961a084677
|
[
"Apache-2.0"
] | 414
|
2018-10-17T05:53:46.000Z
|
2020-04-16T17:29:53.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import onnx
import pytest
from tests.runtime import get_runtime
from tests.test_onnx.utils import (
run_node,
import_onnx_model,
)
reduce_data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)
reduce_axis_parameters = [
(0,),
(1,),
(2,),
(0, 1),
(0, 2),
(1, 2),
(0, 1, 2),
]
reduce_operation_parameters_as_attr = [
("ReduceMax", np.max),
("ReduceMin", np.min),
("ReduceMean", np.mean),
("ReduceProd", np.prod),
]
reduce_operation_parameters_as_const = [
("ReduceSum", np.sum),
]
def import_and_compute(op_type, input_data, **node_attrs):
data_inputs = [np.array(input_data)]
node = onnx.helper.make_node(op_type, inputs=["x"], outputs=["y"], **node_attrs)
return run_node(node, data_inputs).pop()
def import_and_compute_with_axes_as_const(op_type, data, axes, **node_attrs):
data_input = np.array(data)
axes_input = np.array(axes, dtype=int)
axes_const_node = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=["const_axes"],
value=onnx.helper.make_tensor(
name="const_axes",
data_type=onnx.TensorProto.INT64,
dims=axes_input.shape,
vals=axes_input.flatten(),
),
)
node = onnx.helper.make_node(op_type, inputs=["x", "const_axes"], outputs=["y"], **node_attrs)
graph = onnx.helper.make_graph(
[axes_const_node, node],
"test_graph",
[onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, data_input.shape)],
[onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, ())],
)
model = onnx.helper.make_model(graph, producer_name="ngraph ONNX Importer")
model.opset_import[0].version = 13
ng_model_function = import_onnx_model(model)
runtime = get_runtime()
computation = runtime.computation(ng_model_function)
return computation(data_input)[0]
@pytest.mark.parametrize(("operation", "ref_operation"),
reduce_operation_parameters_as_attr + reduce_operation_parameters_as_const)
def test_reduce_operation_keepdims_none_axes(operation, ref_operation):
assert np.array_equal(import_and_compute(operation, reduce_data, keepdims=True),
ref_operation(reduce_data, keepdims=True))
@pytest.mark.parametrize(("operation", "ref_operation"), reduce_operation_parameters_as_attr)
@pytest.mark.parametrize("axes", reduce_axis_parameters)
def test_reduce_operation_keepdims_with_axes_as_attr(operation, ref_operation, axes):
assert np.array_equal(import_and_compute(operation, reduce_data, axes=axes, keepdims=True),
ref_operation(reduce_data, keepdims=True, axis=axes))
@pytest.mark.parametrize(("operation", "ref_operation"), reduce_operation_parameters_as_const)
@pytest.mark.parametrize("axes", reduce_axis_parameters)
def test_reduce_operation_keepdims_with_axes_as_const(operation, ref_operation, axes):
assert np.array_equal(import_and_compute_with_axes_as_const(operation, reduce_data, axes, keepdims=True),
ref_operation(reduce_data, keepdims=True, axis=axes))
@pytest.mark.parametrize("axes", [
None,
(0,),
(1,),
(2,),
(0, 1),
(0, 2),
(1, 2),
(0, 1, 2)])
@pytest.mark.parametrize(("operation", "ref_operation"), reduce_operation_parameters_as_attr)
def test_reduce_operation_no_keepdims_axes_as_attr(operation, ref_operation, axes):
if axes:
assert np.array_equal(import_and_compute(operation, reduce_data, axes=axes, keepdims=False),
ref_operation(reduce_data, keepdims=False, axis=axes))
else:
assert np.array_equal(import_and_compute(operation, reduce_data, keepdims=False),
ref_operation(reduce_data, keepdims=False))
@pytest.mark.parametrize("axes", [
None,
(0,),
(1,),
(2,),
(0, 1),
(0, 2),
(1, 2),
(0, 1, 2)])
@pytest.mark.parametrize(("operation", "ref_operation"), reduce_operation_parameters_as_const)
def test_reduce_operation_no_keepdims_axes_as_const(operation, ref_operation, axes):
if axes:
assert np.array_equal(import_and_compute_with_axes_as_const(operation,
reduce_data,
axes,
keepdims=False),
ref_operation(reduce_data, keepdims=False, axis=axes))
else:
assert np.array_equal(import_and_compute(operation, reduce_data, keepdims=False),
ref_operation(reduce_data, keepdims=False))
@pytest.mark.parametrize("reduction_axes", [(0,), (0, 2), (0, 1, 2)])
def test_reduce_l1(reduction_axes):
shape = [2, 4, 3, 2]
np.random.seed(133391)
input_data = np.random.uniform(-100, 100, shape).astype(np.float32)
expected = np.sum(np.abs(input_data), keepdims=True, axis=reduction_axes)
node = onnx.helper.make_node("ReduceL1", inputs=["x"], outputs=["y"], axes=reduction_axes)
ng_result = np.array(run_node(node, [input_data]).pop())
assert np.array_equal(expected.shape, ng_result.shape)
assert np.allclose(expected, ng_result)
expected = np.sum(np.abs(input_data), keepdims=False, axis=reduction_axes)
node = onnx.helper.make_node("ReduceL1", inputs=["x"], outputs=["y"], keepdims=0, axes=reduction_axes)
ng_result = np.array(run_node(node, [input_data]).pop())
assert np.array_equal(expected.shape, ng_result.shape)
assert np.allclose(expected, ng_result)
def test_reduce_l1_default_axes():
shape = [2, 4, 3, 2]
np.random.seed(133391)
input_data = np.random.uniform(-100, 100, shape).astype(np.float32)
expected = np.sum(np.abs(input_data), keepdims=True)
node = onnx.helper.make_node("ReduceL1", inputs=["x"], outputs=["y"])
ng_result = np.array(run_node(node, [input_data]).pop())
assert np.array_equal(expected.shape, ng_result.shape)
assert np.allclose(expected, ng_result)
expected = np.array(np.sum(np.abs(input_data), keepdims=False))
node = onnx.helper.make_node("ReduceL1", inputs=["x"], outputs=["y"], keepdims=0)
ng_result = np.array(run_node(node, [input_data]).pop())
assert np.array_equal(expected.shape, ng_result.shape)
assert np.allclose(expected, ng_result)
@pytest.mark.parametrize("reduction_axes", [(0,), (0, 2), (0, 1, 2)])
def test_reduce_l2(reduction_axes):
shape = [2, 4, 3, 2]
np.random.seed(133391)
input_data = np.random.uniform(-100, 100, shape).astype(np.float32)
expected = np.sqrt(np.sum(np.square(input_data), keepdims=True, axis=reduction_axes))
node = onnx.helper.make_node("ReduceL2", inputs=["x"], outputs=["y"], axes=reduction_axes)
raw_result = run_node(node, [input_data])
ng_result = np.array(raw_result.pop())
assert np.array_equal(expected.shape, ng_result.shape)
assert np.allclose(expected, ng_result)
expected = np.sqrt(np.sum(np.square(input_data), keepdims=False, axis=reduction_axes))
node = onnx.helper.make_node("ReduceL2", inputs=["x"], outputs=["y"], keepdims=0, axes=reduction_axes)
ng_result = np.array(run_node(node, [input_data]).pop())
assert np.array_equal(expected.shape, ng_result.shape)
assert np.allclose(expected, ng_result)
def test_reduce_l2_default_axes():
shape = [2, 4, 3, 2]
np.random.seed(133391)
input_data = np.random.uniform(-100, 100, shape).astype(np.float32)
expected = np.sqrt(np.sum(np.square(input_data), keepdims=True))
node = onnx.helper.make_node("ReduceL2", inputs=["x"], outputs=["y"])
ng_result = np.array(run_node(node, [input_data]).pop())
assert np.array_equal(expected.shape, ng_result.shape)
assert np.allclose(expected, ng_result)
expected = np.array(np.sqrt(np.sum(np.square(input_data), keepdims=False)))
node = onnx.helper.make_node("ReduceL2", inputs=["x"], outputs=["y"], keepdims=0)
ng_result = np.array(run_node(node, [input_data]).pop())
assert np.array_equal(expected.shape, ng_result.shape)
assert np.allclose(expected, ng_result)
@pytest.mark.parametrize("reduction_axes", [(0,), (0, 2), (0, 1, 2)])
def test_reduce_log_sum(reduction_axes):
shape = [2, 4, 3, 2]
np.random.seed(133391)
input_data = np.random.uniform(0, 1, shape).astype(np.float32)
expected = np.log(np.sum(input_data, keepdims=True, axis=reduction_axes))
node = onnx.helper.make_node("ReduceLogSum", inputs=["x"], outputs=["y"], axes=reduction_axes)
ng_result = run_node(node, [input_data]).pop()
assert np.array_equal(expected.shape, ng_result.shape)
assert np.allclose(expected, ng_result)
expected = np.log(np.sum(input_data, keepdims=False, axis=reduction_axes))
node = onnx.helper.make_node("ReduceLogSum", inputs=["x"], outputs=["y"], keepdims=0, axes=reduction_axes)
ng_result = run_node(node, [input_data]).pop()
assert np.array_equal(expected.shape, ng_result.shape)
assert np.allclose(expected, ng_result)
def test_reduce_log_sum_default_axes():
shape = [2, 4, 3, 2]
np.random.seed(133391)
input_data = np.random.uniform(0, 1, shape).astype(np.float32)
expected = np.log(np.sum(input_data, keepdims=True))
node = onnx.helper.make_node("ReduceLogSum", inputs=["x"], outputs=["y"])
ng_result = np.array(run_node(node, [input_data]).pop())
assert np.array_equal(expected.shape, ng_result.shape)
assert np.allclose(expected, ng_result)
expected = np.log(np.sum(input_data, keepdims=False))
node = onnx.helper.make_node("ReduceLogSum", inputs=["x"], outputs=["y"], keepdims=0)
ng_result = np.array(run_node(node, [input_data]).pop())
assert np.array_equal(expected.shape, ng_result.shape)
assert np.allclose(expected, ng_result)
def test_reduce_log_sum_exp():
def logsumexp(data, axis=None, keepdims=True):
return np.log(np.sum(np.exp(data), axis=axis, keepdims=keepdims))
data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)
assert np.array_equal(import_and_compute("ReduceLogSumExp", data), logsumexp(data, keepdims=True))
assert np.array_equal(import_and_compute("ReduceLogSumExp", data, keepdims=0), logsumexp(data, keepdims=False))
assert np.array_equal(import_and_compute("ReduceLogSumExp", data, axes=(1,)), logsumexp(data, keepdims=True, axis=(1,)))
assert np.array_equal(
import_and_compute("ReduceLogSumExp", data, axes=(1,), keepdims=0),
logsumexp(data, keepdims=False, axis=(1,)),
)
assert np.array_equal(import_and_compute("ReduceLogSumExp", data, axes=(0, 2)), logsumexp(data, keepdims=True, axis=(0, 2)))
assert np.array_equal(
import_and_compute("ReduceLogSumExp", data, axes=(0, 2), keepdims=0),
logsumexp(data, keepdims=False, axis=(0, 2)),
)
assert np.array_equal(
import_and_compute("ReduceLogSumExp", data, axes=(0, 1, 2)),
logsumexp(data, keepdims=True, axis=(0, 1, 2)),
)
assert np.array_equal(
import_and_compute("ReduceLogSumExp", data, axes=(0, 1, 2), keepdims=0),
logsumexp(data, keepdims=False, axis=(0, 1, 2)),
)
@pytest.mark.parametrize("reduction_axes", [(0,), (0, 2), (0, 1, 2)])
def test_reduce_sum_square(reduction_axes):
shape = [2, 4, 3, 2]
np.random.seed(133391)
input_data = np.random.uniform(-100, 100, shape).astype(np.float32)
expected = np.sum(np.square(input_data), keepdims=True, axis=reduction_axes)
node = onnx.helper.make_node("ReduceSumSquare", inputs=["x"], outputs=["y"], axes=reduction_axes)
ng_result = np.array(run_node(node, [input_data]).pop())
assert np.array_equal(expected.shape, ng_result.shape)
assert np.allclose(expected, ng_result)
expected = np.sum(np.square(input_data), keepdims=False, axis=reduction_axes)
node = onnx.helper.make_node("ReduceSumSquare", inputs=["x"], outputs=["y"], keepdims=0, axes=reduction_axes)
ng_result = np.array(run_node(node, [input_data]).pop())
assert np.array_equal(expected.shape, ng_result.shape)
assert np.allclose(expected, ng_result)
def test_reduce_sum_square_default_axes():
shape = [2, 4, 3, 2]
np.random.seed(133391)
input_data = np.random.uniform(-100, 100, shape).astype(np.float32)
expected = np.sum(np.square(input_data), keepdims=True)
node = onnx.helper.make_node("ReduceSumSquare", inputs=["x"], outputs=["y"])
ng_result = np.array(run_node(node, [input_data]).pop())
assert np.array_equal(expected.shape, ng_result.shape)
assert np.allclose(expected, ng_result)
expected = np.sum(np.square(input_data), keepdims=False)
node = onnx.helper.make_node("ReduceSumSquare", inputs=["x"], outputs=["y"], keepdims=0)
ng_result = np.array(run_node(node, [input_data]).pop())
assert np.array_equal(expected.shape, ng_result.shape)
assert np.allclose(expected, ng_result)
def test_reduce_argmin():
def argmin(ndarray, axis, keepdims=False):
res = np.argmin(ndarray, axis=axis)
if keepdims:
res = np.expand_dims(res, axis=axis)
return res
data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)
assert np.array_equal(import_and_compute("ArgMin", data, axis=0), argmin(data, keepdims=True, axis=0))
assert np.array_equal(import_and_compute("ArgMin", data, axis=0, keepdims=0), argmin(data, keepdims=False, axis=0))
assert np.array_equal(import_and_compute("ArgMin", data, axis=1), argmin(data, keepdims=True, axis=1))
assert np.array_equal(import_and_compute("ArgMin", data, axis=1, keepdims=0), argmin(data, keepdims=False, axis=1))
assert np.array_equal(import_and_compute("ArgMin", data, axis=2), argmin(data, keepdims=True, axis=2))
assert np.array_equal(import_and_compute("ArgMin", data, axis=2, keepdims=0), argmin(data, keepdims=False, axis=2))
def test_reduce_argmax():
def argmax(ndarray, axis, keepdims=False):
res = np.argmax(ndarray, axis=axis)
if keepdims:
res = np.expand_dims(res, axis=axis)
return res
data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)
assert np.array_equal(import_and_compute("ArgMax", data, axis=0), argmax(data, keepdims=True, axis=0))
assert np.array_equal(import_and_compute("ArgMax", data, axis=0, keepdims=0), argmax(data, keepdims=False, axis=0))
assert np.array_equal(import_and_compute("ArgMax", data, axis=1), argmax(data, keepdims=True, axis=1))
assert np.array_equal(import_and_compute("ArgMax", data, axis=1, keepdims=0), argmax(data, keepdims=False, axis=1))
assert np.array_equal(import_and_compute("ArgMax", data, axis=2), argmax(data, keepdims=True, axis=2))
assert np.array_equal(import_and_compute("ArgMax", data, axis=2, keepdims=0), argmax(data, keepdims=False, axis=2))
| 43.543103
| 128
| 0.6765
|
798fe96e8912464de16f17dcb1bd703407dd9fe1
| 13,179
|
py
|
Python
|
TimeWrapper_JE/venv/Lib/site-packages/pip/_vendor/html5lib/treebuilders/etree.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | 227
|
2021-01-20T05:34:32.000Z
|
2022-03-29T12:43:05.000Z
|
TimeWrapper_JE/venv/Lib/site-packages/pip/_vendor/html5lib/treebuilders/etree.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | 55
|
2020-09-07T02:12:51.000Z
|
2022-03-23T02:37:42.000Z
|
TimeWrapper_JE/venv/Lib/site-packages/pip/_vendor/html5lib/treebuilders/etree.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | 239
|
2021-01-28T02:59:53.000Z
|
2022-03-29T08:02:17.000Z
|
from __future__ import absolute_import, division, unicode_literals
# pylint:disable=protected-access
from pip._vendor.six import text_type
import re
from copy import copy
from . import base
from .. import _ihatexml
from .. import constants
from ..constants import namespaces
from .._utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation, fullTree=False):
ElementTree = ElementTreeImplementation
ElementTreeCommentType = ElementTree.Comment("asd").tag
class Element(base.Node):
def __init__(self, name, namespace=None):
self._name = name
self._namespace = namespace
self._element = ElementTree.Element(self._getETreeTag(name,
namespace))
if namespace is None:
self.nameTuple = namespaces["html"], self._name
else:
self.nameTuple = self._namespace, self._name
self.parent = None
self._childNodes = []
self._flags = []
def _getETreeTag(self, name, namespace):
if namespace is None:
etree_tag = name
else:
etree_tag = "{%s}%s" % (namespace, name)
return etree_tag
def _setName(self, name):
self._name = name
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getName(self):
return self._name
name = property(_getName, _setName)
def _setNamespace(self, namespace):
self._namespace = namespace
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getNamespace(self):
return self._namespace
namespace = property(_getNamespace, _setNamespace)
def _getAttributes(self):
return self._element.attrib
def _setAttributes(self, attributes):
el_attrib = self._element.attrib
el_attrib.clear()
if attributes:
# calling .items _always_ allocates, and the above truthy check is cheaper than the
# allocation on average
for key, value in attributes.items():
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], key[1])
else:
name = key
el_attrib[name] = value
attributes = property(_getAttributes, _setAttributes)
def _getChildNodes(self):
return self._childNodes
def _setChildNodes(self, value):
del self._element[:]
self._childNodes = []
for element in value:
self.insertChild(element)
childNodes = property(_getChildNodes, _setChildNodes)
def hasContent(self):
"""Return true if the node has children or text"""
return bool(self._element.text or len(self._element))
def appendChild(self, node):
self._childNodes.append(node)
self._element.append(node._element)
node.parent = self
def insertBefore(self, node, refNode):
index = list(self._element).index(refNode._element)
self._element.insert(index, node._element)
node.parent = self
def removeChild(self, node):
self._childNodes.remove(node)
self._element.remove(node._element)
node.parent = None
def insertText(self, data, insertBefore=None):
if not(len(self._element)):
if not self._element.text:
self._element.text = ""
self._element.text += data
elif insertBefore is None:
# Insert the text as the tail of the last child element
if not self._element[-1].tail:
self._element[-1].tail = ""
self._element[-1].tail += data
else:
# Insert the text before the specified node
children = list(self._element)
index = children.index(insertBefore._element)
if index > 0:
if not self._element[index - 1].tail:
self._element[index - 1].tail = ""
self._element[index - 1].tail += data
else:
if not self._element.text:
self._element.text = ""
self._element.text += data
def cloneNode(self):
element = type(self)(self.name, self.namespace)
if self._element.attrib:
element._element.attrib = copy(self._element.attrib)
return element
def reparentChildren(self, newParent):
if newParent.childNodes:
newParent.childNodes[-1]._element.tail += self._element.text
else:
if not newParent._element.text:
newParent._element.text = ""
if self._element.text is not None:
newParent._element.text += self._element.text
self._element.text = ""
base.Node.reparentChildren(self, newParent)
class Comment(Element):
def __init__(self, data):
# Use the superclass constructor to set all properties on the
# wrapper element
self._element = ElementTree.Comment(data)
self.parent = None
self._childNodes = []
self._flags = []
def _getData(self):
return self._element.text
def _setData(self, value):
self._element.text = value
data = property(_getData, _setData)
class DocumentType(Element):
def __init__(self, name, publicId, systemId):
Element.__init__(self, "<!DOCTYPE>")
self._element.text = name
self.publicId = publicId
self.systemId = systemId
def _getPublicId(self):
return self._element.get("publicId", "")
def _setPublicId(self, value):
if value is not None:
self._element.set("publicId", value)
publicId = property(_getPublicId, _setPublicId)
def _getSystemId(self):
return self._element.get("systemId", "")
def _setSystemId(self, value):
if value is not None:
self._element.set("systemId", value)
systemId = property(_getSystemId, _setSystemId)
class Document(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_ROOT")
class DocumentFragment(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_FRAGMENT")
def testSerializer(element):
rv = []
def serializeElement(element, indent=0):
if not(hasattr(element, "tag")):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append("""<!DOCTYPE %s "%s" "%s">""" %
(element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>" % (element.text,))
elif element.tag == "DOCUMENT_ROOT":
rv.append("#document")
if element.text is not None:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
if element.tail is not None:
raise TypeError("Document node cannot have tail")
if hasattr(element, "attrib") and len(element.attrib):
raise TypeError("Document node cannot have attributes")
elif element.tag == ElementTreeCommentType:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
else:
assert isinstance(element.tag, text_type), \
"Expected unicode, got %s, %s" % (type(element.tag), element.tag)
nsmatch = tag_regexp.match(element.tag)
if nsmatch is None:
name = element.tag
else:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
name = "%s %s" % (prefix, name)
rv.append("|%s<%s>" % (' ' * indent, name))
if hasattr(element, "attrib"):
attributes = []
for name, value in element.attrib.items():
nsmatch = tag_regexp.match(name)
if nsmatch is not None:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
attr_string = "%s %s" % (prefix, name)
else:
attr_string = name
attributes.append((attr_string, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
if element.text:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
indent += 2
for child in element:
serializeElement(child, indent)
if element.tail:
rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
serializeElement(element, 0)
return "\n".join(rv)
def tostring(element): # pylint:disable=unused-variable
"""Serialize an element and its child nodes to a string"""
rv = []
filter = _ihatexml.InfosetFilter()
def serializeElement(element):
if isinstance(element, ElementTree.ElementTree):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append("""<!DOCTYPE %s PUBLIC "%s" "%s">""" %
(element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>" % (element.text,))
elif element.tag == "DOCUMENT_ROOT":
if element.text is not None:
rv.append(element.text)
if element.tail is not None:
raise TypeError("Document node cannot have tail")
if hasattr(element, "attrib") and len(element.attrib):
raise TypeError("Document node cannot have attributes")
for child in element:
serializeElement(child)
elif element.tag == ElementTreeCommentType:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (filter.fromXmlName(element.tag),))
else:
attr = " ".join(["%s=\"%s\"" % (
filter.fromXmlName(name), value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if element.tail:
rv.append(element.tail)
serializeElement(element)
return "".join(rv)
class TreeBuilder(base.TreeBuilder): # pylint:disable=unused-variable
documentClass = Document
doctypeClass = DocumentType
elementClass = Element
commentClass = Comment
fragmentClass = DocumentFragment
implementation = ElementTreeImplementation
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
if fullTree:
return self.document._element
else:
if self.defaultNamespace is not None:
return self.document._element.find(
"{%s}html" % self.defaultNamespace)
else:
return self.document._element.find("html")
def getFragment(self):
return base.TreeBuilder.getFragment(self)._element
return locals()
getETreeModule = moduleFactoryFactory(getETreeBuilder)
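

# --- Usage sketch (not part of the original module) --------------------------
# A hedged example of wiring this builder to the standard-library ElementTree
# implementation and serializing a parsed document.  The ``pip._vendor.html5lib``
# import path and the sample markup are assumptions made for illustration only.
if __name__ == "__main__":
    import xml.etree.ElementTree as DefaultElementTree

    from pip._vendor import html5lib

    etree_module = getETreeModule(DefaultElementTree)
    parser = html5lib.HTMLParser(tree=etree_module.TreeBuilder)
    document = parser.parse("<p>Hello <b>world</b></p>")
    # ``tostring`` walks the wrapped ElementTree nodes built by the parser.
    print(etree_module.tostring(document))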
| 38.311047
| 100
| 0.51142
|
ebad4574d715767f90a51dc09411165819ca2411
| 2,353
|
py
|
Python
|
BS/BS_034_240221/main.py
|
Aleksey-Voko/Word_forms_bases
|
f14173cef830e7a514dfaefba3bbbf0c02a3ac0f
|
[
"MIT"
] | null | null | null |
BS/BS_034_240221/main.py
|
Aleksey-Voko/Word_forms_bases
|
f14173cef830e7a514dfaefba3bbbf0c02a3ac0f
|
[
"MIT"
] | null | null | null |
BS/BS_034_240221/main.py
|
Aleksey-Voko/Word_forms_bases
|
f14173cef830e7a514dfaefba3bbbf0c02a3ac0f
|
[
"MIT"
] | null | null | null |
from BS.utils import (read_src_bs, get_string_list_from_file,
get_socket_word_form, save_list_to_file)
def check_unique_strings():
word_forms_bases = read_src_bs('src_dict/БС 23.02.21.txt')
bs_title_forms = [x.title_word_form for x in word_forms_bases]
bs_title_str_forms = [
' '.join(filter(None, [
x.name,
x.idf,
' '.join(x.info),
x.note.replace('.*', '').strip(),
]))
for x in bs_title_forms
]
    # Repeating lines
repeating_lines = get_string_list_from_file(
'src_dict/Повторы в группах. Повторяющиеся строки.txt')
r_lines_resp = []
r_lines_not_resp = []
for line in repeating_lines:
socket_form = get_socket_word_form(line)
str_form = ' '.join(filter(None, [
socket_form.name,
socket_form.idf,
' '.join(socket_form.info),
socket_form.note.replace('*', '').strip(),
]))
if bs_title_str_forms.count(str_form) == 1:
r_lines_resp.append(line)
else:
r_lines_not_resp.append(line)
save_list_to_file(
r_lines_resp,
'out/Повторы в группах. Повторяющиеся строки. П.4 Правил соблюдается.txt'
)
save_list_to_file(
r_lines_not_resp,
'out/Повторы в группах. Повторяющиеся строки. П.4 Правил не соблюдается.txt'
)
    # Unique lines
unique_lines = get_string_list_from_file(
'src_dict/Повторы в группах. Уникальные строки.txt')
u_lines_resp = []
u_lines_not_resp = []
for line in unique_lines:
socket_form = get_socket_word_form(line)
str_form = ' '.join(filter(None, [
socket_form.name,
socket_form.idf,
' '.join(socket_form.info),
socket_form.note.replace('*', '').strip(),
]))
if bs_title_str_forms.count(str_form) == 1:
u_lines_resp.append(line)
else:
u_lines_not_resp.append(line)
save_list_to_file(
u_lines_resp,
'out/Повторы в группах. Уникальные строки. П.4 Правил соблюдается.txt'
)
save_list_to_file(
u_lines_not_resp,
'out/Повторы в группах. Уникальные строки. П.4 Правил не соблюдается.txt'
)
if __name__ == '__main__':
check_unique_strings()
| 29.049383
| 84
| 0.607735
|
7c1709cfc5fe7447657bf3f0551a61105b84fcdf
| 544
|
py
|
Python
|
scripts/term_processing_utils.py
|
henrikolsvik/maplegf
|
f7b801eed42dd24001c64a172128e8089c7aa7f4
|
[
"MIT"
] | 1
|
2021-05-04T08:21:25.000Z
|
2021-05-04T08:21:25.000Z
|
scripts/term_processing_utils.py
|
henrikolsvik/masteroppgave
|
f7b801eed42dd24001c64a172128e8089c7aa7f4
|
[
"MIT"
] | null | null | null |
scripts/term_processing_utils.py
|
henrikolsvik/masteroppgave
|
f7b801eed42dd24001c64a172128e8089c7aa7f4
|
[
"MIT"
] | null | null | null |
def find_sequences_matching_metadata_and_write_matching_metadata(sequence_data, metadata, metadata_out_filename):
sequences = []
metadata_out = open(metadata_out_filename, "a+")
for metadata_item in metadata:
for sequence in sequence_data:
if metadata_item[0] in sequence[0] and sequence[0] not in [x[0] for x in sequences]:
metadata_out.write(str(metadata_item[0]) + "," + str(metadata_item[1]) + "\n")
sequences.append(sequence)
metadata_out.close()
return sequences
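

# --- Usage sketch (illustration only) -----------------------------------------
# The data layout assumed here is a guess: each sequence is a
# (sample_id, sequence) pair and each metadata row is a (sample_id, label)
# pair; the output file name below is likewise made up.
if __name__ == "__main__":
    example_sequences = [("sample_1", "ATGC"), ("sample_2", "GGTA")]
    example_metadata = [("sample_1", "healthy"), ("sample_3", "sick")]
    matched = find_sequences_matching_metadata_and_write_matching_metadata(
        example_sequences, example_metadata, "matched_metadata.csv")
    print(matched)  # only sample_1 appears in both inputs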
| 38.857143
| 113
| 0.683824
|
e0c1f033cc48af511ba052be812ffadff7fb46ef
| 679
|
py
|
Python
|
logs/views.py
|
henrique-c-ladeira/logerror
|
374591ab5b93b72a4cceead9a67d3052299eb826
|
[
"MIT"
] | null | null | null |
logs/views.py
|
henrique-c-ladeira/logerror
|
374591ab5b93b72a4cceead9a67d3052299eb826
|
[
"MIT"
] | null | null | null |
logs/views.py
|
henrique-c-ladeira/logerror
|
374591ab5b93b72a4cceead9a67d3052299eb826
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from logs.models import ErrorLog
from .helpers import internalError, badRequest, getRequestBody, methodNotAllowed, ok
from django.views.decorators.csrf import csrf_exempt
from django.core.exceptions import ValidationError
# Create your views here.
@csrf_exempt
def log(request):
if (request.method != 'POST'):
return methodNotAllowed()
try:
body = getRequestBody(request.body)
error_log = ErrorLog.create(**body)
error_log.save()
return ok()
except (ValidationError, TypeError) as error:
return badRequest(error)
except Exception as error:
return internalError(error)
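

# --- Usage sketch (illustration only) -----------------------------------------
# A hedged example of exercising the view above with Django's test client.
# The URL path and the ErrorLog payload field are assumptions; they must match
# the project's actual urls.py and model definition.
def _example_post_log():
    import json

    from django.test import Client

    payload = {"message": "example error"}  # hypothetical ErrorLog field
    return Client().post("/logs/", data=json.dumps(payload),
                         content_type="application/json")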
| 29.521739
| 84
| 0.721649
|
448e083b8084d7b08cdd71b4a563471d5d07e3fa
| 14,209
|
py
|
Python
|
old_old_autodir/conf.py
|
avcopan/filesystem
|
e52341a2b77b5e79b0e2cee73f48735d00fd6209
|
[
"Apache-2.0"
] | null | null | null |
old_old_autodir/conf.py
|
avcopan/filesystem
|
e52341a2b77b5e79b0e2cee73f48735d00fd6209
|
[
"Apache-2.0"
] | null | null | null |
old_old_autodir/conf.py
|
avcopan/filesystem
|
e52341a2b77b5e79b0e2cee73f48735d00fd6209
|
[
"Apache-2.0"
] | null | null | null |
""" conformer filesystem
"""
import os
import numbers
import functools
import numpy
import autofile
import autoinf
from autodir.id_ import is_identifier as _is_identifier
from autodir.id_ import directory_identifiers_at as _directory_identifiers_at
from autodir import par
from autodir import util
from autodir.run import information as _run_information
OPT_RUN_NAME = par.DirectoryName.Run.OPT
GRAD_RUN_NAME = par.DirectoryName.Run.GRAD
HESS_RUN_NAME = par.DirectoryName.Run.HESS
def identifiers(prefix):
""" list of existing conformer identifiers
"""
dir_path = base_path(prefix)
return _directory_identifiers_at(dir_path)
def update_trajectory_file(prefix):
""" update the trajectory file at this prefix
"""
rids = identifiers(prefix)
enes = [read_energy_file(prefix, rid) for rid in rids]
geos = [read_geometry_file(prefix, rid) for rid in rids]
# sort them by energy
srt_idxs = numpy.argsort(enes)
srt_rids = tuple(map(rids.__getitem__, srt_idxs))
srt_enes = tuple(map(enes.__getitem__, srt_idxs))
srt_geos = tuple(map(geos.__getitem__, srt_idxs))
comments = ["rid: {}, energy: {}".format(rid, str(ene))
for rid, ene in zip(srt_rids, srt_enes)]
write_trajectory_file(prefix, srt_geos, comments)
# path definitions
BASE_DIR_NAME = 'CONFS'
def base_path(prefix):
""" base directory path
"""
dir_names = (BASE_DIR_NAME,)
dir_path = os.path.join(prefix, *dir_names)
return dir_path
def directory_path(prefix, rid):
""" conformer directory path
"""
assert _is_identifier(rid)
prefix = base_path(prefix)
dir_path = os.path.join(prefix, rid)
return dir_path
def run_directory_path(prefix, rid):
""" path to the optimization run directory
"""
dir_path = directory_path(prefix, rid)
run_dir_name = par.DirectoryName.RUN
run_dir_path = os.path.join(dir_path, run_dir_name)
return run_dir_path
# filesystem create/read/write functions
def create_base(prefix):
""" create the filesystem base path
"""
util.create_directory(
prefix=prefix, dir_path=base_path(prefix))
def create(prefix, rid):
""" create this filesystem path
"""
util.create_directory(
prefix=prefix, dir_path=directory_path(prefix, rid))
def create_run_directory(prefix, rid):
""" create optimization run directory path
"""
util.create_directory(
prefix=prefix, dir_path=run_directory_path(prefix, rid))
# base
def base_information(nsamp, tors_info):
""" base information object
"""
tors_info = autoinf.Info(**dict(tors_info))
assert isinstance(nsamp, numbers.Integral)
inf_obj = autoinf.Info(nsamp=nsamp, tors_info=tors_info)
assert autoinf.matches_function_signature(inf_obj, base_information)
return inf_obj
BASE_INFORMATION_FILE = util.DataFile(
file_name=autofile.name.information(par.FilePrefix.CONF),
dir_path_=base_path,
writer_=autofile.write.information,
reader_=autofile.read.information,
checker_=functools.partial(
autoinf.matches_function_signature, function=base_information),
)
def base_information_file_path(prefix):
""" base directory information file path
"""
return BASE_INFORMATION_FILE.path([prefix])
def has_base_information_file(prefix):
""" does this filesystem have a base information file?
"""
return BASE_INFORMATION_FILE.exists([prefix])
def write_base_information_file(prefix, base_inf_obj):
""" write the base information file to its filesystem path
"""
BASE_INFORMATION_FILE.write([prefix], base_inf_obj)
def read_base_information_file(prefix):
""" read the base information file from its filesystem path
"""
return BASE_INFORMATION_FILE.read([prefix])
# base variable zmatrix
BASE_VMATRIX_FILE = util.DataFile(
file_name=autofile.name.vmatrix(par.FilePrefix.CONF),
dir_path_=base_path,
writer_=autofile.write.vmatrix,
reader_=autofile.read.vmatrix,
)
def base_vmatrix_file_path(prefix):
""" base variable information file path
"""
return BASE_VMATRIX_FILE.path([prefix])
def has_base_vmatrix_file(prefix):
""" does this filesystem have a base variable information file?
"""
return BASE_VMATRIX_FILE.exists([prefix])
def write_base_vmatrix_file(prefix, base_inf_obj):
""" write the base variable information file to its filesystem path
"""
BASE_VMATRIX_FILE.write([prefix], base_inf_obj)
def read_base_vmatrix_file(prefix):
""" read the base variable information file from its filesystem path
"""
return BASE_VMATRIX_FILE.read([prefix])
# geometry files
# # information file
INFORMATION_FILE = util.DataFile(
file_name=autofile.name.information(par.FilePrefix.GEOM),
dir_path_=directory_path,
writer_=autofile.write.information,
reader_=autofile.read.information,
checker_=functools.partial(
autoinf.matches_function_signature, function=_run_information),
)
def information_file_path(prefix, rid):
""" gradient information file path
"""
return INFORMATION_FILE.path([prefix, rid])
def has_information_file(prefix, rid):
""" does this filesystem have a gradient information file?
"""
return INFORMATION_FILE.exists([prefix, rid])
def write_information_file(prefix, rid, grad_inp_str):
""" write the gradient information file to its filesystem path
"""
INFORMATION_FILE.write([prefix, rid], grad_inp_str)
def read_information_file(prefix, rid):
""" read the gradient information file from its filesystem path
"""
return INFORMATION_FILE.read([prefix, rid])
# # input file
INPUT_FILE = util.DataFile(
file_name=autofile.name.input_file(par.FilePrefix.GEOM),
dir_path_=directory_path,
)
def input_file_path(prefix, rid):
""" input file path
"""
return INPUT_FILE.path([prefix, rid])
def has_input_file(prefix, rid):
""" does this filesystem have a input file?
"""
return INPUT_FILE.exists([prefix, rid])
def write_input_file(prefix, rid, inp_str):
""" write the input file to its filesystem path
"""
INPUT_FILE.write([prefix, rid], inp_str)
def read_input_file(prefix, rid):
""" read the input file from its filesystem path
"""
return INPUT_FILE.read([prefix, rid])
# # geometry file
GEOMETRY_FILE = util.DataFile(
file_name=autofile.name.geometry(par.FilePrefix.GEOM),
dir_path_=directory_path,
writer_=autofile.write.geometry,
reader_=autofile.read.geometry,
)
def geometry_file_path(prefix, rid):
""" geometry file path
"""
return GEOMETRY_FILE.path([prefix, rid])
def has_geometry_file(prefix, rid):
""" does this filesystem have a geometry file?
"""
return GEOMETRY_FILE.exists([prefix, rid])
def write_geometry_file(prefix, rid, geo):
""" write the geometry file to its filesystem path
"""
GEOMETRY_FILE.write([prefix, rid], geo)
def read_geometry_file(prefix, rid):
""" read the geometry file from its filesystem path
"""
return GEOMETRY_FILE.read([prefix, rid])
# # the energy file
ENERGY_FILE = util.DataFile(
file_name=autofile.name.energy(par.FilePrefix.GEOM),
dir_path_=directory_path,
writer_=autofile.write.energy,
reader_=autofile.read.energy,
)
def energy_file_path(prefix, rid):
""" energy file path
"""
return ENERGY_FILE.path([prefix, rid])
def has_energy_file(prefix, rid):
""" does this filesystem have a energy file?
"""
return ENERGY_FILE.exists([prefix, rid])
def write_energy_file(prefix, rid, ene):
""" write the energy file to its filesystem path
"""
ENERGY_FILE.write([prefix, rid], ene)
def read_energy_file(prefix, rid):
""" read the energy file from its filesystem path
"""
return ENERGY_FILE.read([prefix, rid])
# gradient
# # the gradient information file
GRADIENT_INFORMATION_FILE = util.DataFile(
file_name=autofile.name.information(par.FilePrefix.GRAD),
dir_path_=directory_path,
writer_=autofile.write.information,
reader_=autofile.read.information,
checker_=functools.partial(
autoinf.matches_function_signature, function=_run_information),
)
def gradient_information_file_path(prefix, rid):
""" gradient information file path
"""
return GRADIENT_INFORMATION_FILE.path([prefix, rid])
def has_gradient_information_file(prefix, rid):
""" does this filesystem have a gradient information file?
"""
return GRADIENT_INFORMATION_FILE.exists([prefix, rid])
def write_gradient_information_file(prefix, rid, grad_inp_str):
""" write the gradient information file to its filesystem path
"""
GRADIENT_INFORMATION_FILE.write([prefix, rid], grad_inp_str)
def read_gradient_information_file(prefix, rid):
""" read the gradient information file from its filesystem path
"""
return GRADIENT_INFORMATION_FILE.read([prefix, rid])
# # gradient input file
GRADIENT_INPUT_FILE = util.DataFile(
file_name=autofile.name.input_file(par.FilePrefix.GRAD),
dir_path_=directory_path,
)
def gradient_input_file_path(prefix, rid):
""" gradient input file path
"""
return GRADIENT_INPUT_FILE.path([prefix, rid])
def has_gradient_input_file(prefix, rid):
""" does this filesystem have a gradient input file?
"""
return GRADIENT_INPUT_FILE.exists([prefix, rid])
def write_gradient_input_file(prefix, rid, grad_inp_str):
""" write the gradient input file to its filesystem path
"""
GRADIENT_INPUT_FILE.write([prefix, rid], grad_inp_str)
def read_gradient_input_file(prefix, rid):
""" read the gradient input file from its filesystem path
"""
return GRADIENT_INPUT_FILE.read([prefix, rid])
# # the gradient file
GRADIENT_FILE = util.DataFile(
file_name=autofile.name.gradient(par.FilePrefix.GRAD),
dir_path_=directory_path,
writer_=autofile.write.gradient,
reader_=autofile.read.gradient,
)
def gradient_file_path(prefix, rid):
""" gradient file path
"""
return GRADIENT_FILE.path([prefix, rid])
def has_gradient_file(prefix, rid):
""" does this filesystem have a gradient file?
"""
return GRADIENT_FILE.exists([prefix, rid])
def write_gradient_file(prefix, rid, ene):
""" write the gradient file to its filesystem path
"""
GRADIENT_FILE.write([prefix, rid], ene)
def read_gradient_file(prefix, rid):
""" read the gradient file from its filesystem path
"""
return GRADIENT_FILE.read([prefix, rid])
# hessian
# # the hessian information file
HESSIAN_INFORMATION_FILE = util.DataFile(
file_name=autofile.name.information(par.FilePrefix.HESS),
dir_path_=directory_path,
writer_=autofile.write.information,
reader_=autofile.read.information,
checker_=functools.partial(
autoinf.matches_function_signature, function=_run_information),
)
def hessian_information_file_path(prefix, rid):
""" hessian information file path
"""
return HESSIAN_INFORMATION_FILE.path([prefix, rid])
def has_hessian_information_file(prefix, rid):
""" does this filesystem have a hessian information file?
"""
return HESSIAN_INFORMATION_FILE.exists([prefix, rid])
def write_hessian_information_file(prefix, rid, grad_inp_str):
""" write the hessian information file to its filesystem path
"""
HESSIAN_INFORMATION_FILE.write([prefix, rid], grad_inp_str)
def read_hessian_information_file(prefix, rid):
""" read the hessian information file from its filesystem path
"""
return HESSIAN_INFORMATION_FILE.read([prefix, rid])
# # hessian input file
HESSIAN_INPUT_FILE = util.DataFile(
file_name=autofile.name.input_file(par.FilePrefix.HESS),
dir_path_=directory_path,
)
def hessian_input_file_path(prefix, rid):
""" hessian input file path
"""
return HESSIAN_INPUT_FILE.path([prefix, rid])
def has_hessian_input_file(prefix, rid):
""" does this filesystem have a hessian input file?
"""
return HESSIAN_INPUT_FILE.exists([prefix, rid])
def write_hessian_input_file(prefix, rid, grad_inp_str):
""" write the hessian input file to its filesystem path
"""
HESSIAN_INPUT_FILE.write([prefix, rid], grad_inp_str)
def read_hessian_input_file(prefix, rid):
""" read the hessian input file from its filesystem path
"""
return HESSIAN_INPUT_FILE.read([prefix, rid])
# # the hessian file
HESSIAN_FILE = util.DataFile(
file_name=autofile.name.hessian(par.FilePrefix.HESS),
dir_path_=directory_path,
writer_=autofile.write.hessian,
reader_=autofile.read.hessian,
)
def hessian_file_path(prefix, rid):
""" hessian file path
"""
return HESSIAN_FILE.path([prefix, rid])
def has_hessian_file(prefix, rid):
""" does this filesystem have a hessian file?
"""
return HESSIAN_FILE.exists([prefix, rid])
def write_hessian_file(prefix, rid, ene):
""" write the hessian file to its filesystem path
"""
HESSIAN_FILE.write([prefix, rid], ene)
def read_hessian_file(prefix, rid):
""" read the hessian file from its filesystem path
"""
return HESSIAN_FILE.read([prefix, rid])
# # trajectory file
def _raise_not_implemented(*args, **kwargs):
""" dummy function to raise NotImplementedError and quit """
assert args or not args or kwargs or not kwargs
raise NotImplementedError
TRAJECTORY_FILE = util.DataFile(
file_name=autofile.name.trajectory(par.FilePrefix.CONF),
dir_path_=base_path,
writer_=(lambda args: autofile.write.trajectory(*args)),
reader_=_raise_not_implemented,
)
def trajectory_file_path(prefix):
""" base directory information file path
"""
return TRAJECTORY_FILE.path([prefix])
def has_trajectory_file(prefix):
""" does this filesystem have a base information file?
"""
return TRAJECTORY_FILE.exists([prefix])
def write_trajectory_file(prefix, geos, comments):
""" write the base information file to its filesystem path
"""
TRAJECTORY_FILE.write([prefix], [geos, comments])
def read_trajectory_file(prefix):
""" read the base information file from its filesystem path
"""
return TRAJECTORY_FILE.read([prefix])
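

# usage sketch (illustration only)
# Assumptions: ``prefix`` is an existing directory, ``rid`` is a string that
# passes autodir.id_.is_identifier, and ``geo``/``ene`` are whatever
# autofile.write.geometry and autofile.write.energy expect.
def _example_store_conformer(prefix, rid, geo, ene):
    """ sketch: store one conformer and refresh the trajectory file
    """
    create_base(prefix)                    # <prefix>/CONFS/
    create(prefix, rid)                    # <prefix>/CONFS/<rid>/
    write_geometry_file(prefix, rid, geo)
    write_energy_file(prefix, rid, ene)
    update_trajectory_file(prefix)         # energy-sorted trajectory over all rids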
| 26.07156
| 77
| 0.720952
|
b47153b32f2b74562db572ab7dcc7da66ae727c7
| 416
|
py
|
Python
|
hw6/hasker/questions/urls.py
|
mcherdakov/otus-python-professional
|
405101c6492c0e8343783d42be4a0070e5fa9e54
|
[
"MIT"
] | null | null | null |
hw6/hasker/questions/urls.py
|
mcherdakov/otus-python-professional
|
405101c6492c0e8343783d42be4a0070e5fa9e54
|
[
"MIT"
] | null | null | null |
hw6/hasker/questions/urls.py
|
mcherdakov/otus-python-professional
|
405101c6492c0e8343783d42be4a0070e5fa9e54
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('', views.index_view, name='index'),
path('create/', views.create_question_view, name='create'),
path('<int:pk>/', views.question_detail_view, name='question'),
path('vote/', views.vote_view, name='vote'),
path('vote_best/', views.vote_best_view, name='vote_best'),
path('search/', views.search_view, name='search'),
]
| 29.714286
| 67
| 0.673077
|
968e06cd3f6be617689a9aef35957f8d70be89a4
| 1,703
|
py
|
Python
|
scripts/configure.py
|
markosamuli/macos-machine
|
5ec7c7e892abff30c788bbf0b937489ca7c41d1c
|
[
"MIT"
] | 11
|
2017-10-30T10:43:55.000Z
|
2021-08-23T01:48:42.000Z
|
scripts/configure.py
|
markosamuli/macos-machine
|
5ec7c7e892abff30c788bbf0b937489ca7c41d1c
|
[
"MIT"
] | 5
|
2017-10-15T21:35:10.000Z
|
2020-09-06T00:06:49.000Z
|
scripts/configure.py
|
markosamuli/macos-machine
|
5ec7c7e892abff30c788bbf0b937489ca7c41d1c
|
[
"MIT"
] | 3
|
2017-10-12T21:27:37.000Z
|
2019-12-21T14:59:19.000Z
|
#!/usr/bin/env python3
"""Configure local settings"""
import sys
from pathlib import Path
# Add project root to to sys.path
FILE = Path(__file__).resolve()
SCRIPTS_DIR, PROJECT_ROOT = FILE.parent, FILE.parents[1]
sys.path.append(str(PROJECT_ROOT))
# Remove the current scripts/ directory from sys.path
try:
sys.path.remove(str(SCRIPTS_DIR))
except ValueError: # Already removed
pass
# pylint: disable=import-error
# pylint: disable=wrong-import-position
from machine import settings # noqa: E402
# pylint: disable=unused-import
import machine.config # noqa: E402,F401
# pylint: enable=unused-import
# pylint: enable=wrong-import-position
# pylint: enable=import-error
def display_usage(command):
"""Display usage help"""
print("Usage: %s [option] [value]" % command)
sys.exit(1)
def display_option(name):
"""Display current option value"""
value = settings.get_option(name)
print("%s: %s" % (name, value))
def update_option(name, new_value):
"""Update option value"""
value = settings.get_option(name)
if new_value in settings.TRUTHY:
if not value:
settings.enable_option(name)
print("%s is now enabled" % name)
elif new_value in settings.FALSY:
if value:
settings.disable_option(name)
print("%s is now disabled" % name)
else:
if value != new_value:
value = settings.set_option(name, new_value)
print("%s: %s" % (name, value))
if __name__ == "__main__":
if len(sys.argv) == 3:
update_option(sys.argv[1], sys.argv[2])
elif len(sys.argv) == 2:
display_option(sys.argv[1])
else:
display_usage(sys.argv[0])
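
# Example invocations (sketch only; the option names are hypothetical, since
# valid options are whatever machine.settings.get_option() accepts here):
#
#   python scripts/configure.py some_option          # display current value
#   python scripts/configure.py some_option true     # truthy value: enable it
#   python scripts/configure.py editor_option vim    # other values are stored as given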
| 26.2
| 56
| 0.657076
|
8c36ac5904488abdc029e487c748f5ff9b63955e
| 1,190
|
py
|
Python
|
google/ads/googleads/v8/enums/types/keyword_plan_keyword_annotation.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | 285
|
2018-10-05T16:47:58.000Z
|
2022-03-31T00:58:39.000Z
|
google/ads/googleads/v8/enums/types/keyword_plan_keyword_annotation.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | 425
|
2018-09-10T13:32:41.000Z
|
2022-03-31T14:50:05.000Z
|
google/ads/googleads/v8/enums/types/keyword_plan_keyword_annotation.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | 369
|
2018-11-28T07:01:00.000Z
|
2022-03-28T09:53:22.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.enums",
marshal="google.ads.googleads.v8",
manifest={"KeywordPlanKeywordAnnotationEnum",},
)
class KeywordPlanKeywordAnnotationEnum(proto.Message):
r"""Container for enumeration of keyword plan keyword
annotations.
"""
class KeywordPlanKeywordAnnotation(proto.Enum):
r"""Enumerates keyword plan annotations that can be requested."""
UNSPECIFIED = 0
UNKNOWN = 1
KEYWORD_CONCEPT = 2
__all__ = tuple(sorted(__protobuf__.manifest))
| 30.512821
| 74
| 0.72437
|
4d44d251261b6af989872ec4a61627a805a5eb73
| 15,168
|
py
|
Python
|
djangobb_forum/migrations/0001_initial.py
|
tuffnatty/DjangoBB
|
423607675e9501f6ff5579d86c30540e377c4742
|
[
"BSD-3-Clause"
] | 121
|
2016-02-16T09:05:15.000Z
|
2022-03-30T21:17:15.000Z
|
djangobb_forum/migrations/0001_initial.py
|
tuffnatty/DjangoBB
|
423607675e9501f6ff5579d86c30540e377c4742
|
[
"BSD-3-Clause"
] | 12
|
2015-05-09T09:27:05.000Z
|
2016-02-05T14:44:33.000Z
|
djangobb_forum/migrations/0001_initial.py
|
tuffnatty/DjangoBB
|
423607675e9501f6ff5579d86c30540e377c4742
|
[
"BSD-3-Clause"
] | 76
|
2016-02-11T16:35:52.000Z
|
2022-01-25T13:26:06.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from djangobb_forum.models import MARKUP_CHOICES, THEME_CHOICES, TZ_CHOICES
import djangobb_forum.fields
import django.utils.timezone
from django.conf import settings
from djangobb_forum import settings as forum_settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Attachment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('size', models.IntegerField(verbose_name='Size')),
('content_type', models.CharField(max_length=255, verbose_name='Content type')),
('path', models.CharField(max_length=255, verbose_name='Path')),
('name', models.TextField(verbose_name='Name')),
('hash', models.CharField(default='', max_length=40, verbose_name='Hash', db_index=True, blank=True)),
],
),
migrations.CreateModel(
name='Ban',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ban_start', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Ban start')),
('ban_end', models.DateTimeField(null=True, verbose_name='Ban end', blank=True)),
('reason', models.TextField(verbose_name='Reason')),
('user', models.OneToOneField(related_name='ban_users', verbose_name='Banned user', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Ban',
'verbose_name_plural': 'Bans',
},
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=80, verbose_name='Name')),
('position', models.IntegerField(default=0, verbose_name='Position', blank=True)),
('groups', models.ManyToManyField(help_text='Only users from these groups can see this category', to='auth.Group', verbose_name='Groups', blank=True)),
],
options={
'ordering': ['position'],
'verbose_name': 'Category',
'verbose_name_plural': 'Categories',
},
),
migrations.CreateModel(
name='Forum',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=80, verbose_name='Name')),
('position', models.IntegerField(default=0, verbose_name='Position', blank=True)),
('description', models.TextField(default='', verbose_name='Description', blank=True)),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('post_count', models.IntegerField(default=0, verbose_name='Post count', blank=True)),
('topic_count', models.IntegerField(default=0, verbose_name='Topic count', blank=True)),
('forum_logo', djangobb_forum.fields.ExtendedImageField(default='', upload_to=b'djangobb_forum/forum_logo', verbose_name='Forum Logo', blank=True)),
('category', models.ForeignKey(related_name='forums', verbose_name='Category', to='djangobb_forum.Category')),
],
options={
'ordering': ['position'],
'verbose_name': 'Forum',
'verbose_name_plural': 'Forums',
},
),
migrations.CreateModel(
name='Poll',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('question', models.CharField(max_length=200)),
('choice_count', models.PositiveSmallIntegerField(default=1, help_text='How many choices are allowed simultaneously.')),
('active', models.BooleanField(default=True, help_text='Can users vote to this poll or just see the result?')),
('deactivate_date', models.DateTimeField(help_text='Point of time after this poll would be automatic deactivated', null=True, blank=True)),
],
),
migrations.CreateModel(
name='PollChoice',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('choice', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0, editable=False)),
('poll', models.ForeignKey(related_name='choices', to='djangobb_forum.Poll')),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(null=True, verbose_name='Updated', blank=True)),
('markup', models.CharField(default=forum_settings.DEFAULT_MARKUP, max_length=15, verbose_name='Markup', choices=MARKUP_CHOICES)),
('body', models.TextField(verbose_name='Message')),
('body_html', models.TextField(verbose_name='HTML version')),
('user_ip', models.GenericIPAddressField(null=True, verbose_name='User IP', blank=True)),
],
options={
'ordering': ['created'],
'get_latest_by': 'created',
'verbose_name': 'Post',
'verbose_name_plural': 'Posts',
},
),
migrations.CreateModel(
name='PostTracking',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('topics', djangobb_forum.fields.JSONField(null=True, blank=True)),
('last_read', models.DateTimeField(null=True, blank=True)),
('user', djangobb_forum.fields.AutoOneToOneField(to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Post tracking',
'verbose_name_plural': 'Post tracking',
},
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.CharField(max_length=30, verbose_name='Status', blank=True)),
('site', models.URLField(verbose_name='Site', blank=True)),
('jabber', models.CharField(max_length=80, verbose_name='Jabber', blank=True)),
('icq', models.CharField(max_length=12, verbose_name='ICQ', blank=True)),
('msn', models.CharField(max_length=80, verbose_name='MSN', blank=True)),
('aim', models.CharField(max_length=80, verbose_name='AIM', blank=True)),
('yahoo', models.CharField(max_length=80, verbose_name='Yahoo', blank=True)),
('location', models.CharField(max_length=30, verbose_name='Location', blank=True)),
('signature', models.TextField(default='', max_length=1024, verbose_name='Signature', blank=True)),
('signature_html', models.TextField(default='', max_length=1024, verbose_name='Signature', blank=True)),
('time_zone', models.CharField(default=settings.TIME_ZONE, max_length=50, verbose_name='Time zone', choices=TZ_CHOICES)),
('language', models.CharField(default='', max_length=5, verbose_name='Language', choices=settings.LANGUAGES)),
('avatar', djangobb_forum.fields.ExtendedImageField(default='', upload_to=b'djangobb_forum/avatars', verbose_name='Avatar', blank=True)),
('theme', models.CharField(default='default', max_length=80, verbose_name='Theme', choices=THEME_CHOICES)),
                ('show_avatar', models.BooleanField(default=True, verbose_name='Show avatar')),
                ('show_signatures', models.BooleanField(default=True, verbose_name='Show signatures')),
                ('show_smilies', models.BooleanField(default=True, verbose_name='Show smilies')),
                ('privacy_permission', models.IntegerField(default=1, verbose_name='Privacy permission', choices=[(0, 'Display your e-mail address.'), (1, 'Hide your e-mail address but allow form e-mail.'), (2, 'Hide your e-mail address and disallow form e-mail.')])),
                ('auto_subscribe', models.BooleanField(default=False, help_text='Automatically subscribe to all topics you have created or replied to.', verbose_name='Auto subscribe')),
                ('markup', models.CharField(default=forum_settings.DEFAULT_MARKUP, max_length=15, verbose_name='Default markup', choices=MARKUP_CHOICES)),
                ('post_count', models.IntegerField(default=0, verbose_name='Post count', blank=True)),
                ('user', djangobb_forum.fields.AutoOneToOneField(related_name='forum_profile', verbose_name='User', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Profile',
                'verbose_name_plural': 'Profiles',
            },
        ),
        migrations.CreateModel(
            name='Report',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('zapped', models.BooleanField(default=False, verbose_name='Zapped')),
                ('created', models.DateTimeField(verbose_name='Created', blank=True)),
                ('reason', models.TextField(default='', max_length=1000, verbose_name='Reason', blank=True)),
                ('post', models.ForeignKey(verbose_name='Post', to='djangobb_forum.Post')),
                ('reported_by', models.ForeignKey(related_name='reported_by', verbose_name='Reported by', to=settings.AUTH_USER_MODEL)),
                ('zapped_by', models.ForeignKey(related_name='zapped_by', verbose_name='Zapped by', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'verbose_name': 'Report',
                'verbose_name_plural': 'Reports',
            },
        ),
        migrations.CreateModel(
            name='Reputation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('time', models.DateTimeField(auto_now_add=True, verbose_name='Time')),
                ('sign', models.IntegerField(default=0, verbose_name='Sign', choices=[(1, 'PLUS'), (-1, 'MINUS')])),
                ('reason', models.TextField(max_length=1000, verbose_name='Reason')),
                ('from_user', models.ForeignKey(related_name='reputations_from', verbose_name='From', to=settings.AUTH_USER_MODEL)),
                ('post', models.ForeignKey(related_name='post', verbose_name='Post', to='djangobb_forum.Post')),
                ('to_user', models.ForeignKey(related_name='reputations_to', verbose_name='To', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Reputation',
                'verbose_name_plural': 'Reputations',
            },
        ),
        migrations.CreateModel(
            name='Topic',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, verbose_name='Subject')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
                ('updated', models.DateTimeField(null=True, verbose_name='Updated')),
                ('views', models.IntegerField(default=0, verbose_name='Views count', blank=True)),
                ('sticky', models.BooleanField(default=False, verbose_name='Sticky')),
                ('closed', models.BooleanField(default=False, verbose_name='Closed')),
                ('post_count', models.IntegerField(default=0, verbose_name='Post count', blank=True)),
                ('forum', models.ForeignKey(related_name='topics', verbose_name='Forum', to='djangobb_forum.Forum')),
                ('last_post', models.ForeignKey(related_name='last_topic_post', blank=True, to='djangobb_forum.Post', null=True)),
                ('subscribers', models.ManyToManyField(related_name='subscriptions', verbose_name='Subscribers', to=settings.AUTH_USER_MODEL, blank=True)),
                ('user', models.ForeignKey(verbose_name='User', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-updated'],
                'get_latest_by': 'updated',
                'verbose_name': 'Topic',
                'verbose_name_plural': 'Topics',
            },
        ),
        migrations.AddField(
            model_name='post',
            name='topic',
            field=models.ForeignKey(related_name='posts', verbose_name='Topic', to='djangobb_forum.Topic'),
        ),
        migrations.AddField(
            model_name='post',
            name='updated_by',
            field=models.ForeignKey(verbose_name='Updated by', blank=True, to=settings.AUTH_USER_MODEL, null=True),
        ),
        migrations.AddField(
            model_name='post',
            name='user',
            field=models.ForeignKey(related_name='posts', verbose_name='User', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='poll',
            name='topic',
            field=models.ForeignKey(to='djangobb_forum.Topic'),
        ),
        migrations.AddField(
            model_name='poll',
            name='users',
            field=models.ManyToManyField(help_text='Users who have voted in this poll.', to=settings.AUTH_USER_MODEL, blank=True),
        ),
        migrations.AddField(
            model_name='forum',
            name='last_post',
            field=models.ForeignKey(related_name='last_forum_post', blank=True, to='djangobb_forum.Post', null=True),
        ),
        migrations.AddField(
            model_name='forum',
            name='moderators',
            field=models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='Moderators', blank=True),
        ),
        migrations.AddField(
            model_name='attachment',
            name='post',
            field=models.ForeignKey(related_name='attachments', verbose_name='Post', to='djangobb_forum.Post'),
        ),
        migrations.AlterUniqueTogether(
            name='reputation',
            unique_together=set([('from_user', 'post')]),
        ),
    ]
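The operations above only define the forum schema. As a quick orientation, here is a minimal usage sketch (not part of the migration) assuming a standard Django project with the djangobb_forum app installed and migrated; the helper name and the literal topic subject are hypothetical.

# Apply the schema with standard Django tooling:
#     python manage.py migrate djangobb_forum

from django.contrib.auth import get_user_model
from djangobb_forum.models import Forum, Topic


def create_example_topic():
    # Hypothetical helper: open a topic in the first forum on behalf of the first user.
    user = get_user_model().objects.first()
    forum = Forum.objects.first()
    if user is None or forum is None:
        raise RuntimeError('Need at least one user and one forum in the database')
    return Topic.objects.create(forum=forum, user=user, name='Example subject')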
| 58.338462 | 268 | 0.599156 |
5c2d7806059a80c8c53efe1b804c30527607d1a1 | 385 | py | Python | dockerenv/utils.py | subdir/devenv | fcc4796068032e6fe6b3d744763d3ea58cbf3a02 | ["MIT"] | null | null | null |
import os.path
import shutil
import tempfile
from contextlib import contextmanager


def resource(fname):
    # Resolve fname relative to the project root, one directory above this module.
    return os.path.abspath(os.path.join(os.path.dirname(__file__), '..', fname))


@contextmanager
def make_tmpdir(base_dir=None):
    # Create a world-readable (rwxr-xr-x) temporary directory and always remove it on exit.
    dirpath = tempfile.mkdtemp(dir=base_dir)
    os.chmod(dirpath, 0o755)
    try:
        yield dirpath
    finally:
        shutil.rmtree(dirpath)
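A brief usage sketch for the two helpers above, assuming the module is importable as dockerenv.utils and that 'Dockerfile' is a plausible project-root file name (both are assumptions for illustration):

from dockerenv.utils import make_tmpdir, resource

with make_tmpdir() as workdir:
    # workdir exists only inside this block.
    print('scratch space:', workdir)
    print('project file:', resource('Dockerfile'))
# On exit the directory and its contents have been removed, even if an exception was raised.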
| 20.263158 | 80 | 0.714286 |
f4a4897e840ae100231878749d692b64a6af4420 | 1,011 | py | Python | util/Generator.py | smartsnake/PasswordGenerator | 985f05b81271d7a18c0f99fc77870754c48102d5 | ["MIT"] | null | null | null |
import random


class Generator:
    def __init__(self):
        # Character pool used for password generation (no space character).
        self.all_charactors = [
            'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
            'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
            'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
            'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
            '1', '2', '3', '4', '5', '6', '7', '8', '9', '0',
            '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', ';', ':', "'",
            '"', ',', '<', '>', '.', '/', '?', '[', ']', '{', '}', '\\', '|',
            '-', '_', '+', '=',
        ]

    def random_char(self, list_of_chars):
        # Pick one character at random from the given pool.
        return random.choice(list_of_chars)

    def generate_password(self, char_length):
        # Build a password of exactly char_length random characters.
        if char_length <= 0:
            # A non-positive length is treated as a fatal error.
            raise SystemExit(1)
        random_password = ''
        while char_length > 0:
            random_password += self.random_char(self.all_charactors)
            char_length -= 1
        return random_password
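A minimal usage sketch, assuming the class is importable as util.Generator per the path shown above; the length 16 is an arbitrary example:

from util.Generator import Generator

gen = Generator()
password = gen.generate_password(16)
print(password)  # 16 characters drawn from the pool above; output differs on every run

Note that random.choice is not a cryptographically secure source of randomness; for real passwords the standard-library secrets module would be the usual choice.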
| 40.44 | 162 | 0.368942 |