| max_stars_repo_path (string, length 3–269) | max_stars_repo_name (string, length 4–119) | max_stars_count (int64, 0–191k) | id (string, length 1–7) | content (string, length 6–1.05M) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
tests/proxy/test_proxy.py
|
jodal/pykka
| 796
|
12775151
|
<filename>tests/proxy/test_proxy.py
import pytest
import pykka
from pykka import ActorDeadError, ActorProxy
class NestedObject:
pass
@pytest.fixture(scope="module")
def actor_class(runtime):
class ActorForProxying(runtime.actor_class):
a_nested_object = pykka.traversable(NestedObject())
a_class_attr = "class_attr"
def __init__(self):
super(runtime.actor_class, self).__init__()
self.an_instance_attr = "an_instance_attr"
def a_method(self):
pass
return ActorForProxying
@pytest.fixture
def proxy(actor_class):
proxy = ActorProxy(actor_class.start())
yield proxy
proxy.stop()
def test_eq_to_self(proxy):
assert proxy == proxy
def test_is_hashable(proxy):
assert hash(proxy) == hash(proxy)
def test_eq_to_another_proxy_for_same_actor_and_attr_path(proxy):
proxy2 = proxy.actor_ref.proxy()
assert proxy == proxy2
def test_not_eq_to_proxy_with_different_attr_path(proxy):
assert proxy != proxy.a_nested_object
def test_repr_is_wrapped_in_lt_and_gt(proxy):
result = repr(proxy)
assert result.startswith("<")
assert result.endswith(">")
def test_repr_reveals_that_this_is_a_proxy(proxy):
assert "ActorProxy" in repr(proxy)
def test_repr_contains_actor_class_name(proxy):
assert "ActorForProxying" in repr(proxy)
def test_repr_contains_actor_urn(proxy):
assert proxy.actor_ref.actor_urn in repr(proxy)
def test_repr_contains_attr_path(proxy):
assert "a_nested_object" in repr(proxy.a_nested_object)
def test_str_contains_actor_class_name(proxy):
assert "ActorForProxying" in str(proxy)
def test_str_contains_actor_urn(proxy):
assert proxy.actor_ref.actor_urn in str(proxy)
def test_dir_on_proxy_lists_attributes_of_the_actor(proxy):
result = dir(proxy)
assert "a_class_attr" in result
assert "an_instance_attr" in result
assert "a_method" in result
def test_dir_on_proxy_lists_private_attributes_of_the_proxy(proxy):
result = dir(proxy)
assert "__class__" in result
assert "__dict__" in result
assert "__getattr__" in result
assert "__setattr__" in result
def test_refs_proxy_method_returns_a_proxy(actor_class):
proxy_from_ref_proxy = actor_class.start().proxy()
assert isinstance(proxy_from_ref_proxy, ActorProxy)
proxy_from_ref_proxy.stop().get()
def test_proxy_constructor_raises_exception_if_actor_is_dead(actor_class):
actor_ref = actor_class.start()
actor_ref.stop()
with pytest.raises(ActorDeadError) as exc_info:
ActorProxy(actor_ref)
assert str(exc_info.value) == f"{actor_ref} not found"
def test_actor_ref_may_be_retrieved_from_proxy_if_actor_is_dead(proxy):
proxy.actor_ref.stop()
assert not proxy.actor_ref.is_alive()
def test_actor_proxy_does_not_expose_proxy_to_self(runtime, log_handler):
class Actor(runtime.actor_class):
def __init__(self):
super().__init__()
self.self_proxy = self.actor_ref.proxy()
self.foo = "bar"
actor_ref = Actor.start()
try:
proxy = actor_ref.proxy()
assert proxy.foo.get() == "bar"
with pytest.raises(AttributeError, match="has no attribute 'self_proxy'"):
proxy.self_proxy.foo.get()
finally:
actor_ref.stop()
log_handler.wait_for_message("warning")
with log_handler.lock:
assert len(log_handler.messages["warning"]) == 2
log_record = log_handler.messages["warning"][0]
assert (
"attribute 'self_proxy' is a proxy to itself. "
"Consider making it private by renaming it to '_self_proxy'."
) in log_record.getMessage()
| 2.390625
| 2
|
tnmlearn/examples/base_learning_model.py
|
t2wain/machine-learning
| 0
|
12775152
|
# -*- coding: utf-8 -*-
from sklearn.metrics import classification_report
from keras.callbacks import ModelCheckpoint
from keras.utils import plot_model
import matplotlib.pyplot as plt
import numpy as np
import os
from tnmlearn.callbacks import TrainingMonitor
# %%
class BaseLearningModel:
def __init__(self):
self.callbacks = []
def buildTrainMonCB_(self, outputpath):
# construct the set of callbacks
figPath = os.path.sep.join([outputpath, "{}.png".format(
os.getpid())])
jsonPath = os.path.sep.join([outputpath, "{}.json".format(
os.getpid())])
self.callbacks.append(TrainingMonitor(figPath, jsonPath=jsonPath))
def buildModelChkPointCB_(self, weightpath):
# construct the callback to save only the *best* model to disk
# based on the validation loss
fname = os.path.sep.join([weightpath,
"weights-{epoch:03d}-{val_loss:.4f}.hdf5"])
checkpoint = ModelCheckpoint(fname, monitor="val_loss", mode="min",
save_best_only=True, verbose=1)
self.callbacks.append(checkpoint)
def fit_(self, epochs=100, batch_size=32):
# train the model using SGD
print("[INFO] training network...")
H = self.model.fit(self.trainX, self.trainY,
callbacks=self.callbacks,
validation_data=(self.testX, self.testY),
epochs=epochs, batch_size=batch_size)
self.H = H
return H
def plotModel_(self, outputpath):
plot_model(self.model, to_file=outputpath, show_shapes=True)
def plot(self):
# plot the training loss and accuracy
H = self.H
epochs = len(H.history['loss'])
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, epochs), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, epochs), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, epochs), H.history["acc"], label="train_acc")
plt.plot(np.arange(0, epochs), H.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.show()
def evaluate_(self, batch_size):
# evaluate the network
print("[INFO] evaluating network...")
predictions = self.model.predict(self.testX, batch_size=batch_size)
print(classification_report(self.testY.argmax(axis=1),
predictions.argmax(axis=1), target_names=self.classNames))
| 2.703125
| 3
|
installed/keyring/keyring/util/escape.py
|
jscherer26/Icarra
| 1
|
12775153
|
<reponame>jscherer26/Icarra<filename>installed/keyring/keyring/util/escape.py
"""
escape/unescape routines available for backends which need
alphanumeric usernames, services, or other values
"""
import string, re
LEGAL_CHARS = string.ascii_letters + string.digits
ESCAPE_CHAR = "_"
def escape(value):
"""Escapes given value so the result consists of alphanumeric chars and underscore
only, and alphanumeric chars are preserved"""
def escape_char(c, legal = LEGAL_CHARS):
# Single char escape. Either normal char, or _<hexcode>
if c in legal:
return c
else:
return "%s%X" % (ESCAPE_CHAR, ord(c))
return "".join( escape_char(c) for c in value )
def unescape(value):
"""Reverts escape"""
re_esc = re.compile("_([0-9A-F]{2})")
return re_esc.sub(lambda i: chr(int(i.group(1),16)), value)
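# A hedged round-trip sketch for the pair above; the sample string is
# illustrative. '@' is 0x40, so it escapes to "_40".
if __name__ == "__main__":
    assert escape("user@host") == "user_40host"
    assert unescape(escape("user@host")) == "user@host"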
| 2.96875
| 3
|
ThreeBotPackages/threebot/calendar/radicale/tests/helpers.py
|
Pishoy/jumpscaleX_threebot
| 1
|
12775154
|
<reponame>Pishoy/jumpscaleX_threebot<filename>ThreeBotPackages/threebot/calendar/radicale/tests/helpers.py
# This file is part of Radicale Server - Calendar Server
# Copyright © 2008 <NAME>
# Copyright © 2008 <NAME>
# Copyright © 2008-2017 <NAME>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Radicale Helpers module.
This module offers helpers to use in tests.
"""
import os
EXAMPLES_FOLDER = os.path.join(os.path.dirname(__file__), "static")
def get_file_content(file_name):
try:
with open(os.path.join(EXAMPLES_FOLDER, file_name), encoding="utf-8") as fd:
return fd.read()
except IOError:
print("Couldn't open the file %s" % file_name)
| 1.953125
| 2
|
ABC073/ABC073b.py
|
VolgaKurvar/AtCoder
| 0
|
12775155
|
# ABC073b
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
n = int(input())
l = [list(map(int, input().split())) for _ in range(n)]
ans = 0
for i in l:
ans += i[1]-i[0]+1
print(ans)
| 2.296875
| 2
|
acceptance_tests/features/steps/view_collection_exercise_details_ready_for_review_state.py
|
ONSdigital/rasrm-acceptance-tests
| 2
|
12775156
|
<reponame>ONSdigital/rasrm-acceptance-tests<filename>acceptance_tests/features/steps/view_collection_exercise_details_ready_for_review_state.py
from behave import given, when, then
from acceptance_tests.features.pages import collection_exercise, collection_exercise_details
from common.browser_utilities import is_text_present_with_retry, wait_for
@given('the collection exercise is Scheduled')
def collection_exercise_exists_and_scheduled_displayed(context):
collection_exercise_details.go_to(context.short_name, context.period)
ce_state = collection_exercise_details.get_status()
assert collection_exercise.is_scheduled(ce_state), ce_state
@given('the collection exercise has a loaded sample and collection instruments')
def collection_exercise__exists_and_loaded_sample_cis(context):
collection_exercise_details.go_to(context.short_name, context.period)
ce_state = collection_exercise_details.get_status()
assert collection_exercise.is_scheduled(ce_state), ce_state
collection_exercise_details.load_sample('resources/sample_files/business-survey-sample-date.csv')
success_text = collection_exercise_details.get_sample_success_text()
assert success_text == 'Sample loaded successfully'
collection_exercise_details.load_collection_instrument(
test_file='resources/collection_instrument_files/064_201803_0001.xlsx')
success_text = collection_exercise_details.get_success_panel_text()
assert success_text == 'Collection instrument loaded'
@when('the user navigates to the survey details page')
def navigate_to_collection_exercise_details(context):
collection_exercise.go_to(context.short_name)
@then('the status of the collection exercise is Ready for Review')
def collection_exercise_is_ready_for_review(context):
collection_exercises = wait_for(collection_exercise.get_collection_exercises, 16, 2)
# Status updated async so wait until updated
assert is_text_present_with_retry('Ready for review', 10)
assert context.period in (ce['exercise_ref'] for ce in collection_exercises)
@given('the user has loaded the sample')
@when('the user loads the sample')
def load_sample(_):
collection_exercise_details.load_sample('resources/sample_files/business-survey-sample-date.csv')
success_text = collection_exercise_details.get_sample_success_text()
assert success_text == 'Sample loaded successfully'
@given('the user has loaded the collection instruments')
@when('the user loads the collection instruments')
def load_collection_instruments(_):
collection_exercise_details.load_collection_instrument(
test_file='resources/collection_instrument_files/064_201803_0001.xlsx')
success_text = collection_exercise_details.get_success_panel_text()
assert success_text == 'Collection instrument loaded'
@then('the collection exercise is Ready for Review')
def ce_details_state_is_ready_for_review(_):
assert is_text_present_with_retry('Ready for review', 10)
| 2.15625
| 2
|
src/generator.py
|
DailoxFH/ytasagroup
| 0
|
12775157
|
<reponame>DailoxFH/ytasagroup<gh_stars>0
import string
import random
from urllib.parse import unquote
from src.cookies import escape
def generate_random(iterations, lower=False):
if lower:
return ''.join(
random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(iterations))
else:
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(iterations))
def get_id_from_link(link):
link = escape(link)
to_check = ["/watch?v=", "/embed/", "/v/", "youtu.be"]
if any(substring in link for substring in to_check):
splitted_url = link.split('/')
id = splitted_url[-1].replace("watch?v=", "")
if len(id) != 11:
return False
return id
elif len(link) == 11 and '/' not in link:
return link
else:
return False
def convert(event):
event_to_update = "NOTHING"
try:
event = int(event)
except:
return event_to_update
if event == 0:
event_to_update = "ENDED"
elif event == 1:
event_to_update = "PLAYING"
elif event == 2:
event_to_update = "PAUSED"
elif event == 3:
event_to_update = "BUFFERING"
return event_to_update
def check_if_room_exists(rooms, roomid):
try:
# noinspection PyStatementEffect
rooms[escape(roomid)]
return True
except KeyError:
return False
def update_dict(base_dict, to_add):
try:
for k, v in to_add.items():
base_dict.__setitem__(k, v)
except AttributeError:
return base_dict
return base_dict
def unquote_cookies(cookie_dict):
new_cookies = {}
for k, v in cookie_dict.items():
new_cookies[unquote(k)] = v
return new_cookies
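# Hedged usage sketch for the helpers above; sample values are illustrative
# and the event codes follow the mapping in convert().
if __name__ == "__main__":
    print(generate_random(8))          # e.g. 'K3F9QZ1B' (uppercase + digits)
    print(convert(1))                  # 'PLAYING'
    print(convert("not-a-number"))     # 'NOTHING' (non-numeric falls through)
    print(update_dict({"a": 1}, {"b": 2}))  # {'a': 1, 'b': 2}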
| 2.421875
| 2
|
source/gui/test/rttovgui_unittest_class.py
|
bucricket/projectMAScorrection
| 0
|
12775158
|
<gh_stars>0
'''
Created on May 14, 2014
@author: pascale
'''
import unittest
import rmodel
class RttovGuiUnitTest(unittest.TestCase):
def test_dummy(self):
pass
def check_option(self, p):
if p is None:
return
if p.isPC():
self.assertTrue(p.myOption["ADDPC"])
self.assertTrue(p.myOption.statusOption["ADDPC"])
else:
self.assertFalse(p.myOption["ADDPC"])
self.assertTrue(p.myOption.statusOption["DO_LAMBERTIAN"])
if p.myCoeffs.hasPC():
self.assertTrue(p.myOption.statusOption["ADDPC"])
else:
self.assertFalse(p.myOption.statusOption["ADDPC"])
self.assertFalse(p.myOption["ADDPC"])
if p.myCoeffs.isPCClouds() and p.myProfile.hasClouds():
self.assertTrue(p.myOption.statusOption["ADDPC"])
if p.myCoeffs.hasSolar():
if not p.isPC():
self.assertTrue(p.myOption.statusOption["ADDSOLAR"])
else:
self.assertFalse(p.myOption.statusOption["ADDSOLAR"])
self.assertFalse(p.myOption["ADDSOLAR"])
if not p.myCoeffs.isMW():
self.assertFalse(p.myOption.statusOption["FASTEM_VERSION"])
self.assertFalse(p.myOption.statusOption["CLW_DATA"])
self.assertFalse(p.myOption["CLW_DATA"])
if p.myCoeffs.isMW():
self.assertTrue(p.myOption.statusOption["FASTEM_VERSION"])
self.assertTrue(p.myOption.statusOption["SUPPLY_FOAM_FRACTION"])
if p.myProfile["CLW"] is not None:
self.assertTrue(p.myOption.statusOption["CLW_DATA"])
self.assertTrue(p.myOption.statusOption["DO_LAMBERTIAN"])
self.assertFalse(p.myOption.statusOption["ADDSOLAR"])
self.assertFalse(p.myOption.statusOption["DO_NLTE_CORRECTION"])
self.assertFalse(p.myOption.statusOption["ADDAEROSL"])
self.assertFalse(p.myOption.statusOption["ADDCLOUDS"])
self.assertFalse(p.myOption.statusOption["CLDSTR_THRESHOLD"])
self.assertFalse(p.myOption.statusOption["OZONE_DATA"])
self.assertFalse(p.myOption.statusOption["CO2_DATA"])
self.assertFalse(p.myOption.statusOption["N2O_DATA"])
self.assertFalse(p.myOption.statusOption["CO_DATA"])
self.assertFalse(p.myOption.statusOption["CH4_DATA"])
self.assertFalse(p.myOption.statusOption["ADDPC"])
self.assertFalse(p.myOption.statusOption["IPCBND"])
self.assertFalse(p.myOption.statusOption["IPCREG"])
self.assertFalse(p.myOption.statusOption["ADDRADREC"])
self.assertFalse(p.myOption["ADDSOLAR"])
self.assertFalse(p.myOption["DO_NLTE_CORRECTION"])
self.assertFalse(p.myOption["ADDAEROSL"])
self.assertFalse(p.myOption["ADDCLOUDS"])
self.assertFalse(p.myOption["OZONE_DATA"])
self.assertFalse(p.myOption["CO2_DATA"])
self.assertFalse(p.myOption["N2O_DATA"])
self.assertFalse(p.myOption["CO_DATA"])
self.assertFalse(p.myOption["CH4_DATA"])
self.assertFalse(p.myOption["ADDPC"])
self.assertFalse(p.myOption["ADDRADREC"])
else: # not MW
self.assertFalse(p.myOption.statusOption["FASTEM_VERSION"])
self.assertFalse(p.myOption.statusOption["CLW_DATA"])
self.assertFalse(p.myOption["CLW_DATA"])
            if p.isPC():  # not MW, is PC
for gas in ("CO", "CO2", "N2O", "CH4"):
if p.myCoeffs.hasGas(gas) and p.myProfile.hasGas(gas):
self.assertFalse(p.myOption[gas + "_DATA"])
self.assertFalse(
p.myOption.statusOption[gas + "_DATA"])
self.assertFalse(p.myOption["ADDAEROSL"])
self.assertFalse(p.myOption.statusOption["ADDAEROSL"])
self.assertFalse(p.myOption["DO_NLTE_CORRECTION"])
self.assertFalse(p.myOption.statusOption["DO_NLTE_CORRECTION"])
else:
for gas in ("CO", "CO2", "N2O", "CH4"):
if p.myCoeffs.hasGas(gas) and p.myProfile.hasGas(gas):
self.assertTrue(p.myOption.statusOption[gas + "_DATA"])
else:
self.assertFalse(p.myOption[gas + "_DATA"])
self.assertFalse(
p.myOption.statusOption[gas + "_DATA"])
if p.myCoeffs.hasAerosols() and p.myProfile.hasAerosols():
self.assertTrue(p.myOption.statusOption["ADDAEROSL"])
else:
self.assertFalse(p.myOption["ADDAEROSL"])
self.assertFalse(p.myOption.statusOption["ADDAEROSL"])
if p.myCoeffs.hasGas("O3") and p.myProfile.hasGas("O3"):
self.assertTrue(p.myOption.statusOption["OZONE_DATA"])
else:
self.assertFalse(p.myOption["OZONE_DATA"])
self.assertFalse(p.myOption.statusOption["OZONE_DATA"])
if not p.myCoeffs.hasNLTE():
self.assertFalse(p.myOption.statusOption["DO_NLTE_CORRECTION"])
self.assertFalse(p.myOption["DO_NLTE_CORRECTION"])
if p.myOption["ADDCLOUDS"]:
self.assertTrue(p.myOption.statusOption["CLDSTR_THRESHOLD"])
if p.myOption["ADDPC"]:
self.assertFalse(p.myOption.statusOption["CLDSTR_SIMPLE"])
self.assertFalse(p.myOption["CLDSTR_SIMPLE"])
else:
self.assertTrue(p.myOption.statusOption["CLDSTR_SIMPLE"])
else:
self.assertFalse(p.myOption.statusOption["CLDSTR_SIMPLE"])
self.assertFalse(p.myOption["CLDSTR_SIMPLE"])
self.assertFalse(p.myOption.statusOption["CLDSTR_THRESHOLD"])
if not p.myOption.statusOption["ADDCLOUDS"]:
self.assertFalse(p.myOption.statusOption["CLDSTR_SIMPLE"])
self.assertFalse(p.myOption["CLDSTR_SIMPLE"])
self.assertFalse(p.myOption.statusOption["CLDSTR_THRESHOLD"])
class test_test(RttovGuiUnitTest):
def test_check_option(self):
print "test rttovgui_unittest_class"
profileName = "../rttov_tests/cldaer101lev_allgas.H5"
p = rmodel.project.Project()
profileName = p.config.ENV[
"RTTOV_GUI_PROFILE_DIR"] + "/us76_43lev_allgas.H5"
print "openProfile", profileName
p.openProfile(profileName)
coefFile = p.config.ENV['RTTOV_GUI_COEFF_DIR'] + \
"/rttov9pred101L/rtcoef_metop_2_iasi.H5"
pcFile = p.config.ENV['RTTOV_GUI_COEFF_DIR'] + \
"/pc/pccoef_metop_2_iasi.H5"
p.myCoeffs.fileName["standard"] = coefFile
p.myCoeffs.fileName["PC"] = pcFile
p.loadCoefficients()
p.ctrlCoherence()
self.check_option(p)
if __name__ == "__main__":
unittest.main()
| 2.171875
| 2
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/email_marketing/migrations/0010_auto_20180425_0800.py
|
osoco/better-ways-of-thinking-about-software
| 3
|
12775159
|
# Generated by Django 1.11.12 on 2018-04-25 12:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('email_marketing', '0009_remove_emailmarketingconfiguration_sailthru_activation_template'),
]
operations = [
migrations.AddField(
model_name='emailmarketingconfiguration',
name='sailthru_verification_failed_template',
field=models.CharField(blank=True, help_text='Sailthru send template to use on failed ID verification.', max_length=20),
),
migrations.AddField(
model_name='emailmarketingconfiguration',
name='sailthru_verification_passed_template',
field=models.CharField(blank=True, help_text='Sailthru send template to use on passed ID verification.', max_length=20),
),
]
| 1.765625
| 2
|
QoS_PG/test_mlp&lstm/Lstm_test.py
|
swc1326/Policy-Gradient
| 0
|
12775160
|
import numpy as np
import tensorflow as tf
import csv
def classify_state(X, n_state):
up = 80
if (0 <= X <= 2.5):
return n_state - 1, 2.5
for i in range(n_state - 1):
if (up - (i + 1) * 2.5 < X <= up - i * 2.5):
return i, up - i * 2.5
def GA(max_prob_index, n_actions):
values = np.zeros(n_actions)
jjj = 0
for i in range(n_actions):
values[i] = jjj
jjj = jjj + 2.5
return values[max_prob_index]
def GR(X, x, n_actions, round_size, n_state):
values = np.zeros(n_actions)
jjj = 0
for i in range(n_actions):
values[i] = jjj
jjj = jjj + 2.5
reward = np.zeros(n_actions)
flag = 0
_, down = classify_state(X[x + 1][round_size - 1], n_state)
for i in range(n_actions):
if (down + 2.5 >= values[i] > down):
reward[i] = 1
elif (down + 5 >= values[i] >= down + 2.5):
reward[i] = 2
elif (down + 7.5 >= values[i] > down+5):
reward[i] = 3
else:
reward[i] = -1
return reward, flag, values
def classify_losspackge(diff, one_hot_state, n_state):
if (diff == 0):
class_one_hot = one_hot_state[0]
for i in range(int((n_state / 2) - 1)):
if (2.5 * i < diff <= 2.5 * (i + 1)):
class_one_hot = one_hot_state[i + 1]
if (2.5 * (int(n_state / 2) - 1) < diff):
class_one_hot = one_hot_state[int(n_state / 2) - 1]
for i in range(int(n_state / 2) - 2):
if (-2.5 * (i + 1) <= diff < -2.5 * (i)):
class_one_hot = one_hot_state[int(n_state / 2) - 1 + i + 1]
if (-2.5 * (int(n_state / 2) - 2) > diff):
class_one_hot = one_hot_state[int(n_state / 2) - 1 + int(n_state / 2) - 2 + 1]
return class_one_hot
def lstm_test(cell_number, n_actions, n_state, epoch, one_hot_state, X,
model_i, round_size):
tf.reset_default_graph()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
input = tf.placeholder(tf.float32, [None, round_size , n_state], name="input_x") # 1*30
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=cell_number, state_is_tuple=True)
_, final_state = tf.nn.dynamic_rnn(cell=lstm_cell, inputs=input, dtype=tf.float32)
W3 = tf.get_variable("W3", shape=[cell_number, n_actions],
initializer=tf.contrib.layers.xavier_initializer())
B3 = tf.get_variable("B3", shape=[1, n_actions],
initializer=tf.constant_initializer())
score = tf.matmul(final_state[1], W3) + B3
probability = tf.nn.softmax(score)
restore_path = './model_' + str(model_i) + '/' + str(epoch) + '.ckpt'
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, restore_path)
down_count = len(X)
RAB = np.zeros(2)
DLR = np.zeros(2)
G_list = []
X_list = []
        G = 80  # 15.5
G_list.append(G)
batch_reward, batch_state, all_reward_for_loss, batch_action, all_action_for_loss = [], [], [], [], []
g_count=0
for x in range(len(X)-1):
if (x != 0):
if (G > 80):
G_list.append(80)
else:
G_list.append(action_values)
g_count=g_count+1
R_state = []
for i in range(round_size):
#print(len(X[x][i]))
state_arg, D = classify_state(X[x][i], n_state)
state_ = one_hot_state[state_arg]
R_state.append(state_)
batch_state.append(R_state)
state = np.reshape(R_state, [1, round_size , n_state])
tfprob = sess.run(probability, feed_dict={input: state})
max_prob_index = np.argmax(tfprob[0])
loss_package = G - X[x][round_size - 1]
if (loss_package >= 0):
RAB[0] = RAB[0] + loss_package
RAB[1] = RAB[1] + 1
else:
DLR[0] = DLR[0] + (-1) * loss_package
DLR[1] = DLR[1] + 1
action_values = GA(max_prob_index, n_actions)
reward, flag, values = GR(X, x, n_actions, round_size, n_state)
X_list.append(X[x][round_size - 1])
G = action_values
batch_reward.append(reward)
all_reward_for_loss.append(reward)
x_count=down_count
if (RAB[1] != 0):
RAB_ = RAB[0] / RAB[1]
else:
RAB_ = 0
if (DLR[1] != 0):
DLR_ = DLR[0] / DLR[1]
else:
DLR_ = 0
with open('./model_' + str(model_i) + '/lost_package.csv', 'a',
newline='') as p:
writer = csv.writer(p)
writer.writerow(['RAB', 'DLR'])
writer.writerow([RAB_, DLR_])
return x_count,g_count, G_list
| 2.5625
| 3
|
src/brouwers/utils/forms.py
|
modelbrouwers/modelbrouwers
| 6
|
12775161
|
<gh_stars>1-10
from django.forms import ModelForm
class AlwaysChangedModelForm(ModelForm):
"""
Mark the form always as changed, so that the instance is always saved.
"""
def has_changed(self):
return True
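# Hedged usage sketch: this form is typically attached to an admin inline so
# that untouched inline rows still get saved; `Child` is an illustrative model.
#
#     from django.contrib import admin
#
#     class ChildInline(admin.TabularInline):
#         model = Child
#         form = AlwaysChangedModelForm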
| 2.796875
| 3
|
third_party/Paste/paste/auth/multi.py
|
tingshao/catapult
| 5,079
|
12775162
|
# (c) 2005 <NAME>
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# This code was written with funding by http://prometheusresearch.com
"""
Authentication via Multiple Methods
In some environments, the choice of authentication method to be used
depends upon the environment and is not "fixed". This middleware allows
N authentication methods to be registered along with a goodness function
which determines which method should be used. The following example
demonstrates how to use both form and digest authentication in a server
stack; by default it uses form-based authentication unless
``*authmeth=digest`` is specified as a query argument.
>>> from paste.auth import form, cookie, digest, multi
>>> from paste.wsgilib import dump_environ
>>> from paste.httpserver import serve
>>>
>>> multi = multi.MultiHandler(dump_environ)
>>> def authfunc(environ, realm, user):
... return digest.digest_password(realm, user, user)
>>> multi.add_method('digest', digest.middleware, "Test Realm", authfunc)
>>> multi.set_query_argument('digest')
>>>
>>> def authfunc(environ, username, password):
... return username == password
>>> multi.add_method('form', form.middleware, authfunc)
>>> multi.set_default('form')
>>> serve(cookie.middleware(multi))
serving on...
"""
class MultiHandler(object):
"""
Multiple Authentication Handler
    This middleware provides two orthogonal facilities:
- a manner to register any number of authentication middlewares
- a mechanism to register predicates which cause one of the
registered middlewares to be used depending upon the request
If none of the predicates returns True, then the application is
invoked directly without middleware
"""
def __init__(self, application):
self.application = application
self.default = application
self.binding = {}
self.predicate = []
def add_method(self, name, factory, *args, **kwargs):
self.binding[name] = factory(self.application, *args, **kwargs)
def add_predicate(self, name, checker):
self.predicate.append((checker, self.binding[name]))
def set_default(self, name):
""" set default authentication method """
self.default = self.binding[name]
def set_query_argument(self, name, key = '*authmeth', value = None):
""" choose authentication method based on a query argument """
lookfor = "%s=%s" % (key, value or name)
self.add_predicate(name,
lambda environ: lookfor in environ.get('QUERY_STRING',''))
def __call__(self, environ, start_response):
for (checker, binding) in self.predicate:
if checker(environ):
return binding(environ, start_response)
return self.default(environ, start_response)
middleware = MultiHandler
__all__ = ['MultiHandler']
if "__main__" == __name__:
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS)
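# Hedged predicate sketch: besides set_query_argument, an arbitrary check can
# select a registered method; the header name below is illustrative.
#
#     multi.add_predicate('digest',
#         lambda environ: 'HTTP_X_PREFER_DIGEST' in environ)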
| 2.875
| 3
|
pyhsmm/basic/__init__.py
|
garyfeng/pyhsmm
| 1
|
12775163
|
from . import models
from . import distributions
from . import abstractions
| 1.0625
| 1
|
cookbook/c05/p19_temp_file.py
|
itpubs/python3-cookbook
| 3
|
12775164
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: Temporary files and directories
Desc :
"""
from tempfile import TemporaryFile
from tempfile import TemporaryDirectory
from tempfile import NamedTemporaryFile
import tempfile
def temp_file():
with TemporaryFile('w+t') as f:
# Read/write to the file
f.write('Hello World\n')
f.write('Testing\n')
# Seek back to beginning and read the data
f.seek(0)
data = f.read()
print(data)
with NamedTemporaryFile('w+t') as f:
print('filename is:', f.name)
with TemporaryDirectory() as dirname:
print('dirname is:', dirname)
print(tempfile.mkstemp())
print(tempfile.mkdtemp())
print(tempfile.gettempdir())
if __name__ == '__main__':
temp_file()
| 3.609375
| 4
|
aag/utils/Cli.py
|
JosephLai241/AAG
| 2
|
12775165
|
#===============================================================================
# Command-line interface
#===============================================================================
import argparse
from .Titles import Titles
class Parser():
"""
Methods for parsing CLI arguments.
"""
### Initialize objects that will be used on class methods.
def __init__(self):
self._usage = "$ AAG.py [-h] [-l] [-e] [-m FONT_NUM TEXT] [-r TEXT]"
self._description = r"""
Ascii Art Generator - Generate art from 425 fonts
Author: <NAME>
"""
self._epilog = r"""
EXAMPLES
List all available fonts:
$ ./AAG.py -l
Generate examples for all fonts:
$ ./AAG.py -e
Create ASCII art using the "larry3d" font:
$ ./AAG.py -m 211 something
Wrap your text in quotes if it is more than one word or contains special characters:
$ ./AAG.py -m 211 "ASCII Art Generator"
$ ./AAG.py -m 330 "H&M"
Generate ASCII art from a random font:
$ ./AAG.py -r "A random font"
"""
### Add parser flags.
def _add_flags(self, parser):
aag = parser.add_argument_group("generator options")
aag.add_argument(
"-l", "--list",
action = "store_true",
help = "list all available fonts and their corresponding number")
aag.add_argument(
"-e", "--examples",
action = "store_true",
help = "generate examples for each font")
aag.add_argument(
"-m", "--make",
action = "append",
metavar = "",
nargs = 2,
help = "generate ASCII art")
aag.add_argument(
"-r", "--randomize",
action = "append",
metavar = "",
help = "generate ASCII art from a random font")
### Get args.
def parse_args(self):
parser = argparse.ArgumentParser(
description = self._description,
epilog = self._epilog,
formatter_class = argparse.RawDescriptionHelpFormatter,
usage = self._usage)
self._add_flags(parser)
args = parser.parse_args()
return args, parser
class CheckArgs():
"""
Method for checking the `-m`/`--make` flag.
"""
@staticmethod
def check_make(args, parser):
        for make_args in args.make:
            try:
                if not make_args[0].isdigit():
                    raise ValueError
except ValueError:
Titles.error_title()
parser.exit()
| 3.09375
| 3
|
myalgorithms/sorts_1.py
|
andriidem308/python_practice
| 2
|
12775166
|
<reponame>andriidem308/python_practice
def bubble_sort(arr):
swapped = True
while swapped:
swapped = False
for i in range(len(arr) - 1):
if arr[i] > arr[i + 1]:
arr[i], arr[i + 1] = arr[i + 1], arr[i]
swapped = True
def selection_sort(arr):
for i in range(len(arr)):
minpos = i
for j in range(i + 1, len(arr)):
if arr[j] < arr[minpos]:
minpos = j
arr[i], arr[minpos] = arr[minpos], arr[i]
def insertion_sort(arr):
for i in range(1, len(arr)):
current = arr[i]
j = i - 1
while j >= 0 and arr[j] > current:
arr[j + 1] = arr[j]
j -= 1
arr[j + 1] = current
def merge_sort(array):
def _merge(left_arr, right_arr):
_summary = [0] * (len(left_arr) + len(right_arr))
li = ri = n = 0
while li < len(left_arr) and ri < len(right_arr):
if left_arr[li] <= right_arr[ri]:
_summary[n] = left_arr[li]
li += 1
n += 1
else:
_summary[n] = right_arr[ri]
ri += 1
n += 1
while li < len(left_arr):
_summary[n] = left_arr[li]
li += 1
n += 1
while ri < len(right_arr):
_summary[n] = right_arr[ri]
ri += 1
n += 1
return _summary
if len(array) <= 1: return
middle = len(array) // 2
left_array = [array[i] for i in range(0, middle)]
right_array = [array[i] for i in range(middle, len(array))]
merge_sort(left_array)
merge_sort(right_array)
summary = _merge(left_array, right_array)
for i in range(len(array)):
array[i] = summary[i]
def quick_sort(array):
if len(array) <= 1:
return
pivot = array[0]
left_array = []
right_array = []
middle_array = []
for x in array:
if x < pivot:
left_array.append(x)
elif x > pivot:
right_array.append(x)
else:
middle_array.append(x)
quick_sort(left_array)
quick_sort(right_array)
index = 0
for x in left_array + middle_array + right_array:
array[index] = x
index += 1
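# A quick in-place usage sketch for the sorts above (sample data illustrative):
if __name__ == "__main__":
    data = [5, 2, 9, 1, 5, 6]
    quick_sort(data)
    print(data)  # [1, 2, 5, 5, 6, 9]
    data = [3, 1, 2]
    merge_sort(data)
    print(data)  # [1, 2, 3]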
| 4.1875
| 4
|
JSEncrypt_RSA.py
|
Singhoy/MyJSTranslated
| 0
|
12775167
|
<filename>JSEncrypt_RSA.py
# -*- coding: utf-8 -*-
from base64 import b64encode
from json import dumps
from math import floor
from random import random
from time import time
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA
"""
Original JavaScript:
var publicNewKey = "<KEY>";
function getNewEncodeStr(raw) {
var encodeJson = JSON.stringify({ "num": generateUUID(), "stamp": new Date().getTime(), "username": raw });
var encrypt = new JSEncrypt();
encrypt.setPublicKey(publicNewKey);
return encrypt.encrypt(encodeJson);
}
function generateUUID() {
var d = new Date().getTime();
var uuid = 'xxxxxxxxxxxxyxxxyxxxxxxxxxxxxxxx'.replace(/[xy]/g, function (c) {
var r = (d + Math.random() * 16) % 16 | 0;
d = Math.floor(d / 16); return (c == 'x' ? r : (r & 0x3 | 0x8)).toString(16);
});
return uuid;
};
"""
def 加密(pwd):
key = "<KEY>"
public_key = '-----BEGIN PUBLIC KEY-----\n' + key + '\n-----END PUBLIC KEY-----'
rsakey = RSA.importKey(public_key)
cipher = PKCS1_v1_5.new(rsakey)
uid = generate_uuid()
t = int(time() * 1000)
js = {"num": uid, "stamp": t, "username": pwd}
js = dumps(js).replace(" ", "")
cipher_text = b64encode(cipher.encrypt(js.encode("utf-8")))
return cipher_text.decode()
def generate_uuid():
la = [_ for _ in range(10, 36)]
lb = [chr(_) for _ in range(ord("a"), ord("z") + 1)]
dic = dict(zip(la, lb))
uuid = 'xxxxxxxxxxxxyxxxyxxxxxxxxxxxxxxx'
s = ""
d = int(time() * 1000)
for c in uuid:
        _r = int((d + random() * 16) % 16)  # JS: (d + Math.random() * 16) % 16 | 0
        d = floor(d / 16)
        if c != "x":
            _r = (_r & 0x3) | 0x8  # JS: (r & 0x3 | 0x8)
s += str(_r) if _r < 10 else dic[_r]
return s
if __name__ == "__main__":
k = "UzqJjJlfzBTDkHs5"
a = 加密(k)
print(a)
| 2.953125
| 3
|
covid_berlin_scraper/tests/test_download_dashboard.py
|
jakubvalenta/covid-berlin-scraper
| 1
|
12775168
|
<gh_stars>1-10
import datetime
from pathlib import Path
from unittest import TestCase
from unittest.mock import patch
import dateutil.tz
import regex
from covid_berlin_scraper.download_dashboard import download_dashboard
dashboard_content = (
Path(__file__).parent / 'test_data' / 'corona.html'
).read_text()
class TestDownloadDashboard(TestCase):
@patch(
'covid_berlin_scraper.download_dashboard.http_get',
return_value=dashboard_content,
)
def test_download_dashboard(self, patched_http_get):
default_tz = dateutil.tz.gettz('Europe/Berlin')
dashboard = download_dashboard(
url='foo',
date_selector='.toptitle.h1 p',
date_regex=regex.compile(
'Lagebericht (?P<date>\\d+\\.\\d+\\.\\d+)'
),
date_regex_group='date',
default_tz=default_tz,
timeout=10,
user_agent='Spam',
)
self.assertEqual(
dashboard.timestamp,
datetime.datetime(year=2020, month=9, day=10, tzinfo=default_tz),
)
self.assertEqual(dashboard.content, dashboard_content)
| 2.59375
| 3
|
jabbbar/project.py
|
barguman/jabbbar
| 4
|
12775169
|
<reponame>barguman/jabbbar
class Project():
def __init__(self, client, project_id=None):
self.client = client
self.project_id = project_id
def get_details(self, project_id=None):
"""
Get information about a specific project
http://developer.dribbble.com/v1/projects/#get-a-project
"""
project_id = project_id if project_id is not None else self.project_id
project_details = self.client.GET("/projects/{}".format(project_id))
return project_details.json()
def get_shots(self, project_id=None):
"""
Retrieves a list of all shots that are part of this project
http://developer.dribbble.com/v1/projects/shots/#list-shots-for-a-project
"""
project_id = project_id if project_id is not None else self.project_id
shots = self.client.GET("/projects/{}/shots".format(project_id))
return shots.json()
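# Hedged usage sketch: `client` is any object exposing GET(path) that returns
# a requests-style response; DribbbleClient is an illustrative name.
#
#     project = Project(DribbbleClient(token="..."), project_id=3)
#     print(project.get_details())
#     print(project.get_shots())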
| 2.890625
| 3
|
collector.py
|
museless/Mysqlwatcher
| 0
|
12775170
|
<filename>collector.py
# -*- coding: utf-8 -*-
import pymysql
import sys
import os
import getopt
import time
import pdb
MIN_SLEEP = 5
SERVER_STATUS = (
"Aborted_clients",
"Aborted_connects",
"Bytes_received",
"Bytes_sent",
"Connections",
"Created_tmp_files",
"Created_tmp_tables",
"Created_tmp_disk_tables",
"Max_execution_time_exceeded",
"Threads_cached",
"Threads_created",
"Threads_running",
)
def collect(datapath, sleep=MIN_SLEEP):
conn = pymysql.Connection(user="root", password="<PASSWORD>.")
cursor = conn.cursor()
pf = open("{}processlist".format(datapath), "a")
sf = open("{}globalstatus".format(datapath), "a+")
t = time.strftime("%Y%m%d%H%M%S")
status_header = "Time," + ",".join(SERVER_STATUS)
pos = sf.tell()
sf.seek(0)
line = sf.readline().strip("\n")
sf.seek(pos)
if line and line != status_header:
sf = open("{}globalstatus-{}".format(datapath, t), "a")
if line != status_header:
sf.write(status_header + "\n")
while True:
t = time.strftime("%Y%m%d%H%M%S")
cursor.execute("show processlist")
data = cursor.fetchall()
d = ["{0[1]} {0[2]} {0[3]} {0[4]} {0[5]}".format(d) for d in data]
d.insert(0, t)
pf.write("\n".join(d) + "\n\n")
pf.flush()
cursor.execute("show global status")
data = cursor.fetchall()
vals = {k: v for (k, v) in data}
status = (str(vals.get(st, "NaN")) for st in SERVER_STATUS)
v = "{time} {status}\n".format(time=t, status=" ".join(status))
sf.write(v)
sf.flush()
time.sleep(sleep)
def show_help():
    print("usage: collector.py [--detach] [-s|--sleep SECONDS] [--datapath PATH] [-h|--help]")
def daemon():
pid = os.fork()
if pid != 0:
sys.exit()
pid = os.fork()
os.setsid()
if pid != 0:
sys.exit()
os.chdir("/")
f = open("/dev/null")
sys.stdin = sys.stderr = sys.stdout = f
def main(argv=sys.argv):
longopts = [
"detach", "sleep=", "help", "datapath="
]
opts, args = getopt.getopt(argv[1: ], "hs:", longopts)
sleep_time, datapath, daemonable = MIN_SLEEP, "", False
for (o, a) in opts:
if o == "--detach":
daemonable = True
elif o in ("-s", "--sleep"):
            sleep_time = max(int(a), MIN_SLEEP)  # option values arrive as strings
elif o in ("-h", "--help"):
show_help()
elif o == "--datapath":
datapath = a
if not datapath:
print("collector: missing datapath")
return
if daemonable:
daemon()
collect(sleep=sleep_time, datapath=datapath)
if __name__ == "__main__":
main()
| 2.21875
| 2
|
utils/main_config.py
|
Themis3000/discord_bot_template
| 0
|
12775171
|
"""
Uses class Config to define all type conversion functions for main config.yaml file
"""
from utils.config import Config
import discord
config = Config("./config.yaml")
status_converter_dict = {"online": discord.Status.online,
"offline": discord.Status.offline,
"idle": discord.Status.idle,
"dnd": discord.Status.dnd,
"invisible": discord.Status.invisible}
activity_converter_dict = {"game": discord.Game,
"streaming": discord.Streaming,
"custom": discord.CustomActivity}
@config.register_get_handler("discord_presence.ready.status")
@config.register_get_handler("discord_presence.startup.status")
def status_converter(value):
if value in status_converter_dict:
return status_converter_dict[value]
return status_converter_dict["online"]
@config.register_get_handler("discord_presence.ready.activity")
@config.register_get_handler("discord_presence.startup.activity")
def presence_converter(value):
if value["type"] in activity_converter_dict:
return activity_converter_dict[value["type"]](name=value["text"])
return None
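# Hedged usage sketch: once registered, the handlers above convert raw YAML
# values on read. The accessor name `get` is an assumption about
# utils.config.Config, which is not shown here.
#
#     status = config.get("discord_presence.ready.status")      # a discord.Status
#     activity = config.get("discord_presence.ready.activity")  # a discord activity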
| 3.03125
| 3
|
v_1_1/app/file_merge.py
|
2462612540/Large-file-transfer
| 1
|
12775172
|
<reponame>2462612540/Large-file-transfer<gh_stars>1-10
from v_1_1.utils.file_utils import *
def file_merge_check(joinpath):
    """
    Pre-merge check: verify that all chunk files are present.
    :return: True if the chunk set is complete, False otherwise
    """
    if check_file_complete(joinpath):
        # all chunks are present
        return True
    else:
        # some chunks are missing
        return False
def file_merge(joindir, joinfilename):
    """
    Merge the chunk files back into a single file.
    :param joindir: directory containing the chunk files
    :param joinfilename: name of the merged output file
    :return:
    """
    joinfilepath = joindir + joinfilename
    file_message = json_file_read(quire_file_name(joindir)[0])
    if os.path.exists(joinfilepath):
        print("Target file already exists")
        if file_message[joinfilename] == file_md5(joinfilepath):
            print("File matches the original")
        else:
            print("File differs from the original; it must be retransferred")
    else:
        # check whether the chunks can be merged
        if file_merge_check(joindir):
            # merge the chunks
            outfile = open(joinfilepath, 'wb')
            for key in file_message:
                if "part" in key:
                    infile = open(joindir + key, 'rb')
                    outfile.write(infile.read())
                    infile.close()
            outfile.close()
            print("Merge finished")
            if file_md5(joinfilepath) == file_message[joinfilename]:
                print("Merged file matches the original")
                delete_file_other(temp_storage_path)
                return True
            else:
                print("Merged file does not match the original; the merge must be redone")
                return False
        else:
            print("Chunks are incomplete; they must be retransferred")
| 2.40625
| 2
|
models/modelzoo/__init__.py
|
naivelamb/kaggle-cloud-organization
| 30
|
12775173
|
<reponame>naivelamb/kaggle-cloud-organization<filename>models/modelzoo/__init__.py<gh_stars>10-100
from .dpn import *
from .inceptionV4 import *
from .inceptionresnetv2 import *
from .resnet import *
from .senet import *
from .xception import *
from .senet2 import seresnext26_32x4d
from .efficientNet import EfficientNet
| 1.070313
| 1
|
src/pipelinex/extras/ops/allennlp_ops.py
|
MarchRaBBiT/pipelinex
| 188
|
12775174
|
class AllennlpReaderToDict:
def __init__(self, **kwargs):
self.kwargs = kwargs
def __call__(self, *args_ignore, **kwargs_ignore):
kwargs = self.kwargs
reader = kwargs.get("reader")
file_path = kwargs.get("file_path")
n_samples = kwargs.get("n_samples")
instances = reader._read(file_path)
n_samples = n_samples or len(instances)
d = dict()
i = 0
for instance in instances:
if n_samples and i >= n_samples:
break
d[i] = instance.fields
i += 1
return d
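# Hedged usage sketch: any AllenNLP DatasetReader should work here; the reader
# class and file path below are illustrative.
#
#     from allennlp.data.dataset_readers import SequenceTaggingDatasetReader
#
#     to_dict = AllennlpReaderToDict(
#         reader=SequenceTaggingDatasetReader(),
#         file_path="data/train.tsv",
#         n_samples=10,
#     )
#     fields_by_index = to_dict()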
| 2.796875
| 3
|
test/feature/test_harris.py
|
jiangwei221/kornia
| 0
|
12775175
|
<reponame>jiangwei221/kornia<filename>test/feature/test_harris.py
import pytest
import torch
import kornia
from torch.testing import assert_allclose
from torch.autograd import gradcheck
import utils # test utils
class TestCornerHarris:
def test_shape(self):
inp = torch.ones(1, 3, 4, 4)
sobel = kornia.feature.CornerHarris(k=0.04)
assert sobel(inp).shape == (1, 3, 4, 4)
def test_shape_batch(self):
inp = torch.zeros(2, 6, 4, 4)
sobel = kornia.feature.CornerHarris(k=0.04)
assert sobel(inp).shape == (2, 6, 4, 4)
def test_corners(self):
inp = torch.tensor([[[
[0., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 1., 1., 1., 0.],
[0., 1., 1., 1., 1., 1., 0.],
[0., 1., 1., 1., 1., 1., 0.],
[0., 1., 1., 1., 1., 1., 0.],
[0., 1., 1., 1., 1., 1., 0.],
[0., 0., 0., 0., 0., 0., 0.],
]]])
expected = torch.tensor([[[
[0., 0., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0., 0.],
]]])
scores = kornia.feature.corner_harris(inp, k=0.04)
assert_allclose(scores, expected)
def test_corners_batch(self):
inp = torch.tensor([[
[0., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 1., 1., 1., 0.],
[0., 1., 1., 1., 1., 1., 0.],
[0., 1., 1., 1., 1., 1., 0.],
[0., 1., 1., 1., 1., 1., 0.],
[0., 1., 1., 1., 1., 1., 0.],
[0., 0., 0., 0., 0., 0., 0.],
], [
[0., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 1., 1., 0., 0.],
[0., 1., 1., 1., 1., 0., 0.],
[0., 1., 1., 1., 1., 0., 0.],
[0., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0.],
]]).repeat(2, 1, 1, 1)
expected = torch.tensor([[
[0., 0., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0., 0.],
], [
[0., 0., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0.],
]])
scores = kornia.feature.corner_harris(inp, k=0.04)
assert_allclose(scores, expected)
def test_gradcheck(self):
k = 0.04
batch_size, channels, height, width = 1, 2, 5, 4
img = torch.rand(batch_size, channels, height, width)
img = utils.tensor_to_gradcheck_var(img) # to var
assert gradcheck(kornia.feature.corner_harris, (img, k),
raise_exception=True)
def test_jit(self):
@torch.jit.script
def op_script(input, k):
return kornia.feature.corner_harris(input, k)
k = torch.tensor(0.04)
img = torch.rand(2, 3, 4, 5)
actual = op_script(img, k)
expected = kornia.feature.corner_harris(img, k)
assert_allclose(actual, expected)
| 2.375
| 2
|
source/normal_compare/analyze_txt.py
|
geez0219/ARC
| 1
|
12775176
|
import os
import pdb
import numpy as np
from fastestimator.summary.logs import parse_log_file
from scipy.stats import ttest_ind
from tabulate import tabulate
def get_best_step(objective, eval_steps, result, mode, train_history):
obj_step = 0
for idx, value in enumerate(result):
if (mode == "max" and value >= objective) or (mode == "min"
and value <= objective):
obj_step = eval_steps[idx]
break
upper_step = obj_step
lower_step = eval_steps[idx - 1]
min_loss = None
min_train_step = None
for train_step, train_loss in train_history.items():
if train_step > lower_step and train_step <= upper_step:
if min_loss is None:
min_loss = train_loss
min_train_step = train_step
elif train_loss < min_loss:
min_loss = train_loss
min_train_step = train_step
return min_train_step
def get_column_mean_std(all_data,
best_mode,
lrname,
lr_schedules,
arc_name="lr-controller-weighted-acc"):
if best_mode == "max":
get_best = np.max
get_worst = np.min
elif best_mode == "min":
get_best = np.min
get_worst = np.max
else:
raise ValueError("best_mode needs to be one of ['max', 'min']")
column_data = all_data[lrname]
best_numbers = []
for lr_schedule in lr_schedules:
lr_schedule_data = column_data[lr_schedule]
for step, result, _ in lr_schedule_data:
best_numbers.append(get_best(result))
convergence_target = get_worst(best_numbers)
br_dict, bs_dict = {}, {}
for lr_schedule in lr_schedules:
best_step, best_result = [], []
lr_schedule_data = column_data[lr_schedule]
for step, result, train_history in lr_schedule_data:
best_result.append(get_best(result))
best_step.append(
get_best_step(convergence_target, step, result, best_mode,
train_history))
br_dict[lr_schedule] = best_result
bs_dict[lr_schedule] = best_step
table = []
for lr_schedule in lr_schedules:
best_result = br_dict[lr_schedule]
best_step = bs_dict[lr_schedule]
br_display = f"{np.mean(best_result):.4f}"
bs_display = f"{np.mean(best_step):.0f}"
if np.mean(best_result) == get_best(
[np.mean(x) for x in br_dict.values()]):
br_display += "*"
if np.mean(best_step) == min([np.mean(x) for x in bs_dict.values()]):
bs_display += "*"
if ttest_ind(br_dict[arc_name], br_dict[lr_schedule]).pvalue < 0.05:
br_display += "#"
if ttest_ind(bs_dict[arc_name], bs_dict[lr_schedule]).pvalue < 0.05:
bs_display += "#"
table.append([
lr_schedule, br_display, f"{np.std(best_result):.4f}", bs_display,
f"{np.std(best_step):.0f}"
])
print(
tabulate(table,
headers=[
"scheduler", "metric mean", "metric std", "step mean",
"step std"
],
tablefmt="github"))
def get_column_median(all_data,
best_mode,
lrname,
lr_schedules,
arc_name="lr-controller-weighted-acc"):
if best_mode == "max":
get_best = np.max
get_worst = np.min
elif best_mode == "min":
get_best = np.min
get_worst = np.max
else:
raise ValueError("best_mode needs to be one of ['max', 'min']")
column_data = all_data[lrname]
best_numbers = []
for lr_schedule in lr_schedules:
lr_schedule_data = column_data[lr_schedule]
for step, result, _ in lr_schedule_data:
best_numbers.append(get_best(result))
convergence_target = get_worst(best_numbers)
br_dict, bs_dict = {}, {}
for lr_schedule in lr_schedules:
best_step, best_result = [], []
lr_schedule_data = column_data[lr_schedule]
for step, result, train_history in lr_schedule_data:
best_result.append(get_best(result))
best_step.append(
get_best_step(convergence_target, step, result, best_mode,
train_history))
br_dict[lr_schedule] = best_result
bs_dict[lr_schedule] = best_step
table = []
for lr_schedule in lr_schedules:
best_result = br_dict[lr_schedule]
best_step = bs_dict[lr_schedule]
br_display = f"{np.median(best_result):.4f}"
bs_display = f"{np.median(best_step):.0f}"
if np.median(best_result) == get_best(
[np.median(x) for x in br_dict.values()]):
br_display += "*"
if np.median(best_step) == min(
[np.median(x) for x in bs_dict.values()]):
bs_display += "*"
if ttest_ind(br_dict[arc_name], br_dict[lr_schedule]).pvalue < 0.05:
br_display += "#"
if ttest_ind(bs_dict[arc_name], bs_dict[lr_schedule]).pvalue < 0.05:
bs_display += "#"
table.append([
lr_schedule,
br_display,
bs_display,
])
print(
tabulate(table,
headers=[
"scheduler",
"metric median",
"step median",
],
tablefmt="github"))
def check_file_complete(folder_path):
filenames = [
fname for fname in os.listdir(folder_path) if fname.endswith(".txt")
]
lr_set = set()
schedule_set = set()
id_set = set()
# get the set of lr, scheduler, id
for filename in filenames:
configs = os.path.splitext(filename)[0].split("_")
lr_name, lr_schedule_name, run_id = configs
lr_set.add(lr_name)
schedule_set.add(lr_schedule_name)
id_set.add(run_id)
# check all combinations exist
for lr in lr_set:
for schedule in schedule_set:
for run in id_set:
filename = f"{lr}_{schedule}_{run}.txt"
assert os.path.exists(os.path.join(
folder_path, filename)), f"{filename} is missing"
def print_table(folder_path, best_mode, metric_name, loss_name, mode):
if mode == "mean_std":
print_func = get_column_mean_std
elif mode == "median":
print_func = get_column_median
else:
raise ValueError("mode needs to be one of ['mean_std', 'median']")
check_file_complete(folder_path)
all_data = {}
filenames = [
fname for fname in os.listdir(folder_path) if fname.endswith(".txt")
]
for filename in filenames:
filepath = os.path.join(folder_path, filename)
configs = os.path.splitext(filename)[0].split("_")
lr_name, lr_schedule_name, run_id = configs
summary = parse_log_file(filepath, ".txt")
result = np.array(
[acc for acc in summary.history["eval"][metric_name].values()])
steps = np.array(
[acc for acc in summary.history["eval"][metric_name].keys()])
train_history = summary.history["train"][loss_name]
if lr_name not in all_data:
all_data[lr_name] = {}
if lr_schedule_name not in all_data[lr_name]:
all_data[lr_name][lr_schedule_name] = []
all_data[lr_name][lr_schedule_name].append(
(steps, result, train_history))
for lrname in sorted(list(all_data.keys()), reverse=True):
print(
f"========================================== lrname={lrname} ==========================================="
)
print_func(all_data,
best_mode,
lrname=lrname,
lr_schedules=[
"base-lr", "cosine-decay", "cyclic-cosine-decay",
"exponential-decay", "lr-controller-weighted-acc"
])
if __name__ == "__main__":
print_table(
mode="median", # "median" or "mean_std"
folder_path=
"/mnt/c/Users/212770359/Downloads/ARC-master/iccv/logs/normal_comparison/language_modeling", # path of the log dir
best_mode="min", # "max" or "min"
metric_name="perplexity", # evaluation metric
loss_name="ce") # loss key
| 2.5
| 2
|
L1Trigger/TrackTrigger/python/TTStubAlgorithmRegister_cfi.py
|
ckamtsikis/cmssw
| 852
|
12775177
|
import FWCore.ParameterSet.Config as cms
# First register all the hit matching algorithms, then specify preferred ones at end.
# The stub windows used have been optimized for PU200 events
# We use by default the tight tuning
#
# Definition is presented here:
#
# https://indico.cern.ch/event/681577/#4-update-of-the-track-trigger
#
# This script is adapted to the latest Tilted Tracker geometry to date (tracker T5)
# This version was tested on CMSSW 10_0_0_pre1
#
TTStubAlgorithm_official_Phase2TrackerDigi_ = cms.ESProducer("TTStubAlgorithm_official_Phase2TrackerDigi_",
zMatchingPS = cms.bool(True),
zMatching2S = cms.bool(True),
#Number of tilted rings per side in barrel layers (for tilted geom only)
NTiltedRings = cms.vdouble( 0., 12., 12., 12., 0., 0., 0.),
# PU200 tight tuning, optimized for muons
BarrelCut = cms.vdouble( 0, 2, 2.5, 3.5, 4.5, 5.5, 7),
TiltedBarrelCutSet = cms.VPSet(
cms.PSet( TiltedCut = cms.vdouble( 0 ) ),
cms.PSet( TiltedCut = cms.vdouble( 0, 3, 3, 2.5, 3, 3, 2.5, 2.5, 2, 1.5, 1.5, 1, 1) ),
cms.PSet( TiltedCut = cms.vdouble( 0, 3.5, 3, 3, 3, 3, 2.5, 2.5, 3, 3, 2.5, 2.5, 2.5) ),
cms.PSet( TiltedCut = cms.vdouble( 0, 4, 4, 4, 3.5, 3.5, 3.5, 3.5, 3, 3, 3, 3, 3) ),
),
EndcapCutSet = cms.VPSet(
cms.PSet( EndcapCut = cms.vdouble( 0 ) ),
cms.PSet( EndcapCut = cms.vdouble( 0, 1, 2.5, 2.5, 3, 2.5, 3, 3.5, 4, 4, 4.5, 3.5, 4, 4.5, 5, 5.5) ),
cms.PSet( EndcapCut = cms.vdouble( 0, 0.5, 2.5, 2.5, 3, 2.5, 3, 3, 3.5, 3.5, 4, 3.5, 3.5, 4, 4.5, 5) ),
cms.PSet( EndcapCut = cms.vdouble( 0, 1, 3, 3, 2.5, 3.5, 3.5, 3.5, 4, 3.5, 3.5, 4, 4.5) ),
cms.PSet( EndcapCut = cms.vdouble( 0, 1, 2.5, 3, 2.5, 3.5, 3, 3, 3.5, 3.5, 3.5, 4, 4) ),
cms.PSet( EndcapCut = cms.vdouble( 0, 0.5, 1.5, 3, 2.5, 3.5, 3, 3, 3.5, 4, 3.5, 4, 3.5) ),
)
# PU200 loose tuning, optimized for robustness (uncomment if you want to use it)
#BarrelCut = cms.vdouble( 0, 2.0, 3, 4.5, 6, 6.5, 7.0),
#TiltedBarrelCutSet = cms.VPSet(
# cms.PSet( TiltedCut = cms.vdouble( 0 ) ),
# cms.PSet( TiltedCut = cms.vdouble( 0, 3, 3., 2.5, 3., 3., 2.5, 2.5, 2., 1.5, 1.5, 1, 1) ),
# cms.PSet( TiltedCut = cms.vdouble( 0, 4., 4, 4, 4, 4., 4., 4.5, 5, 4., 3.5, 3.5, 3) ),
# cms.PSet( TiltedCut = cms.vdouble( 0, 5, 5, 5, 5, 5, 5, 5.5, 5, 5, 5.5, 5.5, 5.5) ),
# ),
#EndcapCutSet = cms.VPSet(
# cms.PSet( EndcapCut = cms.vdouble( 0 ) ),
# cms.PSet( EndcapCut = cms.vdouble( 0, 1., 2.5, 2.5, 3.5, 5.5, 5.5, 6, 6.5, 6.5, 6.5, 6.5, 6.5, 6.5, 7, 7) ),
# cms.PSet( EndcapCut = cms.vdouble( 0, 0.5, 2.5, 2.5, 3, 5, 6, 6, 6.5, 6.5, 6.5, 6.5, 6.5, 6.5, 7, 7) ),
# cms.PSet( EndcapCut = cms.vdouble( 0, 1, 3., 4.5, 6., 6.5, 6.5, 6.5, 7, 7, 7, 7, 7) ),
# cms.PSet( EndcapCut = cms.vdouble( 0, 1., 2.5, 3.5, 6., 6.5, 6.5, 6.5, 6.5, 7, 7, 7, 7) ),
# cms.PSet( EndcapCut = cms.vdouble( 0, 0.5, 1.5, 3., 4.5, 6.5, 6.5, 7, 7, 7, 7, 7, 7) ),
# )
)
# CBC3 hit matching algorithm
TTStubAlgorithm_cbc3_Phase2TrackerDigi_ = cms.ESProducer("TTStubAlgorithm_cbc3_Phase2TrackerDigi_",
zMatchingPS = cms.bool(True),
zMatching2S = cms.bool(True),
)
# Set the preferred hit matching algorithms.
# We prefer the global geometry algorithm for now in order not to break
# anything. Override with process.TTStubAlgorithm_PSimHit_ = ...,
# etc. in your configuration.
TTStubAlgorithm_Phase2TrackerDigi_ = cms.ESPrefer("TTStubAlgorithm_official_Phase2TrackerDigi_")
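# Hedged override sketch following the note above, mirroring the ESPrefer line
# in this file (to be done in your own configuration, not here):
#
#     process.TTStubAlgorithm_Phase2TrackerDigi_ = \
#         cms.ESPrefer("TTStubAlgorithm_cbc3_Phase2TrackerDigi_")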
| 1.804688
| 2
|
crawlerPlatform/crawlerDocuments/gsxt.py
|
ShawnLoveGame/crawlerPlatform
| 0
|
12775178
|
<gh_stars>0
# -*-: coding: utf-8 -*-
import requests
from requests.exceptions import ConnectionError, RequestException
import json
import math
import datetime
import pymysql
import time
from pybloom_live import BloomFilter, ScalableBloomFilter
import traceback
from SetProxy import Ss
from retrying import retry
class GSXT:
def __init__(self, searchword):
self.base_url = 'http://app.gsxt.gov.cn/'
self.search_url = 'http://app.gsxt.gov.cn/gsxt/cn/gov/saic/web/controller/PrimaryInfoIndexAppController/search?page=1'
self.detail_url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-primaryinfoapp-entbaseInfo-{}.html?nodeNum={}&entType={}'
self.headers = {
'Host': 'app.gsxt.gov.cn',
'Connection': 'keep-alive',
'Content-Length': '2',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Origin': 'file://',
'X-Requested-With': 'XMLHttpRequest',
'User-Agent': 'Mozilla/5.0 (Linux; Android 9; VKY-AL00 Build/HUAWEIVKY-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/71.0.3578.99 Mobile Safari/537.36 Html5Plus/1.0',
'Content-Type': 'application/json;charset=UTF-8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,en-US;q=0.9',
'Cookie': '__jsluid=c4b165b7c3aed96d6a6da49b76cbeb94;JSESSIONID=E22B1C6516882041B052A576092875CA;SECTOKEN=7146152019179079477;tlb_cookie=172.16.12.1048080'
}
self.searchword = searchword
self.host = '192.168.1.68'
self.port = '3306'
self.user = 'root'
self.passwd = '<PASSWORD>'
self.database = 'phtest'
self.ss = Ss()
self.proxy = {}
def get_search_page(self, url):
form_data = {
"searchword": self.searchword.encode("utf-8").decode("latin1"),
"conditions": json.loads(
'{"excep_tab": "0","ill_tab": "0","area": "0","cStatus": "0","xzxk": "0","xzcf": "0","dydj": "0"}'),
"sourceType": "A"
}
try:
response1 = requests.post(url, headers=self.headers, proxies=self.proxy,timeout=6,
data=json.dumps(form_data, ensure_ascii=False))
response1.encoding = "gbk"
print(response1.text)
print(response1.status_code)
except Exception as e:
print(e)
print('traceback.print_exc():', traceback.print_exc())
print('traceback.format_exc():\n%s' % traceback.format_exc())
self.ss.setFalse(self.proxy['http'].split(":")[0], self.proxy['http'].split(":")[1], 'gxst')
self.ss.setFalse(self.proxy['https'].split(":")[0], self.proxy['https'].split(":")[1], 'gxst')
self.get_PROXY()
return self.get_search_page(url)
try:
data = json.loads(response1.text)
status = int(data['status'])
# print(data)
# status = data['status']
if response1.status_code == 200 and data['data'] and status != 500:
return response1.text
except Exception as e:
print(e)
print('traceback.print_exc():', traceback.print_exc())
print('traceback.format_exc():\n%s' % traceback.format_exc())
self.ss.setFalse(self.proxy['http'].split(":")[0], self.proxy['http'].split(":")[1], 'gxst')
self.ss.setFalse(self.proxy['https'].split(":")[0], self.proxy['https'].split(":")[1], 'gxst')
self.get_PROXY()
return self.get_search_page(url)
def get_detail_page(self, pripid, nodeNum, entType):
detailurl = self.detail_url.format(pripid, nodeNum, entType)
print(detailurl)
try:
response2 = requests.post(url=detailurl, headers=self.headers,timeout=6, proxies=self.proxy, json={})
except Exception as e:
print(e)
print('traceback.print_exc():', traceback.print_exc())
print('traceback.format_exc():\n%s' % traceback.format_exc())
self.ss.setFalse(self.proxy['http'].split(":")[0], self.proxy['http'].split(":")[1], 'gxst')
self.ss.setFalse(self.proxy['https'].split(":")[0], self.proxy['https'].split(":")[1], 'gxst')
self.get_PROXY()
return self.get_detail_page(pripid, nodeNum, entType)
try:
data = json.loads(response2.text)
print(response2.status_code)
print(data)
if response2.status_code == 200:
return response2.text
except:
self.ss.setFalse(self.proxy['http'].split(":")[0], self.proxy['http'].split(":")[1], 'gxst')
self.ss.setFalse(self.proxy['https'].split(":")[0], self.proxy['https'].split(":")[1], 'gxst')
self.get_PROXY()
return self.get_detail_page(pripid, nodeNum, entType)
        if 'status' in data:
            status = int(data['status'])
            if status == 500:
                self.ss.setFalse(self.proxy['http'].split(":")[0], self.proxy['http'].split(":")[1], 'gxst')
                self.ss.setFalse(self.proxy['https'].split(":")[0], self.proxy['https'].split(":")[1], 'gxst')
                self.get_PROXY()
                # retry with the new proxy (the original "return Exception" returned the class itself)
                return self.get_detail_page(pripid, nodeNum, entType)
        return None
def get_json_data(self, url):
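        """Generic JSON fetcher with the same proxy-rotation retry loop as the page fetchers."""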
try:
response = requests.post(url, headers=self.headers,timeout=6, proxies=self.proxy, json={})
print(response.text)
response.encoding = 'utf-8'
print(response.status_code)
except Exception as e:
print(e)
print('traceback.print_exc():', traceback.print_exc())
print('traceback.format_exc():\n%s' % traceback.format_exc())
self.ss.setFalse(self.proxy['http'].split(":")[0], self.proxy['http'].split(":")[1], 'gxst')
self.ss.setFalse(self.proxy['https'].split(":")[0], self.proxy['https'].split(":")[1], 'gxst')
self.get_PROXY()
return self.get_json_data(url)
try:
json_data = json.loads(response.text)
if response.status_code == 200 and json_data is not None:
                # the original "while" here only ever ran once; it was an "if" in disguise
                if 'NGIDERRORCODE' not in json_data:
                    print(json_data)
                    return json_data
else:
self.ss.setFalse(self.proxy['http'].split(":")[0], self.proxy['http'].split(":")[1], 'gxst')
self.ss.setFalse(self.proxy['https'].split(":")[0], self.proxy['https'].split(":")[1], 'gxst')
self.get_PROXY()
print(json_data)
return self.get_json_data(url)
if 'status' in json_data:
status = int(json_data['status'])
if status == 500:
self.ss.setFalse(self.proxy['http'].split(":")[0], self.proxy['http'].split(":")[1], 'gxst')
self.ss.setFalse(self.proxy['https'].split(":")[0], self.proxy['https'].split(":")[1], 'gxst')
self.get_PROXY()
return self.get_json_data(url)
return None
return None
except Exception as e:
print(e)
print('traceback.print_exc():', traceback.print_exc())
print('traceback.format_exc():\n%s' % traceback.format_exc())
self.ss.setFalse(self.proxy['http'].split(":")[0], self.proxy['http'].split(":")[1], 'gxst')
self.ss.setFalse(self.proxy['https'].split(":")[0], self.proxy['https'].split(":")[1], 'gxst')
self.get_PROXY()
return self.get_json_data(url)
    # Parse the company detail page
    def parse_detail_page(self, detail_page):
        bloom_sql = "select entName, uniscId,register_num, person_name, estDate, dom, entType, regState, opFrom, opTo, apprDate, regOrg, opScope, statusDate,cancel_reason,regCap,regCaption,regCapCurCN from gsxt"
        base_b = self.get_bloomFilter(bloom_sql)
        print(detail_page)
        detail_data = json.loads(detail_page)
        print(detail_data)
        # basic info
        item = {}
        # registered capital
        item['regCap'] = self.ExistOrNot(detail_data, "regCap")
        item['regCaption'] = self.ExistOrNot(detail_data, "regCaption")
        if item['regCaption']:
            item['regCaption'] += "万"  # unit suffix "万" (10,000) kept as stored
        item['regCapCurCN'] = self.ExistOrNot(detail_data, "regCapCurCN")
        if not item['regCapCurCN']:
            item['regCapCurCN'] = "人民币"  # default currency: RMB
        if detail_data['result']:
            # split out the individual fields
            # company name
            item['entName'] = self.ExistOrNot(detail_data['result'], 'entName', 'traName')
            # unified social credit code
            item['uniscId'] = detail_data['result']['uniscId']
            # business registration number
            item['register_num'] = detail_data['result']['regNo']
            # managing partner or legal representative
            item['person_name'] = detail_data['result']['name']
            # date of establishment
            item['estDate'] = detail_data['result']['estDate']
            # address
            item['dom'] = self.ExistOrNot(detail_data['result'], 'dom', 'opLoc')
            # enterprise type
            item['entType'] = detail_data['result']['entType_CN']
            # registration status
            item['regState'] = detail_data['result']['regState_CN']
            # operating period from
            item['opFrom'] = self.ExistOrNot(detail_data['result'], 'opFrom')
            # operating period to
            item['opTo'] = self.ExistOrNot(detail_data['result'], 'opTo')
            # approval date
            item['apprDate'] = self.ExistOrNot(detail_data['result'], 'apprDate')
            # registration authority
            item['regOrg'] = detail_data['result']['regOrg_CN']
            # business scope
            item['opScope'] = detail_data['result']['opScope']
            # status decision date
            item['statusDate'] = self.ExistOrNot(detail_data['statusInfo'], 'statusDate')
            # cancellation reason
            item['cancel_reason'] = self.ExistOrNot(detail_data['statusInfo'], 'reason')
            print(item)
            if item in base_b:
                print("already in database")
            else:
                base_b.add(item)
                self.insert_base_data(item)
            return item
    # Insert the company base record
    def insert_base_data(self, item):
        print("inserting base record")
        uptime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gsxt(entName,uniscId,register_num,person_name,estDate,dom,entType,regState,opFrom,opTo,apprDate,regOrg,opScope,statusDate,cancel_reason,local_utime,regCap,regCaption,regCapCurCN) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql,
                           (
                               item['entName'], item['uniscId'], item['register_num'], item['person_name'],
                               item['estDate'],
                               item['dom'], item['entType'], item['regState'], item['opFrom'],
                               item['opTo'], item['apprDate'], item['regOrg'], item['opScope'],
                               item['statusDate'], item['cancel_reason'], uptime, item['regCap'], item['regCaption'],
                               item['regCapCurCN']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            # DB errors were previously caught as requests' RequestException, which can never be raised here
            print(err)
            print(sql)
    # Fetch shareholder and capital contribution info
    def get_holder_detail(self, params):
        url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-shareholder-{}.html?nodeNum={}&entType={}&start=0&sourceType=A'
        url = url.format(
            params['pripid'], params['nodeNum'], params['entType'])
        print("shareholder url")
        print(url)
        json_data = self.get_json_data(url)
        print(json_data)
        totalPage = int(json_data['totalPage'])
        index = 0
        for i in range(totalPage):
            url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-shareholder-{}.html?nodeNum={}&entType={}&start={}&sourceType=A'
            if i != 0:
                index += 5
            url = url.format(params['pripid'], params['nodeNum'], params['entType'], str(index))
            json_data = self.get_json_data(url)
            print(json_data)
            data = json_data['data']
            if data is not None:
                bsql = 'select inv_name, invType, blicType, bLicNo, respForm from gx_holder'
                b = self.get_bloomFilter(bsql)
                for g in range(len(data)):
                    item = {}
                    # shareholder
                    item['inv_name'] = data[g]['inv']
                    # shareholder type
                    item['invType'] = data[g]['invType_CN']
                    # business licence type
                    item['blicType'] = data[g]['blicType_CN']
                    # registration number
                    item['bLicNo'] = data[g]['bLicNo']
                    # enterprise type
                    item['respForm'] = data[g]['respForm_CN']
                    # parameter for the follow-up detail url
                    invId = data[g]['invId']
                    print('shareholder info')
                    print(item)
                    # insert shareholder base record
                    if item in b:
                        print("already in database")
                    else:
                        b.add(item)
                        self.insert_holder_base(item, params['main_id'])
                    # fetch detailed contribution info
                    detail_url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-shareholderDetail-{}.html?entType={}&sourceType=A'.format(
                        invId, params['entType'])
                    print(detail_url)
                    h_data = self.get_json_data(detail_url)
                    print(h_data)
                    main_id = self.get_main_id(item['inv_name'])
                    inv_name = item['inv_name']
                    h_data1 = h_data['data'][1]
                    if h_data1 is not None:
                        bsql = 'select subConAm, conForm, conDate from gx_holder_con'
                        b = self.get_bloomFilter(bsql)
                        for j in range(len(h_data1)):
                            confirm = {}
                            # subscribed contribution amount
                            confirm['subConAm'] = h_data1[j]['subConAm']
                            # subscribed contribution form
                            confirm['conForm'] = h_data1[j]['conForm_CN']
                            # subscribed contribution date
                            confirm['conDate'] = h_data1[j]['conDate']
                            print(confirm)
                            # dedup on the contribution row itself (the original mistakenly checked `item`)
                            if confirm in b:
                                print("already in database")
                            else:
                                b.add(confirm)
                                self.insert_holder_confirm(confirm, main_id, inv_name)
                    h_data0 = h_data['data'][0]
                    if h_data0 is not None:
                        bsql = 'select acConAm, conDate, conForm from gx_holder_rea'
                        b = self.get_bloomFilter(bsql)
                        for j in range(len(h_data0)):
                            realHan = {}
                            # paid-in amount
                            realHan['acConAm'] = h_data0[j]['acConAm']
                            # date
                            realHan['conDate'] = h_data0[j]['conDate']
                            # contribution form
                            realHan['conForm'] = h_data0[j]['conForm_CN']
                            print(realHan)
                            if realHan in b:
                                print("already in database")
                            else:
                                b.add(realHan)
                                self.insert_holder_reahand(realHan, main_id, inv_name)
    # Insert shareholder and contribution records
    def insert_holder_base(self, item, main_id):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_holder(c_id, inv_name,invType,blicType,bLicNo,respForm) values (%s,%s,%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (main_id, item['inv_name'], item['invType'], item['blicType'], item['bLicNo'],
                                 item['respForm']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
    def insert_holder_confirm(self, confirm, main_id, inv_name):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_holder_con(c_id, inv_name,subConAm,conForm,conDate) values (%s,%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (main_id, inv_name, confirm['subConAm'], confirm['conForm'], confirm['conDate']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
    def insert_holder_reahand(self, realHan, main_id, inv_name):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_holder_rea(c_id, inv_name,acConAm,conDate,conForm) values (%s,%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (main_id, inv_name, realHan['acConAm'], realHan['conDate'], realHan['conForm']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
    # Fetch key personnel info
    def get_leader(self, params):
        url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-KeyPerson-{}.html?nodeNum={}&entType={}&sourceType=A'
        url = url.format(
            params['pripid'], params['nodeNum'], params['entType'])
        print('key person url')
        print(url)
        json_data = self.get_json_data(url)
        print(json_data)
        data = json_data['data']
        if data is not None:
            bsql = 'select lename, leposition from gx_leader'
            b = self.get_bloomFilter(bsql)
            for i in range(len(data)):
                item = {}
                # name
                item['lename'] = data[i]['name']
                # position
                item['leposition'] = data[i]['position_CN']
                print(item)
                if item in b:
                    print("already in database")
                else:
                    b.add(item)
                    self.insert_leader(item, params['main_id'])
    # Insert key personnel record
    def insert_leader(self, item, main_id):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_leader(c_id, lename,leposition) values (%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (main_id, item['lename'], item['leposition']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
    # Fetch company change (alteration) records
    def get_entprise_info_after(self, params):
        url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-alter-{}.html?nodeNum={}&entType={}&start=0&sourceType=A'
        url = url.format(
            params['pripid'], params['nodeNum'], params['entType'])
        json_data = self.get_json_data(url)
        print(json_data)
        totalPage = int(json_data['totalPage'])
        index = 0
        for i in range(totalPage):
            if i != 0:
                index += 5
            url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-alter-{}.html?nodeNum={}&entType={}&start={}&sourceType=A'.format(
                params['pripid'], params['nodeNum'], params['entType'], str(index))
            u_data = self.get_json_data(url)
            print(u_data)
            data = u_data['data']
            if data is not None:
                bsql = 'select altItem, altBe, altAf,altDate from gx_change'
                b = self.get_bloomFilter(bsql)
                for j in range(len(data)):
                    item = {}
                    # changed item
                    item['altItem'] = data[j]['altItem_CN']
                    # value before the change
                    item['altBe'] = data[j]['altBe']
                    # value after the change
                    item['altAf'] = data[j]['altAf']
                    # change date
                    item['altDate'] = data[j]['altDate']
                    print("change record")
                    print(item)
                    if item in b:
                        print("already in database")
                    else:
                        b.add(item)
                        self.insert_change(item, params['main_id'])
    # Insert change record
    def insert_change(self, item, main_id):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_change(c_id, altItem,altBe,altAf,altDate) values (%s,%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (main_id, item['altItem'], item['altBe'], item['altAf'], item['altDate']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
    # Fetch branch office info
    def get_branch(self, params):
        url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-branch-{}.html?nodeNum={}&entType={}&start=0&sourceType=A'
        url = url.format(
            params['pripid'], params['nodeNum'], params['entType'])
        json_data = self.get_json_data(url)
        print(json_data)
        totalPage = int(json_data['totalPage'])
        index = 0
        for i in range(totalPage):
            if i != 0:
                index += 9
            url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-branch-{}.html?nodeNum={}&entType={}&start={}&sourceType=A'.format(
                params['pripid'], params['nodeNum'], params['entType'], str(index))
            u_data = self.get_json_data(url)
            print(u_data)
            data = u_data['data']
            if data is not None:
                bsql = 'select brName, regNo, regOrg, uniscId from gx_branch'
                b = self.get_bloomFilter(bsql)
                for j in range(len(data)):
                    item = {}
                    # name
                    item['brName'] = data[j]['brName']
                    # registration number
                    item['regNo'] = data[j]['regNo']
                    # registration authority
                    item['regOrg'] = data[j]['regOrg_CN']
                    # unified social credit code
                    item['uniscId'] = data[j]['uniscId']
                    if item in b:
                        print("already in database")
                    else:
                        b.add(item)
                        self.insert_branch(item, params['main_id'])
                    print(item)
    # Insert branch office record
    def insert_branch(self, item, main_id):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_branch(c_id, brName,regNo,regOrg,uniscId) values (%s,%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (main_id, item['brName'], item['regNo'], item['regOrg'], item['uniscId']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
    # Fetch chattel mortgage registration info
    def get_mortreg(self, params):
        url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-mortreginfo-{}.html?nodeNum={}&entType={}&start=0&sourceType=A'
        url = url.format(
            params['pripid'], params['nodeNum'], params['entType'])
        json_data = self.get_json_data(url)
        print(json_data)
        totalPage = int(json_data['totalPage'])
        print('mortgage registration pages')
        print(totalPage)
        index = 0
        for i in range(totalPage):
            if i != 0:
                index += 5
            url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-mortreginfo-{}.html?nodeNum={}&entType={}&start={}&sourceType=A'.format(
                params['pripid'], params['nodeNum'], params['entType'], str(index))
            print(url)
            u_data = self.get_json_data(url)
            print(u_data)
            data = u_data['data']
            if data is not None:
                bsql = 'select morRegCNo, regiDate, regOrg, priClaSecAm,publicDate,regCapCur,canDate from gx_mortreg'
                b = self.get_bloomFilter(bsql)
                for j in range(len(data)):
                    item = {}
                    # registration number
                    item['morRegCNo'] = data[j]['morRegCNo']
                    # registration date
                    item['regiDate'] = data[j]['regiDate']
                    # registration authority
                    item['regOrg'] = data[j]['regOrg_CN']
                    # secured claim amount
                    item['priClaSecAm'] = data[j]['priClaSecAm']
                    # publication date
                    item['publicDate'] = data[j]['publicDate']
                    # currency (the source key really is spelled 'regCapCur_Cn')
                    item['regCapCur'] = data[j]['regCapCur_Cn']
                    # cancellation date
                    item['canDate'] = data[j]['canDate']
                    print("mortgage registration")
                    print(item)
                    if item in b:
                        print("already in database")
                    else:
                        b.add(item)
                        self.insert_mortreg(item, params['main_id'])
    # Insert chattel mortgage registration record
    def insert_mortreg(self, item, main_id):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_mortreg(c_id, morRegCNo,regiDate,regOrg,priClaSecAm,publicDate,regCapCur,canDate) values (%s,%s,%s,%s,%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (main_id, item['morRegCNo'], item['regiDate'], item['regOrg'], item['priClaSecAm'],
                                 item['publicDate'], item['regCapCur'], item['canDate']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
    # Fetch equity pledge registration info
    def get_stakqualitinfo(self, params):
        url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-stakqualitinfo-{}.html?nodeNum={}&entType={}&start=0&sourceType=A'
        url = url.format(
            params['pripid'], params['nodeNum'], params['entType'])
        json_data = self.get_json_data(url)
        print(json_data)
        totalPage = int(json_data['totalPage'])
        print('equity pledge pages')
        print(totalPage)
        index = 0
        for i in range(totalPage):
            if i != 0:
                index += 5
            url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-stakqualitinfo-{}.html?nodeNum={}&entType={}&start={}&sourceType=A'.format(
                params['pripid'], params['nodeNum'], params['entType'], str(index))
            print(url)
            u_data = self.get_json_data(url)
            print(u_data)
            data = u_data['data']
            if data is not None:
                bsql = 'select equityNo, equPleDate, pledgor, impAm,impOrg,impOrgBLicType,regCapCur,status,publicDate,canDate,equPleCanRea from gx_staqualit'
                b = self.get_bloomFilter(bsql)
                for j in range(len(data)):
                    item = {}
                    # registration number
                    item['equityNo'] = data[j]['equityNo']
                    # registration date
                    item['equPleDate'] = data[j]['equPleDate']
                    # pledgor
                    item['pledgor'] = data[j]['pledgor']
                    # pledged equity amount
                    item['impAm'] = data[j]['impAm']
                    # pledgee
                    item['impOrg'] = data[j]['impOrg']
                    # pledgee licence type
                    item['impOrgBLicType'] = data[j]['impOrgBLicType_CN']
                    # currency
                    item['regCapCur'] = data[j]['regCapCur_CN']
                    # status
                    item['status'] = data[j]['type']
                    # publication date
                    item['publicDate'] = data[j]['publicDate']
                    # cancellation date
                    item['canDate'] = data[j]['canDate']
                    # cancellation reason
                    item['equPleCanRea'] = data[j]['equPleCanRea']
                    print(item)
                    if item in b:
                        print("already in database")
                    else:
                        b.add(item)
                        self.insert_stakqualit(item, params['main_id'])
    # Insert equity pledge registration record
    def insert_stakqualit(self, item, main_id):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_staqualit(c_id, equityNo,equPleDate,pledgor,impAm,impOrg,impOrgBLicType,regCapCur,status,publicDate,canDate,equPleCanRea) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (main_id, item['equityNo'], item['equPleDate'], item['pledgor'], item['impAm'],
                                 item['impOrg'], item['impOrgBLicType'], item['regCapCur'], item['status'],
                                 item['publicDate'], item['canDate'], item['equPleCanRea']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
        return None
    # Fetch trademark registration info
    def get_trademark(self, params):
        url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-trademark-{}.html?nodeNum={}&entType={}&start=0&sourceType=A'
        url = url.format(
            params['pripid'], params['nodeNum'], params['entType'])
        json_data = self.get_json_data(url)
        print(json_data)
        totalPage = int(json_data['totalPage'])
        print('trademark registration pages')
        print(totalPage)
        index = 0
        for i in range(totalPage):
            if i != 0:
                index += 5
            url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-trademark-{}.html?nodeNum={}&entType={}&start={}&sourceType=A'.format(
                params['pripid'], params['nodeNum'], params['entType'], str(index))
            print(url)
            u_data = self.get_json_data(url)
            print(u_data)
            data = u_data['data']
            if data is not None:
                bsql = 'select regNum,intCls,regAnncDate,regAnncIssue,propertyEndDate,propertyBgnDate,goodsCnName from gx_trademark'
                b = self.get_bloomFilter(bsql)
                for j in range(len(data)):
                    item = {}
                    # trademark registration number
                    item['regNum'] = data[j]['regNum']
                    # class
                    item['intCls'] = data[j]['intCls']
                    # registration announcement date
                    item['regAnncDate'] = data[j]['regAnncDate']
                    # registration announcement issue number
                    item['regAnncIssue'] = data[j]['regAnncIssue']
                    # exclusive-right end date (the original comments had start/end swapped)
                    item['propertyEndDate'] = data[j]['propertyEndDate']
                    # exclusive-right start date
                    item['propertyBgnDate'] = data[j]['propertyBgnDate']
                    # goods/services covered
                    item['goodsCnName'] = data[j]['goodsCnName']
                    print(item)
                    if item in b:
                        print("already in database")
                    else:
                        b.add(item)
                        self.insert_trademark(item, params['main_id'])
    # Insert trademark registration record
    def insert_trademark(self, item, main_id):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_trademark(c_id, regNum,intCls,regAnncDate,regAnncIssue,propertyEndDate,propertyBgnDate,goodsCnName) values (%s,%s,%s,%s,%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (main_id, item['regNum'], item['intCls'], item['regAnncDate'], item['regAnncIssue'],
                                 item['propertyEndDate'], item['propertyBgnDate'], item['goodsCnName']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
        return None
    # Fetch judicial assistance info
    def get_assistInfo(self, params):
        url = 'http://app.gsxt.gov.cn/gsxt/Affiche-query-info-assistInfo-{}.html?nodeNum={}&entType={}&start=0&sourceType=A'
        url = url.format(
            params['pripid'], params['nodeNum'], params['entType'])
        json_data = self.get_json_data(url)
        print(json_data)
        totalPage = int(json_data['totalPage'])
        recordsTotal = int(json_data['recordsTotal'])
        print('judicial assistance record count')
        print(recordsTotal)
        index = 0
        for i in range(totalPage):
            if i != 0:
                index += 5
            url = 'http://app.gsxt.gov.cn/gsxt/Affiche-query-info-assistInfo-{}.html?nodeNum={}&entType={}&start={}&sourceType=A'.format(
                params['pripid'], params['nodeNum'], params['entType'], str(index))
            print(url)
            u_data = self.get_json_data(url)
            print(u_data)
            data = u_data['data']
            if data is not None:
                bsql = 'select publicDate,inv,froAm,frozState_CN,executeNo,bLicType_CN,bLicNo,cerNo from gx_assist_info'
                b = self.get_bloomFilter(bsql)
                for j in range(len(data)):
                    item = {}
                    # publication date
                    item['publicDate'] = data[j]['publicDate']
                    # party subject to enforcement
                    item['inv'] = data[j]['inv']
                    # equity amount
                    item['froAm'] = data[j]['froAm']
                    # equity (freeze) status
                    item['frozState_CN'] = data[j]['frozState_CN']
                    # enforcement notice number
                    item['executeNo'] = data[j]['executeNo']
                    # licence type of the enforced party
                    item['bLicType_CN'] = data[j]['bLicType_CN']
                    # licence number of the enforced party
                    item['bLicNo'] = data[j]['bLicNo']
                    # enforcement ruling document number
                    item['cerNo'] = data[j]['cerNo']
                    print(item)
                    if item in b:
                        print("already in database")
                    else:
                        b.add(item)
                        self.insert_assistInfo(item, params['main_id'])
    # Insert judicial assistance record
    def insert_assistInfo(self, item, main_id):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_assist_info(c_id, publicDate,inv,froAm,frozState_CN,executeNo,bLicType_CN,bLicNo,cerNo) values (%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (
                main_id, item['publicDate'], item['inv'], item['froAm'], item['frozState_CN'],
                item['executeNo'], item['bLicType_CN'], item['bLicNo'], item['cerNo']
            ))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
        return None
    # Fetch administrative licence info
    def get_licenceinfoDetail(self, params):
        url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-licenceinfoDetail-{}.html?nodeNum={}&entType={}&start=0&sourceType=A'
        url = url.format(
            params['pripid'], params['nodeNum'], params['entType'])
        json_data = self.get_json_data(url)
        print(json_data)
        totalPage = int(json_data['totalPage'])
        recordsTotal = int(json_data['recordsTotal'])
        print('administrative licence record count')
        print(recordsTotal)
        index = 0
        for i in range(totalPage):
            if i != 0:
                index += 5
            url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-licenceinfoDetail-{}.html?nodeNum={}&entType={}&start={}&sourceType=A'.format(
                params['pripid'], params['nodeNum'], params['entType'], str(index))
            print(url)
            u_data = self.get_json_data(url)
            print(u_data)
            data = u_data['data']
            if data is not None:
                bsql = 'select licNo,licName_CN,licAnth,valFrom,valTo,licItem from gx_licenceinfodetail'
                b = self.get_bloomFilter(bsql)
                for j in range(len(data)):
                    item = {}
                    # licence document number
                    item['licNo'] = data[j]['licNo']
                    # licence document name
                    item['licName_CN'] = data[j]['licName_CN']
                    # licensing authority
                    item['licAnth'] = data[j]['licAnth']
                    # valid from
                    item['valFrom'] = data[j]['valFrom']
                    # valid to
                    item['valTo'] = data[j]['valTo']
                    # licensed content
                    item['licItem'] = data[j]['licItem']
                    print(item)
                    if item in b:
                        print("already in database")
                    else:
                        b.add(item)
                        self.insert_licenceinfoDetail(item, params['main_id'])
    # Insert administrative licence record
    def insert_licenceinfoDetail(self, item, main_id):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_licenceinfodetail(c_id, licNo,licName_CN,licAnth,valFrom,valTo,licItem) values (%s,%s,%s,%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (main_id, item['licNo'], item['licName_CN'], item['licAnth'], item['valFrom'],
                                 item['valTo'], item['licItem']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
        return None
    # Fetch administrative penalty info
    def get_punishmentdetail(self, params):
        url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-punishmentdetail-{}.html?nodeNum={}&entType={}&start=0&sourceType=A'
        url = url.format(
            params['pripid'], params['nodeNum'], params['entType'])
        json_data = self.get_json_data(url)
        print(json_data)
        totalPage = int(json_data['totalPage'])
        recordsTotal = int(json_data['recordsTotal'])
        print('administrative penalty record count')
        print(recordsTotal)
        index = 0
        for i in range(totalPage):
            if i != 0:
                index += 5
            url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-punishmentdetail-{}.html?nodeNum={}&entType={}&start={}&sourceType=A'.format(
                params['pripid'], params['nodeNum'], params['entType'], str(index))
            print(url)
            u_data = self.get_json_data(url)
            print(u_data)
            data = u_data['data']
            if data is not None:
                bsql = 'select penDecNo,illegActType,penContent,penAuth_CN,penDecIssDate from gx_punishmentdetail'
                b = self.get_bloomFilter(bsql)
                for j in range(len(data)):
                    item = {}
                    # decision document number
                    item['penDecNo'] = data[j]['penDecNo']
                    # violation type
                    item['illegActType'] = data[j]['illegActType']
                    # penalty content
                    item['penContent'] = data[j]['penContent']
                    # deciding authority
                    item['penAuth_CN'] = data[j]['penAuth_CN']
                    # decision date
                    item['penDecIssDate'] = data[j]['penDecIssDate']
                    print(item)
                    if item in b:
                        print("already in database")
                    else:
                        b.add(item)
                        self.insert_punishmentdetail(item, params['main_id'])
    # Insert administrative penalty record
    def insert_punishmentdetail(self, item, main_id):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_punishmentdetail(c_id, penDecNo,illegActType,penContent,penAuth_CN,penDecIssDate) values (%s,%s,%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (main_id, item['penDecNo'], item['illegActType'], item['penContent'],
                                 item['penAuth_CN'], item['penDecIssDate']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
        return None
    # Fetch spot-check result info
    def get_spotCheckInfo(self, params):
        url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-spotCheckInfo-{}.html?nodeNum={}&entType={}&start=0&sourceType=A'
        url = url.format(
            params['pripid'], params['nodeNum'], params['entType'])
        json_data = self.get_json_data(url)
        print(json_data)
        totalPage = int(json_data['totalPage'])
        recordsTotal = int(json_data['recordsTotal'])
        print('spot-check result record count')
        print(recordsTotal)
        index = 0
        for i in range(totalPage):
            if i != 0:
                index += 5
            url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-spotCheckInfo-{}.html?nodeNum={}&entType={}&start={}&sourceType=A'.format(
                params['pripid'], params['nodeNum'], params['entType'], str(index))
            print(url)
            u_data = self.get_json_data(url)
            print(u_data)
            data = u_data['data']
            if data is not None:
                bsql = 'select insAuth_CN,insDate,insRes_CN from gx_spotcheckinfo'
                b = self.get_bloomFilter(bsql)
                for j in range(len(data)):
                    item = {}
                    # inspecting authority
                    item['insAuth_CN'] = data[j]['insAuth_CN']
                    # date
                    item['insDate'] = data[j]['insDate']
                    # result
                    item['insRes_CN'] = data[j]['insRes_CN']
                    print(item)
                    if item in b:
                        print("already in database")
                    else:
                        b.add(item)
                        self.insert_spotCheckInfo(item, params['main_id'])
    # Insert spot-check result record
    def insert_spotCheckInfo(self, item, main_id):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_spotcheckinfo(c_id, insAuth_CN,insDate,insRes_CN) values (%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (main_id, item['insAuth_CN'], item['insDate'], item['insRes_CN']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
        return None
    # Fetch serious-violation/dishonesty (blacklist) listings
    def get_illInfo(self, params):
        url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-illInfo-{}.html?nodeNum={}&entType={}&start=0&sourceType=A'
        url = url.format(
            params['pripid'], params['nodeNum'], params['entType'])
        json_data = self.get_json_data(url)
        print(json_data)
        totalPage = int(json_data['totalPage'])
        recordsTotal = int(json_data['recordsTotal'])
        print('blacklist record count')
        print(recordsTotal)
        index = 0
        for i in range(totalPage):
            if i != 0:
                index += 5
            url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-illInfo-{}.html?nodeNum={}&entType={}&start={}&sourceType=A'.format(
                params['pripid'], params['nodeNum'], params['entType'], str(index))
            print(url)
            u_data = self.get_json_data(url)
            print(u_data)
            data = u_data['data']
            if data is not None:
                bsql = 'select abntime,serILLRea_CN,decOrg_CN from gx_illinfo'
                b = self.get_bloomFilter(bsql)
                for j in range(len(data)):
                    item = {}
                    # date listed
                    item['abntime'] = data[j]['abntime']
                    # reason listed
                    item['serILLRea_CN'] = data[j]['serILLRea_CN']
                    # authority that made the listing decision
                    item['decOrg_CN'] = data[j]['decOrg_CN']
                    print(item)
                    if item in b:
                        print("already in database")
                    else:
                        b.add(item)
                        self.insert_illInfo(item, params['main_id'])
    # Insert blacklist record
    def insert_illInfo(self, item, main_id):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_illinfo(c_id, abntime,serILLRea_CN,decOrg_CN) values (%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (main_id, item['abntime'], item['serILLRea_CN'], item['decOrg_CN']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
        return None
    # Fetch abnormal-operation listings
    def get_entBusExcep(self, params):
        url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-entBusExcep-{}.html?nodeNum={}&entType={}&start=0&sourceType=A'
        url = url.format(
            params['pripid'], params['nodeNum'], params['entType'])
        json_data = self.get_json_data(url)
        print(json_data)
        totalPage = int(json_data['totalPage'])
        recordsTotal = int(json_data['recordsTotal'])
        print('abnormal-operation record count')
        print(recordsTotal)
        index = 0
        for i in range(totalPage):
            if i != 0:
                index += 5
            url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-entBusExcep-{}.html?nodeNum={}&entType={}&start={}&sourceType=A'.format(
                params['pripid'], params['nodeNum'], params['entType'], str(index))
            print(url)
            u_data = self.get_json_data(url)
            print(u_data)
            data = u_data['data']
            if data is not None:
                bsql = 'select abntime,speCause_CN,decOrg_CN from gx_entbusexcep'
                b = self.get_bloomFilter(bsql)
                for j in range(len(data)):
                    item = {}
                    # date listed
                    item['abntime'] = data[j]['abntime']
                    # reason listed
                    item['speCause_CN'] = data[j]['speCause_CN']
                    # authority that made the listing decision
                    item['decOrg_CN'] = data[j]['decOrg_CN']
                    print(item)
                    if item in b:
                        print("already in database")
                    else:
                        b.add(item)
                        self.insert_entBusExcep(item, params['main_id'])
    # Insert abnormal-operation record
    def insert_entBusExcep(self, item, main_id):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_entbusexcep(c_id, abntime,speCause_CN,decOrg_CN) values (%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (main_id, item['abntime'], item['speCause_CN'], item['decOrg_CN']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
        return None
    # Fetch annual report info
    def get_annualReportInfo(self, params):
        url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-anCheYearInfo-{}.html?nodeNum={}&entType={}&sourceType=A'
        url = url.format(
            params['pripid'], params['nodeNum'], params['entType'])
        json_data = self.get_json_data(url)
        print(json_data)
        for i in range(len(json_data)):
            anCheId = json_data[i]['anCheId']
            anCheYear = json_data[i]['anCheYear']
            anCheDate = json_data[i]['anCheDate']
            print(anCheYear, "annual report", " date:", anCheDate)
            url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-primaryinfoapp-annualReportInfo-{}.html?nodeNum={}&anCheId={}&anCheYear={}&entType={}&sourceType=A'
            url = url.format(params['pripid'], params['nodeNum'], anCheId, anCheYear, params['entType'])
            u_data = self.get_json_data(url)
            print(u_data)
            data = u_data['annRep']
            print(type(data))
            annRepData = {}
            # basic info
            annRepData['tel'] = data['annRepData']['tel']
            if 'email' in data['annRepData']:
                annRepData['email'] = data['annRepData']['email']
            else:
                annRepData['email'] = 'xxx'
            print(annRepData)
            self.insert_annRepData(annRepData, params['main_id'], anCheYear)
            # outbound investment info (inner loops renamed to j to stop shadowing the outer i)
            if 'annRepDataInvestment' in data:
                annRepDataInvestment = data['annRepDataInvestment']
                for j in range(len(annRepDataInvestment)):
                    investment = {}
                    # invested company name
                    investment['entName'] = annRepDataInvestment[j]['entName']
                    # invested company unified social credit code
                    investment['uniscId'] = annRepDataInvestment[j]['uniscId']
                    print(investment)
                    self.insert_annRepDataInvestment(investment, params['main_id'], anCheYear)
            # website or web-shop info
            if 'annRepDataWebsite' in data:
                annRepDataWebsite = data['annRepDataWebsite']
                for j in range(len(annRepDataWebsite)):
                    Website = {}
                    # website name
                    Website['webSitName'] = annRepDataWebsite[j]['webSitName']
                    # url
                    Website['domain'] = annRepDataWebsite[j]['domain']
                    print(Website)
                    self.insert_annRepDataWebsite(Website, params['main_id'], anCheYear)
            # shareholder and contribution info
            if 'annRepDataSponsor' in data:
                annRepDataSponsor = data['annRepDataSponsor']
                for j in range(len(annRepDataSponsor)):
                    sponsor = {}
                    # investor name
                    sponsor['invName'] = annRepDataSponsor[j]['invName']
                    # subscribed contribution (10k CNY)
                    sponsor['liSubConAm'] = annRepDataSponsor[j]['liSubConAm']
                    # subscribed contribution date
                    sponsor['subConDate'] = annRepDataSponsor[j]['subConDate']
                    # subscribed contribution form
                    sponsor['subConFormName'] = annRepDataSponsor[j]['subConFormName']
                    # paid-in contribution (10k CNY)
                    sponsor['liAcConAm'] = annRepDataSponsor[j]['liAcConAm']
                    # paid-in contribution date
                    sponsor['acConDate'] = annRepDataSponsor[j]['acConDate']
                    # paid-in contribution form
                    sponsor['acConForm_CN'] = annRepDataSponsor[j]['acConForm_CN']
                    print(sponsor)
                    self.insert_annRepDataSponsor(sponsor, params['main_id'], anCheYear)
            # equity change info
            if 'annRepDataAlterstock' in data:
                annRepDataAlterstock = data['annRepDataAlterstock']
                for j in range(len(annRepDataAlterstock)):
                    alterstock = {}
                    # change date
                    alterstock['altDate'] = annRepDataAlterstock[j]['altDate']
                    # investor
                    alterstock['inv'] = annRepDataAlterstock[j]['inv']
                    # equity ratio before the change
                    alterstock['transAmPr'] = annRepDataAlterstock[j]['transAmPr']
                    # equity ratio after the change
                    alterstock['transAmAft'] = annRepDataAlterstock[j]['transAmAft']
                    print(alterstock)
                    self.insert_annRepDataAlterstock(alterstock, params['main_id'], anCheYear)
    # Insert annual report base record
    def insert_annRepData(self, item, main_id, anCheYear):
        print("mainid: ", main_id)
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_annrepdata(year,c_id, tel,email) values (%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (anCheYear, main_id, item['tel'], item['email']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
        return None
    # Insert annual report outbound investment record
    def insert_annRepDataInvestment(self, item, main_id, anCheYear):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_annredata_investment(year,c_id, entName,uniscId) values (%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (anCheYear, main_id, item['entName'], item['uniscId']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
        return None
    # Insert annual report website/web-shop record
    def insert_annRepDataWebsite(self, item, main_id, anCheYear):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_annrepdata_website(year,c_id, webSitName,domain) values (%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (anCheYear, main_id, item['webSitName'], item['domain']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
        return None
    # Insert annual report shareholder/contribution record
    def insert_annRepDataSponsor(self, item, main_id, anCheYear):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_annrepdata_sponsor(year,c_id, invName,liSubConAm,subConDate,subConFormName,liAcConAm,acConDate,acConForm_CN) values (%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (
                anCheYear, main_id, item['invName'], item['liSubConAm'], item['subConDate'], item['subConFormName'],
                item['liAcConAm'], item['acConDate'], item['acConForm_CN']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
        return None
    # Insert annual report equity change record
    def insert_annRepDataAlterstock(self, item, main_id, anCheYear):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_annrepdata_alterstock(year,c_id, altDate,inv,transAmPr,transAmAft) values (%s,%s,%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            db.ping(reconnect=True)
            cursor.execute(sql,
                           (anCheYear, main_id, item['altDate'], item['inv'], item['transAmPr'], item['transAmAft']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
        return None
    # Fetch equity change info
    def get_insAlterstockinfo(self, params):
        url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-insAlterstockinfo-{}.html?nodeNum={}&entType={}&start=0&sourceType=A'
        url = url.format(
            params['pripid'], params['nodeNum'], params['entType'])
        json_data = self.get_json_data(url)
        print(json_data)
        totalPage = int(json_data['totalPage'])
        recordsTotal = int(json_data['recordsTotal'])
        print('equity change record count')
        print(recordsTotal)
        index = 0
        for i in range(totalPage):
            if i != 0:
                index += 5
            url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-insAlterstockinfo-{}.html?nodeNum={}&entType={}&start={}&sourceType=A'.format(
                params['pripid'], params['nodeNum'], params['entType'], str(index))
            print(url)
            u_data = self.get_json_data(url)
            print(u_data)
            data = u_data['data']
            if data is not None:
                bsql = 'select inv,altDate,transAmPrBf,transAmPrAf,publicDate from gx_insalterstockinfo'
                b = self.get_bloomFilter(bsql)
                for j in range(len(data)):
                    item = {}
                    # shareholder
                    item['inv'] = data[j]['inv']
                    # equity change date
                    item['altDate'] = data[j]['altDate']
                    # equity ratio before the change
                    item['transAmPrBf'] = data[j]['transAmPrBf']
                    # equity ratio after the change
                    item['transAmPrAf'] = data[j]['transAmPrAf']
                    # publication date
                    item['publicDate'] = data[j]['publicDate']
                    print(item)
                    if item in b:
                        print("already in database")
                    else:
                        b.add(item)
                        self.insert_insAlterstockinfo(item, params['main_id'])
    # Insert equity change record
    def insert_insAlterstockinfo(self, item, main_id):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_insalterstockinfo(c_id, inv,altDate,transAmPrBf,transAmPrAf,publicDate) values (%s,%s,%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            db.ping(reconnect=True)
            cursor.execute(sql, (main_id, item['inv'], item['altDate'], item['transAmPrBf'],
                                 item['transAmPrAf'], item['publicDate']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
        return None
    # Fetch intellectual-property pledge registration info
    def get_insProPledgeRegInfo(self, params):
        url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-insProPledgeRegInfo-{}.html?nodeNum={}&entType={}&start=0&sourceType=A'
        url = url.format(
            params['pripid'], params['nodeNum'], params['entType'])
        json_data = self.get_json_data(url)
        print(json_data)
        totalPage = int(json_data['totalPage'])
        recordsTotal = int(json_data['recordsTotal'])
        print('IP pledge registration record count')
        print(recordsTotal)
        index = 0
        for i in range(totalPage):
            if i != 0:
                index += 5
            url = 'http://app.gsxt.gov.cn/gsxt/corp-query-entprise-info-insProPledgeRegInfo-{}.html?nodeNum={}&entType={}&start={}&sourceType=A'.format(
                params['pripid'], params['nodeNum'], params['entType'], str(index))
            print(url)
            u_data = self.get_json_data(url)
            print(u_data)
            data = u_data['data']
            if data is not None:
                bsql = 'select uniscId,entName,kinds,pledgor,pleRegPerFrom,pleRegPerTo,publicDate,tmName,type from gx_inspropledgereginfo'
                b = self.get_bloomFilter(bsql)
                for j in range(len(data)):
                    item = {}
                    # IP registration number
                    item['uniscId'] = data[j]['uniscId']
                    # name
                    item['entName'] = data[j]['entName']
                    # kind
                    item['kinds'] = data[j]['kinds']
                    # pledgor name
                    item['pledgor'] = data[j]['pledgor']
                    # pledge registration start date
                    item['pleRegPerFrom'] = data[j]['pleRegPerFrom']
                    # pledge registration end date
                    item['pleRegPerTo'] = data[j]['pleRegPerTo']
                    # publication date
                    item['publicDate'] = data[j]['publicDate']
                    # trademark name
                    item['tmName'] = data[j]['tmName']
                    # status
                    item['type'] = data[j]['type']
                    print(item)
                    if item in b:
                        print("already in database")
                    else:
                        b.add(item)
                        self.insert_insProPledgeRegInfo(item, params['main_id'])
    # Insert IP pledge registration record
    def insert_insProPledgeRegInfo(self, item, main_id):
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "insert into gx_inspropledgereginfo(c_id, uniscId,entName,kinds,pledgor,pleRegPerFrom,pleRegPerTo,publicDate,tmName,type) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        try:
            cursor = db.cursor()
            db.ping(reconnect=True)
            cursor.execute(sql, (main_id, item['uniscId'], item['entName'], item['kinds'], item['pledgor'],
                                 item['pleRegPerFrom'], item['pleRegPerTo'], item['publicDate'], item['tmName'],
                                 item['type']))
            db.commit()
            db.close()
        except pymysql.Error as err:
            print(err)
            print(sql)
        return None
    def get_bloomFilter(self, sql):
        """Build a scalable Bloom filter over the rows already in the table so re-scraped records can be skipped cheaply."""
        bloom = ScalableBloomFilter(mode=ScalableBloomFilter.SMALL_SET_GROWTH)
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        cursor = db.cursor()
        cursor.execute(sql)
        desc = cursor.description
        # desc looks like (('total_premium', 246, 7, 26, 26, 2, 0), ('quote_count', 3, 3, 11, 11, 0, 0), ...)
        object_dict = [
            dict(zip([col[0] for col in desc], row))
            for row in cursor.fetchall()
        ]
        cursor.close()
        db.close()
        for d in object_dict:
            bloom.add(d)
        return bloom
    def get_main_id(self, inv_name):
        # returns the gx_holder row id for a shareholder name, or None
        # (the old version used a module "global idS", which raised NameError if the first query failed)
        main_id = None
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "select id from gx_holder where inv_name=%s"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (inv_name,))
            row = cursor.fetchone()
            if row:
                print(row[0])
                main_id = int(row[0])
            db.commit()
            db.close()
        except pymysql.Error:
            print("query failed")
        return main_id
    def select_main_id(self, entName):
        # returns the gsxt row id for a company name, or None
        main_id = None
        db = pymysql.connect(host=self.host, user=self.user, password=self.passwd, database=self.database, charset='utf8')
        sql = "select id from gsxt where entName=%s"
        try:
            cursor = db.cursor()
            cursor.execute(sql, (entName,))
            row = cursor.fetchone()
            if row:
                print(row[0])
                main_id = int(row[0])
            db.commit()
            db.close()
        except pymysql.Error:
            print("query failed")
        return main_id
def ExistOrNot(self, jsonObj, key1, key2=None):
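        """Return jsonObj[key1] if present, else jsonObj[key2] if given and present, else None."""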
if key1 in jsonObj:
return jsonObj[key1]
else:
if key2 in jsonObj:
return jsonObj[key2]
else:
return None
    @retry(stop_max_attempt_number=100)
    def get_PROXY(self):
        # fetch a fresh http/https proxy pair from the pool; @retry re-runs this up to 100 times on failure
        json_p = self.ss.getOne('gxst')
        host = str(json_p['ip'])
        port = str(json_p['port'])
        hostport = host + ':' + port
        self.proxy.update({'http': hostport})
        json_ps = self.ss.getOnes('gxst')
        hosts = str(json_ps['ip'])
        ports = str(json_ps['port'])
        hostports = hosts + ':' + ports
        self.proxy.update({'https': hostports})
        try:
            requests.get('http://app.gsxt.gov.cn/gsxt/pubMsgList.html', headers=self.headers, proxies=self.proxy,
                         timeout=5, verify=False)
        except Exception:
            self.ss.setFalse(str(json_p['ip']), str(json_p['port']), 'gxst')
            self.ss.setFalse(str(json_ps['ip']), str(json_ps['port']), 'gxst')
            raise Exception
        # only report success after the probe request actually got through
        print("proxy ip:port responding")
        print(self.proxy)
    def main(self):
        self.get_PROXY()
        print("search keyword:", self.searchword)
        search_page = self.get_search_page(self.search_url)
        while search_page is None:
            search_page = self.get_search_page(self.search_url)
            print(search_page)
            time.sleep(1)
        print(type(search_page))
        data = json.loads(search_page)
        print(data)
        print(type(data))
        # total number of search hits
        recordsTotal = int(data['data']['result']['recordsTotal'])
        print('total search records')
        print(recordsTotal)
        perPage = int(data['data']['result']['perPage'])
        pageNum = math.ceil(recordsTotal / perPage)
        print(pageNum)
        if pageNum > 0:
            for i in range(1, pageNum + 1):
                index_url = 'http://app.gsxt.gov.cn/gsxt/cn/gov/saic/web/controller/PrimaryInfoIndexAppController/search?page={}'.format(
                    str(i))
                searchpage = self.get_search_page(index_url)
                while searchpage is None:
                    searchpage = self.get_search_page(index_url)
                    print(searchpage)
                    time.sleep(1)
                data = json.loads(searchpage)
                print(index_url)
                json_data = data['data']['result']['data']
                for j in range(len(json_data)):
                    pripid = json_data[j]['pripid']
                    nodeNum = json_data[j]['nodeNum']
                    entType = json_data[j]['entType']
                    detail_page = self.get_detail_page(pripid, nodeNum, entType)
                    print(type(detail_page))
                    k = 0
                    detail_data = json.loads(detail_page)
                    # retry while the payload looks like an error page; after 5 tries move on to the next record
                    while ('code' in detail_data and 'data' in detail_data) or ('NGIDERRORCODE' in detail_data):
                        self.ss.setFalse(self.proxy['http'].split(":")[0], self.proxy['http'].split(":")[1], 'gxst')
                        self.ss.setFalse(self.proxy['https'].split(":")[0], self.proxy['https'].split(":")[1], 'gxst')
                        self.get_PROXY()
                        detail_page = self.get_detail_page(pripid, nodeNum, entType)
                        detail_data = json.loads(detail_page)
                        k = k + 1
                        print(k)
                        if k > 5:
                            # note: reassigning j only changes which record this iteration fetches,
                            # not the for loop's own sequence
                            j = j + 1
                            pripid = json_data[j]['pripid']
                            nodeNum = json_data[j]['nodeNum']
                            entType = json_data[j]['entType']
                            detail_page = self.get_detail_page(pripid, nodeNum, entType)
                            detail_data = json.loads(detail_page)
                            k = 0
                    print(detail_page)
                    print(detail_data)
                    print(type(detail_page))
                    # company base info
                    items = self.parse_detail_page(detail_page)
                    print(items)
                    params = {}
                    params['pripid'] = detail_data['result']['pripId']
                    params['nodeNum'] = detail_data['result']['nodeNum']
                    params['entType'] = detail_data['result']['entType']
                    entName = items['entName']
                    main_id = self.select_main_id(entName)
                    params['main_id'] = main_id
                    # shareholder and contribution info
                    holder_detail = self.get_holder_detail(params)
                    print(holder_detail)
                    # # key personnel info
                    # keyperson = self.get_leader(params)
                    # print(keyperson)
                    # branch office info
                    entprise_info_branch = self.get_branch(params)
                    # print(entprise_info_branch)
                    # # change records
                    # entprise_info_after = self.get_entprise_info_after(params)
                    # print(entprise_info_after)
                    # # chattel mortgage registrations
                    # mortreginfo = self.get_mortreg(params)
                    # print(mortreginfo)
                    # # equity pledge info
                    # stakqualitinfo = self.get_stakqualitinfo(params)
                    # print(stakqualitinfo)
                    # # trademark registrations
                    # trademark = self.get_trademark(params)
                    # print(trademark)
                    # # judicial assistance info
                    # assistInfo = self.get_assistInfo(params)
                    # print(assistInfo)
                    # # administrative licences
                    # licenceinfoDetail = self.get_licenceinfoDetail(params)
                    # print(licenceinfoDetail)
                    # # administrative penalties
                    # punishmentdetail = self.get_punishmentdetail(params)
                    # print(punishmentdetail)
                    # # spot-check results
                    # spotCheckInfo = self.get_spotCheckInfo(params)
                    # print(spotCheckInfo)
                    # # serious-violation (blacklist) listings
                    # illInfo = self.get_illInfo(params)
                    # print(illInfo)
                    # # abnormal-operation listings
                    # entBusExcep = self.get_entBusExcep(params)
                    # print(entBusExcep)
                    # # annual report info
                    # annualReportInfo = self.get_annualReportInfo(params)
                    # print(annualReportInfo)
                    # # equity change info
                    # insAlterstockinfo = self.get_insAlterstockinfo(params)
                    # print(insAlterstockinfo)
                    # # IP pledge registrations
                    # insProPledgeRegInfo = self.get_insProPledgeRegInfo(params)
                    # print(insProPledgeRegInfo)
if __name__ == '__main__':
g = GSXT("建设")
g.main()
| 1.945313
| 2
|
home/models.py
|
cipug/literate-robot
| 3
|
12775179
|
from django.db import models
from wagtail.core.models import Page
from wagtail.core.fields import StreamField
from wagtail.core import blocks
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.images.blocks import ImageChooserBlock
class HomePage(Page):
body = StreamField([
('jumbotron', blocks.RawHTMLBlock()),
])
content_panels = Page.content_panels + [
StreamFieldPanel('body'),
]
subpage_types = [
'home.HomePage',
'home.BasicPage',
]
    parent_page_types = [
'wagtailcore.Page'
]
class BasicPage(Page):
body = StreamField([
('heading', blocks.CharBlock(classname="full title")),
('paragraph', blocks.RichTextBlock()),
('image', ImageChooserBlock()),
])
content_panels = Page.content_panels + [
StreamFieldPanel('body'),
]
subpage_types = [
'home.BasicPage',
]
    parent_page_types = [
'wagtailcore.Page'
]
| 1.992188
| 2
|
Alarm.py
|
redart16/Python-samples2
| 1
|
12775180
|
<filename>Alarm.py
print("""******************
Alarm Program
******************""")
import time
Ahour = int(input("Please enter the alarm hour:"))
Aminute = int(input("Please enter the alarm minute:"))
while True:
    LT = time.localtime(time.time())
    if Ahour == LT.tm_hour and Aminute == LT.tm_min:
        print("Time is:", LT.tm_hour, ":", LT.tm_min, "The alarm is ringing")
        break
    elif (LT.tm_min == 30 and LT.tm_sec == 1) or (LT.tm_min == 7 and LT.tm_sec == 1):
        rhour = Ahour - LT.tm_hour
        rminute = Aminute - LT.tm_min
        print("Remaining to Alarm:", rhour, ":", rminute)
    time.sleep(1)  # avoid busy-waiting and repeated reminders within the same second
print("The alarm has been terminated")
| 3.921875
| 4
|
utils/common.py
|
ezeportela/newspaper-ds
| 0
|
12775181
|
<reponame>ezeportela/newspaper-ds
import yaml
__config = None
def config():
    # Cache the parsed YAML in the module-level __config; the original assigned a
    # local variable instead, so nothing was cached and the cached branch raised
    # UnboundLocalError.
    global __config
    if __config is None:
        with open('config.yaml', 'r') as f:
            __config = yaml.load(f, Loader=yaml.SafeLoader)
    return __config
def get_news_sites():
return config()['news_sites']
def get_news_site(uid):
return config()['news_sites'][uid]
| 2.375
| 2
|
paradrop/daemon/paradrop/airshark/spectrum_reader.py
|
lhartung/paradrop-test
| 0
|
12775182
|
import struct
from datetime import datetime
from twisted.internet.fdesc import setNonBlocking
class SpectrumReader(object):
# spectral scan packet format constants
hdrsize = 3
pktsize = 17 + 56
# ieee 802.11 constants
sc_wide = 0.3125 # in MHz
    def __init__(self, path):
        # open() raises on failure, so the old "if not self.fp" guard was dead code;
        # binary mode keeps struct.unpack_from working on Python 3
        self.fp = open(path, 'rb')
        setNonBlocking(self.fp.fileno())
def read_samples(self):
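        # Non-blocking read: returns (timestamp, raw bytes) when data is pending, else (None, None).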
data = self.fp.read()
if (data):
ts = datetime.now()
return ts, data
else:
return None, None
def flush(self):
self.fp.read()
@staticmethod
def decode(data):
"""
For information about the decoding of spectral samples see:
https://wireless.wiki.kernel.org/en/users/drivers/ath9k/spectral_scan
https://github.com/erikarn/ath_radar_stuff/tree/master/lib
and your ath9k implementation in e.g.
/drivers/net/wireless/ath/ath9k/common-spectral.c
"""
pos = 0
while pos < len(data) - SpectrumReader.hdrsize + 1:
(stype, slen) = struct.unpack_from(">BH", data, pos)
if not (stype == 1 and slen == SpectrumReader.pktsize):
print "skip malformed packet"
break # header malformed, discard data. This event is very unlikely (once in ~3h)
# On the other hand, if we buffer the sample in a primitive way, we consume too much cpu
# for only one or two "rescued" samples every 2-3 hours
# We only support 20 MHz
if stype == 1:
if pos >= len(data) - SpectrumReader.hdrsize - SpectrumReader.pktsize + 1:
break
pos += SpectrumReader.hdrsize
packet = data[pos: pos + SpectrumReader.pktsize]
pos += SpectrumReader.pktsize
yield packet
'''
(max_exp, freq, rssi, noise, max_mag, max_index, bitmap_weight, tsf) = \
struct.unpack_from(">BHbbHBBQ", data, pos)
pos += 17
sdata = struct.unpack_from("56B", data, pos)
pos += 56
yield (tsf, max_exp, freq, rssi, noise, max_mag, max_index, bitmap_weight, sdata)
# calculate power in dBm
sumsq_sample = 0
samples = []
for raw_sample in sdata:
if raw_sample == 0:
sample = 1
else:
sample = raw_sample << max_exp
sumsq_sample += sample*sample
samples.append(sample)
if sumsq_sample == 0:
sumsq_sample = 1
sumsq_sample = 10 * math.log10(sumsq_sample)
sc_total = 56 # HT20: 56 OFDM subcarriers
#first_sc = freq - SpectrumReader.sc_wide * (sc_total/2 + 0.5)
pwr = {}
for i, sample in enumerate(samples):
subcarrier_freq = 0
if i < 28:
subcarrier_freq = freq - SpectrumReader.sc_wide * (28 - i)
else:
subcarrier_freq = freq + SpectrumReader.sc_wide * (i - 27)
#subcarrier_freq = first_sc + i*SpectrumReader.sc_wide
sigval = noise + rssi + 20 * math.log10(sample) - sumsq_sample
pwr[subcarrier_freq] = sigval
yield (tsf, freq, noise, rssi, pwr)
'''
| 2.71875
| 3
|
problema 2.py
|
IrayP/PythonPC3
| 0
|
12775183
|
<gh_stars>0
def capitalizar_cada_palabra(cadena):
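    # Prints the string with every word capitalized (str.title()).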
print(cadena.title())
| 1.601563
| 2
|
trdg/labels_csv.py
|
BismarckBamfo/ocr-paper
| 1
|
12775184
|
<filename>trdg/labels_csv.py
import pandas as pd
from fire import Fire
def make_split_csv(path, split):
    # The original make_train_csv/make_val_csv/make_test_csv were identical apart
    # from the split name, so they are folded into this one helper.
    filenames = []
    words = []
    with open(f'{path}/{split}/labels.txt', 'r') as f:
        for line in f.readlines():
            split_line = line.split('\t')
            filenames.append(split_line[0])
            words.append(split_line[1].rstrip('\n').lstrip())
    df = pd.DataFrame(list(zip(filenames, words)), columns=['filename', 'words'])
    df.to_csv(f'{path}/{split}/labels.csv', sep=';', encoding='utf-8', index=False)
def main(path):
    for split in ('train', 'val', 'test'):
        make_split_csv(path, split)
if __name__ == '__main__':
Fire(main)
| 3.078125
| 3
|
Python/if-else.py
|
MarsBighead/mustang
| 4
|
12775185
|
<reponame>MarsBighead/mustang<filename>Python/if-else.py
#!/usr/bin/python
name = raw_input('What is your name? ')
if name.endswith('Gumby'):
print 'Hello, <NAME>'
else:
    print 'Hello, stranger'
| 2.984375
| 3
|
js/packages/cli/niftyrecords-assets/NiftyRecords_JSON_generator.py
|
niftyrecordsnft/metaplex
| 0
|
12775186
|
import json
import os
import random
import iso8601
import shutil
numberOfFiles = 1000
creatorAddress = "BjLKxBKRUjFX3WyfyTcTtotC5TfRaPJgVjEeMn1MuzPd"
# Build Blockchain JSON
for x in range(numberOfFiles):
nftNumber = x + 1
niftyRecordNFTData = {
"name" : "NiftyRecord #" + str(nftNumber),
"symbol": "NFRC",
"uri" : "https://assets.niftyrecordsnft.com/niftyrecords/" + str(nftNumber) + "/NiftyRecord-" + str(nftNumber) + ".json",
"image": "https://assets.niftyrecordsnft.com/niftyrecords/" + str(nftNumber) + "/NiftyRecord-" + str(nftNumber) + ".png",
"properties": {
"files": [
{
"uri": "https://assets.niftyrecordsnft.com/niftyrecords/" + str(nftNumber) + "/NiftyRecord-" + str(nftNumber) + ".png",
"type": "image/png"
}
],
"category": "image",
"creators": [
{
"address": creatorAddress,
"share": 100
}
]
},
"seller_fee_basis_points": 750
}
# When generating blockchain JSON
with open('blockchain/' + str(x) + '.json', 'w') as f:
json.dump(niftyRecordNFTData, f)
# Paste in temporary white placeholder image
shutil.copyfile('sleeves/white.png', 'blockchain/' + str(x) + '.png')
unrevealedSleeves = [
{
"name" : "White",
"rangeStart": 1,
"rangeEnd": 40,
"imagePath":"sleeves/white.png",
"revealDate": "2022-03-28T21:00:00-04:00"
},
{
"name" : "Black",
"rangeStart": 41,
"rangeEnd": 70,
"imagePath":"sleeves/black.png",
"revealDate": "2022-03-28T20:00:00-04:00"
},
{
"name" : "Silver",
"rangeStart": 71,
"rangeEnd": 90,
"imagePath":"sleeves/silver.png",
"revealDate": "2022-03-28T19:00:00-04:00"
},
{
"name" : "Gold",
"rangeStart": 91,
"rangeEnd": 100,
"imagePath":"sleeves/gold.png",
"revealDate": "2022-03-28T18:00:00-04:00"
}
]
generatedWhiteSleeves = 0
maxWhiteSleeves = numberOfFiles * 0.4
print("maxWhiteSleeves")
print(maxWhiteSleeves)
generatedBlackSleeves = 0
maxBlackSleeves = numberOfFiles * 0.3
print("maxBlackSleeves")
print(maxBlackSleeves)
generatedSilverSleeves = 0
maxSilverSleeves = numberOfFiles * 0.2
print("maxSilverSleeves")
print(maxSilverSleeves)
generatedGoldSleeves = 0
maxGoldSleeves = numberOfFiles * 0.1
print("maxGoldSleeves")
print(maxGoldSleeves)
# Build server unrevealed metadata JSON
for x in range(numberOfFiles):
# Determine record sleeve
# TODO: Increment generated number and ensure that we haven't gone over the allowed amount of sleeves for this limit
thisSleeve = unrevealedSleeves[0] # default to the white sleeve
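    # Rejection sampling: keep redrawing until a sleeve with remaining quota is selected.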
while True:
# Generate a random number between 1 and 100
randomNumber = random.randint(1, 100)
# Find the associated sleeve to this percent value
for sleeve in unrevealedSleeves:
if randomNumber > sleeve["rangeStart"] and randomNumber < sleeve["rangeEnd"]:
thisSleeve = sleeve
if thisSleeve["name"] == "White":
if generatedWhiteSleeves < maxWhiteSleeves:
generatedWhiteSleeves += 1
break
else:
# Hit max number of sleeve, try once again
print("Hit max white sleeves, trying again")
continue
elif thisSleeve["name"] == "Black":
if generatedBlackSleeves < maxBlackSleeves:
generatedBlackSleeves += 1
break
else:
# Hit max number of sleeve, try once again
print("Hit max black sleeves, trying again")
continue
elif thisSleeve["name"] == "Silver":
if generatedSilverSleeves < maxSilverSleeves:
generatedSilverSleeves += 1
break
else:
# Hit max number of sleeve, try once again
print("Hit max silver sleeves, trying again")
continue
elif thisSleeve["name"] == "Gold":
if generatedGoldSleeves < maxGoldSleeves:
generatedGoldSleeves += 1
break
else:
# Hit max number of sleeve, try once again
print("Hit max gold sleeves, trying again")
continue
print("thisSleeve")
print(thisSleeve)
nftNumber = x + 1
niftyRecordNFTData = {
"id" : nftNumber,
"name" : "NiftyRecord #" + str(nftNumber),
"symbol": "NFRC",
"image": "https://assets.niftyrecordsnft.com/niftyrecords/" + str(nftNumber) + "/NiftyRecord-" + str(nftNumber) + ".png",
"description": "This is NiftyRecords #" + str(nftNumber) + "!",
"attributes": [
{
"trait_type": "Opened",
"value": "No"
},
{
"trait_type": "Record Sleeve",
"value": thisSleeve["name"]
}
],
"collection": {
"name": "NiftyRecords",
"family": "NiftyRecords"
},
"revealed": False,
"revealAfter": iso8601.parse_date(thisSleeve["revealDate"]).timestamp()
}
fileName = 'server/' + str(nftNumber) + "/NiftyRecord-" + str(nftNumber) + '.json'
os.makedirs(os.path.dirname(fileName), exist_ok=True)
# When generating server-JSON
with open(fileName, 'w') as f:
json.dump(niftyRecordNFTData, f)
#Copy in the respective sleeve image that corresponds to the choice
shutil.copyfile(thisSleeve["imagePath"], 'server/' + str(nftNumber) + '/NiftyRecord-' + str(nftNumber) + '.png')
| 2.75
| 3
|
src/image_tools/filter_ids.py
|
Tpool1/Cancer_ML
| 0
|
12775187
|
import numpy as np
def filter_ids(array, clinical_ids):
    # Collect the indices of rows whose trailing ID column is not in
    # clinical_ids, then drop those rows in a single pass.
    del_indices = []
    for i, img in enumerate(array):
        if img[-1] not in clinical_ids:
            del_indices.append(i)
    array = np.delete(array, del_indices, axis=0)
    return array
| 2.96875
| 3
|
recipes/Python/577491_Observer_Design_Pattern_pythgevent_coroutine/recipe-577491.py
|
tdiprima/code
| 2,023
|
12775188
|
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import gevent
from gevent.hub import getcurrent
from gevent.pool import Pool
import functools
def wrap(method, *args, **kargs):
if method is None:
return None
if args or kargs:
method = functools.partial(method, *args, **kargs)
def wrapper(*args, **kargs):
return method(*args, **kargs)
return wrapper
class FiredEvent(Exception):
pass
class Event(object):
def __init__(self,events,name,callback):
self.events = events
self.name = name.lower()
self.callback = callback
def unsubscribe(self):
if not self.events._events.has_key(self.name):
return False
        try:
            del self.events._events[self.name][self.events._events[self.name].index(self)]
        except (ValueError, KeyError):
            pass
return True
def cancel(self):
self.unsubscribe()
def run(self):
gevent.spawn(self.callback)
def __del__(self):
self.unsubscribe()
class Observer(object):
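    # Singleton: every Observer() call shares one instance and one _events registry.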
def __new__(cls,*args):
if not hasattr(cls,'_instance'):
cls._instance = object.__new__(cls)
cls._instance._events = {}
return cls._instance
    def subscribe(self,name,callback):
        name = name.lower()
        if not self._events.has_key(name):
            self._events[name] = []
        ev = Event(self,name,callback)
        self._events[name].append(ev)
        return ev
def fire(self,name):
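        # Drain all subscribers queued for this event, spawning each callback
        # in its own greenlet.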
        try:
            ev = self._events[name.lower()].pop(0)
        except (KeyError, IndexError):
            return False
        while ev:
            gevent.spawn(ev.run)
            try:
                ev = self._events[name.lower()].pop(0)
            except (KeyError, IndexError):
                break
return True
    def wait(self,name):
        name = name.lower()
        if not self._events.has_key(name):
            self._events[name] = []
        ev = Event(self,name,wrap(getcurrent().throw,FiredEvent))
        self._events[name].append(ev)
        return ev
if __name__ == '__main__':
# Testing
def in_another_greenlet():
print '001',getcurrent()
def test_subscribe():
e = Observer()
print '000',getcurrent()
getcurrent().in_another_greenlet = in_another_greenlet
b = e.subscribe('kill',getcurrent().in_another_greenlet)
gevent.sleep(5)
print 'END'
b.unsubscribe()
def test_wait():
e = Observer()
ev = e.wait('kill')
try:
gevent.sleep(3)
except FiredEvent:
print 'Fired!'
else:
print 'Not Fired!'
finally:
ev.cancel()
def fire_event():
e2 = Observer()
gevent.sleep(2)
e2.fire('kill')
p = Pool()
p.spawn(test_wait)
p.spawn(test_subscribe)
p.spawn(fire_event)
p.join()
| 2.6875
| 3
|
canopen/sdo/__init__.py
|
mlederhi/canopen
| 301
|
12775189
|
<gh_stars>100-1000
from .base import Variable, Record, Array
from .client import SdoClient
from .server import SdoServer
from .exceptions import SdoAbortedError, SdoCommunicationError
| 1.15625
| 1
|
playback/forms.py
|
Nierot/Spotify
| 0
|
12775190
|
<gh_stars>0
from django import forms
class UsernameForm(forms.Form):
username = forms.CharField(
label='Username',
max_length=50,
widget=forms.TextInput(attrs={'class': 'form-control'})
)
| 2.140625
| 2
|
application_form/migrations/0019_alter_field_apartments.py
|
frwickst/apartment-application-service
| 1
|
12775191
|
<gh_stars>1-10
# Generated by Django 2.2.21 on 2021-06-04 09:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("application_form", "0018_move_field_has_children"),
]
operations = [
migrations.AlterField(
model_name="application",
name="apartments",
field=models.ManyToManyField(
blank=True,
through="application_form.ApplicationApartment",
to="apartment.Apartment",
),
),
]
| 1.570313
| 2
|
rnacentral/rnacentral/utils/__init__.py
|
pythseq/rnacentral-webcode
| 21
|
12775192
|
<reponame>pythseq/rnacentral-webcode<gh_stars>10-100
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import socket
def get_environment():
"""
Detect host environment: HX, OY, PG or DEV.
"""
hostname = socket.gethostname()
match = re.search(r'ves-(\w+)-\w+\.ebi\.ac\.uk', hostname)
if match and match.group(1) in ['hx', 'pg', 'oy']:
env = match.group(1)
else:
env = 'dev'
return env.upper()
| 2.171875
| 2
|
examples/one_hot_encode.py
|
ppmdatix/rtdl
| 0
|
12775193
|
<filename>examples/one_hot_encode.py
def one_hot_encode(_df, _col):
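    # Adds one 0/1 indicator column per distinct value of _col; the original
    # column is kept alongside the new ones.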
    _values = sorted(set(_df[_col].values))  # sort for a deterministic column order
for v in _values:
_df[_col + str(v)] = _df[_col].apply(lambda x : float(x == v) )
return _df
| 2.875
| 3
|
PVPluginsHDF/PVGeo_HDF_All.py
|
OpenGeoVis/PVGeo-HDF5
| 9
|
12775194
|
<gh_stars>1-10
paraview_plugin_version = '0.1.0'
# This is module to import. It provides VTKPythonAlgorithmBase, the base class
# for all python-based vtkAlgorithm subclasses in VTK and decorators used to
# 'register' the algorithm with ParaView along with information about UI.
from paraview.util.vtkAlgorithm import *
import sys
sys.path.append('/Users/bane/Documents/OpenGeoVis/Software/PVGeo-HDF5')
from pvgeohdf.netcdf import *
# Helpers:
from PVGeo import _helpers
# Classes to Decorate
from PVGeo.grids import *
#### GLOBAL VARIABLES ####
MENU_CAT = 'PVGeo-HDF'
###############################################################################
SVC_DESC = "SVC Parcel Reader: Time varying point cloud"
@smproxy.reader(name="PVGeoHDFSVCParcelReader",
label="PVGeo: SVC Parcel Reader",
extensions=SVCParcelReader.extensions,
file_description=SVC_DESC)
class PVGeoHDFSVCParcelReader(SVCParcelReader):
def __init__(self):
SVCParcelReader.__init__(self)
#### Seters and Geters ####
@smproperty.xml(_helpers.get_file_reader_xml(SVCParcelReader.extensions, reader_description=SVC_DESC))
def add_file_name(self, fname):
SVCParcelReader.add_file_name(self, fname)
# @smproperty.doublevector(name="TimeDelta", default_values=1.0, panel_visibility="advanced")
# def SetTimeDelta(self, dt):
# SVCParcelReader.SetTimeDelta(self, dt)
@smproperty.doublevector(name="TimestepValues", information_only="1", si_class="vtkSITimeStepsProperty")
def get_time_step_values(self):
"""This is critical for registering the timesteps"""
return SVCParcelReader.get_time_step_values(self)
# This is an example of how to create a GUI input field
@smproperty.stringvector(name='DataName', default_values='Data')
def set_data_name(self, name):
SVCParcelReader.set_data_name(self, name)
###############################################################################
SVC_DESC = "CMAQ Reader: Time varying grid"
@smproxy.reader(name="PVGeoCMAQReader",
label="PVGeo: CMAQ Reader",
extensions=CMAQReader.extensions,
file_description=SVC_DESC)
class PVGeoCMAQReader(CMAQReader):
def __init__(self):
CMAQReader.__init__(self)
#### Seters and Geters ####
    @smproperty.xml(_helpers.get_file_reader_xml(CMAQReader.extensions, reader_description=CMAQ_DESC))
def add_file_name(self, fname):
CMAQReader.add_file_name(self, fname)
# @smproperty.doublevector(name="TimeDelta", default_values=1.0, panel_visibility="advanced")
# def set_time_delta(self, dt):
# CMAQReader.set_time_delta(self, dt)
@smproperty.doublevector(name="TimestepValues", information_only="1", si_class="vtkSITimeStepsProperty")
def get_time_step_values(self):
"""This is critical for registering the timesteps"""
return CMAQReader.get_time_step_values(self)
@smproperty.doublevector(name="Spacing", default_values=[1.0, 1.0, 1.0],)
def set_spacing(self, dx, dy, dz):
CMAQReader.set_spacing(self, dx, dy, dz)
@smproperty.doublevector(name="Origin", default_values=[0.0, 0.0, 0.0],)
def set_origin(self, ox, oy, oz):
CMAQReader.set_origin(self, ox, oy, oz)
| 1.953125
| 2
|
setup.py
|
pedrocunial/hello_aws
| 0
|
12775195
|
<reponame>pedrocunial/hello_aws
from setuptools import setup
setup(
name='pccli',
version='0.3',
py_modules=['pccli'],
install_requires=[
'Click',
'boto3',
'pathlib',
'awscli',
],
entry_points='''
[console_scripts]
pccli=pccli:cli
''',
)
| 1.484375
| 1
|
src/fonts/inter-ui/misc/pylib/fontbuild/setup.py
|
OpenBazaar/openbazaar-css
| 0
|
12775196
|
<gh_stars>0
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
ext_modules = [
Extension("decomposeGlyph", ["decomposeGlyph.pyx"]),
Extension("alignpoints", ["alignpoints.pyx"]),
Extension("Build", ["Build.pyx"]),
Extension("convertCurves", ["convertCurves.pyx"]),
Extension("mitreGlyph", ["mitreGlyph.pyx"]),
Extension("mix", ["mix.pyx"]),
Extension("italics", ["italics.pyx"]),
Extension("curveFitPen", ["curveFitPen.pyx"]),
]
setup(
name = 'copy',
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules
)
| 1.304688
| 1
|
src/astro/files/locations/amazon/s3.py
|
astro-projects/astro
| 71
|
12775197
|
import os
from typing import Dict, List, Tuple
from urllib.parse import urlparse, urlunparse
from astro.constants import FileLocation
from astro.files.locations.base import BaseFileLocation
from astro.utils.dependencies import s3
class S3Location(BaseFileLocation):
"""Handler S3 object store operations"""
location_type = FileLocation.S3
@staticmethod
def _parse_s3_env_var() -> Tuple[str, str]:
"""Return S3 ID/KEY pair from environment vars"""
return os.environ["AWS_ACCESS_KEY_ID"], os.environ["AWS_SECRET_ACCESS_KEY"]
@property
def transport_params(self) -> Dict:
"""Structure s3fs credentials from Airflow connection.
s3fs enables pandas to write to s3
"""
hook = s3.S3Hook(aws_conn_id=self.conn_id) if self.conn_id else s3.S3Hook()
session = hook.get_session()
return {"client": session.client("s3")}
@property
def paths(self) -> List[str]:
"""Resolve S3 file paths with prefix"""
url = urlparse(self.path)
bucket_name = url.netloc
prefix = url.path[1:]
hook = s3.S3Hook(aws_conn_id=self.conn_id) if self.conn_id else s3.S3Hook()
        keys = hook.list_keys(bucket_name=bucket_name, prefix=prefix)
        paths = [
            urlunparse((url.scheme, url.netloc, key, "", "", "")) for key in keys
        ]
return paths
@property
def size(self) -> int:
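        # Object size is not resolved for S3 here; -1 signals "unknown".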
return -1
| 2.4375
| 2
|
src/testing/TestON/bin/cli.py
|
securedataplane/preacher
| 1
|
12775198
|
<filename>src/testing/TestON/bin/cli.py
#!/usr/bin/env python
'''
Created on 20-Dec-2012
@author: <NAME> (<EMAIL>)
TestON is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
TestON is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with TestON. If not, see <http://www.gnu.org/licenses/>.
'''
"""
cli provides the CLI shell for the TestON framework.
It is a simple command-line interface: the console
makes it easy to launch a test. For example, the run command executes a test:
teston> run test DpctlTest
Several other useful commands are provided.
"""
from subprocess import call
from cmd import Cmd
from os import isatty
import sys
import re
import os
import time
import threading
import __builtin__
import pprint
dump = pprint.PrettyPrinter(indent=4)
__builtin__.testthread = False
introduction = "TestON is the testing framework \nDeveloped by Paxterra Solutions (www.paxterrasolutions.com)"
__builtin__.COLORS = False
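# Put the TestON root (the parent of bin/) on the import path.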
path = re.sub( "/bin$", "", sys.path[0] )
sys.path.insert( 1, path )
from core.teston import *
class CLI( threading.Thread,Cmd,object ):
"command-line interface to execute the test."
prompt = 'teston> '
def __init__( self, teston, stdin=sys.stdin ):
self.teston = teston
self._mainevent = threading.Event()
threading.Thread.__init__(self)
self.main_stop = False
self.locals = { 'test': teston }
self.stdin = stdin
Cmd.__init__( self )
self.pause = False
self.stop = False
__builtin__.cli = self
def emptyline( self ):
"Don't repeat last command when you hit return."
pass
helpStr = (
" teston help"
)
def do_help( self, line ):
"Describe available CLI commands."
Cmd.do_help( self, line )
        if line == '':
output( self.helpStr )
def do_run(self,args):
'''
run command will execute the test with following optional command line arguments
logdir <directory to store logs in>
testcases <list of testcases separated by comma or range of testcases separated by hypen>
mail <mail-id or list of mail-ids seperated by comma>
        example 1, to execute the examples specified in the ~/examples directory.
'''
try:
args = args.split()
options = {}
options = self.parseArgs(args,options)
options = dictToObj(options)
if not testthread:
test = TestThread(options)
test.start()
while test.isAlive():
test.join(1)
else:
                print main.TEST + " test execution paused, please resume it before executing another test"
        except (KeyboardInterrupt, SystemExit):
print "Interrupt called, Exiting."
test._Thread__stop()
main.cleanup()
main.exit()
def do_resume(self, line):
'''
resume command will continue the execution of paused test.
teston>resume
[2013-01-07 23:03:44.640723] [PoxTest] [STEP] 1.1: Checking the host reachability using pingHost
2013-01-07 23:03:44,858 - PoxTest - INFO - Expected Prompt Found
....
'''
if testthread:
testthread.play()
else :
print "There is no test to resume"
def do_nextstep(self,line):
'''
nextstep will execute the next-step of the paused test and
it will pause the test after finishing of step.
teston> nextstep
Will pause the test's execution, after completion of this step.....
teston> [2013-01-07 21:24:26.286601] [PoxTest] [STEP] 1.8: Checking the host reachability using pingHost
2013-01-07 21:24:26,455 - PoxTest - INFO - Expected Prompt Found
.....
teston>
'''
if testthread:
main.log.info("Executing the nextstep, Will pause test execution, after completion of the step")
testthread.play()
time.sleep(.1)
testthread.pause()
else:
print "There is no paused test "
def do_dumpvar(self,line):
'''
dumpvar will print all the test data in raw format.
        usage :
teston>dumpvar main
Here 'main' will be the test object.
teston>dumpvar params
here 'params' will be the parameters specified in the params file.
teston>dumpvar topology
here 'topology' will be topology specification of the test specified in topo file.
'''
if testthread:
if line == "main":
dump.pprint(vars(main))
else :
try :
dump.pprint(vars(main)[line])
except KeyError as e:
print e
else :
print "There is no paused test "
def do_currentcase(self,line):
'''
currentcase will return the current case in the test execution.
teston>currentcase
Currently executing test case is: 2
'''
if testthread:
print "Currently executing test case is: "+str(main.CurrentTestCaseNumber)
else :
print "There is no paused test "
def do_currentstep(self,line):
'''
currentstep will return the current step in the test execution.
teston>currentstep
Currently executing test step is: 2.3
'''
if testthread:
print "Currently executing test step is: "+str(main.CurrentTestCaseNumber)+'.'+str(main.stepCount)
else :
print "There is no paused test "
def do_stop(self,line):
'''
Will stop the paused test, if any !
'''
if testthread:
testthread.stop()
return 'exited by user command'
def do_gettest(self,line):
'''
gettest will return the test name which is under execution or recently executed.
Test under execution:
teston>gettest
Currently executing Test is: PoxTest
Test recently executed:
Recently executed test is: MininetTest
'''
try :
if testthread :
print "Currently executing Test is: "+main.TEST
else :
print "Recently executed test is: "+main.TEST
except NameError:
print "There is no previously executed Test"
def do_showlog(self,line):
'''
showlog will show the test's Log
teston>showlog
Last executed test's log is : //home/openflow/TestON/logs/PoxTest_07_Jan_2013_21_42_11/PoxTest_07_Jan_2013_21_42_11.log
.....
teston>showlog
Currently executing Test's log is: /home/openflow/TestON/logs/PoxTest_07_Jan_2013_21_46_58/PoxTest_07_Jan_2013_21_46_58.log
.....
'''
try :
if testthread :
print "Currently executing Test's log is: "+main.LogFileName
else :
print "Last executed test's log is : "+main.LogFileName
logFile = main.LogFileName
logFileHandler = open(logFile, 'r')
for msg in logFileHandler.readlines() :
print msg,
logFileHandler.close()
except NameError:
print "There is no previously executed Test"
def parseArgs(self,args,options):
'''
This will parse the command line arguments.
'''
options = self.initOptions(options)
try :
for index, option in enumerate(args):
if index > 0 :
if re.match("logdir|mail|example|testdir|testcases|onoscell", option, flags = 0):
index = index+1
options[option] = args[index]
options = self.testcasesInRange(index,option,args,options)
else :
options['testname'] = option
except IndexError as e:
print e
return options
def initOptions(self,options):
'''
This will initialize the commandline options.
'''
options['logdir'] = None
options['mail'] = None
options['example'] = None
options['testdir'] = None
options['testcases'] = None
options['onoscell'] = None
return options
def testcasesInRange(self,index,option,args,options):
'''
This method will handle testcases list,specified in range [1-10].
'''
if re.match("testcases",option,1):
testcases = []
args[index] = re.sub("\[|\]","",args[index],0)
m = re.match("(\d+)\-(\d+)",args[index],flags=0)
if m:
                start_case = int(m.group(1))
                end_case = int(m.group(2))
if (start_case <= end_case):
i = start_case
while i <= end_case:
testcases.append(i)
i= i+1
else :
print "Please specify testcases properly like 1-5"
else :
options[option] = args[index]
return options
options[option] = str(testcases)
return options
def cmdloop(self, intro=introduction):
        print intro
while True:
try:
super(CLI, self).cmdloop(intro="")
self.postloop()
except KeyboardInterrupt:
if testthread:
testthread.pause()
else:
print "KeyboardInterrupt, Exiting."
sys.exit()
def do_echo( self, line ):
'''
Echoing of given input.
'''
output(line)
def do_sh( self, line ):
'''
Run an external shell command
sh pwd
sh ifconfig etc.
'''
call( line, shell=True )
def do_py( self, line ):
'''
Evaluate a Python expression.
py main.log.info("Sample Log Information")
2013-01-07 12:07:26,804 - PoxTest - INFO - Sample Log Information
'''
try:
exec( line )
except Exception as e:
output( str( e ) + '\n' )
def do_interpret(self,line):
'''
interpret will translate the single line openspeak statement to equivalent python script.
teston> interpret ASSERT result EQUALS main.TRUE ONPASS "Ping executed successfully" ONFAIL "Ping failed"
utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Ping executed successfully",onfail="Ping failed")
'''
from core import openspeak
ospk = openspeak.OpenSpeak()
try :
translated_code = ospk.interpret(text=line)
print translated_code
except AttributeError as e:
print 'Dynamic params are not allowed in single statement translations'
def do_do (self,line):
'''
Do will translate and execute the openspeak statement for the paused test.
do <OpenSpeak statement>
'''
if testthread:
from core import openspeak
ospk = openspeak.OpenSpeak()
try :
translated_code = ospk.interpret(text=line)
eval(translated_code)
except ( AttributeError, SyntaxError ) as e:
print 'Dynamic params are not allowed in single statement translations:'
print e
else :
print "Do will translate and execute the openspeak statement for the paused test.\nPlease use interpret to translate the OpenSpeak statement."
def do_compile(self,line):
'''
compile will translate the openspeak (.ospk) file into TestON test script (python).
It will receive the openspeak file path as input and will generate
equivalent test-script file in the same directory.
usage:
-----
teston>compile /home/openflow/TestON/PoxTest.ospk
Auto-generated test-script file is /home/openflow/TestON/PoxTest.py
'''
from core import openspeak
openspeak = openspeak.OpenSpeak()
openspeakfile = line
if os.path.exists(openspeakfile) :
openspeak.compiler(openspeakfile=openspeakfile,writetofile=1)
print "Auto-generated test-script file is "+ re.sub("ospk","py",openspeakfile,0)
else:
print 'There is no such file : '+line
def do_exit( self, _line ):
"Exit"
if testthread:
testthread.stop()
sys.exit()
return 'exited by user command'
def do_quit( self, line ):
"Exit"
return self.do_exit( line )
def do_EOF( self, line ):
"Exit"
output( '\n' )
return self.do_exit( line )
def isatty( self ):
"Is our standard input a tty?"
return isatty( self.stdin.fileno() )
def do_source( self, line ):
'''
Read shell commands from an input file and execute them sequentially.
cmdsource.txt :
"pwd
ls "
teston>source /home/openflow/cmdsource.txt
/home/openflow/TestON/bin/
cli.py __init__.py
'''
args = line.split()
if len(args) != 1:
error( 'usage: source <file>\n' )
return
try:
self.inputFile = open( args[ 0 ] )
while True:
line = self.inputFile.readline()
if len( line ) > 0:
call( line, shell=True )
else:
break
except IOError:
error( 'error reading file %s\n' % args[ 0 ] )
def do_updatedriver(self,line):
'''
updatedriver will update the given driver name which exists into mentioned config file.
It will receive two optional arguments :
1. Config File Path
2. Drivers List to be updated.
Default : config file = "~/TestON/config/updatedriver" ,
Driver List = all drivers specified in config file .
'''
args = line.split()
config = ''
drivers = ''
try :
for index, option in enumerate(args):
if option == 'config':
index = index + 1
config = args[index]
elif option == 'drivers' :
index = index + 1
drivers = args[index]
except IndexError:
pass
import updatedriver
converter = updatedriver.UpdateDriver()
if config == '':
location = os.path.abspath( os.path.dirname( __file__ ) )
path = re.sub( "(bin)$", "", location )
config = path + "/config/updatedriver.cfg"
configDict = converter.configparser(config)
        else :
            configDict = converter.configparser(config)
converter.writeDriver(drivers)
def do_time( self, line ):
"Measure time taken for any command in TestON."
start = time.time()
self.onecmd(line)
elapsed = time.time() - start
self.stdout.write("*** Elapsed time: %0.6f secs\n" % elapsed)
def default( self, line ):
"""Called on an input line when the command prefix is not recognized."""
first, args, line = self.parseline( line )
if not args:
return
if args and len(args) > 0 and args[ -1 ] == '\n':
args = args[ :-1 ]
rest = args.split( ' ' )
error( '*** Unknown command: %s\n' % first )
class TestThread(threading.Thread):
'''
TestThread class will handle the test execution and will communicate with the thread in the do_run.
'''
def __init__(self,options):
self._stopevent = threading.Event()
threading.Thread.__init__(self)
self.is_stop = False
self.options = options
__builtin__.testthread = self
def run(self):
'''
Will execute the test.
'''
while not self.is_stop :
if not self._stopevent.isSet():
self.test_on = TestON(self.options)
try :
if self.test_on.init_result:
result = self.test_on.run()
if not self.is_stop :
result = self.test_on.cleanup()
self.is_stop = True
except KeyboardInterrupt:
print "Recevied Interrupt, cleaning-up the logs and drivers before exiting"
result = self.test_on.cleanup()
self.is_stop = True
__builtin__.testthread = False
def pause(self):
'''
Will pause the test.
'''
if not cli.pause:
print "Will pause the test's execution, after completion of this step.....\n\n\n\n"
cli.pause = True
self._stopevent.set()
elif cli.pause and self.is_stop:
print "KeyboardInterrupt, Exiting."
self.test_on.exit()
else:
print "Recevied Interrupt, cleaning-up the logs and drivers before exiting"
result = self.test_on.cleanup()
self.is_stop = True
def play(self):
'''
Will resume the paused test.
'''
self._stopevent.clear()
cli.pause = False
def stop(self):
'''
Will stop the test execution.
'''
print "Stopping the test"
self.is_stop = True
cli.stop = True
__builtin__.testthread = False
def output(msg):
'''
Simply, print the message in console
'''
print msg
def error(msg):
'''
print the error message.
'''
print msg
def dictToObj(dictionary):
'''
    Convert a dictionary (recursively) into an object, so that
    options can be handed to the test in attribute form.
'''
if isinstance(dictionary, list):
dictionary = [dictToObj(x) for x in dictionary]
if not isinstance(dictionary, dict):
return dictionary
class Convert(object):
pass
obj = Convert()
for k in dictionary:
obj.__dict__[k] = dictToObj(dictionary[k])
return obj
if __name__ == '__main__':
if len(sys.argv) > 1:
__builtin__.COLORS = True
CLI("test").onecmd(' '.join(sys.argv[1:]))
else:
__builtin__.COLORS = False
CLI("test").cmdloop()
| 2.03125
| 2
|
11/src/11.3.7.py
|
XXG-Lab/Dragon
| 4
|
12775199
|
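# Build the same set of (i, j) pairs via two different parametrizations
# and check that both enumerations agree.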
ps = []
for i in xrange(1, 30):
for j in xrange(i + 2, 40 - i):
ps.append((i, j))
fps = []
for k in xrange(2, 37 + 1):
for j in xrange(k + 1, min([k + 29, (k + 39) // 2]) + 1):
fps.append((j - k, j))
print len(ps), len(fps)
print sorted(ps) == sorted(fps)
| 2.515625
| 3
|
smart_contract/hello_compiler.py
|
Topstack-defi/oracle-neo-futures
| 6
|
12775200
|
<gh_stars>1-10
from boa.compiler import Compiler
Compiler.load_and_save('neo_futures.py')
#Compiler.load_and_save('oracle_lite.py')
| 1.296875
| 1
|
datasets/tools/rosbag_to_h5.py
|
adarshkosta/ssl_e2vid
| 24
|
12775201
|
<filename>datasets/tools/rosbag_to_h5.py
"""
Adapted from Event-driven Perception for Robotics https://github.com/event-driven-robotics/importRosbag
"""
from struct import unpack
from struct import error as structError
from tqdm import tqdm
import glob
import argparse
import os
import h5py
import numpy as np
from h5_packager import H5Packager
from messageTypes.common import unpack_header
from messageTypes.dvs_msgs_EventArray import (
importTopic as import_dvs_msgs_EventArray,
)
from messageTypes.esim_msgs_OpticFlow import (
importTopic as import_esim_msgs_OpticFlow,
)
from messageTypes.geometry_msgs_PoseStamped import (
importTopic as import_geometry_msgs_PoseStamped,
)
from messageTypes.geometry_msgs_Transform import (
importTopic as import_geometry_msgs_Transform,
)
from messageTypes.geometry_msgs_TransformStamped import (
importTopic as import_geometry_msgs_TransformStamped,
)
from messageTypes.geometry_msgs_TwistStamped import (
importTopic as import_geometry_msgs_TwistStamped,
)
from messageTypes.sensor_msgs_CameraInfo import (
importTopic as import_sensor_msgs_CameraInfo,
)
from messageTypes.sensor_msgs_Image import (
importTopic as import_sensor_msgs_Image,
)
from messageTypes.sensor_msgs_Imu import importTopic as import_sensor_msgs_Imu
from messageTypes.sensor_msgs_PointCloud2 import (
importTopic as import_sensor_msgs_PointCloud2,
)
from messageTypes.tf_tfMessage import importTopic as import_tf_tfMessage
def import_topic(topic, **kwargs):
msgs = topic["msgs"]
topic_type = topic["type"].replace("/", "_")
if topic_type == "dvs_msgs_EventArray":
topic_dict = import_dvs_msgs_EventArray(msgs, **kwargs)
elif topic_type == "esim_msgs_OpticFlow":
topic_dict = import_esim_msgs_OpticFlow(msgs, **kwargs)
elif topic_type == "geometry_msgs_PoseStamped":
topic_dict = import_geometry_msgs_PoseStamped(msgs, **kwargs)
elif topic_type == "geometry_msgs_Transform":
topic_dict = import_geometry_msgs_Transform(msgs, **kwargs)
elif topic_type == "geometry_msgs_TransformStamped":
topic_dict = import_geometry_msgs_TransformStamped(msgs, **kwargs)
elif topic_type == "geometry_msgs_TwistStamped":
topic_dict = import_geometry_msgs_TwistStamped(msgs, **kwargs)
elif topic_type == "sensor_msgs_CameraInfo":
topic_dict = import_sensor_msgs_CameraInfo(msgs, **kwargs)
elif topic_type == "sensor_msgs_Image":
topic_dict = import_sensor_msgs_Image(msgs, **kwargs)
elif topic_type == "sensor_msgs_Imu":
topic_dict = import_sensor_msgs_Imu(msgs, **kwargs)
elif topic_type == "sensor_msgs_PointCloud2":
topic_dict = import_sensor_msgs_PointCloud2(msgs, **kwargs)
elif topic_type == "tf_tfMessage":
topic_dict = import_tf_tfMessage(msgs, **kwargs)
else:
return None
if topic_dict:
topic_dict["rosbagType"] = topic["type"]
return topic_dict
def read_file(filename):
print("Attempting to import " + filename + " as a rosbag 2.0 file.")
with open(filename, "rb") as file:
# File format string
file_format = file.readline().decode("utf-8")
print("ROSBAG file format: " + file_format)
if file_format != "#ROSBAG V2.0\n":
print("This file format might not be supported")
eof = False
conns = []
chunks = []
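        # A bag is a sequence of records: a 4-byte little-endian header length,
        # the header fields, a 4-byte data length, then the record data.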
while not eof:
# Read a record header
try:
header_len = unpack("=l", file.read(4))[0]
except structError:
if len(file.read(1)) == 0: # Distinguish EOF from other struct errors
# a struct error could also occur if the data is downloaded by one os and read by another.
eof = True
continue
# unpack the header into fields
header_bytes = file.read(header_len)
fields = unpack_header(header_len, header_bytes)
# Read the record data
data_len = unpack("=l", file.read(4))[0]
data = file.read(data_len)
# The op code tells us what to do with the record
op = unpack("=b", fields["op"])[0]
fields["op"] = op
if op == 2:
# It's a message
# AFAIK these are not found unpacked in the file
# fields['data'] = data
# msgs.append(fields)
pass
elif op == 3:
# It's a bag header - use this to do progress bar for the read
chunk_count = unpack("=l", fields["chunk_count"])[0]
pbar = tqdm(total=chunk_count, position=0, leave=True)
elif op == 4:
# It's an index - this is used to index the previous chunk
conn = unpack("=l", fields["conn"])[0]
count = unpack("=l", fields["count"])[0]
for idx in range(count):
time, offset = unpack("=ql", data[idx * 12 : idx * 12 + 12])
chunks[-1]["ids"].append((conn, time, offset))
elif op == 5:
# It's a chunk
fields["data"] = data
fields["ids"] = []
chunks.append(fields)
pbar.update(len(chunks))
elif op == 6:
# It's a chunk-info - seems to be redundant
pass
elif op == 7:
# It's a conn
# interpret data as a string containing the connection header
conn_fields = unpack_header(data_len, data)
conn_fields.update(fields)
conn_fields["conn"] = unpack("=l", conn_fields["conn"])[0]
conn_fields["topic"] = conn_fields["topic"].decode("utf-8")
conn_fields["type"] = conn_fields["type"].decode("utf-8").replace("/", "_")
conns.append(conn_fields)
return conns, chunks
def break_chunks_into_msgs(chunks):
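    # Each chunk carries (conn, time, offset) ids; use the offsets to slice
    # the individual message records back out of the chunk data.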
msgs = []
for chunk in tqdm(chunks, position=0, leave=True):
for idx in chunk["ids"]:
ptr = idx[2]
header_len = unpack("=l", chunk["data"][ptr : ptr + 4])[0]
ptr += 4
# unpack the header into fields
header_bytes = chunk["data"][ptr : ptr + header_len]
ptr += header_len
fields = unpack_header(header_len, header_bytes)
# Read the record data
data_len = unpack("=l", chunk["data"][ptr : ptr + 4])[0]
ptr += 4
fields["data"] = chunk["data"][ptr : ptr + data_len]
fields["conn"] = unpack("=l", fields["conn"])[0]
msgs.append(fields)
return msgs
def rekey_conns_by_topic(conn_dict):
topics = {}
for conn in conn_dict:
topics[conn_dict[conn]["topic"]] = conn_dict[conn]
return topics
def import_rosbag(filename, **kwargs):
print("Importing file: ", filename)
conns, chunks = read_file(filename)
# Restructure conns as a dictionary keyed by conn number
conn_dict = {}
for conn in conns:
conn_dict[conn["conn"]] = conn
conn["msgs"] = []
if kwargs.get("listTopics", False):
topics = rekey_conns_by_topic(conn_dict)
print("Topics in the file are (with types):")
for topicKey, topic in topics.items():
del topic["conn"]
del topic["md5sum"]
del topic["msgs"]
del topic["op"]
del topic["topic"]
topic["message_definition"] = topic["message_definition"].decode("utf-8")
print(" " + topicKey + " --- " + topic["type"])
return topics
msgs = break_chunks_into_msgs(chunks)
for msg in msgs:
conn_dict[msg["conn"]]["msgs"].append(msg)
topics = rekey_conns_by_topic(conn_dict)
imported_topics = {}
import_topics = kwargs.get("import_topics")
import_types = kwargs.get("import_types")
if import_topics is not None:
for topic_to_import in import_topics:
for topic_in_file in topics.keys():
if topic_in_file == topic_to_import:
imported_topic = import_topic(topics[topic_in_file], **kwargs)
if imported_topic is not None:
imported_topics[topic_to_import] = imported_topic
del topics[topic_in_file]
elif import_types is not None:
for type_to_import in import_types:
type_to_import = type_to_import.replace("/", "_")
for topic_in_file in list(topics.keys()):
if topics[topic_in_file]["type"].replace("/", "_") == type_to_import:
imported_topic = import_topic(topics[topic_in_file], **kwargs)
if imported_topic is not None:
imported_topics[topic_in_file] = imported_topic
del topics[topic_in_file]
else: # import everything
for topic_in_file in list(topics.keys()):
imported_topic = import_topic(topics[topic_in_file], **kwargs)
if imported_topic is not None:
imported_topics[topic_in_file] = imported_topic
del topics[topic_in_file]
print()
if imported_topics:
print("Topics imported are:")
for topic in imported_topics.keys():
print(topic + " --- " + imported_topics[topic]["rosbagType"])
# del imported_topics[topic]['rosbagType']
print()
if topics:
print("Topics not imported are:")
for topic in topics.keys():
print(topic + " --- " + topics[topic]["type"])
print()
return imported_topics
def extract_rosbag(
rosbag_path,
output_path,
event_topic,
image_topic=None,
start_time=None,
end_time=None,
packager=H5Packager,
):
ep = packager(output_path)
t0 = -1
sensor_size = None
if not os.path.exists(rosbag_path):
print("{} does not exist!".format(rosbag_path))
return
# import rosbag
bag = import_rosbag(rosbag_path)
max_events = 10000000
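    # Buffer events in memory and flush them to the HDF5 file every
    # max_events events to bound memory use.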
xs, ys, ts, ps = [], [], [], []
num_pos, num_neg, last_ts, img_cnt = 0, 0, 0, 0
# event topic
print("Processing events...")
for i in range(0, len(bag[event_topic]["ts"])):
timestamp = bag[event_topic]["ts"][i]
if i == 0:
t0 = timestamp
last_ts = timestamp
xs.append(bag[event_topic]["x"][i])
ys.append(bag[event_topic]["y"][i])
ts.append(timestamp)
ps.append(1 if bag[event_topic]["pol"][i] else 0)
if len(xs) == max_events:
ep.package_events(xs, ys, ts, ps)
del xs[:]
del ys[:]
del ts[:]
del ps[:]
print(timestamp - t0)
if bag[event_topic]["pol"][i]:
num_pos += 1
else:
num_neg += 1
last_ts = timestamp
if sensor_size is None:
sensor_size = [max(xs) + 1, max(ys) + 1]
print("Sensor size inferred from events as {}".format(sensor_size))
ep.package_events(xs, ys, ts, ps)
del xs[:]
del ys[:]
del ts[:]
del ps[:]
# image topic
if image_topic is not None:
print("Processing images...")
for i in range(0, len(bag[image_topic]["ts"])):
timestamp = bag[image_topic]["ts"][i]
t0 = timestamp if timestamp < t0 else t0
last_ts = timestamp if timestamp > last_ts else last_ts
image = bag[image_topic]["frames"][i]
ep.package_image(image, timestamp, img_cnt)
sensor_size = image.shape
img_cnt += 1
ep.add_metadata(
num_pos,
num_neg,
last_ts - t0,
t0,
last_ts,
img_cnt,
sensor_size,
)
def extract_rosbags(rosbag_paths, output_dir, event_topic, image_topic):
for path in rosbag_paths:
bagname = os.path.splitext(os.path.basename(path))[0]
out_path = os.path.join(output_dir, "{}.h5".format(bagname))
print("Extracting {} to {}".format(path, out_path))
extract_rosbag(path, out_path, event_topic, image_topic=image_topic)
if __name__ == "__main__":
"""
Tool for converting rosbag events to an efficient HDF5 format that can be speedily
accessed by python code.
"""
parser = argparse.ArgumentParser()
parser.add_argument("path", help="ROS bag file to extract or directory containing bags")
parser.add_argument(
"--output_dir",
default="/tmp/extracted_data",
help="Folder where to extract the data",
)
parser.add_argument("--event_topic", default="/dvs/events", help="Event topic")
parser.add_argument(
"--image_topic",
default=None,
help="Image topic (if left empty, no images will be collected)",
)
args = parser.parse_args()
print("Data will be extracted in folder: {}".format(args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
if os.path.isdir(args.path):
rosbag_paths = sorted(glob.glob(os.path.join(args.path, "*.bag")))
else:
rosbag_paths = [args.path]
extract_rosbags(rosbag_paths, args.output_dir, args.event_topic, args.image_topic)
| 1.976563
| 2
|
demurage/models.py
|
getmobilehq/mykontainer-backend
| 0
|
12775202
|
import uuid
from django.db import models
from django.forms import model_to_dict
# Create your models here.
class DemurageSize(models.Model):
SIZES = (('Dry 20 ft', 'Dry 20 ft'),
('Reefer 20 ft', 'Reefer 20 ft'),
('Special 20 ft', 'Special 20 ft'),
('Dry 40 ft', 'Dry 40 ft'),
('Reefer 40 ft', 'Reefer 40 ft'),
("Special 40 ft", "Special 40 ft"),
("Dry 45 ft", "Dry 45 ft"),
("Reefer 45 ft", "Reefer 45 ft"))
id = models.UUIDField(primary_key=True, unique=True, editable=False, default=uuid.uuid4)
size = models.CharField(max_length=255, unique=True, choices=SIZES)
free_days = models.IntegerField(default=0)
is_active = models.BooleanField(default=True)
date_created = models.DateTimeField(auto_now_add=True)
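    # Soft delete: flag the size and its price ranges inactive instead of
    # deleting rows.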
    def delete(self, using=None, keep_parents=False):
self.is_active=False
self.ranges.update(is_active=False)
self.save()
def __str__(self):
return self.size
class Demurage(models.Model):
id = models.UUIDField(primary_key=True, unique=True, editable=False, default=uuid.uuid4)
shipping_company = models.ForeignKey("main.ShippingCompany", on_delete=models.CASCADE, related_name="demurages")
start_day = models.IntegerField()
end_day = models.IntegerField()
price_per_day = models.FloatField()
size = models.ForeignKey("demurage.DemurageSize", on_delete=models.CASCADE, related_name="ranges", null=True)
demurage_type = models.CharField(max_length=250, blank=True, null=True, choices=(("import", "Import"),
("export","Export")))
is_active = models.BooleanField(default=True)
date_created = models.DateTimeField(auto_now_add=True)
    def delete(self, using=None, keep_parents=False):
self.is_active=False
self.save()
@property
def shipping_company_detail(self):
return model_to_dict(self.shipping_company, exclude=["date_added","is_active"])
@property
def size_detail(self):
return model_to_dict(self.size, exclude=["date_added","is_active"])
| 2.484375
| 2
|
Lab3/lab3.py
|
JackShen1/RCS
| 0
|
12775203
|
<reponame>JackShen1/RCS
from math import log, factorial
from Lab2.lab2 import P_system, probs, working_states, get_probs
TIME = 2501
K = 3
def find_t_system(prob: float) -> float:
return (-1 * TIME) / log(prob)
Q_system = 1 - P_system
T_system = find_t_system(prob=P_system)
print(f"\033[1mЙмовірність безвідмовної роботи на час {TIME} годин:\033[0m {P_system:.6f}\n"
f"\033[1mЙмовірність відмови на час {TIME} годин:\033[0m {Q_system:.6f}\n"
f"\033[1mСередній наробіток до відмови системи без резервування:\033[0m {T_system:.2f} годин\n")
Q_res_system = Q_system / factorial(K + 1)
print(f"\033[1mЙмовірність відмови на час {TIME} годин системи з загальним ненавантаженим "
f"резервуванням з кратністю {K}:\033[0m {Q_res_system:.6f}")
P_res_system = 1 - Q_res_system
print(f"\033[1mЙмовірність безвідмовної роботи на час {TIME} годин системи з загальним ненавантаженим "
f"резервуванням:\033[0m {P_res_system:.6f}")
T_res_system = find_t_system(prob=P_res_system)
print(f"\033[1mСередній наробіток до відмови системи з загальним ненавантаженим "
f"резервуванням:\033[0m {T_res_system:.2f}\n")
_system = (Q_system, P_system, T_system)
_res_system = (Q_res_system, P_res_system, T_res_system)
G_sys = [_res_system[i] / _system[i] for i, _ in enumerate(_system)]
print(f"\033[1mВиграш надійності протягом часу {TIME} годин за ймовірністю відмов:\033[0m {G_sys[0]:.2f}\n"
f"\033[1mВиграш надійності протягом часу {TIME} годин за ймовірністю безвідмовної роботи:\033[0m {G_sys[1]:.2f}\n"
f"\033[1mВиграш надійності за середнім часом безвідмовної роботи:\033[0m {G_sys[2]:.2f}\n")
Q_t = [pow(1 - p, K + 1) for p in probs]
P_t = [1 - q for q in Q_t]
print(f"\033[1mЙмовірність відмови та безвідмовної роботи кожного елемента системи при його "
f"навантаженому резервуванні з кратністю {K}:\033[0m")
print('-' * 51)
for i, elems in enumerate(zip(Q_t[1:-1], P_t[1:-1])):
print("|" + f"Q_reserved{i+1} = {elems[0]:.5f}".center(24) +
"|" + f"P_reserved{i+1} = {elems[1]:.5f}".center(24) + "|")
print('-' * 51)
probabilities = get_probs(all_working_states=working_states, probs=P_t)
P_res_system2 = sum(probabilities)
Q_res_system2 = 1 - P_res_system2
T_res_system2 = find_t_system(prob=P_res_system2)
print(f"\033[1mЙмовірність безвідмовної роботи системи в цілому:\033[0m {P_res_system2:.6f}\n"
f"\033[1mЙмовірність відмови системи в цілому\033[0m {Q_res_system2:.6f}\n"
f"\033[1mСередній наробіток системи в цілому:\033[0m {T_res_system2:.2f} годин\n")
_res_system2 = (Q_res_system2, P_res_system2, T_res_system2)
G_sys2 = [_res_system2[i] / _system[i] for i, _ in enumerate(_system)]
print(f"\033[1mВиграш надійності за ймовірністю відмов:\033[0m {G_sys2[0]:.2f}\n"
f"\033[1mВиграш надійності за ймовірністю безвідмовної роботи:\033[0m {G_sys2[1]:.2f}\n"
f"\033[1mВиграш надійності за середнім часом безвідмовної роботи:\033[0m {G_sys2[2]:.2f}")
| 2.671875
| 3
|
tests/test_cdutil_selectRegion.py
|
CDAT/cdutil
| 0
|
12775204
|
import unittest
import cdat_info
import cdms2
import cdutil
import numpy
class CDUTIL(unittest.TestCase):
def testRegions(self):
regionNA = cdutil.region.domain(latitude=(-50.,50.,'ccb'))
f=cdms2.open(cdat_info.get_sampledata_path()+'/clt.nc')
d=f('u', regionNA)
        # --------------------------------------------------------
        # make sure the warning has been displayed for the 3rd arg
        # --------------------------------------------------------
bounds = d.getLatitude().getBounds()
self.assertTrue(numpy.allclose(bounds[0], numpy.array([-50., -49.19124603])))
self.assertTrue(numpy.allclose(bounds[-1], numpy.array([49.19124603, 50.])))
if __name__ == "__main__":
unittest.main()
| 2.109375
| 2
|
leetcode/1588 Sum of All Odd Length Subarrays.py
|
jaredliw/python-question-bank
| 1
|
12775205
|
class Solution(object):
def sumOddLengthSubarrays(self, arr):
"""
:type arr: List[int]
:rtype: int
"""
# Runtime: 32 ms
# Memory: 13.4 MB
prefix_sum = []
last_sum = 0
for item in arr:
last_sum += item
prefix_sum.append(last_sum)
        total = sum(arr)  # Sum over all subarrays of length 1
for subarr_size in range(3, len(arr) + 1, 2):
for ptr in range(len(arr) - subarr_size + 1):
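                # Window sum of arr[ptr : ptr + subarr_size] via prefix sums:
                # prefix_sum[ptr + subarr_size - 1] - prefix_sum[ptr - 1]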
total += prefix_sum[ptr + subarr_size - 1]
if ptr != 0:
total -= prefix_sum[ptr - 1]
return total
| 3.203125
| 3
|
assemblyline/odm/models/submission_tree.py
|
malvidin/assemblyline-base
| 39
|
12775206
|
from assemblyline import odm
from assemblyline.common import forge
Classification = forge.get_classification()
@odm.model(index=True, store=False)
class SubmissionTree(odm.Model):
classification = odm.Classification(default=Classification.UNRESTRICTED) # Classification of the cache
filtered = odm.Boolean(default=False) # Has this cache entry been filtered
expiry_ts = odm.Date() # Expiry date
tree = odm.Text(index=False) # Tree cache
| 1.953125
| 2
|
apps/training.py
|
valentingol/transformers_tf
| 2
|
12775207
|
import time
import tensorflow as tf
from datasets.scripts.fra_eng import datasets_fra_eng
from transformer.text.tokenizer import TokenizerBert
from transformer.architecture.transfo import TransformerNLP
from transformer.train.metrics import MaskedAccuracy
from transformer.train.metrics import MaskedSparseCategoricalCrossentropy
from transformer.train.optimizer import ScheduleLR
if __name__ == '__main__':
    # get dataset for French-to-English translation
_, _, full_dataset = datasets_fra_eng()
full_dataset = full_dataset.shuffle(buffer_size=len(full_dataset))
len_ds = len(full_dataset)
# build tokenizer
fr_dataset = full_dataset.map(lambda fr, _: fr)
en_dataset = full_dataset.map(lambda _, en: en)
fr_tokenizer = TokenizerBert(lower_case=True)
en_tokenizer = TokenizerBert(lower_case=True)
fr_tokenizer.build_tokenizer(fr_dataset)
en_tokenizer.build_tokenizer(en_dataset)
# prepare dataset
full_dataset = full_dataset.cache()
full_dataset = full_dataset.batch(32)
full_dataset = full_dataset.prefetch(2)
# create transformer
in_vocab_size = len(fr_tokenizer.vocab)
out_vocab_size = len(en_tokenizer.vocab)
transfo = TransformerNLP(n_layers=12, d_model=768, n_heads=12, d_ff=1072,
dropout=0.1, in_vocab_size=in_vocab_size,
out_vocab_size=out_vocab_size,
max_seq_len=40)
# training set-up
schedule_lr = ScheduleLR(d_model=transfo.d_model)
opt = tf.keras.optimizers.Adam(schedule_lr, beta_1=0.9, beta_2=0.98,
epsilon=1e-9)
loss_function = MaskedSparseCategoricalCrossentropy()
acc_function = MaskedAccuracy()
# training function
input_signature = [tf.TensorSpec(shape=(None, None), dtype=tf.int64)] * 3
@tf.function(input_signature=input_signature)
def train(fr_tokens, en_tokens, labels):
with tf.GradientTape() as tape:
proba, _ = transfo(fr_tokens, en_tokens, training=True)
            # eventually cut to the maximum length to match the proba shape
labels = labels[..., :tf.shape(proba)[-2]]
loss = loss_function(labels, proba)
grads = tape.gradient(loss, transfo.trainable_variables)
opt.apply_gradients(zip(grads, transfo.trainable_variables))
acc = acc_function(labels, proba)
return loss, acc
# training loop
mean_loss = tf.keras.metrics.Mean()
mean_acc = tf.keras.metrics.Mean()
for i, (fr_txt, en_txt) in enumerate(full_dataset):
fr_tokens, en_tokens = fr_tokenizer(fr_txt), en_tokenizer(en_txt)
labels = en_tokens[:, 1:]
en_tokens = en_tokens[:, :-1]
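        # Teacher forcing: the decoder input is the target shifted right by
        # one token, and the labels are the target shifted left by one.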
loss, acc = train(fr_tokens, en_tokens, labels)
loss, acc = mean_loss(loss), mean_acc(acc)
if i == 0: start = time.time()
if i % 100 == 0 and i > 0:
current_time_epoch = time.time() - start
time_epoch = current_time_epoch * len_ds / (i+1)
remaining_time = time_epoch - current_time_epoch
print('batch', i, '/', len_ds)
print(f'loss = {loss.numpy():.3f}, acc = {acc.numpy():.3f}')
print(f'estimated remaining time: {int(remaining_time // 60)}min '
f'{remaining_time % 60:.1f}sec')
mean_loss.reset_state(), mean_acc.reset_state()
| 2.03125
| 2
|
mlab-ns-simulator/mlabsim/tests/test_update.py
|
hellais/ooni-support
| 5
|
12775208
|
<gh_stars>1-10
import json
from twisted.trial import unittest
from twisted.web import server
from mock import MagicMock, call
from mlabsim import update
ExampleLookup = """
Example response of production: http://mlab-ns.appspot.com/npad?format=json
{'city': 'Mountain View',
'country': 'US',
'fqdn': 'npad.iupui.mlab3.nuq02.measurement-lab.org',
'ip': ['172.16.17.32', 'fc00:db20:35b:7399::5'],
'port': '8001',
'site': 'nuq02',
'url': 'http://npad.iupui.mlab3.nuq02.measurement-lab.org:8000'}
"""
class UpdateResourceTests (unittest.TestCase):
def setUp(self):
# Test data:
self.fqdn = 'mlab01.ooni-tests.not-real.except-it-actually-could-be.example.com'
self.expectedentry = {
'fqdn': self.fqdn,
'city': 'Somewheresville',
'country': 'US',
'ip': ['127.2.3.4', '::1'],
'port': 8421,
'site': 'mlab01',
'tool_extra': {
'collector_onion': 'testfakenotreal.onion',
},
}
self.entryjson = json.dumps(self.expectedentry, indent=2, sort_keys=True)
self.m_db = MagicMock()
self.m_request = MagicMock()
self.ur = update.UpdateResource(self.m_db)
def test_render_PUT_valid_parameters(self):
self.m_request.content.read.return_value = self.entryjson
# Execute the code under test:
retval = self.ur.render_PUT(self.m_request)
# Verifications:
self.assertEqual(server.NOT_DONE_YET, retval)
# Verify that m_db now stores the expected entry:
self.assertEqual(
self.m_db.mock_calls,
[call.__setitem__(self.fqdn, self.expectedentry)])
# Verify that a 200 response was sent:
self.assertEqual(
self.m_request.mock_calls,
[call.content.read(),
call.sendJsonResponse('Ok.'),
])
def test_render_PUT_malformed_JSON(self):
self.m_request.content.read.return_value = self.entryjson[:-1] # Mangled with slice.
# Execute the code under test:
retval = self.ur.render_PUT(self.m_request)
# Verifications:
self.assertEqual(server.NOT_DONE_YET, retval)
# Verify that m_db was not modified (or accessed) in any way:
self.assertEqual(
self.m_db.mock_calls,
[])
# Verify that a 400 response was sent:
self.assertEqual(
self.m_request.mock_calls,
[call.content.read(),
call.sendJsonErrorMessage('Malformed JSON body.'),
])
def test_render_PUT_missing_fqdn(self):
malformedentry = dict(self.expectedentry)
del malformedentry['fqdn']
body = json.dumps(malformedentry, indent=2, sort_keys=True)
self.m_request.content.read.return_value = body
# Execute the code under test:
retval = self.ur.render_PUT(self.m_request)
# Verifications:
self.assertEqual(server.NOT_DONE_YET, retval)
# Verify that m_db was not modified (or accessed) in any way:
self.assertEqual(
self.m_db.mock_calls,
[])
# Verify that a 400 response was sent:
self.assertEqual(
self.m_request.mock_calls,
[call.content.read(),
call.sendJsonErrorMessage("Missing 'fqdn' field."),
])
| 2.375
| 2
|
python/miind/meshtest3.py
|
dekamps/miind
| 13
|
12775209
|
<reponame>dekamps/miind
import unittest
from aexpdevelop import *
from miind.bary import isinsidequadrilateral
from matplotlib.path import Path
from scipy.spatial import KDTree
import miind.mesh as mesh
mesh.MAX_NEIGHBOURS=16
TEST_MESH = 'aexp.mesh'
class MeshTest(unittest.TestCase):
if not os.path.exists(TEST_MESH):
GenerateTestMesh()
m = mesh.Mesh(TEST_MESH)
    def setUp(self):
        return
def tearDown(self):
return
def testMeshSlicing(self):
        ''' Just a check of the slicing order used in the parsing of a mesh. Replace the 'x' by print statements when needed.'''
a = np.array([1,2,3, 4,5,6, 7, 8, 9, 10,11,12, 13,14,15])
a.shape = (5,3)
x = a
for l in a[0::2,:]:
x = l
for m in a[1::2,:]:
x = m
def testInside2(self):
data = np.array([[-56.9872066344, 0.554565985372], [-58.4625502211, 0.199160622028], [-58.4375907348, 0.656613945886], [-56.9662529743, 1.00948787166]])
point= np.array([-57.3,0.73])
self.assertTrue(isinsidequadrilateral(data,point))
def testKDTree(self):
'''Just a check to see if we feed the right data format to KDTree.'''
ver=np.zeros(10)
ver.shape=(5,2)
ver[0,0] = 0
ver[0,1] = 0
ver[1,0] = 1
ver[1,1] = 1
ver[2,0] = -1
ver[2,1] = -1
ver[3,0] = 0
ver[3,1] = 10
ver[4,0] = 100
ver[4,1] = 0
tree = KDTree(ver)
points = np.array([[1,1.1],[1000,0.]])
res=tree.query(points)
self.assertTrue(res[1][0] == 1 and res[1][1] == 4)
def testQuery(self):
''' This shows how the closest mesh point to a given (v,w) pair can be found. The query gives the distance [0] and the index [1]
of the closest mesh point to (v,w). The index can be used to get the point from the data array.'''
close = MeshTest.m.query([-50,0.5])
point = MeshTest.m.data[close[1]]
self.assertAlmostEqual(point[0], -49.6416200889 )
self.assertAlmostEqual(point[1], 0.39109939 )
def testNeighbours(self):
close = MeshTest.m.query([-50,0.5])
point = MeshTest.m.data[close[1]]
ns = MeshTest.m.neighbours[(point[0],point[1])]
self.assertTrue(ns[0] == [7, 15] )
self.assertTrue(ns[1] == [7, 16] )
self.assertTrue(ns[2] == [8, 15] )
self.assertTrue(ns[3] == [8, 16] )
def testInBin(self):
'''This is essentially how the bin is found that contains a tuple (v,w).
Beware!!! It is almost always sufficient to find the closest neighbour,
and test the bins that contains this neighbour for whether the point is
contained. This does not always work, and testtransition.py contains
a counter example. For this reason in transition.py the number of neighbours
is increased.'''
point = [-70, -5]
close = MeshTest.m.query(point)
point_cl = MeshTest.m.data[close[1]]
ns = MeshTest.m.neighbours[(point_cl[0],point_cl[1])]
self.assertFalse (MeshTest.m.isinbin(6,2,point))
self.assertFalse(MeshTest.m.isinbin(6,3,point))
self.assertTrue(MeshTest.m.isinbin(7,2,point))
self.assertFalse(MeshTest.m.isinbin(6,3,point))
def testShowGrid(self):
m=mesh.Mesh('aexp.mesh')
mesh.display_mesh(m,m.dimensions(),True)
def testBBox(self):
'''Test whether the right bounding box is generated. Consult the
plot generated by mesh.py to check these values.'''
close = MeshTest.m.query([-50,0.5])
point = MeshTest.m.data[close[1]]
ns = MeshTest.m.neighbours[(point[0],point[1])]
i = ns[0][0]
j = ns[0][1]
self.assertTrue(i == 7)
self.assertTrue(j == 15)
box = MeshTest.m.bbox(i,j)
self.assertAlmostEqual(box[0][0],-50.555337890395002)
self.assertAlmostEqual(box[0][1], -0.61602201398800005)
self.assertAlmostEqual(box[1][0], -49.641620088925002)
self.assertAlmostEqual(box[1][1], 0.39109939538600003)
def testBin(self):
        ''' For selected (v, w) tuples, query which bin they come from.'''
self.assertTrue(MeshTest.m.bin_from_point([-70,-5]) == [7,2] )
self.assertTrue(MeshTest.m.bin_from_point([-57.3,0.73]) == [16,8] )
def testStationaryBin(self):
'''Create a simple test grid that shows how a stationary point (reversal bin) should be added to a mesh.'''
f=open('teststat.mesh','w')
f.write('xxx\n') # first line doesn't matter any more
f.write('1e-3\n') # some made up resolution
f.write('-3. -3. -2.5 -1. 1. 2.5 3. 3. \n')
f.write(' 0. 1. 2. 3. 3. 2. 1. 0. \n')
f.write('-2. -2. -1.5 -1. 1. 1.5 2. 2.\n')
f.write(' 0. 1. 2. 2. 2. 2. 1. 0.\n')
f.close()
m=mesh.Mesh('teststat.mesh')
# this is a mesh where a stationary cell is entered
q=mesh.Quadrilateral([-1.,-1.,1.,1.],[0.,0.5,0.5,0.])
m.insert_stationary(q)
# it is legal to enter more than one stationary cell
q2=mesh.Quadrilateral([-1.,-1.,1.,1.],[0.5,1.0,1.0,0.5])
m.insert_stationary(q2)
mesh.display_mesh(m,m.dimensions(),label=True)
def testBlockedGrid(self):
        '''Test a grid that is built of two groups of cells that are not necessarily related. The two blocks are split by a single line containing the
        string 'closed\n'. '''
f=open('blocktest.mesh','w')
f.write('xxx\n') # first line doesn't matter any more
f.write('1e-3\n') # some made up resolution
f.write(' 0. 1. 2. \n')
f.write(' 0. 0. 0. \n')
f.write(' 0. 1. 2. 3.\n')
f.write(' 1. 1. 1. 1.\n')
f.write(' 0. 1. 2. 3.\n')
f.write(' 2. 2. 2. 2.\n')
f.write('closed\n')
f.write(' 0. 1.\n')
f.write(' 3. 3.\n')
f.write(' 0. 1.\n')
f.write(' 4. 4.\n')
f.close()
m=mesh.Mesh('blocktest.mesh')
mesh.display_mesh(m,m.dimensions(),True)
def testInversionStrip(self):
'''Define two diagonal strips in the conventional way. Define a third strip by inversion.'''
# the first strip is demarcated by y = -nx + 3, y = -nx + 4, x = 0 and y = 0
n = 5.
vs = np.linspace(0.,3./n,4)
wlow = -n*vs + 3.
whigh = -n*vs + 4.
# the second strip is demarcated by y = nx + 3, y = nx + 4, x = 0 and y = 0
negvs = np.linspace(0.,-3./n,4)
negvlow = n*negvs + 3.
negvhigh = n*negvs + 4.
# a strip in the center, partly covered by the side strip
invs = np.arange(1./(2.*n),4./n,1./n)
with open('inversion.mesh','w') as f:
f.write('xxx\n') # first line doesn't matter any more
f.write('1e-3\n') # some made up resolution
for v in vs:
f.write(str(v) + ' ')
f.write('\n')
for w in wlow:
f.write(str(w) + ' ' )
f.write('\n')
for v in vs:
f.write(str(v) + ' ')
f.write('\n')
for w in whigh:
f.write(str(w) + ' ' )
f.write('\n')
f.write('closed\n')
for v in negvs:
f.write(str(v) + ' ')
f.write('\n')
for w in negvlow:
f.write(str(w) + ' ' )
f.write('\n')
for v in negvs:
f.write(str(v) + ' ')
f.write('\n')
for w in negvhigh:
f.write(str(w) + ' ' )
f.write('\n')
f.write('inversion\n')
for v in invs:
f.write(str(v) + ' ' )
f.write('\n')
for w in range(3,-1,-1):
f.write(str(float(w)) + ' ')
f.write('\n')
for v in invs:
f.write(str(-v) + ' ' )
f.write('\n')
for w in range(3,-1,-1):
f.write(str(float(w)) + ' ')
f.write('\n')
def testReadInversionMesh(self):
m=mesh.Mesh('inversion.mesh')
if __name__ == '__main__':
meshSuite = unittest.TestLoader().loadTestsFromTestCase(MeshTest)
unittest.main()
| 2.59375
| 3
|
proto/graphscope/proto/proto_generator.py
|
haoxins/GraphScope
| 2
|
12775210
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import shutil
import subprocess
import sys
def gather_all_proto(proto_dir, suffix="*.proto"):
directory = os.path.join(proto_dir, suffix)
files = glob.glob(directory)
return files
def create_path(path):
"""Utility function to create a path."""
if os.path.isdir(path):
return
os.makedirs(path, exist_ok=True)
def cpp_out(relative_dir, output_dir):
files = gather_all_proto(relative_dir)
for proto_file in files:
subprocess.check_call(
[
shutil.which("protoc"),
"-I%s" % ".",
"--cpp_out=%s" % output_dir,
proto_file,
],
stderr=subprocess.STDOUT,
)
def python_out(relative_dir, output_dir):
files = gather_all_proto(relative_dir)
for proto_file in files:
subprocess.check_call(
[
sys.executable,
"-m",
"grpc_tools.protoc",
"-I%s" % ".",
"--python_out=%s" % os.path.join(output_dir),
proto_file,
],
stderr=subprocess.STDOUT,
)
def cpp_service_out(relative_dir, output_dir):
plugin_path = shutil.which("grpc_cpp_plugin")
if plugin_path is None:
    raise RuntimeError("grpc_cpp_plugin not found on PATH")
suffix = "*_service.proto"
files = gather_all_proto(relative_dir, suffix)
for proto_file in files:
subprocess.check_call(
[
shutil.which("protoc"),
"-I%s" % ".",
"--grpc_out=%s" % output_dir,
"--plugin=protoc-gen-grpc=%s" % plugin_path,
proto_file,
],
stderr=subprocess.STDOUT,
)
def python_service_out(relative_dir, output_dir):
suffix = "*_service.proto"
files = gather_all_proto(relative_dir, suffix)
for proto_file in files:
subprocess.check_call(
[
sys.executable,
"-m",
"grpc_tools.protoc",
"-I%s" % '.',
"--python_out=%s" % output_dir,
"--grpc_python_out=%s" % output_dir,
proto_file,
],
stderr=subprocess.STDOUT,
)
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: python proto_generator.py <OUTPUT_PATH> [--cpp] [--python]")
sys.exit(1)
# path to 'GraphScope/proto/python/proto'
current_dir = os.path.dirname(os.path.abspath(__file__))
print(current_dir)
# path to 'GraphScope/proto'
base_dir = os.path.join(current_dir, "../", "../")
os.chdir(base_dir)
print(base_dir)
# must use relative path
relative_dir = os.path.join(".", "graphscope", "proto")
output_dir = sys.argv[1]
output_dir = os.path.realpath(output_dir)
create_path(output_dir)
print("Generating cpp proto to:" + output_dir)
if len(sys.argv) <= 2 or len(sys.argv) > 2 and sys.argv[2] == "--cpp":
cpp_out(relative_dir, output_dir)
cpp_service_out(relative_dir, output_dir)
if len(sys.argv) <= 2 or len(sys.argv) > 2 and sys.argv[2] == "--python":
python_out(relative_dir, output_dir)
python_service_out(relative_dir, output_dir)
| 1.96875
| 2
|
petstagram/common/admin.py
|
lion963/petstagram_workshop
| 0
|
12775211
|
<reponame>lion963/petstagram_workshop
from django.contrib import admin
from common.models import Comment
admin.site.register(Comment)
| 1.117188
| 1
|
melodic/lib/python2.7/dist-packages/rqt_py_common/rqt_roscomm_util.py
|
Dieptranivsr/Ros_Diep
| 2
|
12775212
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: <NAME>
import os
import genmsg
import roslaunch
from roslaunch import RLException
import rospkg
import rospy
import rostopic
class RqtRoscommUtil(object):
@staticmethod
def load_parameters(config, caller_id):
"""
Load parameters onto the parameter server.
Copied from ROSLaunchRunner.
@type config: roslaunch.config.ROSLaunchConfig
@raise RLException:
"""
# XMLRPC proxy for communicating with master, 'xmlrpclib.ServerProxy'
param_server = config.master.get()
param = None
try:
# multi-call style xmlrpc
# According to API doc, get_multi() returns
# multicall XMLRPC proxy for communicating with master,
# "xmlrpclib.MultiCall"
param_server_multi = config.master.get_multi()
# clear specified parameter namespaces
# 2468 unify clear params to prevent error
for param in roslaunch.launch._unify_clear_params(config.clear_params):
if param_server.hasParam(caller_id, param)[2]:
param_server_multi.deleteParam(caller_id, param)
r = param_server_multi()
for code, msg, _ in r:
if code != 1:
raise RLException("Failed to clear parameter {}: ".format(msg))
except RLException:
raise
except Exception as e:
rospy.logerr("load_parameters: unable to set params " +
"(last param was [{}]): {}".format(param, e))
raise # re-raise as this is fatal
try:
# multi-call objects are not reusable
param_server_multi = config.master.get_multi()
for param in config.params.values():
# suppressing this as it causes too much spam
# printlog("setting parameter [%s]"%param.key)
param_server_multi.setParam(caller_id, param.key, param.value)
r = param_server_multi()
for code, msg, _ in r:
if code != 1:
raise RLException("Failed to set parameter: %s" % (msg))
except RLException:
raise
except Exception as e:
print("load_parameters: unable to set params (last param was " +
"[%s]): %s" % (param, e))
raise # re-raise as this is fatal
rospy.logdebug("... load_parameters complete")
@staticmethod
def iterate_packages(subdir):
"""
Iterator for packages that contain the given subdir.
This method is generalizing rosmsg.iterate_packages.
@param subdir: eg. 'launch', 'msg', 'srv', 'action'
@type subdir: str
@raise ValueError:
"""
if subdir is None or subdir == '':
raise ValueError('Invalid package subdir = {}'.format(subdir))
rospack = rospkg.RosPack()
pkgs = rospack.list()
rospy.logdebug('pkgs={}'.format(pkgs))
for p in pkgs:
d = os.path.join(rospack.get_path(p), subdir)
rospy.logdebug('rospack dir={}'.format(d))
if os.path.isdir(d):
yield p, d
@staticmethod
def list_files(package, subdir, file_extension='.launch'):
"""
#TODO: Come up with better name of the method.
Taken from rosmsg.
Lists files contained in the specified package
@param package: package name, ``str``
@param file_extension: Defaults to '.launch', ``str``
:returns: list of msgs/srv in package, ``[str]``
"""
if subdir is None or subdir == '':
raise ValueError('Invalid package subdir = {}'.format(subdir))
rospack = rospkg.RosPack()
path = os.path.join(rospack.get_path(package), subdir)
return [genmsg.resource_name(package, t) for t in RqtRoscommUtil._list_types(path, file_extension)]
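# e.g. RqtRoscommUtil.list_files('rqt_py_common', 'launch') would return
# resource names like 'rqt_py_common/example.launch' (an illustrative sketch;
# the actual entries depend on the packages installed).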
@staticmethod
def _list_types(path, ext):
"""
Taken from rosmsg
List all messages in the specified package
:param package str: name of package to search
:param include_depends bool: if True, will also list messages in
package dependencies.
:returns [str]: message type names
"""
types = RqtRoscommUtil._list_resources(path,
RqtRoscommUtil._msg_filter(ext))
result = [x for x in types]
# result = [x[:-len(ext)] for x in types] # Remove extension
result.sort()
return result
@staticmethod
def _list_resources(path, rfilter=os.path.isfile):
"""
Taken from rosmsg._list_resources
List resources in a package directory within a particular
subdirectory. This is useful for listing messages, services, etc...
:param rfilter: resource filter function that returns true if filename
is the desired resource type, ``fn(filename)->bool``
"""
resources = []
if os.path.isdir(path):
resources = [f for f
in os.listdir(path) if rfilter(os.path.join(path, f))]
else:
resources = []
return resources
@staticmethod
def _msg_filter(ext):
"""
Taken from rosmsg._msg_filter
"""
def mfilter(f):
"""
Predicate for filtering directory list. matches message files
:param f: filename, ``str``
"""
return os.path.isfile(f) and f.endswith(ext)
return mfilter
@staticmethod
def is_roscore_running():
"""
@rtype: bool
"""
try:
# Check if rosmaster is running or not.
rostopic.get_topic_class('/rosout')
return True
except rostopic.ROSTopicIOException:
return False
| 1.179688
| 1
|
leapp/cli/__main__.py
|
dhodovsk/leapp
| 29
|
12775213
|
from leapp.cli import main
import leapp.utils.i18n # noqa: F401; pylint: disable=unused-import
main()
| 1.085938
| 1
|
JSSP/genetic_algorithm/ga.py
|
mcfadd/Job_Shop_Schedule_Problem
| 45
|
12775214
|
<filename>JSSP/genetic_algorithm/ga.py<gh_stars>10-100
import random
import statistics
from enum import Enum
from ._ga_helpers import crossover
from ..exception import InfeasibleSolutionException
from ..solution import Solution, SolutionFactory
from ..util import get_stop_condition
"""
GA selection functions
"""
def _tournament_selection(*args):
"""
Tournament style selection for the genetic algorithm.
This function selects args[1] (i.e. selection_size) solutions from the population,
then removes the best solution out of the selection from the population and returns it.
See https://en.wikipedia.org/wiki/Tournament_selection for more info.
:param args: list where args[0] is the population of solutions and args[1] is the selection size
:rtype: Solution
:returns: a Solution from the population
"""
selection_indices = random.sample(range(len(args[0])), args[1])
selection_group = sorted(selection_indices,
                         key=lambda index: args[0][index].makespan)
parent = args[0].pop(selection_group[0])
return parent
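# A minimal usage sketch (assuming hypothetical Solution objects that expose a
# numeric `makespan` attribute, as the docstring above describes). Two random
# candidates compete and the one with the smaller makespan is popped:
#
#   population = [sol_a, sol_b, sol_c]             # hypothetical Solutions
#   parent = _tournament_selection(population, 2)  # selection_size = 2
#   assert parent not in population                # the winner was removed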
def _fitness_proportionate_selection(*args):
"""
Fitness proportionate selection for the genetic algorithm (also called roulette wheel selection).
This function draws a solution from the population with probability proportional to its makespan,
removes it from the population, and returns it.
See https://en.wikipedia.org/wiki/Fitness_proportionate_selection for more info.
:param args: list where args[0] is the population
:rtype: Solution
:returns: a Solution from the population
"""
fitness_sum = sum(sol.makespan for sol in args[0])
s = random.uniform(0, fitness_sum)
partial_sum = 0
for sol in args[0]:
partial_sum += sol.makespan
if partial_sum >= s:
args[0].remove(sol)
return sol
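# A minimal usage sketch (same hypothetical population as above). A solution is
# drawn with probability proportional to its makespan and removed in place:
#
#   parent = _fitness_proportionate_selection(population)
#   # len(population) has decreased by one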
def _random_selection(*args):
"""
Random selection for the genetic algorithm.
This function randomly removes a solution from the population and returns it.
:param args: list where args[0] is the population
:rtype: Solution
:returns: a solution from the population
"""
return args[0].pop(random.randrange(0, len(args[0])))
class GASelectionEnum(Enum):
"""
Enumeration class containing three selection methods for selecting parent solutions for the genetic algorithm.
Selection Methods:
1. GASelectionEnum.TOURNAMENT - Tournament style selection
2. GASelectionEnum.FITNESS_PROPORTIONATE - Fitness proportionate selection (also called roulette wheel selection)
3. GASelectionEnum.RANDOM - Random selection
"""
TOURNAMENT = _tournament_selection
FITNESS_PROPORTIONATE = _fitness_proportionate_selection
RANDOM = _random_selection
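# Because plain functions in an Enum body are treated as methods rather than
# members, each attribute resolves to the selection function itself and can be
# called directly (a sketch, reusing the hypothetical population from above):
#
#   parent = GASelectionEnum.TOURNAMENT(population, 2)
#   parent = GASelectionEnum.RANDOM(population)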
"""
GA agent class
"""
class GeneticAlgorithmAgent:
"""
Genetic algorithm optimization agent.
:type stopping_condition: float
:param stopping_condition: either the duration to run the GA in seconds or the number of generations to iterate through
:type population: [Solution]
:param population: list of Solutions to start the GA from, must not be empty
:type time_condition: bool
:param time_condition: if true, the GA is run for stopping_condition seconds; otherwise it is run for stopping_condition generations
:type selection_method_enum: GASelectionEnum
:param selection_method_enum: selection method to use for selecting parents from the population. (see GASelectionEnum for selection methods)
:type mutation_probability: float
:param mutation_probability: probability of mutating a child solution (i.e change a random operation's machine) in range [0, 1]
:type selection_size: int
:param selection_size: size of the selection group. (applicable only for tournament style selection)
:type benchmark: bool
:param benchmark: if true benchmark data is gathered
"""
def __init__(self, stopping_condition, population, time_condition=False,
selection_method_enum=GASelectionEnum.TOURNAMENT, mutation_probability=0.8,
selection_size=2, benchmark=False):
"""
Initializes an instance of GeneticAlgorithmAgent.
See help(GeneticAlgorithmAgent)
"""
assert selection_size is not None and 1 < selection_size, "selection_size must be an integer greater than 1"
# parameters
self.runtime = None
self.iterations = None
self.time_condition = time_condition
if time_condition:
self.runtime = stopping_condition
else:
self.iterations = stopping_condition
self.initial_population = population
self.population_size = len(population)
self.selection_method = selection_method_enum
self.mutation_probability = mutation_probability
self.selection_size = selection_size
self.benchmark = benchmark
# results
self.result_population = []
self.best_solution = None
if benchmark:
self.benchmark_iterations = 0
self.best_solution_makespan_v_iter = []
self.avg_population_makespan_v_iter = []
self.min_makespan_coordinates = []
def start(self):
"""
Starts the genetic algorithm for this GeneticAlgorithmAgent.
:rtype: Solution
:returns: best Solution found
"""
population = self.initial_population[:]
best_solution = min(population)
iterations = 0
# get static data
data = self.initial_population[0].data
dependency_matrix_index_encoding = data.job_task_index_matrix
usable_machines_matrix = data.usable_machines_matrix
factory = SolutionFactory(data)
# variables used for benchmarks
best_solution_makespan_v_iter = []
avg_population_makespan_v_iter = []
best_solution_iteration = 0
# create stopping condition function
stop_condition = get_stop_condition(self.time_condition, self.runtime, self.iterations)
not_done = True
while not stop_condition(iterations):
if self.benchmark:
avg_population_makespan_v_iter.append(statistics.mean([sol.makespan for sol in population]))
next_population = []
while len(population) > self.selection_size and not_done:
parent1 = self.selection_method(population, self.selection_size)
parent2 = self.selection_method(population, self.selection_size)
# breed the parents to produce child1 (parent1 cross parent2)
# Note mutation happens in crossover function
feasible_child = False
while not feasible_child:
# the try except block is because sometimes the crossover operation results in a setup of -1
# which then produces an infeasible solution. This is due to the sequence dependency setup times matrix not allowing for wait time.
try:
child1 = crossover(parent1, parent2,
self.mutation_probability, dependency_matrix_index_encoding,
usable_machines_matrix)
if child1 != parent1 and child1 != parent2:
feasible_child = True
except InfeasibleSolutionException:
if stop_condition(iterations):
not_done = False
break
# breed the parents to produce child2 (parent2 cross parent1)
feasible_child = False
while not feasible_child:
try:
child2 = crossover(parent2, parent1,
self.mutation_probability, dependency_matrix_index_encoding,
usable_machines_matrix)
if child2 != parent1 and child2 != parent2:
feasible_child = True
except InfeasibleSolutionException:
if stop_condition(iterations):
not_done = False
break
# add best 2 individuals to next generation if they are not already in the next generation (elitist strategy)
if not_done:
sorted_individuals = sorted([parent1, parent2, child1, child2])
added = 0
index = 0
while added < 2 and index < len(sorted_individuals):
if sorted_individuals[index] not in next_population:
next_population.append(sorted_individuals[index])
added += 1
index += 1
# if parent1, parent2, child1, and child2 are all in next_population, add random solutions
while added < 2:
next_population.append(factory.get_solution())
added += 1
else:
next_population.append(parent1)
next_population.append(parent2)
# check for better solution than best_solution
if min(child1, child2) < best_solution:
best_solution = min(child1, child2)
if self.benchmark:
best_solution_iteration = iterations
if self.benchmark:
best_solution_makespan_v_iter.append(best_solution.makespan)
iterations += 1
elif not self.time_condition:
iterations += 1
next_population += population
population = next_population
self.best_solution = best_solution
self.result_population = next_population
if self.benchmark:
self.benchmark_iterations = iterations
self.best_solution_makespan_v_iter = best_solution_makespan_v_iter
self.avg_population_makespan_v_iter = avg_population_makespan_v_iter
self.min_makespan_coordinates = (best_solution_iteration, best_solution.makespan)
return self.best_solution
| 2.78125
| 3
|
kansha/services/mail.py
|
AnomalistDesignLLC/kansha
| 161
|
12775215
|
<gh_stars>100-1000
# -*- coding:utf-8 -*-
#--
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
#--
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.utils import COMMASPACE, formatdate
from nagare import log
from .services_repository import Service
class MailSender(Service):
'''
Mail sender service.
API: a mail sender service must provide a ``send`` method.
'''
LOAD_PRIORITY = 10
CONFIG_SPEC = {
'activated': 'boolean(default=True)',
'host': 'string(default="127.0.0.1")',
'port': 'integer(default=25)',
'default_sender': 'string(default="<EMAIL>")'
}
def __init__(self, config_filename, error, host, port, default_sender, activated):
super(MailSender, self).__init__(config_filename, error)
self.host = host
self.port = port
self.default_sender = default_sender
self.activated = activated
if self.activated:
log.debug(
'The mail service will connect to %s on port %s' %
(self.host, self.port)
)
else:
log.warning('The mail service will drop all messages!')
def _smtp_send(self, from_, to, contents):
try:
smtp = smtplib.SMTP(self.host, self.port)
except IOError as e:
log.exception(e)
return False
try:
smtp.sendmail(from_, to, contents)
except Exception as e:
log.exception(e)
return False
finally:
smtp.close()
return True
def send(self, subject, to, content, html_content=None, from_='', cc=None, bcc=None,
type='plain', mpart_type='alternative'):
"""Sends an email
In:
- ``subject`` -- email subject
- ``to`` -- list of recipients' emails
- ``content`` -- email content
- ``from_`` -- email sender address
- ``cc`` -- list of CC emails
- ``bcc`` -- list of BCC emails
- ``type`` -- email type ('plain' or 'html')
- ``mpart_type`` -- email part type
"""
from_ = from_ if from_ else self.default_sender
# normalize cc/bcc (None defaults avoid shared mutable default arguments)
cc = cc or []
bcc = bcc or []
# create the message envelop
msg = MIMEMultipart(mpart_type)
msg['Subject'] = subject
msg['Date'] = formatdate(localtime=True)
msg['From'] = from_
msg['To'] = COMMASPACE.join(to)
if cc:
msg['Cc'] = COMMASPACE.join(cc)
# attach the mail content
charset = 'us-ascii'
if isinstance(content, unicode):
content = content.encode('UTF-8')
charset = 'UTF-8'
msg.attach(MIMEText(content, type, charset))
if html_content:
msg.attach(MIMEText(html_content, 'html', charset))
# log
log.info('%s mail:\n subject=%s\n from=%s\n to=%s\n cc=%s\n bcc=%s',
'sending' if self.activated else 'ignoring', subject, from_, to, cc, bcc)
log.debug('Mail content:\n' + content)
# post the email to the SMTP server
if self.activated:
return self._smtp_send(from_, to + cc + bcc, msg.as_string())
return True
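# A minimal usage sketch (hedged: the host, recipients and config filename are
# made-up values; in practice the service is built from CONFIG_SPEC):
#
#   sender = MailSender('mail.cfg', None, host='127.0.0.1', port=25,
#                       default_sender='noreply@example.com', activated=False)
#   sender.send(u'Hello', ['someone@example.com'], u'Plain text body')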
class DummyMailSender(MailSender):
'''For use in unit tests.'''
def __init__(self):
super(DummyMailSender, self).__init__(
'',
None,
host='localhost',
port=25,
activated=False,
default_sender='<EMAIL>'
)
| 2.375
| 2
|
board/models/team.py
|
RetroFlow/retro-flow
| 0
|
12775216
|
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from .assignee import GroupAssignee, UserProfileAssignee
class PublicInfo(models.Model):
email = models.EmailField(
verbose_name=_('Additional email'),
null=True,
blank=True,
)
phone_number = models.CharField(
max_length=15,
verbose_name=_("Phone number"),
null=True,
blank=True,
)
additional_info = models.TextField(
max_length=200,
verbose_name=_('Additional info'),
null=True,
blank=True,
)
class Profile(models.Model):
user = models.OneToOneField(
settings.AUTH_USER_MODEL,
related_name='profile',
on_delete=models.CASCADE,
verbose_name=_('Related User instance'),
)
first_name = models.CharField(
max_length=255,
null=True,
blank=True,
verbose_name=_('First name'),
)
last_name = models.CharField(
max_length=255,
null=True,
blank=True,
verbose_name=_('Last name'),
)
icon = models.URLField(
verbose_name=_('Link to user avatar image'),
null=True,
blank=True,
)
public_info = models.OneToOneField(
PublicInfo,
related_name='profile',
verbose_name=_('Public info'),
on_delete=models.CASCADE,
null=True,
)
@property
def email(self):
return self.user.email
@property
def full_name(self):
if self.first_name and self.last_name:
return '{} {}'.format(self.first_name, self.last_name)
return self.user.username
@property
def username(self):
return self.user.username
def __str__(self):
return self.full_name
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_profile_for_new_user(sender, created, instance, **kwargs):
if created:
public_info = PublicInfo()
public_info.save()
profile = Profile(user=instance, public_info=public_info)
profile.save()
class UserRole(models.Model):
class Role:
REGULAR_USER = 'user'
OWNER = 'owner'
ADMIN = 'admin'
code = models.CharField(
max_length=20,
unique=True,
null=True
)
description = models.TextField(
max_length=200,
null=True,
blank=True,
)
@classmethod
def get_default_role(cls):
# TODO: proper management of the default role
return cls.objects.get_or_create(code=cls.Role.REGULAR_USER)[0].id
def is_creator(self):
return self.code == self.Role.OWNER
def is_admin_or_creator(self):
return self.code == self.Role.OWNER or self.code == self.Role.ADMIN
def __str__(self):
return "{}".format(self.code)
class Team(models.Model):
members = models.ManyToManyField(
Profile,
through='MembershipInfo',
related_name='teams'
)
def get_assignees(self):
res = list(map(GroupAssignee, self.groups.all()))
res.extend(map(UserProfileAssignee, self.members.all()))
return res
def add_member(self, profile, role=UserRole.Role.REGULAR_USER):
r = UserRole.objects.get(code=role)
member_info = MembershipInfo(
profile=profile,
team=self,
role=r
)
member_info.save()
def __str__(self):
return str(self.board)
class Group(models.Model):
name = models.CharField(
max_length=50,
verbose_name=_('Group name')
)
description = models.TextField(
max_length=200,
verbose_name=_('Description'),
blank=True,
null=True
)
icon = models.URLField(
verbose_name=_('Link to group avatar image'),
null=True,
blank=True
)
members = models.ManyToManyField(
Profile,
verbose_name=_('Group members'),
related_name='groups'
)
team = models.ForeignKey(
Team,
related_name='groups',
verbose_name=_('Team'),
on_delete=models.CASCADE,
)
@property
def board(self):
return self.team.board
class MembershipInfo(models.Model):
profile = models.ForeignKey(
Profile,
on_delete=models.DO_NOTHING,
related_name='member_info',
)
team = models.ForeignKey(
Team,
on_delete=models.CASCADE,
related_name='members_info',
)
role = models.ForeignKey(
UserRole,
on_delete=models.SET_DEFAULT,
default=UserRole.get_default_role,
)
created_at = models.DateTimeField(auto_now_add=True)
@property
def board(self):
return self.team.board
| 2.03125
| 2
|
src/pyngsild/source/__init__.py
|
Orange-OpenSource/pyngsild
| 0
|
12775217
|
#!/usr/bin/env python3
# Software Name: pyngsild
# SPDX-FileCopyrightText: Copyright (c) 2021 Orange
# SPDX-License-Identifier: Apache 2.0
#
# This software is distributed under the Apache 2.0;
# see the NOTICE file for more details.
#
# Author: <NAME> <<EMAIL>> et al.
"""
Source for NGSI Agents to collect from.
Sources MUST respect the following protocol :
Each Source Class is a generator hence MUST implement __iter__().
Some Sources MAY implement close() if needed to free resources.
"""
import glob
import logging
import sys
from collections.abc import Iterable
from dataclasses import dataclass
from itertools import chain, islice
from os.path import basename
from pathlib import Path
from tempfile import SpooledTemporaryFile
from typing import Any, List, Sequence
from pyngsild.utils.stream import stream_from
from pyngsild.constants import RowFormat
logger = logging.getLogger(__name__)
DEFAULT_PROVIDER = "user"
@dataclass(eq=True)
class Row:
"""
A row is a data record delivered from a Source.
A row is composed of the record (the data itself) and the provider (the name of the datasource provider).
For example, the provider can be the full qualified named of a remote file located on a FTP Server.
The record could be a simple string, a CSV-delimited line, a full JSON document.
"""
record: Any
provider: str = DEFAULT_PROVIDER
ROW_NOT_SET = Row(None, None)
class Source(Iterable[Row]):
"""
A Source is a pull datasource : any datasource we can iterate on.
The library provides many sources.
One can code its own Source just by extending Source, and providing a new Row for each iteration.
"""
registered_extensions = {}
def __init__(self, rows: Sequence[Row]):
self.rows = rows
def __iter__(self):
yield from self.rows
def head(self, n: int = 2) -> List[Row]:
"""return a list built from the first n elements"""
return [*islice(self, n)]
def first(self) -> Row:
"""return the first element"""
row: Row = None
try:
row = self.head(1)[0]
except Exception:
pass
return row
def skip_header(self, lines: int = 1):
"""return a new Source with first n lines skipped, default is to skip only the first line"""
return Source(islice(self, lines, None))
def limit(self, n: int = 10):
"""return a new Source limited to the first n elements"""
iterator = iter(self)
return Source((next(iterator) for _ in range(n)))
@classmethod
def from_stream(cls, stream: Iterable[Any], provider: str = "user", fmt = RowFormat.TEXT, **kwargs):
"""automatically create the Source from a stream"""
return SourceStream(stream, provider, fmt, **kwargs)
@classmethod
def from_stdin(cls, provider: str = "user", **kwargs):
"""automatically create the Source from the standard input"""
return SourceStream(sys.stdin, provider, **kwargs)
@classmethod
def from_file(
cls,
filename: str, # str | PathLike
fp: SpooledTemporaryFile = None,
provider: str = "user",
**kwargs
):
"""Automatically create the Source from a filename, figuring out the extension; handles text, json, xml and zip+gzip compression."""
from .moresources import SourceJson, SourceXml
binary = False
klass = None
ext = None
ext1 = None
suffixes = [s[1:] for s in Path(filename).suffixes]  # suffixes w/o dot
if suffixes:
ext = suffixes[-1] # last suffix
ext1 = suffixes[0] # first suffix
if "*" in cls.registered_extensions:
klass, binary, kwargs = cls.registered_extensions["*"]
elif ext1 in cls.registered_extensions: # TODO : look at the 1st extension !
klass, binary, kwargs = cls.registered_extensions[ext1]
stream, suffixes = stream_from(filename, fp, binary)
ext = suffixes[-1]
if klass:
return klass(stream, **kwargs)
if ext == "json":
content = stream.read()
return SourceJson(content, provider=basename(filename), **kwargs)
if ext == "xml":
content = stream.read()
return SourceXml(content, provider=basename(filename), **kwargs)
return SourceStream(stream, provider=basename(filename), **kwargs)
@classmethod
def from_files(cls, filenames: Sequence[str], provider: str = "user", **kwargs):
sources = [Source.from_file(f) for f in filenames]
return SourceMany(sources)
@classmethod
def from_glob(cls, pattern: str, provider: str = "user", **kwargs):
sources = [Source.from_file(f) for f in glob.glob(pattern)]
return SourceMany(sources)
@classmethod
def from_globs(cls, patterns: Sequence[str], provider: str = "user", **kwargs):
filenames = chain.from_iterable([glob.glob(p) for p in patterns])
sources = [Source.from_file(f) for f in filenames]
return SourceMany(sources)
@classmethod
def register_extension(cls, ext: str, src, *, binary: bool = False, **kwargs):
cls.registered_extensions[ext] = (src, binary, kwargs)
@classmethod
def unregister_extension(cls, ext: str):
if cls.is_registered_extension(ext):
del cls.registered_extensions[ext]
@classmethod
def is_registered_extension(cls, ext: str):
return ext in cls.registered_extensions
@classmethod
def register(cls, src, *, binary: bool = False, **kwargs):
cls.register_extension("*", src, binary, kwargs)
@classmethod
def unregister(cls):
cls.unregister_extension("*")
def reset(self):
pass
def close(self):
pass
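# A minimal usage sketch of the factory helpers (hedged: the file name is made
# up; from_file picks SourceJson/SourceXml/SourceStream from the extension as
# described above):
#
#   src = Source.from_file("measures.json")   # hypothetical file
#   for row in src.limit(5):
#       print(row.provider, row.record)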
class SourceStream(Source):
def __init__(
self, stream: Iterable[Any], provider: str = "user", fmt: RowFormat = RowFormat.TEXT, ignore_header: bool = False
):
if ignore_header:
next(stream)
self.stream = stream
self.provider = provider
self.fmt = fmt
def __iter__(self):
match self.fmt:
case RowFormat.JSON:
from pyngsild.source.moresources import SourceJson
for payload in self.stream:
yield from SourceJson(payload, self.provider)
case RowFormat.XML:
from pyngsild.source.moresources import SourceXml
for payload in self.stream:
yield from SourceXml(payload, self.provider)
case _:
for line in self.stream:
yield Row(line.rstrip("\r\n"), self.provider)
def reset(self):
pass
class SourceStdin(SourceStream):
def __init__(self, **kwargs):
super().__init__(stream=sys.stdin, **kwargs)
class SourceSingle(SourceStream):
"""A SourceSingle is Source built from a Python single element.
"""
def __init__(self, row: Any, provider: str = "user", fmt: RowFormat = RowFormat.TEXT, ignore_header: bool = False):
super().__init__([row], provider, fmt, ignore_header)
class SourceMany(Source):
def __init__(self, sources: Sequence[Source], provider: str = "user"):
self.sources = sources
self.provider = provider
def __iter__(self):
for src in self.sources:
yield from src
| 2.453125
| 2
|
QUANTAXIS/QAMarket/QABid_advance.py
|
paracats/QUANTAXIS
| 1
|
12775218
|
<gh_stars>1-10
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2017 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
import random
import time
"""
重新定义bid模式
bid不应该以一个简单的报价模板的形式引入,而应该直接作为一个下单/回报/状态系统
比如* QA_Send_bid()
会回报一个状态
{
下单成功(10x):{
交易成功:100,
交易失败-报价不在范围:101,
交易失败-报价数量不符合规定:102,
交易状态-订单未完全成交:103,
交易状态-订单数量过大(交易价格变动):104,
},
下单失败(20x):{
下单格式不符合规定:201,
下单关键数据缺失:202,
下单时间错误:203
}
}
同时需要一个队列对于订单进行管理,形成一个先进先出的队列:
Bid-Job-Management-Center
队列应该对于订单进行处理和排序,并分发给各种交易中心,然后得到各种交易中心的回报以后,封装结果并返回状态
2017/6/18
"""
class QA_QAMarket_bid():
def __init__(self):
self.bid = {
'price': float(16),
'date': str('2015-01-05'),
'time': str(time.mktime(datetime.datetime.now().timetuple())),
'amount': int(10),
'towards': int(1),
'code': str('000001'),
'user': str('root'),
'strategy': str('example01'),
'status': '0x01',
'order_id': str(random.random())
}
self.bid_list = [self.bid]
# Bid queue: insert / pop / query
def QA_bid_insert(self):
self.bid_list.append(self.bid)
def QA_bid_pop(self):
self.bid_list.pop()
def QA_bid_status(self):
lens = len(self.bid_list)
return {'status': lens}
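# A minimal usage sketch of the bid queue (hedged: QA_bid_insert re-appends the
# template dict held in self.bid, so a real caller would mutate self.bid first):
#
#   q = QA_QAMarket_bid()
#   q.bid['code'] = '000002'
#   q.QA_bid_insert()
#   print(q.QA_bid_status())   # {'status': 2}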
| 1.4375
| 1
|
VII semester/resource-optimization/lab-5.py
|
ksaracevic1/etf-alles-1
| 14
|
12775219
|
<reponame>ksaracevic1/etf-alles-1<filename>VII semester/resource-optimization/lab-5.py
import numpy as np
import random as random
import math
def okolina(x0, delta_x, opseg):
okolo = []
for dx0 in [x0[0] - delta_x, x0[0], x0[0] + delta_x]:
for dx1 in [x0[1] - delta_x, x0[1], x0[1] + delta_x]:
if (dx0 != x0[0] or dx1 != x0[1]) and dx0 >= opseg[0] and dx0 <= opseg[1] and dx1 >= opseg[0] and dx1 <= opseg[1]:
okolo.append((dx0, dx1))
return okolo
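# e.g. okolina((0.0, 0.0), 0.1, (-5, 5)) returns the up-to-8 neighbouring grid
# points one delta_x step away from x0 that stay inside [opseg[0], opseg[1]].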
def SA(f, opseg, x0, delta_x, N, eps, T0):
M = 1000
alfa = 0.96
najboljeX = x0
x = x0
v = f(x)
tk = T0
for iter in range(0, N):
for m in range(0, M):
okolo = okolina(x, delta_x, opseg)
xp = random.choice(okolo)
vp = f(xp)
pa = math.e ** (-abs(vp - v)/tk)
r = random.uniform(0, 1)
if r < pa or vp <= v:
x = xp
v = vp
if f(najboljeX) >= f(xp):
najboljeX = xp
tk *= alfa
return najboljeX
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
T = 0.1
brojIt = 100
def f(X):
return (X[0]-3)**2 + (X[1]+1)**2
x1 = np.linspace(-6, 6, 100)
x2 = np.linspace(-6, 6, 100)
X1, X2 = np.meshgrid(x1, x2)
Y = f([X1, X2])
fig = plt.figure()
ax = fig.add_subplot(2,2,1,projection='3d')
ax.contour(X1, X2, Y, 50, cmap='binary')
xTS = SA(f, [-5, 5], (-5, 5), 0.1, brojIt, 0, T)
ax.scatter(xTS[0], xTS[1], f(xTS), color='blue', marker='o')
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_zlabel('$f(x_1,x_2)$');
ax.set_title('$f(x_1,x_2) = (x_1-3)^2 + (x_2+1)^2$')
def f(x):
return 20 + (x[0]**2 - 10*np.cos(2*math.pi*x[0])) + (x[1]**2 - 10*np.cos(2*math.pi*x[1]))
x1 = np.linspace(-6, 6, 100)
x2 = np.linspace(-6, 6, 100)
X1, X2 = np.meshgrid(x1, x2)
Y = f([X1, X2])
ax = fig.add_subplot(2,2,2,projection='3d')
ax.contour(X1, X2, Y, 50, cmap='binary')
xTS = SA(f, [-5.12, 5.12], (-5.12, 5.12), 0.1, brojIt, 0, T)
ax.scatter(xTS[0], xTS[1], f(xTS), color='blue', marker='o')
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_zlabel('$f(x_1,x_2)$');
ax.set_title('$f(x_1,x_2) = 20 + (x_1^2 - 10\cos(2\pi x_1)) + (x_2^2 - 10\cos(2\pi x_2))$')
def f(X):
return -(1+np.cos(12*np.sqrt(X[0]**2 + X[1]**2)))/ (0.5*(X[0]**2 + X[1]**2) + 2)
x1 = np.linspace(-6, 6, 100)
x2 = np.linspace(-6, 6, 100)
X1, X2 = np.meshgrid(x1, x2)
Y = f([X1, X2])
ax = fig.add_subplot(2,2,3,projection='3d')
ax.contour(X1, X2, Y, 50, cmap='binary')
xTS = SA(f, [-5.12, 5.12], (-5.12, 5.12), 0.1, brojIt, 0, T)
ax.scatter(xTS[0], xTS[1], f(xTS), color='blue', marker='o')
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_zlabel('$f(x_1,x_2)$');
ax.set_title('$f(x_1,x_2) = -({1+\cos{(12\sqrt{x_1^2+x_2^2})}})/({0.5(x_1^2+x_2^2)+2})$')
def f(x):
return -abs(np.sin(x[0]) * np.cos(x[1]) * np.exp(abs(1 - np.sqrt(x[0]**2 + x[1]**2)/math.pi)))
x1 = np.linspace(-10, 10, 100)
x2 = np.linspace(-10, 10, 100)
X1, X2 = np.meshgrid(x1, x2)
Y = f([X1, X2])
ax = fig.add_subplot(2,2,4,projection='3d')
ax.contour(X1, X2, Y, 50, cmap='binary')
xTS = SA(f,[-10, 10], (0, 0), 0.1, brojIt, 0, T)
ax.scatter(xTS[0], xTS[1], f(xTS), color='blue', marker='o')
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_zlabel('$f(x_1,x_2)$');
ax.set_title('$f(x_1,x_2) = -|\sin(x_1)\cos(x_2)e^{|1 - \sqrt{x_1^2+x_2^2}/\pi|}|$')
plt.show()
| 2.390625
| 2
|
application.windows64/source/DrawKit.pyde
|
JonECope/DrawKit
| 0
|
12775220
|
"""
This code was written by JonECope
Using Python 3035 in Processing 3.3.7
"""
color_choice = color(0, 0, 0)
circ = True
instructions = "Press Mouse to draw, Right-click to erase, Q to exit, R, O, Y, G, B, V for colors. Press S for square brush or C for circle brush. Press Enter to save."
def setup():
#size(800, 800)
fullScreen()
background(255)
textSize(30)
fill(0)
text(instructions, 10, 100, width-20, 200)
global circ  # without this, the assignment below creates a dead local variable
circ = True
def draw():
global color_choice
if mousePressed:
if mouseButton == LEFT:
fill(color_choice)
stroke(color_choice)
elif mouseButton == RIGHT:
fill(255, 255, 255)
stroke(255, 255, 255)
else:
fill(0, 0, 0, 0)
stroke(0, 0, 0, 0)
global circ
if keyPressed:
if key == "s" or key == "S":
circ = False
elif key == "c" or key == "C":
circ = True
if key == "q" or key == "Q":
exit()
elif key == ENTER:
save("MyDrawing.png")
background(255)
fill(255, 0, 0)
text("Your creation has been saved to the application's folder!", 10, 100, width-20, 200)
elif keyCode == LEFT:
color_choice = color(0)
elif key == "r" or key == "R":
color_choice = color(255, 0, 0)
elif key == "o" or key == "O":
color_choice = color(255, 156, 0)
elif key == "y" or key == "Q":
color_choice = color(255, 255, 0)
elif key == "g" or key == "Q":
color_choice = color(0, 255, 0)
elif key == "b" or key == "Q":
color_choice = color(0, 0, 255)
elif key == "v" or key == "Q":
color_choice = color(169, 0, 255)
if circ:
ellipse(mouseX, mouseY, 30, 30)
else:
rect(mouseX, mouseY, 30, 30)
| 3.390625
| 3
|
tests/python/twitter/pants/testutils/mock_target.py
|
wfarner/commons
| 1
|
12775221
|
from twitter.pants.base import ParseContext
__author__ = '<NAME>'
from collections import defaultdict
from twitter.pants.targets import InternalTarget, TargetWithSources
class MockTarget(InternalTarget, TargetWithSources):
def __init__(self, name, dependencies=None, num_sources=0, exclusives=None):
with ParseContext.temp():
InternalTarget.__init__(self, name, dependencies, exclusives=exclusives)
TargetWithSources.__init__(self, name, exclusives=exclusives)
self.num_sources = num_sources
self.declared_exclusives = defaultdict(set)
if exclusives is not None:
for k in exclusives:
self.declared_exclusives[k] = set([exclusives[k]])
self.exclusives = None
def resolve(self):
yield self
def walk(self, work, predicate=None):
work(self)
for dep in self.dependencies:
dep.walk(work)
| 2.359375
| 2
|
load/dataset.py
|
dogeplusplus/meow-mix
| 0
|
12775222
|
<filename>load/dataset.py
import torch
import numpy as np
import torch.nn.functional as F
from pathlib import Path
from typing import Tuple, List
from dataclasses import dataclass
from torch.utils.data import Dataset, DataLoader, random_split
def collate_fn(batch: List[Tuple[np.ndarray, np.ndarray]]) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
"""
Batch uneven audio files using tiling.
Parameters
----------
batch : List[Tuple[np.ndarray, np.ndarray]]
List of uneven audio files and labels
Returns
-------
audio_batch: torch.FloatTensor
Uniformly batched audio signals
label_batch: torch.Tensor
One-hot encoded, uniformly batched audio labels
"""
audio, labels = zip(*batch)
max_size = max([l.size for l in labels])
repeat_fn = lambda x: int(np.ceil(max_size / x.size))
repetitions = np.vectorize(repeat_fn, otypes=["object"])(labels).astype(int)
reshaped_audio = [np.tile(a, (1, r))[:, :max_size] for a, r in zip(audio, repetitions)]
reshaped_labels = [np.tile(l, r)[:max_size] for l, r in zip(labels, repetitions)]
audio_batch = torch.FloatTensor(reshaped_audio)
# F.one_hot needs integer class indices, so stack and cast the labels to long
label_batch = torch.from_numpy(np.stack(reshaped_labels)).long()
label_batch = F.one_hot(label_batch, num_classes=2)
return audio_batch, label_batch
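# A small worked example of the tiling (shapes are illustrative). With labels of
# length 2 and 3, max_size is 3, so the shorter pair is repeated then truncated:
#
#   batch = [(np.ones((1, 2)), np.zeros(2)), (np.ones((1, 3)), np.zeros(3))]
#   audio, labels = collate_fn(batch)   # audio: (2, 1, 3), labels: (2, 3, 2)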
class MeowDataset(Dataset):
def __init__(self, data_dir):
audio_dir = data_dir.joinpath("audio")
label_dir = data_dir.joinpath("label")
self.audio_paths = list(audio_dir.glob("**/*.npy"))
self.label_paths = list(label_dir.glob("**/*.npy"))
def __getitem__(self, idx):
wav_path = self.audio_paths[idx]
label_path = self.label_paths[idx]
audio = torch.from_numpy(np.load(wav_path))
label = torch.from_numpy(np.load(label_path).astype(int))
label = F.one_hot(label, num_classes=3)
return audio, label
def __len__(self):
return len(self.audio_paths)
@dataclass
class DataConfig:
directory: Path
batch_size: int
val_ratio: float
def build_datasets(config: DataConfig) -> Tuple[DataLoader, DataLoader]:
"""
Build training and validation data loaders for training.
Parameters
----------
config: DataConfig
Data configuration such as batch size and validation set.
Returns
-------
train_loader: DataLoader
Training dataset
val_loader: DataLoader
Validation dataset
Raises
------
AssertionError
If the validation set ratio is not between 0 and 1
"""
assert 0 <= config.val_ratio <= 1
ds = MeowDataset(config.directory)
val_size = int(len(ds) * config.val_ratio)
train_size = len(ds) - val_size
train_ds, val_ds = random_split(ds, [train_size, val_size])
train_loader = DataLoader(train_ds, batch_size=config.batch_size)
val_loader = DataLoader(val_ds, batch_size=config.batch_size)
return train_loader, val_loader
| 2.703125
| 3
|
figure_utils.py
|
chenmj201601/ai-car-plate
| 1
|
12775223
|
<gh_stars>1-10
import matplotlib.pyplot as plt
# Plot training and validation accuracy/loss curves
def draw_figure(acc, val_acc, loss, val_loss):
count = len(acc)
epochs = range(1, count + 1)
plt.plot(epochs, acc, 'bo', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.savefig('car_plate_acc_{}.png'.format(count))
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.savefig('car_plate_loss_{}.png'.format(count))
# plt.show()
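# A minimal usage sketch (hedged: `history` comes from a Keras-style fit() call
# and the metric key names are assumptions that depend on the training setup):
#
#   h = model.fit(...).history
#   draw_figure(h['acc'], h['val_acc'], h['loss'], h['val_loss'])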
| 3.0625
| 3
|
fangoosterlee/cosmethod.py
|
ghlian/fangoosterlee
| 7
|
12775224
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
COS method
==========
The method comes from [1]_
The original code is found at
http://www.wilmott.com/messageview.cfm?catid=34&threadid=78554
References
----------
.. [1] <NAME>., & <NAME>. (2009).
A Novel Pricing Method for European Options
Based on Fourier-Cosine Series Expansions.
*SIAM Journal on Scientific Computing*, 31(2), 826. doi:10.1137/080718061
<http://ta.twi.tudelft.nl/mf/users/oosterle/oosterlee/COS.pdf>
"""
from __future__ import division, print_function
import numpy as np
import numexpr as ne
__all__ = ['cosmethod']
def cosmethod(model, moneyness=0., call=True, npoints=2**10):
"""COS method.
Parameters
----------
model : instance of specific model class
The method depends on availability of two methods:
- charfun
- cos_restriction
moneyness : array_like
Moneyness of the option, np.log(strike/price) - riskfree * maturity
call : bool array_like
Call/Put flag
npoints : int
Number of points on the grid. The more the better, but slower.
Returns
-------
array_like
Option premium normalized by asset price
Notes
-----
`charfun` method (risk-neutral conditional characteristic function)
of `model` instance should depend on
one argument only (array_like) and should return
array_like of the same dimension.
`cos_restriction` method of `model` instance takes `maturity`
and `riskfree` as array arguments,
and returns two corresponding arrays (a, b).
"""
if not hasattr(model, 'charfun'):
raise Exception('Characteristic function is not available!')
if not hasattr(model, 'cos_restriction'):
raise Exception('COS restriction is not available!')
# (nobs, ) arrays
alim, blim = model.cos_restriction()
# (npoints, nobs) array
kvec = np.arange(npoints)[:, np.newaxis] * np.pi / (blim - alim)
# (npoints, ) array
unit = np.append(.5, np.ones(npoints-1))
# Arguments
argc = (kvec, alim, blim, 0, blim)
argp = (kvec, alim, blim, alim, 0)
# (nobs, ) array
put = np.logical_not(call)
# (npoints, nobs) array
umat = 2 / (blim - alim) * (call * xfun(*argc) - put * xfun(*argp))
# (npoints, nobs) array
pmat = model.charfun(kvec)
# (npoints, nobs) array
xmat = np.exp(-1j * kvec * (moneyness + alim))
# (nobs, ) array
return np.exp(moneyness) * np.dot(unit, pmat * umat * xmat).real
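# A minimal sketch of the model protocol cosmethod expects (hedged: this toy
# class is hypothetical and not part of the package; a real model implements
# its own characteristic function and integration limits):
#
#   class ToyModel(object):
#       def charfun(self, arg):          # risk-neutral characteristic function
#           return np.exp(-arg**2 * .02)
#       def cos_restriction(self):       # (a, b) truncation limits
#           return -1., 1.
#
#   premium = cosmethod(ToyModel(), moneyness=0., call=True)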
def xfun(k, a, b, c, d):
"""Xi-Psi function.
Parameters
----------
k : (n, 1) array
a : float or (m, ) array
b : float or (m, ) array
c : float or (m, ) array
d : float or (m, ) array
Returns
-------
(n, m) array
"""
# out0 = (np.cos(k * (d-a)) * np.exp(d) - np.cos(k * (c-a)) * np.exp(c)
# + k * (np.sin(k * (d-a)) * np.exp(d) - np.sin(k * (c-a)) * np.exp(c)))\
# / (1 + k**2)
# out1 = (np.sin(k[1:] * (d-a)) - np.sin(k[1:] * (c-a))) / k[1:]
out0 = ne.evaluate(("(cos(k * (d-a)) * exp(d) - cos(k * (c-a)) * exp(c)"
"+ k * (sin(k * (d-a)) * exp(d) - sin(k * (c-a)) * exp(c)))"
"/ (1 + k**2)"))
k1 = k[1:]
out1 = ne.evaluate("(sin(k1 * (d-a)) - sin(k1 * (c-a))) / k1")
out1 = np.vstack([(d - c) * np.ones_like(a), out1])
return out0 - out1
if __name__ == '__main__':
pass
| 2.78125
| 3
|
res-logger.py
|
jaggiJ/code_python
| 0
|
12775225
|
# WEB-SERVER RESPONSE STATUS LOGGER
# MIT License
# Copyright (c) 2021 jaggiJ
# Checks and logs server response status
import requests, sys
import time, datetime
########################################################################
# PRINTS HELP IF REQUESTED by --help argument and such
helpRequest = ['--help', '-h', 'help', 'h', '-help']
if len(sys.argv) > 1 and sys.argv[1] in helpRequest:
print('''
# USE: 'python[3] res-logger.py <server-address> <request time seconds>'
# for testing:
# 'python3 res-logger.py https://web-address.com 6 &'
# 'tail -F status.log'
# example for permanent run in background:
# 'python3 res-logger.py https://your-web-server.com 300 &' ''')
sys.exit()
#######################################################################
# USER EDITABLE
server = 'https://server.address.here.com'
requestFrequency = 300 #how often server is checked
lessFrequent = 6 #requestFrequency x lessFrequent=log entry interval(sec)
########################################################################
#FUNCTIONS
def append_status():
'''Appends server response status to log.'''
timeNow = datetime.datetime.now()
formatTimeNow = timeNow.strftime("%d/%m/%Y, %H:%M:%S")
res_msg = f'{formatTimeNow} status code: {code}\n'
with open('status.log', 'a') as file:
file.write(res_msg)
def estimate_status():
'''Figures out server response code e.g. 200.'''
try:
req = requests.get(server)
except requests.exceptions.RequestException:
req = 'no connection to server'
if isinstance(req, str):
code = 'no connection to server'
else:
code = req.status_code
return req, code
def help_text():
'''Prints out help to user.'''
print(
'Add one or two arguments:\n1. server address starting with http\
or https,\n2. time in seconds (default is 300), e.g\n"python3 \
res-logger.py https://google.com 6"')
sys.exit()
##########################################################################
#COMMAND LINE ARGUMENTS
commandLineArgs = sys.argv[1::]
if commandLineArgs:
#Checks if user provided proper command line arguments; if not
#prints out help.
if commandLineArgs[0].startswith('http') and len(commandLineArgs) > 1:
server = commandLineArgs[0]
requestFrequency = int(commandLineArgs[1])
elif commandLineArgs[0].startswith('http') and len(commandLineArgs) == 1:
server = commandLineArgs[0]
else:
help_text()
elif not commandLineArgs and server == 'https://server.address.here.com':
help_text()
###############################################################################
#OTHER VARIABLES
req, code = estimate_status()
###############################################################
append_status()
print(f'Initial status code {code}, request: {req}')
#MAIN LOOP
counter = 0
while True:
#Sends requests and logs responses to file.
counter += 1
previous_status = code
req, code = estimate_status()
if code != previous_status:
append_status()
else:
#Decrease frequency of logging the same status by a factor of lessFrequent
if counter % lessFrequent == 0:
append_status()
time.sleep(requestFrequency)
| 3.140625
| 3
|
bin/face.py
|
wanjinchang/deepface-1
| 1
|
12775226
|
<filename>bin/face.py
from __future__ import absolute_import
import logging
import os
import pickle
import sys
from glob import glob
import cv2
import numpy as np
import fire
from sklearn.metrics import roc_curve
from tqdm import tqdm
import matplotlib.pyplot as plt
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_dir)
from deepface.confs.conf import DeepFaceConfs
from deepface.detectors.detector_dlib import FaceDetectorDlib
from deepface.detectors.detector_ssd import FaceDetectorSSDMobilenetV2, FaceDetectorSSDInceptionV2
from deepface.recognizers.recognizer_vgg import FaceRecognizerVGG
from deepface.recognizers.recognizer_resnet import FaceRecognizerResnet
from deepface.utils.common import get_roi, feat_distance_l2, feat_distance_cosine
from deepface.utils.visualization import draw_bboxs
logger = logging.getLogger('DeepFace')
logger.setLevel(logging.INFO if int(os.environ.get('DEBUG', 0)) == 0 else logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.handlers = []
logger.addHandler(ch)
class DeepFace:
def __init__(self):
self.detector = None
self.recognizer = None
def set_detector(self, detector):
if self.detector is not None and self.detector.name() == detector:
return
logger.debug('set_detector old=%s new=%s' % (self.detector, detector))
if detector == FaceDetectorDlib.NAME:
self.detector = FaceDetectorDlib()
elif detector == 'detector_ssd_inception_v2':
self.detector = FaceDetectorSSDInceptionV2()
elif detector == 'detector_ssd_mobilenet_v2':
self.detector = FaceDetectorSSDMobilenetV2()
def set_recognizer(self, recognizer):
if self.recognizer is not None and self.recognizer.name() == recognizer:
return
logger.debug('set_recognizer old=%s new=%s' % (self.recognizer, recognizer))
if recognizer == FaceRecognizerVGG.NAME:
self.recognizer = FaceRecognizerVGG()
elif recognizer == FaceRecognizerResnet.NAME:
self.recognizer = FaceRecognizerResnet()
def blackpink(self, visualize=True):
imgs = ['./samples/blackpink/blackpink%d.jpg' % (i + 1) for i in range(7)]
for img in imgs:
self.run(image=img, visualize=visualize)
def recognizer_test_run(self, detector=FaceDetectorDlib.NAME, recognizer=FaceRecognizerResnet.NAME, image='./samples/ajb.jpg', visualize=False):
self.set_detector(detector)
self.set_recognizer(recognizer)
if isinstance(image, str):
logger.debug('read image, path=%s' % image)
npimg = cv2.imread(image, cv2.IMREAD_COLOR)
elif isinstance(image, np.ndarray):
npimg = image
else:
logger.error('Argument image should be str or ndarray. image=%s' % str(image))
sys.exit(-1)
if npimg is None:
logger.error('image can not be read, path=%s' % image)
sys.exit(-1)
if recognizer:
logger.debug('run face recognition+')
result = self.recognizer.detect([npimg[...,::-1]])
logger.debug('run face recognition-')
return
def run_recognizer(self, npimg, faces, recognizer=FaceRecognizerResnet.NAME):
self.set_recognizer(recognizer)
rois = []
for face in faces:
# roi = npimg[face.y:face.y+face.h, face.x:face.x+face.w, :]
roi = get_roi(npimg, face, roi_mode=recognizer)
if int(os.environ.get('DEBUG_SHOW', 0)) == 1:
cv2.imshow('roi', roi)
cv2.waitKey(0)
rois.append(roi)
face.face_roi = roi
if len(rois) > 0:
logger.debug('run face recognition+')
result = self.recognizer.detect(rois=rois, faces=faces)
logger.debug('run face recognition-')
for face_idx, face in enumerate(faces):
face.face_feature = result['feature'][face_idx]
logger.debug('candidates: %s' % str(result['name'][face_idx]))
name, score = result['name'][face_idx][0]
# if score < self.recognizer.get_threshold():
# continue
face.face_name = name
face.face_score = score
return faces
def run(self, detector='detector_ssd_mobilenet_v2', recognizer=FaceRecognizerResnet.NAME, image='./samples/blackpink/blackpink1.jpg',
visualize=False):
self.set_detector(detector)
self.set_recognizer(recognizer)
if image is None:
return []
elif isinstance(image, str):
logger.debug('read image, path=%s' % image)
npimg = cv2.imread(image, cv2.IMREAD_COLOR)
elif isinstance(image, np.ndarray):
npimg = image
else:
logger.error('Argument image should be str or ndarray. image=%s' % str(image))
sys.exit(-1)
if npimg is None:
logger.error('image can not be read, path=%s' % image)
sys.exit(-1)
logger.debug('run face detection+ %dx%d' % (npimg.shape[1], npimg.shape[0]))
faces = self.detector.detect(npimg)
logger.debug('run face detection- %s' % len(faces))
if recognizer:
faces = self.run_recognizer(npimg, faces, recognizer)
img = draw_bboxs(np.copy(npimg), faces)
cv2.imwrite('result.jpg', img)
if visualize and visualize not in ['false', 'False']:
cv2.imshow('DeepFace', img)
cv2.waitKey(0)
return faces
def save_and_run(self, path, image, visualize=True):
"""
:param visualize:
:param path: samples/faces
:param image_path: samples/blackpink1.jpg
:return:
"""
self.save_features_path(path)
self.run(image=image, visualize=visualize)
def save_features_path(self, path="./samples/blackpink/faces/"):
"""
:param path: folder contain images("./samples/faces/")
:return:
"""
name_paths = [(os.path.basename(img_path)[:-4], img_path)
for img_path in glob(os.path.join(path, "*.jpg"))]
features = {}
for name, path in tqdm(name_paths):
logger.debug("finding faces for %s:" % path)
faces = self.run(image=path)
features[name] = faces[0].face_feature
import pickle
with open('db.pkl', 'wb') as f:
pickle.dump(features, f, protocol=2)
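# The saved db.pkl can later be reloaded to score new faces (a sketch; the key
# 'jennie' is a hypothetical name taken from the saved image folder):
#
#   with open('db.pkl', 'rb') as f:
#       db = pickle.load(f)
#   score = feat_distance_cosine(db['jennie'], faces[0].face_feature)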
def test_lfw(self, set='test', model='ssdm_resnet', visualize=True):
if set == 'train':
pairfile = 'pairsDevTrain.txt'
else:
pairfile = 'pairsDevTest.txt'
lfw_path = DeepFaceConfs.get()['dataset']['lfw']
path = os.path.join(lfw_path, pairfile)
with open(path, 'r') as f:
lines = f.readlines()[1:]
pairs = []
for line in lines:
elms = line.split()
if len(elms) == 3:
pairs.append((elms[0], int(elms[1]), elms[0], int(elms[2])))
elif len(elms) == 4:
pairs.append((elms[0], int(elms[1]), elms[2], int(elms[3])))
else:
logger.warning('line should have 3 or 4 elements, line=%s' % line)
detec = FaceDetectorDlib.NAME
if model == 'baseline':
recog = FaceRecognizerVGG.NAME
just_name = 'vgg'
elif model == 'baseline_resnet':
recog = FaceRecognizerResnet.NAME
just_name = 'resnet'
elif model == 'ssdm_resnet':
recog = FaceRecognizerResnet.NAME
just_name = 'resnet'
detec = 'detector_ssd_mobilenet_v2'
else:
raise Exception('invalid model name=%s' % model)
logger.info('pair length=%d' % len(pairs))
test_result = [] # score, label(1=same)
for name1, idx1, name2, idx2 in tqdm(pairs):
img1_path = os.path.join(lfw_path, name1, '%s_%04d.jpg' % (name1, idx1))
img2_path = os.path.join(lfw_path, name2, '%s_%04d.jpg' % (name2, idx2))
img1 = cv2.imread(img1_path, cv2.IMREAD_COLOR)
img2 = cv2.imread(img2_path, cv2.IMREAD_COLOR)
if img1 is None:
logger.warning('image not read, path=%s' % img1_path)
if img2 is None:
logger.warning('image not read, path=%s' % img2_path)
result1 = self.run(image=img1, detector=detec, recognizer=recog, visualize=False)
result2 = self.run(image=img2, detector=detec, recognizer=recog, visualize=False)
if len(result1) == 0:
logger.warning('face not detected, name=%s(%d)! %s(%d)' % (name1, idx1, name2, idx2))
test_result.append((0.0, name1 == name2))
continue
if len(result2) == 0:
logger.warning('face not detected, name=%s(%d) %s(%d)!' % (name1, idx1, name2, idx2))
test_result.append((0.0, name1 == name2))
continue
feat1 = result1[0].face_feature
feat2 = result2[0].face_feature
similarity = feat_distance_cosine(feat1, feat2)
test_result.append((similarity, name1 == name2))
# calculate accuracy TODO
accuracy = sum([label == (score > DeepFaceConfs.get()['recognizer'][just_name]['score_th']) for score, label in test_result]) / float(len(test_result))
logger.info('accuracy=%.8f' % accuracy)
# ROC Curve, AUC
tps = []
fps = []
accuracy0 = []
accuracy1 = []
acc_th = []
for th in range(0, 100, 5):
th = th / 100.0
tp = 0
tn = 0
fp = 0
fn = 0
for score, label in test_result:
if score >= th and label == 1:
tp += 1
elif score >= th and label == 0:
fp += 1
elif score < th and label == 0:
tn += 1
elif score < th and label == 1:
fn += 1
tpr = tp / (tp + fn + 1e-12)
fpr = fp / (fp + tn + 1e-12)
tps.append(tpr)
fps.append(fpr)
accuracy0.append(tn / (tn + fp + 1e-12))
accuracy1.append(tp / (tp + fn + 1e-12))
acc_th.append(th)
fpr, tpr, thresh = roc_curve([x[1] for x in test_result], [x[0] for x in test_result])
fnr = 1 - tpr
eer = fnr[np.nanargmin(np.absolute((fnr - fpr)))]
logger.info('1-eer=%.4f' % (1.0 - eer))
with open('./etc/test_lfw.pkl', 'rb') as f:
results = pickle.load(f)
if visualize in [True, 'True', 'true', 1, '1']:
fig = plt.figure()
a = fig.add_subplot(1, 2, 1)
plt.title('Experiment on LFW')
plt.plot(fpr, tpr, label='%s(%.4f)' % (model, 1 - eer)) # TODO : label
for model_name in results:
if model_name == model:
continue
fpr_prev = results[model_name]['fpr']
tpr_prev = results[model_name]['tpr']
eer_prev = results[model_name]['eer']
plt.plot(fpr_prev, tpr_prev, label='%s(%.4f)' % (model_name, 1 - eer_prev))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
a.legend()
a.set_title('Receiver operating characteristic')
a = fig.add_subplot(1, 2, 2)
plt.plot(accuracy0, acc_th, label='Accuracy_diff')
plt.plot(accuracy1, acc_th, label='Accuracy_same')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
a.legend()
a.set_title('%s : TP, TN' % model)
fig.savefig('./etc/roc.png', dpi=300)
plt.show()
plt.draw()
with open('./etc/test_lfw.pkl', 'wb') as f:
results[model] = {
'fpr': fpr,
'tpr': tpr,
'acc_th': acc_th,
'accuracy0': accuracy0,
'accuracy1': accuracy1,
'eer': eer
}
pickle.dump(results, f, pickle.HIGHEST_PROTOCOL)
return 1.0 - eer
if __name__ == '__main__':
fire.Fire(DeepFace)
| 1.96875
| 2
|
bro/__init__.py
|
xuzuoyang/GitBro
| 0
|
12775227
|
<reponame>xuzuoyang/GitBro<gh_stars>0
from .api import API
from .hub import (PullRequest, comment_pull_request, create_pull_request,
get_pull_request, merge_pull_request, update_pull_request)
__all__ = [
'API', 'PullRequest', 'create_pull_request', 'update_pull_request',
'get_pull_request', 'comment_pull_request', 'merge_pull_request'
]
| 1.21875
| 1
|
Days/Day 2 - Inventory Management System/Part 2.py
|
jamesjiang52/Advent-of-Code-2018
| 0
|
12775228
|
def differ(string_1, string_2):
new_string = ""
for i in range(len(string_1)):
if string_1[i] == string_2[i]:
new_string += string_1[i]
return new_string
def main():
    with open("Data.txt") as data_file:
        f = [line.rstrip("\n") for line in data_file]
    for i in range(len(f)):
        for j in range(i + 1, len(f)):
            common = differ(f[i], f[j])
            if len(common) == len(f[i]) - 1:
                print(common)
if __name__ == "__main__":
main()
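# Illustration (hypothetical IDs): "fghij" and "fguij" differ at exactly one
# position, so the shared letters are printed:
#   differ("fghij", "fguij")  ->  "fgij"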
| 3.4375
| 3
|
ui_to_py_converter.py
|
Vincent-Stragier/deltamed_coherence_openutils
| 0
|
12775229
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""PyQt5 uic module convert ui file (XML code) into py file (Python code)"""
from PyQt5 import uic
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(allow_abbrev=True)
parser.add_argument(
'input_file',
type=str,
help='path to the .ui file to convert to a .py file',
)
parser.add_argument(
'output_file',
type=str,
help=(
'path to the converted .py file'
),
)
args = parser.parse_args()
with open(args.input_file, 'r') as file_in:
with open(args.output_file, 'w') as file_out:
uic.compileUi(file_in, file_out, execute=True)
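# Example invocation (illustrative file names):
#   python ui_to_py_converter.py main_window.ui ui_main_window.py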
| 2.859375
| 3
|
Betsy/Betsy/modules/extract_platform_annotations.py
|
jefftc/changlab
| 9
|
12775230
|
<gh_stars>1-10
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, in_data, out_attributes, user_options, num_cores,
outfile):
from genomicode import filelib
outhandle = open(outfile, 'w')
extract_sf_platform(in_data.identifier, outhandle)
outhandle.close()
filelib.assert_exists_nz(outfile)
def name_outfile(self, antecedents, user_options):
return "platform.txt"
def extract_sf_platform(filename, outhandle):
from genomicode import filelib
handle = filelib.openfh(filename)
while 1:
line = handle.readline()
if not line:
raise AssertionError, "I could not find platform"
#if line.startswith("^PLATFORM") and line.find(platform) >= 0:
# break
# Assuming only one platform per file.
if line.startswith("^PLATFORM"):
break
in_platform_table = 0
for line in handle:
if line.startswith("!platform_table_begin"):
in_platform_table = 1
elif line.startswith("!platform_table_end"):
break
elif in_platform_table:
print >>outhandle, line,
handle.close()
| 2.703125
| 3
|
userbot/plugins/gbun.py
|
emperorakashi4/HexBot
| 0
|
12775231
|
# This is a troll indeed ffs *facepalm*
import asyncio
from telethon import events
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.types import ChannelParticipantsAdmins
from userbot.utils import admin_cmd
@borg.on(admin_cmd("gbun"))
async def gbun(event):
if event.fwd_from:
return
gbunVar = event.text
gbunVar = gbunVar[6:]
mentions = "`Alert! User Is Banned In The` @Xiaomeme `Federation\n`"
no_reason = "**Reason:** `Likely A Spammer!`"
await event.edit("**Starting A Federation Ban!**")
    await asyncio.sleep(4)
chat = await event.get_input_chat()
async for x in borg.iter_participants(chat, filter=ChannelParticipantsAdmins):
mentions += f""
reply_message = None
if event.reply_to_msg_id:
reply_message = await event.get_reply_message()
replied_user = await event.client(GetFullUserRequest(reply_message.from_id))
firstname = replied_user.user.first_name
usname = replied_user.user.username
idd = reply_message.from_id
# make meself invulnerable cuz why not xD
if idd == 1089637689:
await reply_message.reply("`You Cannot Ban A Federation Admin`")
else:
jnl=("`Alert!! `"
"[{}](tg://user?id={})"
"`Is Banned In The` @Xiaomeme `Federation.`\n\n`"
"**First Name: ** __{}__\n"
"**ID : ** `{}`\n"
).format(firstname, idd, firstname, idd)
            if usname is None:
                jnl += "**Username: ** `Doesn't own a username!`\n"
            else:
                jnl += "**Username** : @{}\n".format(usname)
if len(gbunVar) > 0:
gbunm = "`{}`".format(gbunVar)
gbunr = "**Reason: **"+gbunm
jnl += gbunr
else:
jnl += no_reason
await reply_message.reply(jnl)
else:
mention = "`Alert! User Is Banned In The` @Xiaomeme `Federation`\n**Reason:** `Likely A Spammer!`"
await event.reply(mention)
await event.delete()
| 2.046875
| 2
|
lessons/2021-02-20/__name_VS__main__/forecast.py
|
arturskarklins/sda
| 0
|
12775232
|
<reponame>arturskarklins/sda
import sys
import weather
def main():
print(f'Today is {weather.foggy()} and {weather.rain()}')
if __name__ == '__main__':
main()
else:
    print('Error: this module can be executed only as a script / stand-alone component')
sys.exit(1)
| 2.0625
| 2
|
berliner/mist/_test.py
|
hypergravity/berliner
| 4
|
12775233
|
<reponame>hypergravity/berliner
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 27 23:36:54 2018
@author: cham
"""
#%%
from berliner import mist
import glob
data_dir = "/hydrogen/mist/1.2/isochrones/MIST_v1.2_vvcrit0.4_WISE"
fps = glob.glob(data_dir+"/*.cmd")
print(fps)
filepath = fps[0]
isocs = mist.read_mist_isochrones(fps[0])
isoc0 = isocs[0]
from matplotlib import pyplot as plt
plt.figure()
plt.plot(isoc0["log_Teff"], isoc0["log_g"])
#%%
""" read all isochrones from pattern """
from berliner import mist
ptn = "/hydrogen/mist/1.2/isochrones/MIST_v1.2_vvcrit0.4_WISE/*.cmd"
isoclist = mist.read_isochrones_ptn(ptn)
#%%
""" read all tracks from pattern """
#import glob
#fps = glob.glob(ptn)
from berliner import mist, utils
ptn = "/hydrogen/mist/1.2/eep_tracks/MIST_v1.2_*_vvcrit0.0_EEPS/*.track.eep"
tracks, metatable = mist.read_tracks_ptn(ptn)
import numpy as np
ts = utils.TrackSet(np.log10(metatable["initial_mass"]), metatable["FeH"], tracks)
ts.interp_tgm([1.,0.01,209])
| 1.984375
| 2
|
src/alphanet/__init__.py
|
UtorYeung/AlphaNetV3
| 57
|
12775234
|
"""时间序列计算层、神经网络模型定义.
复现华泰金工 alpha net V2、V3 版本.
V2:
```
input: (batch_size, history time steps, features)
stride = 5
input -> expand features -> BN -> LSTM -> BN -> Dense(linear)
```
V3:
```
input: (batch_size, history time steps, features)
stride = 5
+-> expand features -> BN -> GRU -> BN -+
input --| stride = 10 |- concat -> Dense(linear)
+-> expand features -> BN -> GRU -> BN -+
```
(BN: batch normalization)
version: 0.0.7
author: <NAME>
date: 2021-07-29
该module定义了计算不同时间序列特征的层,工程上使用tensorflow
进行高度向量化的计算,训练时较高效。
"""
import tensorflow as _tf
import tensorflow.keras.layers as _tfl
from tensorflow.keras.layers import Layer as _Layer
from tensorflow.keras.initializers import Initializer as _Initializer
from tensorflow.keras import Model as _Model
from .metrics import UpDownAccuracy as _UpDownAccuracy
from abc import ABC as _ABC
from abc import abstractmethod as _abstractmethod
if not "2.3.0" <= _tf.__version__:
print(f"requires tensorflow version >= 2.3.0, "
f"current version {_tf.__version__}")
exit(1)
__all__ = ["Std",
"Return",
"Correlation",
"LinearDecay",
"Covariance",
"ZScore",
"FeatureExpansion",
"AlphaNetV2",
"AlphaNetV3",
"AlphaNetV4",
"load_model"]
class _StrideLayer(_Layer, _ABC):
"""计算每个stride的统计值的基类."""
def __init__(self, stride=10, **kwargs):
"""计算每个stride的统计值的基类.
Args:
stride (int): time steps需要是stride的整数倍
"""
if stride <= 1:
raise ValueError("Illegal Argument: stride should be "
"greater than 1")
super(_StrideLayer, self).__init__(**kwargs)
self.stride = stride
self.out_shape = None
self.intermediate_shape = None
def build(self, input_shape):
"""构建该层,计算维度信息."""
(features,
output_length) = __get_dimensions__(input_shape, self.stride)
self.out_shape = [-1, output_length, features]
self.intermediate_shape = [-1, self.stride, features]
def get_config(self):
"""获取参数,保存模型需要的函数."""
config = super().get_config().copy()
config.update({'stride': self.stride})
return config
class Std(_StrideLayer):
"""计算每个序列各stride的标准差.
Notes:
计算每个feature各个stride的standard deviation
"""
def call(self, inputs, *args, **kwargs):
"""函数主逻辑实现部分.
Args:
inputs (tensor): 输入dimension为(batch_size, time_steps, features)
Returns:
dimension 为(batch_size, time_steps / stride, features)
"""
strides = _tf.reshape(inputs, self.intermediate_shape)
# compute standard deviations for each stride
std = _tf.math.reduce_std(strides, axis=-2)
return _tf.reshape(std, self.out_shape)
class ZScore(_StrideLayer):
"""计算每个序列各stride的均值除以其标准差.
Notes:
并非严格意义上的z-score,
计算公式为每个feature各个stride的mean除以各自的standard deviation
"""
def call(self, inputs, *args, **kwargs):
"""函数主逻辑实现部分.
Args:
inputs (tensor): 输入dimension为(batch_size, time_steps, features)
Returns:
dimension 为(batch_size, time_steps / stride, features)
"""
strides = _tf.reshape(inputs, self.intermediate_shape)
# compute standard deviations for each stride
std = _tf.math.reduce_std(strides, axis=-2)
# compute means for each stride
means = _tf.math.reduce_mean(strides, axis=-2)
# divide means by standard deviations for each stride
z_score = _tf.math.divide_no_nan(means, std)
return _tf.reshape(z_score, self.out_shape)
class LinearDecay(_StrideLayer):
"""计算每个序列各stride的线性衰减加权平均.
Notes:
以线性衰减为权重,计算每个feature各个stride的均值:
如stride为10,则某feature该stride的权重为(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
"""
def call(self, inputs, *args, **kwargs):
"""函数主逻辑实现部分.
Args:
inputs (tensor): 输入dimension为(batch_size, time_steps, features)
Returns:
dimension 为(batch_size, time_steps / stride, features)
"""
# get linear decay kernel
single_kernel = _tf.linspace(1.0, self.stride, num=self.stride)
kernel = _tf.repeat(single_kernel, self.intermediate_shape[2])
kernel = kernel / _tf.reduce_sum(single_kernel)
# reshape tensors into:
        # (batch_size * (time_steps / stride), stride, features)
kernel = _tf.reshape(kernel, self.intermediate_shape[1:])
inputs = _tf.reshape(inputs, self.intermediate_shape)
# broadcasting kernel to inputs batch dimension
linear_decay = _tf.reduce_sum(kernel * inputs, axis=1)
linear_decay = _tf.reshape(linear_decay, self.out_shape)
return linear_decay
class Return(_Layer):
"""计算每个序列各stride的回报率.
Notes:
计算公式为每个stride最后一个数除以第一个数再减去一
"""
def __init__(self, stride=10, **kwargs):
"""回报率.
Args:
stride (int): time steps需要是stride的整数倍
"""
if stride <= 1:
raise ValueError("Illegal Argument: stride should be "
"greater than 1")
super(Return, self).__init__(**kwargs)
self.stride = stride
def build(self, input_shape):
"""构建该层,计算维度信息."""
time_steps = input_shape[1]
if time_steps % self.stride != 0:
raise ValueError("Error, time_steps 应该是 stride的整数倍")
def call(self, inputs, *args, **kwargs):
"""函数主逻辑实现部分.
Args:
inputs (tensor): 输入dimension为(batch_size, time_steps, features)
Returns:
dimension 为(batch_size, time_steps / stride, features)
"""
# get the endings of each strides as numerators
numerators = inputs[:, (self.stride - 1)::self.stride, :]
# get the beginnings of each strides as denominators
denominators = inputs[:, 0::self.stride, :]
return _tf.math.divide_no_nan(numerators, denominators) - 1.0
def get_config(self):
"""获取参数,保存模型需要的函数."""
config = super().get_config().copy()
config.update({'stride': self.stride})
return config
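# Illustration of Return (assumed shapes): with stride=10 and inputs of shape
# (batch, 30, features), the layer emits (batch, 3, features), each value
# being (last element / first element) - 1 within its 10-step stride.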
class _OuterProductLayer(_Layer, _ABC):
def __init__(self, stride=10, **kwargs):
"""外乘类的扩张层.
Args:
stride (int): time steps需要是stride的整数倍
"""
if stride <= 1:
raise ValueError("Illegal Argument: stride should be "
"greater than 1")
super(_OuterProductLayer, self).__init__(**kwargs)
self.stride = stride
self.intermediate_shape = None
self.out_shape = None
self.lower_mask = None
def build(self, input_shape):
"""构建该层,计算维度信息."""
(features,
output_length) = __get_dimensions__(input_shape, self.stride)
self.intermediate_shape = (-1, self.stride, features)
output_features = int(features * (features - 1) / 2)
self.out_shape = (-1, output_length, output_features)
self.lower_mask = _LowerNoDiagonalMask()((features, features))
def get_config(self):
"""获取参数,保存模型需要的函数."""
config = super().get_config().copy()
config.update({'stride': self.stride})
return config
@_abstractmethod
def call(self, inputs, *args, **kwargs):
"""逻辑实现部分."""
...
class Covariance(_OuterProductLayer):
"""计算每个stride各时间序列片段的covariance.
Notes:
计算每个stride每两个feature之间的covariance大小,
输出feature数量为features * (features - 1) / 2
"""
def call(self, inputs, *args, **kwargs):
"""函数主逻辑实现部分.
Args:
inputs (tensor): 输入dimension为(batch_size, time_steps, features)
Returns:
dimension 为(batch_size, time_steps / stride,
features * (features - 1) / 2)
"""
# compute means for each stride
means = _tf.nn.avg_pool(inputs,
ksize=self.stride,
strides=self.stride,
padding="VALID")
# subtract means for each stride
means_broadcast = _tf.repeat(means, self.stride, axis=1)
means_subtracted = _tf.subtract(inputs, means_broadcast)
means_subtracted = _tf.reshape(means_subtracted,
self.intermediate_shape)
# compute covariance matrix
covariance_matrix = _tf.einsum("ijk,ijm->ikm",
means_subtracted,
means_subtracted)
covariance_matrix = covariance_matrix / (self.stride - 1)
# get the lower part of the covariance matrix
# without the diagonal elements
covariances = _tf.boolean_mask(covariance_matrix,
self.lower_mask,
axis=1)
covariances = _tf.reshape(covariances, self.out_shape)
return covariances
class Correlation(_OuterProductLayer):
"""计算每个stride各时间序列的相关系数.
Notes:
计算每个stride每两个feature之间的correlation coefficient,
输出feature数量为features * (features - 1) / 2
"""
def call(self, inputs, *args, **kwargs):
"""函数主逻辑实现部分.
Args:
inputs (tensor): 输入dimension为(batch_size, time_steps, features)
Returns:
dimension 为(batch_size, time_steps / stride,
features * (features - 1) / 2)
"""
# compute means for each stride
means = _tf.nn.avg_pool(inputs,
ksize=self.stride,
strides=self.stride,
padding="VALID")
# subtract means for each stride
means_broadcast = _tf.repeat(means, self.stride, axis=1)
means_subtracted = _tf.subtract(inputs, means_broadcast)
means_subtracted = _tf.reshape(means_subtracted,
self.intermediate_shape)
# compute standard deviations for each strides
squared_diff = _tf.square(means_subtracted)
mean_squared_error = _tf.reduce_mean(squared_diff, axis=1)
std = _tf.sqrt(mean_squared_error)
# get denominator of correlation matrix
denominator_matrix = _tf.einsum("ik,im->ikm", std, std)
# compute covariance matrix
covariance_matrix = _tf.einsum("ijk,ijm->ikm",
means_subtracted,
means_subtracted)
covariance_matrix = covariance_matrix / self.stride
# take the lower triangle of each matrix without diagonal
covariances = _tf.boolean_mask(covariance_matrix,
self.lower_mask,
axis=1)
denominators = _tf.boolean_mask(denominator_matrix,
self.lower_mask,
axis=1)
correlations = _tf.math.divide_no_nan(covariances, denominators)
correlations = _tf.reshape(correlations, self.out_shape)
return correlations
class FeatureExpansion(_Layer):
"""计算时间序列特征扩张层,汇总6个计算层.
Notes:
该层扩张时间序列的feature数量,并通过stride缩短时间序列长度,
其包括一下一些feature:
- standard deviation
- mean / standard deviation
- linear decay average
- return of each stride
- covariance of each two features for each stride
- correlation coefficient of each two features for each stride
"""
def __init__(self, stride=10, **kwargs):
"""时间序列特征扩张.
Args:
stride (int): time steps需要是stride的整数倍
"""
if type(stride) is not int or stride <= 1:
raise ValueError("Illegal Argument: stride should be an integer "
"greater than 1")
super(FeatureExpansion, self).__init__(**kwargs)
self.stride = stride
self.std = _tf.function(Std(stride=self.stride))
self.z_score = _tf.function(ZScore(stride=self.stride))
self.linear_decay = _tf.function(LinearDecay(stride=self.stride))
self.return_ = _tf.function(Return(stride=self.stride))
self.covariance = _tf.function(Covariance(stride=self.stride))
self.correlation = _tf.function(Correlation(stride=self.stride))
def call(self, inputs, *args, **kwargs):
"""函数主逻辑实现部分.
Args:
inputs (tensor): 输入dimension为(batch_size, time_steps, features)
Returns:
dimension 为(batch_size, time_steps / stride,
features * (features + 3))
"""
std_output = self.std(inputs)
z_score_output = self.z_score(inputs)
decay_linear_output = self.linear_decay(inputs)
return_output = self.return_(inputs)
covariance_output = self.covariance(inputs)
correlation_output = self.correlation(inputs)
return _tf.concat([std_output,
z_score_output,
decay_linear_output,
return_output,
covariance_output,
correlation_output], axis=2)
def get_config(self):
"""获取参数,保存模型需要的函数."""
config = super().get_config().copy()
config.update({'stride': self.stride})
return config
class AlphaNetV2(_Model):
"""神经网络模型,继承``keras.Model``类.
alpha net v2版本模型.
Notes:
复现华泰金工 alpha net V2 版本
``input: (batch_size, history time steps, features)``
"""
def __init__(self,
dropout=0.0,
l2=0.001,
stride=10,
classification=False,
categories=0,
*args,
**kwargs):
"""Alpha net v3.
Notes:
alpha net v2 版本的全tensorflow实现,结构详见代码展开
Args:
dropout: 跟在特征扩张以及Batch Normalization之后的dropout,默认无dropout
l2: 输出层的l2-regularization参数
"""
super(AlphaNetV2, self).__init__(*args, **kwargs)
self.l2 = l2
self.dropout = dropout
self.stride = stride
self.expanded = FeatureExpansion(stride=self.stride)
self.normalized = _tfl.BatchNormalization()
        self.dropout_layer = _tfl.Dropout(self.dropout)
self.lstm = _tfl.LSTM(units=30)
self.normalized_2 = _tfl.BatchNormalization()
self.regularizer = _tf.keras.regularizers.l2(self.l2)
if classification:
if categories < 1:
raise ValueError("categories should be at least 1")
elif categories == 1:
self.outputs = _tfl.Dense(1, activation="sigmoid",
kernel_initializer="truncated_normal",
kernel_regularizer=self.regularizer)
else:
self.outputs = _tfl.Dense(categories, activation="softmax",
kernel_initializer="truncated_normal",
kernel_regularizer=self.regularizer)
else:
self.outputs = _tfl.Dense(1, activation="linear",
kernel_initializer="truncated_normal",
kernel_regularizer=self.regularizer)
@_tf.function
def call(self, inputs, training=None, mask=None):
"""计算逻辑实现."""
expanded = self.expanded(inputs)
normalized = self.normalized(expanded, training=training)
lstm = self.lstm(normalized)
normalized2 = self.normalized_2(lstm, training=training)
        dropout = self.dropout_layer(normalized2, training=training)
output = self.outputs(dropout)
return output
def compile(self,
optimizer=_tf.keras.optimizers.Adam(0.0001),
loss="MSE",
metrics=None,
loss_weights=None,
weighted_metrics=None,
run_eagerly=None,
**kwargs):
"""设置优化器、loss、metric等."""
super().compile(optimizer=optimizer,
loss=loss,
metrics=metrics,
loss_weights=loss_weights,
weighted_metrics=weighted_metrics,
run_eagerly=run_eagerly)
def get_config(self):
"""获取参数,保存模型需要的函数."""
config = super().get_config().copy()
config.update({'dropout': self.dropout,
'l2': self.l2,
'stride': self.stride})
return config
class AlphaNetV3(_Model):
"""神经网络模型,继承``keras.Model``类.
alpha net v3版本模型.
Notes:
复现华泰金工 alpha net V3 版本
``input: (batch_size, history time steps, features)``
"""
def __init__(self,
dropout=0.0,
l2=0.001,
classification=False,
categories=0,
recurrent_unit="GRU",
hidden_units=30,
*args,
**kwargs):
"""Alpha net v3.
Notes:
alpha net v3 版本的全tensorflow实现,结构详见代码展开
Args:
dropout: 跟在特征扩张以及Batch Normalization之后的dropout,默认无dropout
l2: 输出层的l2-regularization参数
classification: 是否为分类问题
categories: 分类问题的类别数量
recurrent_unit (str): 该参数可以为"GRU"或"LSTM"
"""
super(AlphaNetV3, self).__init__(*args, **kwargs)
self.l2 = l2
self.dropout = dropout
self.expanded10 = FeatureExpansion(stride=10)
self.expanded5 = FeatureExpansion(stride=5)
self.normalized10 = _tfl.BatchNormalization()
self.normalized5 = _tfl.BatchNormalization()
self.dropout_layer = _tfl.Dropout(self.dropout)
if recurrent_unit == "GRU":
self.recurrent10 = _tfl.GRU(units=hidden_units)
self.recurrent5 = _tfl.GRU(units=hidden_units)
elif recurrent_unit == "LSTM":
self.recurrent10 = _tfl.LSTM(units=hidden_units)
self.recurrent5 = _tfl.LSTM(units=hidden_units)
else:
raise ValueError("Unknown recurrent_unit")
self.normalized10_2 = _tfl.BatchNormalization()
self.normalized5_2 = _tfl.BatchNormalization()
self.concat = _tfl.Concatenate(axis=-1)
self.regularizer = _tf.keras.regularizers.l2(self.l2)
if classification:
if categories < 1:
raise ValueError("categories should be at least 1")
elif categories == 1:
self.outputs = _tfl.Dense(1, activation="sigmoid",
kernel_initializer="truncated_normal",
kernel_regularizer=self.regularizer)
else:
self.outputs = _tfl.Dense(categories, activation="softmax",
kernel_initializer="truncated_normal",
kernel_regularizer=self.regularizer)
else:
self.outputs = _tfl.Dense(1, activation="linear",
kernel_initializer="truncated_normal",
kernel_regularizer=self.regularizer)
@_tf.function
def call(self, inputs, training=None, mask=None):
"""计算逻辑实现."""
expanded10 = self.expanded10(inputs)
expanded5 = self.expanded5(inputs)
normalized10 = self.normalized10(expanded10, training=training)
normalized5 = self.normalized5(expanded5, training=training)
recurrent10 = self.recurrent10(normalized10)
recurrent5 = self.recurrent5(normalized5)
normalized10_2 = self.normalized10_2(recurrent10, training=training)
normalized5_2 = self.normalized5_2(recurrent5, training=training)
concat = self.concat([normalized10_2, normalized5_2])
dropout = self.dropout_layer(concat, training=training)
output = self.outputs(dropout)
return output
def compile(self,
optimizer=_tf.keras.optimizers.Adam(0.0001),
loss="MSE",
metrics=None,
loss_weights=None,
weighted_metrics=None,
run_eagerly=None,
**kwargs):
"""设置优化器、loss、metric等."""
super().compile(optimizer=optimizer,
loss=loss,
metrics=metrics,
loss_weights=loss_weights,
weighted_metrics=weighted_metrics,
run_eagerly=run_eagerly)
def get_config(self):
"""获取参数,保存模型需要的函数."""
config = super().get_config().copy()
config.update({'dropout': self.dropout,
'l2': self.l2})
return config
class AlphaNetV4(_Model):
"""神经网络模型,继承``keras.Model``类.
Notes:
``input: (batch_size, history time steps, features)``
"""
def __init__(self,
dropout=0.0,
l2=0.001,
classification=False,
categories=0,
recurrent_unit="GRU",
hidden_units=30,
*args,
**kwargs):
"""Alpha net v4.
Notes:
去掉了batch normalization的模型,
训练需要使用data模块的normalization
或其他自定义normalization.
Args:
dropout: 跟在特征扩张以及Batch Normalization之后的dropout,默认无dropout
l2: 输出层的l2-regularization参数
classification: 是否为分类问题
categories: 分类问题的类别数量
recurrent_unit (str): 该参数可以为"GRU"或"LSTM"
"""
super(AlphaNetV4, self).__init__(*args, **kwargs)
self.l2 = l2
self.dropout = dropout
self.expanded10 = FeatureExpansion(stride=10)
self.expanded5 = FeatureExpansion(stride=5)
self.dropout_layer = _tfl.Dropout(self.dropout)
if recurrent_unit == "GRU":
self.recurrent10 = _tfl.GRU(units=hidden_units)
self.recurrent5 = _tfl.GRU(units=hidden_units)
elif recurrent_unit == "LSTM":
self.recurrent10 = _tfl.LSTM(units=hidden_units)
self.recurrent5 = _tfl.LSTM(units=hidden_units)
else:
raise ValueError("Unknown recurrent_unit")
self.concat = _tfl.Concatenate(axis=-1)
self.regularizer = _tf.keras.regularizers.l2(self.l2)
if classification:
if categories < 1:
raise ValueError("categories should be at least 1")
elif categories == 1:
self.outputs = _tfl.Dense(1, activation="sigmoid",
kernel_initializer="truncated_normal",
kernel_regularizer=self.regularizer)
else:
self.outputs = _tfl.Dense(categories, activation="softmax",
kernel_initializer="truncated_normal",
kernel_regularizer=self.regularizer)
else:
self.outputs = _tfl.Dense(1, activation="linear",
kernel_initializer="truncated_normal",
kernel_regularizer=self.regularizer)
@_tf.function
def call(self, inputs, training=None, mask=None):
"""计算逻辑实现."""
expanded10 = self.expanded10(inputs)
expanded5 = self.expanded5(inputs)
recurrent10 = self.recurrent10(expanded10)
recurrent5 = self.recurrent5(expanded5)
concat = self.concat([recurrent10, recurrent5])
dropout = self.dropout_layer(concat, training=training)
output = self.outputs(dropout)
return output
def compile(self,
optimizer=_tf.keras.optimizers.Adam(0.0001),
loss="MSE",
metrics=None,
loss_weights=None,
weighted_metrics=None,
run_eagerly=None,
**kwargs):
"""设置优化器、loss、metric等."""
super().compile(optimizer=optimizer,
loss=loss,
metrics=metrics,
loss_weights=loss_weights,
weighted_metrics=weighted_metrics,
run_eagerly=run_eagerly)
def get_config(self):
"""获取参数,保存模型需要的函数."""
config = super().get_config().copy()
config.update({'dropout': self.dropout,
'l2': self.l2})
return config
def load_model(filepath,
custom_objects: dict = None,
compile: bool = True,
options=None):
"""用于读取已存储的模型,可识别自定义metric: UpDownAccuracy.
Notes:
包装``tf.keras``的``load_model``函数,添加``UpDownAccuracy``.
Args:
filepath: 文件路径:
- String or `pathlib.Path` object, path to the saved model
- `h5py.File` object from which to load the model
custom_objects: 自定义类的识别,从类或函数名到类或函数的映射字典.
compile: Boolean, 是否compile model.
options: 其他 `tf.saved_model.LoadOptions`.
Returns:
Keras model instance.
Raises:
ImportError: if loading from an hdf5 file and h5py is not available.
IOError: In case of an invalid savefile
"""
object_dict = {"UpDownAccuracy": _UpDownAccuracy}
if custom_objects is not None:
object_dict.update(custom_objects)
return _tf.keras.models.load_model(filepath,
custom_objects=object_dict,
compile=compile,
options=options)
class _LowerNoDiagonalMask(_Initializer):
"""获取不含对角元素的矩阵下三角mask.
Notes:
Provide a mask giving the lower triangular of a matrix
without diagonal elements.
"""
def __init__(self):
super(_LowerNoDiagonalMask, self).__init__()
def __call__(self, shape, **kwargs):
"""计算逻辑."""
ones = _tf.ones(shape)
mask_lower = _tf.linalg.band_part(ones, -1, 0)
mask_diag = _tf.linalg.band_part(ones, 0, 0)
# lower triangle removing the diagonal elements
mask = _tf.cast(mask_lower - mask_diag, dtype=_tf.bool)
return mask
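# Illustration (assumed 3x3 shape): _LowerNoDiagonalMask()((3, 3)) yields
#   [[False, False, False],
#    [True,  False, False],
#    [True,  True,  False]]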
def __get_dimensions__(input_shape, stride):
"""计算相关维度长度.
Notes:
output_length = 原来的时间长度 / stride的长度
Args:
input_shape: pass the inputs of layer to the function
stride (int): the stride of the custom layer
Returns:
(features, output_length)
Raises:
ValueError: 如果历史长度不是stride的整数倍
"""
if type(stride) is not int or stride <= 1:
raise ValueError("Illegal Argument: stride should be an integer "
"greater than 1")
time_steps = input_shape[1]
features = input_shape[2]
output_length = time_steps // stride
if time_steps % stride != 0:
raise ValueError("Error, time_steps 应该是 stride的整数倍")
return features, output_length
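if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module).
    # Shapes are assumptions: time_steps (30) must be a multiple of both
    # strides used by AlphaNetV3 (5 and 10); 15 features are arbitrary.
    import numpy as _np
    _model = AlphaNetV3(dropout=0.1, l2=0.001)
    _model.compile()
    _x = _np.random.rand(32, 30, 15).astype("float32")
    _y = _np.random.rand(32, 1).astype("float32")
    _model.fit(_x, _y, epochs=1, verbose=0)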
| 2.90625
| 3
|
month01/all_code/day04/demo08.py
|
chaofan-zheng/tedu-python-demo
| 4
|
12775235
|
<filename>month01/all_code/day04/demo08.py
"""
String literals
"""
# 1. The different forms
# Double quotes
name01 = "悟空"
# Single quotes
name02 = '悟空'
# Triple quotes: what you see is what you get
name03 = '''
孙
悟
空'''
print(name03)
name03 = """悟空"""
# 2. Quote conflicts
message = '我是"孙悟空"同学.'
message = "我是'孙悟空'同学."
message = """我是'孙'悟"空"同学."""
# 3. Escape characters: special characters that change the meaning
# \" \' \\ newline \n
message = "我是\"孙悟空\"同\n学."
print(message)
url = "C:\\antel\\bxtremeGraphics\\cUI\\desource"
# Raw string
url = r"C:\antel\bxtremeGraphics\cUI\desource"
print(url)
| 2.265625
| 2
|
UpgradeTest.py
|
AINukeHere/SCBot-DiscordBot
| 0
|
12775236
|
class UpgradeInfo():
def __init__(self):
self.baseCost_mineral = 100
self.baseCost_gas = 100
self.baseCost_time = 266
self.upgradeFactor_mineral = 50
self.upgradeFactor_gas = 50
self.upgradeFactor_time = 32
def GetInfo(self):
        res = f'Initial cost: {self.baseCost_mineral}M / {self.baseCost_gas}G / {self.baseCost_time}T\n'
        res += f'Additional cost per level: {self.upgradeFactor_mineral}M / {self.upgradeFactor_gas}G / {self.upgradeFactor_time}T\n'
return res
def GetUpgradeCost(self, n):
        return (self.baseCost_mineral + (n - 1) * self.upgradeFactor_mineral,
                self.baseCost_gas + (n - 1) * self.upgradeFactor_gas,
                self.baseCost_time + (n - 1) * self.upgradeFactor_time)
def calcN2N(self, startUpgrade, endUpgrade):
all_mineral = 0
all_gas = 0
all_time = 0
while startUpgrade <= endUpgrade:
mineral, gas, time = self.GetUpgradeCost(startUpgrade)
all_mineral += mineral
all_gas += gas
all_time += time
startUpgrade += 1
return all_mineral,all_gas,all_time
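if __name__ == '__main__':
    # Minimal usage sketch (illustrative): total cost of upgrading from
    # level 1 through level 3 with the base values above:
    # mineral/gas 100+150+200 = 450, time 266+298+330 = 894.
    info = UpgradeInfo()
    print(info.GetInfo())
    print(info.calcN2N(1, 3))  # -> (450, 450, 894)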
| 3.328125
| 3
|
Scripts/Rename_Selected_Objects.py
|
vitawebsitedesign/blender-python-scripts
| 0
|
12775237
|
import bpy
for obj in bpy.context.selected_objects:
obj.name = "GEO_sphere"
obj.data.name = "GEO_sphere"
| 1.921875
| 2
|
authentication/models.py
|
eotubu/DjangoGoat
| 0
|
12775238
|
import os
from django.db import models
def upload_path(user, filename):
extension = os.path.splitext(filename)[1]
return 'avatar_%s%s' % (user.pk, extension)
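# e.g. upload_path(user, "me.png") -> "avatar_42.png" for a user whose pk is 42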
class UserProfile(models.Model):
user = models.OneToOneField('auth.User', on_delete=models.CASCADE)
avatar = models.ImageField(upload_to=upload_path, blank=True)
bio = models.TextField(max_length=255, blank=True)
cleartext_password = models.TextField(max_length=255)
| 2.140625
| 2
|
Qiskit/QiskitErrorCorrection/steane_code.py
|
jclapis/qsfe
| 11
|
12775239
|
<filename>Qiskit/QiskitErrorCorrection/steane_code.py
# ========================================================================
# Copyright (C) 2019 The MITRE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from ecc_test_implementation import run_tests
import unittest
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
class SteaneCode(unittest.TestCase):
"""
This class implements Steane's error correction code. It uses 6 extra
qubits to protect 1 original qubit, and can recover from one bit flip
and/or one phase flip (not necessarily on the same qubit).
See the paper at https://link.springer.com/article/10.1007/s11128-015-0988-y
for more details.
"""
# ==============================
# == Algorithm Implementation ==
# ==============================
def encode_register(self, circuit, qubits):
"""
Creates an error-protected qubit, wrapping the original with 6 spares that
protect against bit and/or phase flips.
Parameters:
circuit (QuantumCircuit): The circuit to add the preparation gates to
qubits (QuantumRegister): The register that will become the logical
error-protected qubit. The original qubit can be in any state, but it
must be the first element. All of the other qubits must be |0>.
Remarks:
The circuit for this preparation (called the encoding circuit) can be seen
in Figure 8 of this paper:
https://arxiv.org/pdf/quant-ph/9705031.pdf
"""
# This is not an intuitive circuit at first glance, unlike the Shor code. I
# really recommend you read the papers on this code to understand why it works,
# because it's a very cool idea - it's essentially a quantum version of a classical
# Hamming code used in normal signal processing.
for i in [4, 5, 6]:
circuit.h(qubits[i])
for i in [1, 2]:
circuit.cx(qubits[0], qubits[i])
for i in [0, 1, 3]:
circuit.cx(qubits[6], qubits[i])
for i in [0, 2, 3]:
circuit.cx(qubits[5], qubits[i])
for i in [1, 2, 3]:
circuit.cx(qubits[4], qubits[i])
def decode_register(self, circuit, qubits):
"""
Converts an error-protected logical qubit back into a single qubit by reversing
the encoding operation.
Parameters:
circuit (QuantumCircuit): The circuit to add the unpreparation gates to
qubits (QuantumRegister): The logical error-encoded qubit
"""
for i in [3, 2, 1]:
circuit.cx(qubits[4], qubits[i])
for i in [3, 2, 0]:
circuit.cx(qubits[5], qubits[i])
for i in [3, 1, 0]:
circuit.cx(qubits[6], qubits[i])
for i in [2, 1]:
circuit.cx(qubits[0], qubits[i])
for i in [6, 5, 4]:
circuit.h(qubits[i])
def detect_bit_flip_error(self, circuit, qubits, parity_qubits, parity_measurement):
"""
Detects which physical qubit (if any) in the logical qubit was flipped.
Parameters:
circuit (QuantumCircuit): The circuit to add the detection gates to
qubits (QuantumRegister): The logical error-encoded qubit
parity_qubits (QuantumRegister): The ancilla qubits to use when determining
which qubit was flipped
parity_measurement (ClassicalRegister): The classical register used to
measure the parity qubits
"""
# With 7 qubits, there are 8 possible error states: one for nothing being
# broken, and one for each qubit being broken. You can encode those 8 possibilities
# into a 3-bit binary number. Steane does exactly this, by organizing the 7
# qubits into 3 blocks of 4 qubits each and by using 3 ancilla qubits for measurement.
# The blocks are organized in such a way that 3 of the qubits are unique to any given
# block, 3 belong to 2 blocks, and the last belongs to all 3 blocks. That way, you
# can turn the ancilla measurements into the 3-bit binary number that tells you exactly
# which qubit is broken, and flip it accordingly.
for i in [0, 2, 4, 6]: # Block 0: 0, 2, 4, 6
circuit.cx(qubits[i], parity_qubits[0])
for i in [1, 2, 5, 6]: # Block 1: 1, 2, 5, 6
circuit.cx(qubits[i], parity_qubits[1])
for i in [3, 4, 5, 6]: # Block 2: 3, 4, 5, 6
circuit.cx(qubits[i], parity_qubits[2])
circuit.measure(parity_qubits, parity_measurement)
def detect_phase_flip_error(self, circuit, qubits, parity_qubits, parity_measurement):
"""
Detects which physical qubit (if any) had its phase flipped.
Parameters:
circuit (QuantumCircuit): The circuit to add the detection gates to
qubits (QuantumRegister): The logical error-encoded qubit
parity_qubits (QuantumRegister): The ancilla qubits to use when determining
which qubit had its phase flipped
parity_measurement (ClassicalRegister): The classical register used to
measure the parity qubits
"""
# The rationale here is the same as the bit flip detection above, with two key
# differences: first, the ancilla qubits are intialized to |+> and read in the X
# basis. Second, now we're using the ancilla qubits as the controls during the CNOTs
# instead of the targets. You might ask, how does this make sense? If we're using
# them as the controls and then just measuring them, how can we possibly get any
# useful information from the encoded qubit register?
# Turns out, if one of the register qubits has a phase flip, then that will propagate
# back to the control qubit during a CNOT. This is called a phase kickback, and it's used
# all the time in quantum algorithms. Don't believe me? Try it yourself.
# Do this sequence on your simulator of choice:
# Start with |00>, then do H(0); CNOT(0, 1); Z(1); CNOT(0, 1); H(0);
# You'll end up with |10>.
# Entanglement is black magic. Fun fact: this property is why phase queries work, and
# how superdense coding actually does something useful.
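        # A standalone check of that sequence (illustrative sketch, assuming
        # qiskit.quantum_info is available):
        #   from qiskit import QuantumCircuit
        #   from qiskit.quantum_info import Statevector
        #   qc = QuantumCircuit(2)
        #   qc.h(0); qc.cx(0, 1); qc.z(1); qc.cx(0, 1); qc.h(0)
        #   Statevector.from_instruction(qc)  # all amplitude lands on the state
        #                                     # with qubit 0 flipped, though Z
        #                                     # only touched qubit 1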
circuit.h(parity_qubits)
for i in [0, 2, 4, 6]: # Block 0: 0, 2, 4, 6
circuit.cx(parity_qubits[0], qubits[i])
for i in [1, 2, 5, 6]: # Block 1: 1, 2, 5, 6
circuit.cx(parity_qubits[1], qubits[i])
for i in [3, 4, 5, 6]: # Block 2: 3, 4, 5, 6
circuit.cx(parity_qubits[2], qubits[i])
circuit.h(parity_qubits)
circuit.measure(parity_qubits, parity_measurement)
def correct_errors(self, circuit, qubits, parity_qubits, parity_measurement):
"""
Corrects any errors that have occurred within the logical qubit register.
Parameters:
circuit (QuantumCircuit): The circuit to add the error correction to
qubits (QuantumRegister): The logical qubit register to check and correct
parity_qubits (QuantumRegister): The ancilla qubits to use when determining
which qubit has an error
parity_measurement (ClassicalRegister): The classical register used to
measure the parity qubits
"""
# The 3 parity qubits used during the bit error and phase error detections will
# end up encoding a 3-bit number that directly maps to the broken qubit in each
# operation.
# Here's the table of possibilities, where each term corresponds to the parity bit index.
# So for example, 000 means all 3 measurements were 0 and 011 means parity_1 and parity_2
# were measured to be 1.
# -----------------------
# 000 = No error
        # 001 = Error on qubit 0
# 010 = Error on qubit 1
# 011 = Error on qubit 2
# 100 = Error on qubit 3
# 101 = Error on qubit 4
# 110 = Error on qubit 5
# 111 = Error on qubit 6
# -----------------------
# In Qiskit, we can write this with 8 c_if statements per operation.
# Correct bit flips
self.detect_bit_flip_error(circuit, qubits, parity_qubits, parity_measurement)
for i in range(0, len(qubits)):
circuit.x(qubits[i]).c_if(parity_measurement, i + 1)
circuit.reset(parity_qubits)
# Correct phase flips
self.detect_phase_flip_error(circuit, qubits, parity_qubits, parity_measurement)
for i in range(0, len(qubits)):
circuit.z(qubits[i]).c_if(parity_measurement, i + 1)
# ====================
# == Test Case Code ==
# ====================
def run_steane_test(self, description, enable_bit_flip, enable_phase_flip):
"""
Runs a collection of tests on the Steane ECC.
Parameters:
description (name): A description for this batch of tests
enable_bit_flip (bool): True to run the tests where bit flip errors
are involved, False to leave bit flips off
enable_phase_flip (bool): True to run the tests where phase flip errors
are involved, False to leave phase flips off
"""
number_of_qubits = 7
number_of_parity_qubits = 3
number_of_random_tests = 25
try:
run_tests(description, number_of_qubits, number_of_parity_qubits,
number_of_random_tests, self, enable_bit_flip, enable_phase_flip)
except ValueError as error:
self.fail(repr(error))
def test_no_flip(self):
"""
Runs the Steane ECC on all of the test cases without actually flipping
anything. This is helpful to make sure the test harness works as
intended when no errors are introduced.
"""
self.run_steane_test("normal (no error)", False, False)
def test_bit_flip(self):
"""
Runs the Steane ECC on all of the test cases with bit-flipping enabled.
"""
self.run_steane_test("bit flip", True, False)
def test_phase_flip(self):
"""
Runs the Steane ECC on all of the test cases with phase-flipping enabled.
"""
self.run_steane_test("phase flip", False, True)
def test_combo(self):
"""
Runs the Steane ECC on all of the test cases with both bit-flipping and
phase-flipping enabled.
"""
self.run_steane_test("combo", True, True)
if __name__ == '__main__':
unittest.main()
| 2.0625
| 2
|
restapi/resources/endpoints.py
|
beetleman/http-api
| 0
|
12775240
|
# -*- coding: utf-8 -*-
"""
Base endpoints: authorization, status, checks.
And a Farm: How to create endpoints into REST service.
"""
import pytz
import jwt
import os
from datetime import datetime, timedelta
from flask import jsonify, current_app
from restapi import decorators as decorate
from restapi.exceptions import RestApiException
from restapi.rest.definition import EndpointResource
# from restapi.services.authentication import BaseAuthentication
from restapi.services.detect import detector
from restapi.services.mail import send_mail, send_mail_is_active
from restapi.services.mail import get_html_template
from utilities import htmlcodes as hcodes
from utilities.time import timestamp_from_string
from utilities.globals import mem
from restapi.confs import PRODUCTION
from utilities.logs import get_logger
from restapi.flask_ext.flask_auth import HandleSecurity
log = get_logger(__name__)
class Status(EndpointResource):
""" API online client testing """
# @decorate.catch_error()
def get(self):
#####################
# DEBUG
# print(self.auth)
# log.pp({'test': 1})
# log.pp(pytz)
# return {'Hello', 'World!'}
#####################
# TEST ERRORS
# return self.send_errors(message='test error')
#####################
# NORMAL RESPONSE
return 'Server is alive!'
#####################
# MAIL TEST BLOCK
# # Import smtplib for the actual sending function
# import smtplib
# # Import the email modules we'll need
# from email.mime.text import MIMEText
# msg = MIMEText("just a simple test")
# # me == the sender's email address
# # you == the recipient's email address
# msg['Subject'] = 'Test email'
# msg['From'] = "<EMAIL>"
# msg['To'] = "<EMAIL>"
# # Send the message via our own SMTP server.
# s = smtplib.SMTP('smtp.dockerized.io')
# s.send_message(msg)
# s.quit()
class SwaggerSpecifications(EndpointResource):
"""
Specifications output throught Swagger (open API) standards
"""
def get(self):
# NOTE: swagger dictionary is read only once, at server init time
swagjson = mem.customizer._definitions
        # NOTE: dynamically changing options, based on where the client lies
from restapi.confs import PRODUCTION
from flask import request
from utilities.helpers import get_api_url
api_url = get_api_url(request, PRODUCTION)
scheme, host = api_url.rstrip('/').split('://')
swagjson['host'] = host
swagjson['schemes'] = [scheme]
# Jsonify, so we skip custom response building
return jsonify(swagjson)
class Login(EndpointResource):
""" Let a user login with the developer chosen method """
def verify_information(
self, user, security, totp_auth, totp_code, now=None):
message_body = {}
message_body['actions'] = []
error_message = None
if totp_auth and totp_code is None:
message_body['actions'].append(
self.auth.SECOND_FACTOR_AUTHENTICATION)
error_message = "You do not provided a valid second factor"
epoch = datetime.fromtimestamp(0, pytz.utc)
last_pwd_change = user.last_password_change
if last_pwd_change is None or last_pwd_change == 0:
last_pwd_change = epoch
if self.auth.FORCE_FIRST_PASSWORD_CHANGE and last_pwd_change == epoch:
message_body['actions'].append('FIRST LOGIN')
error_message = "Please change your temporary password"
if totp_auth:
qr_code = security.get_qrcode(user)
message_body["qr_code"] = qr_code
elif self.auth.MAX_PASSWORD_VALIDITY > 0:
if last_pwd_change == epoch:
expired = True
else:
valid_until = \
last_pwd_change + timedelta(
days=self.auth.MAX_PASSWORD_VALIDITY)
if now is None:
now = datetime.now(pytz.utc)
expired = (valid_until < now)
if expired:
message_body['actions'].append('PASSWORD EXPIRED')
error_message = "Your password is expired, please change it"
if error_message is None:
return None
return self.force_response(
message_body, errors=error_message, code=hcodes.HTTP_BAD_FORBIDDEN)
@decorate.catch_error()
def post(self):
# ########## INIT ##########
jargs = self.get_input()
username = jargs.get('username')
if username is None:
username = jargs.get('email')
        if username is not None:
            username = username.lower()
password = jargs.get('password')
if password is None:
password = jargs.get('pwd')
# ##################################################
# Now credentials are checked at every request
if username is None or password is None:
msg = "Missing username or password"
raise RestApiException(
msg, status_code=hcodes.HTTP_BAD_UNAUTHORIZED)
now = datetime.now(pytz.utc)
        new_password = jargs.get('new_password')
password_confirm = jargs.get('password_confirm')
totp_authentication = (
self.auth.SECOND_FACTOR_AUTHENTICATION is not None and
self.auth.SECOND_FACTOR_AUTHENTICATION == self.auth.TOTP
)
if totp_authentication:
totp_code = jargs.get('totp_code')
else:
totp_code = None
security = HandleSecurity(self.auth)
# ##################################################
# Authentication control
security.verify_blocked_username(username)
token, jti = self.auth.make_login(username, password)
security.verify_token(username, token)
user = self.auth.get_user()
security.verify_blocked_user(user)
if totp_authentication and totp_code is not None:
security.verify_totp(user, totp_code)
# ##################################################
# If requested, change the password
if new_password is not None and password_confirm is not None:
pwd_changed = security.change_password(
user, password, new_password, password_confirm)
if pwd_changed:
                password = new_password
token, jti = self.auth.make_login(username, password)
# ##################################################
# Something is missing in the authentication, asking action to user
ret = self.verify_information(
user, security, totp_authentication, totp_code, now)
if ret is not None:
return ret
# ##################################################
# Everything is ok, let's save authentication information
if user.first_login is None:
user.first_login = now
user.last_login = now
# Should be saved inside save_token...
# user.save()
self.auth.save_token(self.auth._user, token, jti)
# FIXME: split response as above in access_token and token_type?
# # The right response should be the following
# {
# "scope": "https://b2stage-test.cineca.it/api/.*",
# "access_token": "<KEY>",
# "token_type": "Bearer",
# "user": "pippo",
# "expires_in": 28800
# }
# FIXME: also set headers in a standard way if it exists
return {'token': token}
class Logout(EndpointResource):
""" Let the logged user escape from here, invalidating current token """
def get(self):
self.auth.invalidate_token(token=self.auth.get_token())
return self.empty_response()
class RecoverPassword(EndpointResource):
@decorate.catch_error()
def post(self):
if not send_mail_is_active():
raise RestApiException(
'Server misconfiguration, unable to reset password. ' +
                'Please report this error to administrators',
status_code=hcodes.HTTP_BAD_REQUEST)
reset_email = self.get_input(single_parameter='reset_email')
if reset_email is None:
raise RestApiException(
'Invalid reset email',
status_code=hcodes.HTTP_BAD_FORBIDDEN)
reset_email = reset_email.lower()
user = self.auth.get_user_object(username=reset_email)
if user is None:
raise RestApiException(
'Sorry, %s ' % reset_email +
'is not recognized as a valid username or email address',
status_code=hcodes.HTTP_BAD_FORBIDDEN)
title = mem.customizer._configurations \
.get('project', {}) \
.get('title', "Unkown title")
# invalidate previous reset tokens
tokens = self.auth.get_tokens(user=user)
for t in tokens:
token_type = t.get("token_type")
if token_type is None:
continue
if token_type != self.auth.PWD_RESET:
continue
tok = t.get("token")
if self.auth.invalidate_token(tok):
log.info("Previous reset token invalidated: %s", tok)
# Generate a new reset token
reset_token, jti = self.auth.create_temporary_token(
user,
duration=86400,
token_type=self.auth.PWD_RESET
)
domain = os.environ.get("DOMAIN")
if PRODUCTION:
protocol = "https"
else:
protocol = "http"
u = "%s://%s/public/reset/%s" % (protocol, domain, reset_token)
body = "link to reset password: %s" % u
replaces = {
"url": u
}
html_body = get_html_template("reset_password.html", replaces)
# html_body = "link to reset password: <a href='%s'>click here</a>" % u
subject = "%s Password Reset" % title
send_mail(html_body, subject, reset_email, plain_body=body)
self.auth.save_token(
user, reset_token, jti, token_type=self.auth.PWD_RESET)
msg = "We are sending an email to your email address where " + \
"you will find the link to enter a new password"
return msg
@decorate.catch_error()
def put(self, token_id):
try:
# Unpack and verify token. If ok, self.auth will be added with
# auth._user auth._token and auth._jti
self.auth.verify_token(
token_id, raiseErrors=True, token_type=self.auth.PWD_RESET)
# If token is expired
except jwt.exceptions.ExpiredSignatureError as e:
raise RestApiException(
'Invalid reset token: this request is expired',
status_code=hcodes.HTTP_BAD_REQUEST)
# if token is not yet active
except jwt.exceptions.ImmatureSignatureError as e:
raise RestApiException(
'Invalid reset token',
status_code=hcodes.HTTP_BAD_REQUEST)
# if token does not exist (or other generic errors)
except Exception as e:
raise RestApiException(
'Invalid reset token',
status_code=hcodes.HTTP_BAD_REQUEST)
# Recovering token object from jti
token = self.auth.get_tokens(token_jti=self.auth._jti)
if len(token) == 0:
raise RestApiException(
'Invalid reset token: this request is no longer valid',
status_code=hcodes.HTTP_BAD_REQUEST)
token = token.pop(0)
emitted = timestamp_from_string(token["emitted"])
# If user logged in after the token emission invalidate the token
if self.auth._user.last_login is not None and \
self.auth._user.last_login >= emitted:
self.auth.invalidate_token(token_id)
raise RestApiException(
'Invalid reset token: this request is no longer valid',
status_code=hcodes.HTTP_BAD_REQUEST)
# If user changed the pwd after the token emission invalidate the token
if self.auth._user.last_password_change is not None and \
self.auth._user.last_password_change >= emitted:
self.auth.invalidate_token(token_id)
raise RestApiException(
'Invalid reset token: this request is no longer valid',
status_code=hcodes.HTTP_BAD_REQUEST)
# The reset token is valid, do something
data = self.get_input()
new_password = data.get("new_password")
password_confirm = data.get("password_confirm")
# No password to be changed, just a token verification
if new_password is None and password_confirm is None:
return self.empty_response()
# Something is missing
if new_password is None or password_confirm is None:
raise RestApiException(
'Invalid password',
status_code=hcodes.HTTP_BAD_REQUEST)
if new_password != password_confirm:
raise RestApiException(
'New password does not match with confirmation',
status_code=hcodes.HTTP_BAD_REQUEST)
security = HandleSecurity(self.auth)
security.change_password(
self.auth._user, None, new_password, password_confirm)
# I really don't know why this save is required... since it is already
# in change_password ... But if I remove it the new pwd is not saved...
self.auth._user.save()
# Bye bye token (reset tokens are valid only once)
self.auth.invalidate_token(token_id)
return "Password changed"
class Tokens(EndpointResource):
""" List all active tokens for a user """
def get_user(self):
iamadmin = self.auth.verify_admin()
if iamadmin:
username = self.get_input(single_parameter='username')
if username is not None:
username = username.lower()
return self.auth.get_user_object(username=username)
return self.get_current_user()
def get(self, token_id=None):
user = self.get_user()
if user is None:
return self.send_errors(
message="Invalid: bad username", code=hcodes.HTTP_BAD_REQUEST)
tokens = self.auth.get_tokens(user=user)
if token_id is None:
return tokens
for token in tokens:
if token["id"] == token_id:
return token
errorMessage = """Either this token was not emitted for your account
or it does not exist"""
return self.send_errors(
message=errorMessage, code=hcodes.HTTP_BAD_NOTFOUND)
def delete(self, token_id=None):
"""
For additional security, tokens are invalidated both
by chanding the user UUID and by removing single tokens
"""
user = self.get_user()
if user is None:
return self.send_errors(
message="Invalid: bad username", code=hcodes.HTTP_BAD_REQUEST)
if token_id is None:
# NOTE: this is allowed only in removing tokens in unittests
if not current_app.config['TESTING']:
raise KeyError("TESTING IS FALSE! Specify a valid token")
self.auth.invalidate_all_tokens(user=user)
return self.empty_response()
tokens = self.auth.get_tokens(user=user)
for token in tokens:
if token["id"] != token_id:
continue
if not self.auth.invalidate_token(token=token["token"], user=user):
return self.send_errors(
message="Failed token invalidation: '%s'" % token,
code=hcodes.HTTP_BAD_REQUEST)
log.debug("Token invalidated: %s", token_id)
return self.empty_response()
message = "Token not emitted for your account or does not exist"
return self.send_errors(
message=message, code=hcodes.HTTP_BAD_UNAUTHORIZED)
class Profile(EndpointResource):
""" Current user informations """
def get(self):
current_user = self.get_current_user()
data = {
'uuid': current_user.uuid,
'status': "Valid user",
'email': current_user.email
}
# roles = []
roles = {}
for role in current_user.roles:
# roles.append(role.name)
roles[role.name] = role.name
data["roles"] = roles
data["isAdmin"] = self.auth.verify_admin()
data["isGroupAdmin"] = self.auth.verify_group_admin()
if hasattr(current_user, 'name'):
data["name"] = current_user.name
if hasattr(current_user, 'surname'):
data["surname"] = current_user.surname
if hasattr(current_user, 'irods_user'):
data["irods_user"] = current_user.irods_user
if not data["irods_user"]:
data["irods_user"] = None
elif data["irods_user"] == '':
data["irods_user"] = None
elif data["irods_user"] == '0':
data["irods_user"] = None
elif data["irods_user"][0] == '-':
data["irods_user"] = None
if self.auth.SECOND_FACTOR_AUTHENTICATION is not None:
data['2fa'] = self.auth.SECOND_FACTOR_AUTHENTICATION
return data
@decorate.catch_error()
def post(self):
""" Create new current user """
v = self.get_input()
if len(v) == 0:
raise RestApiException(
'Empty input',
status_code=hcodes.HTTP_BAD_REQUEST)
# INIT #
schema = self.get_endpoint_custom_definition()
properties = self.read_properties(schema, v)
# GRAPH #
# properties["authmethod"] = "credentials"
# if "password" in properties:
# properties["password"] = \
# BaseAuthentication.hash_password(properties["password"])
# DO CUSTOM STUFFS HERE - e.g. create irods user
properties, other_properties = \
self.custom_pre_handle_user_input(properties, v)
roles = self.get_roles(v)
user = self.auth.create_user(properties, roles)
self.custom_post_handle_user_input(user, properties, other_properties)
# DO CUSTOM STUFFS HERE - e.g. link to group
return self.force_response(user.uuid)
@decorate.catch_error()
def put(self):
""" Update profile for current user """
user = self.auth.get_user()
username = user.email
# if user.uuid != uuid:
# msg = "Invalid uuid: not matching current user"
# raise RestApiException(msg)
data = self.get_input()
password = data.get('password')
        new_password = data.get('new_password')
password_confirm = data.get('password_confirm')
totp_authentication = (
self.auth.SECOND_FACTOR_AUTHENTICATION is not None and
self.auth.SECOND_FACTOR_AUTHENTICATION == self.auth.TOTP
)
if totp_authentication:
totp_code = data.get('totp_code')
else:
totp_code = None
security = HandleSecurity(self.auth)
if new_password is None or password_confirm is None:
msg = "New password is missing"
raise RestApiException(msg, status_code=hcodes.HTTP_BAD_REQUEST)
if totp_authentication:
security.verify_totp(user, totp_code)
else:
# token, jti = self.auth.make_login(username, password)
token, _ = self.auth.make_login(username, password)
security.verify_token(username, token)
security.change_password(
user, password, new_password, password_confirm)
# I really don't know why this save is required... since it is already
# in change_password ... But if I remove it the new pwd is not saved...
user.save()
return self.empty_response()
###########################
# NOTE: roles are configured inside swagger definitions
class Internal(EndpointResource):
""" Token and Role authentication test """
def get(self):
return "I am internal"
class Admin(EndpointResource):
""" Token and Role authentication test """
def get(self):
return "I am admin!"
###########################
# In case you have celery queue,
# you get a queue endpoint for free
if detector.check_availability('celery'):
class Queue(EndpointResource):
def get(self, task_id=None):
# Inspect all worker nodes
celery = self.get_service_instance('celery')
workers = celery.control.inspect()
data = []
active_tasks = workers.active()
revoked_tasks = workers.revoked()
scheduled_tasks = workers.scheduled()
if active_tasks is None:
active_tasks = []
if revoked_tasks is None:
revoked_tasks = []
if scheduled_tasks is None:
scheduled_tasks = []
for worker in active_tasks:
tasks = active_tasks[worker]
for task in tasks:
if task_id is not None and task["id"] != task_id:
continue
row = {}
row['status'] = 'ACTIVE'
row['worker'] = worker
row['ETA'] = task["time_start"]
row['task_id'] = task["id"]
row['task'] = task["name"]
row['args'] = task["args"]
if task_id is not None:
task_result = celery.AsyncResult(task_id)
row['task_status'] = task_result.status
row['info'] = task_result.info
data.append(row)
for worker in revoked_tasks:
tasks = revoked_tasks[worker]
for task in tasks:
if task_id is not None and task != task_id:
continue
row = {}
row['status'] = 'REVOKED'
row['task_id'] = task
data.append(row)
for worker in scheduled_tasks:
tasks = scheduled_tasks[worker]
for task in tasks:
if task_id is not None and \
task["request"]["id"] != task_id:
continue
row = {}
row['status'] = 'SCHEDULED'
row['worker'] = worker
row['ETA'] = task["eta"]
row['task_id'] = task["request"]["id"]
row['priority'] = task["priority"]
row['task'] = task["request"]["name"]
row['args'] = task["request"]["args"]
data.append(row)
# from celery.task.control import inspect
# tasks = inspect()
return self.force_response(data)
def put(self, task_id):
celery = self.get_service_instance('celery')
celery.control.revoke(task_id)
return self.empty_response()
def delete(self, task_id):
celery = self.get_service_instance('celery')
celery.control.revoke(task_id, terminate=True)
return self.empty_response()
| 2.421875
| 2
|
tests/components/opnsense/__init__.py
|
domwillcode/home-assistant
| 30,023
|
12775241
|
"""Tests for the opnsense component."""
| 0.953125
| 1
|
namalizer.py
|
svetlyak40wt/namalizer
| 0
|
12775242
|
import re
#import logbook
from inspect import isroutine, getmro
from itertools import chain
import unittest
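# Matches an uppercase letter immediately preceded by a lowercase one,
# e.g. the 'M' in 'uglyMethod', so it can be rewritten as '_m'.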
_CAMEL_RE = re.compile(r'(?<=[a-z])([A-Z])')
def _normalize(name):
return _CAMEL_RE.sub(lambda x: '_' + x.group(1).lower(), name).lower()
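# Heuristic: treat the attribute as defined on obj itself unless the first
# base in the MRO provides the identical value.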
def _defined_in(obj, name, value):
if hasattr(obj, '__bases__'):
mro = getmro(obj)
if len(mro) > 1:
return getattr(mro[1], name, None) != value
return True
def pep8(*args, **kwargs):
def objects():
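        # Yield every class to patch: for a class argument walk its MRO
        # base-first so parents are renamed before children; for an instance,
        # fall back to its class.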
for obj in chain(args, kwargs.values()):
if hasattr(obj, '__bases__'):
try:
for parent in reversed(getmro(obj)):
yield parent
                except Exception:
                    import pdb; pdb.set_trace()
else:
if hasattr(obj, '__class__'):
yield obj.__class__
for obj in objects():
try:
for name in dir(obj):
if not name.startswith('_'):
value = getattr(obj, name)
if isroutine(value):
norm_name = _normalize(name)
if norm_name != name:
try:
norm_value = getattr(obj, norm_name, None)
if norm_value is None or not _defined_in(obj, norm_name, norm_value):
# no method with normalized name
#logbook.Logger('pep8').info(
# 'writing from %s(%s) to %s(%s) for %r' % (name, hash(value), norm_name, hash(norm_value), obj)
#)
setattr(obj, norm_name, value)
else:
# set new value back because, probably it is
# overridden method
if norm_value != value:
#logbook.Logger('pep8').info(
# 'writing back from %s(%s) to %s(%s) for %r' % (
# norm_name, hash(norm_value),
# name, hash(value),
# obj
# )
#)
setattr(obj, name, norm_value)
except TypeError:
pass
        except Exception:
            import pdb; pdb.set_trace()
            raise
#return cls
class TestCase(unittest.TestCase):
def test_normalization(self):
self.assertEqual('ugly_method', _normalize('uglyMethod'))
self.assertEqual('another_ugly_method', _normalize('AnotherUglyMethod'))
self.assertEqual('listen_tcp', _normalize('listenTCP'))
def test_inheritance1(self):
class A:
def badMethod(self):
return 'A'
class B(A): pass
pep8(B)
self.assertEqual('A', B().bad_method())
def test_inheritance2(self):
class A(object):
def badMethod(self):
return 'A'
class B(A):
def badMethod(self):
return 'B'
pep8(B)
self.assertEqual('A', A().badMethod())
self.assertEqual('A', A().bad_method())
self.assertEqual('B', B().badMethod())
self.assertEqual('B', B().bad_method())
def test_inheritance3(self):
class A(object):
def badMethod(self):
return 'A'
class B(A):
def bad_method(self):
return 'B'
pep8(B)
self.assertEqual('A', A().badMethod())
self.assertEqual('A', A().bad_method())
self.assertEqual('B', B().badMethod())
self.assertEqual('B', B().bad_method())
def test_inheritance4(self):
class A(object):
def badMethod(self):
return 'A'
class B(A):
def badMethod(self):
return 'B'
b = B()
pep8(A, b)
self.assertEqual('B', b.badMethod())
self.assertEqual('B', b.bad_method())
def test_on_object(self):
class A(object):
def badMethod(self):
return 'A'
a = A()
pep8(A)
self.assertEqual('A', a.badMethod())
self.assertEqual('A', a.bad_method())
def test_class_and_function(self):
class FakeModule:
class Random:
pass
def random():
pass
pep8(FakeModule)
        self.assertTrue(FakeModule.Random != FakeModule.random)
def test_defined_in(self):
class A:
def foo(self): return 'A.foo'
def bar(self): return 'A.bar'
class B(A):
def foo(self): return 'B.foo'
def blah(self): return 'B.blah'
self.assertEqual(True, _defined_in(A, 'foo', A.foo))
self.assertEqual(True, _defined_in(A, 'bar', A.bar))
self.assertEqual(True, _defined_in(B, 'foo', B.foo))
self.assertEqual(False, _defined_in(B, 'bar', B.bar))
self.assertEqual(True, _defined_in(B, 'blah', B.blah))
if __name__ == '__main__':
unittest.main()
| 2.40625
| 2
|
recipes/Python/577218_Sphere/recipe-577218.py
|
tdiprima/code
| 2,023
|
12775243
|
<gh_stars>1000+
#On the name of ALLAH and may the blessing and peace of Allah
#be upon the Messenger of Allah <NAME>.
#Author : <NAME>
#Date : 06/05/10
#version :2.6
"""
Sphere class represents a geometric sphere and a completing_the_squares
function is used for the purpose, while an utility _checksign function
is used to check the sign of all the coefficients and return an empty string
for a positive number and a minus character for a negative number.
A string representation function for the three different outcome
possibilities is used to print the solution of the sphere equation.
"""
from math import sqrt
class Sphere(object):
"""
class that represents a geometric Sphere
"""
    def __init__(self, coef_A=0, coef_B=0, coef_C=0, coef_D=0, coef_E=0, coef_F=0, coef_G=0):
""" Sphere Construction takes coef_A,coef_B,coef_C,coef_D,coef_E,coef_F,coef_G constants """
self._A = coef_A
self._B = coef_B
self._C = coef_C
self._D = coef_D
self._E = coef_E
self._F = coef_F
self._G = coef_G
self._a = self._checkSign(self._D)
self._b = self._checkSign(self._E)
self._c = self._checkSign(self._F)
self._d = pow((self._D/2.0)/self._A,2)
self._e = pow((self._E/2.0)/self._B,2)
self._f = pow((self._F/2.0)/self._C,2)
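        # chr(253) is byte 0xFD, which renders as a superscript two ('²') on
        # code page 437 consoles; _h is the right-hand side after completing
        # the squares (the squared radius when positive).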
self._g = chr(253)
self._h = (-self._G/self._A + self._d + self._e + self._f)
    def _checkSign(self, value):
        """Utility method to check the value's sign
        and return a sign string"""
        if value >= 0:
            return "+"
        else:
            return ""
def completing_the_squares(self):
"""
completing the squares function
"""
c_squares = "(x%s %s%sx + %s) + (y%s %s%sy + %s) + (z%s %s%sz + %s) = %s" % \
(self._g,self._a,self._D/self._A,self._d,
self._g,self._b,self._E/self._B,self._e,
self._g,self._c,self._F/self._C,self._f,self._h)
return c_squares
def __str__(self):
"""
String representation of a sphere
"""
print ("\n(x%s%s)%s + (y%s%s)%s + (z%s%s)%s = %s") % \
(self._a,(self._D/2.0)/self._A,self._g,self._b,(self._E/2.0)/self._B,
self._g,self._c,(self._F/2.0)/self._C,self._g,self._h)
if self._h > 0:
return "\n<The graph of this equation is a sphere with centre (%s,%s,%s) and radius %s\n" % \
(-1*self._D/2.0,-1*self._E/2.0,-1*self._F/2.0,"%2.3f" % (sqrt(self._h)))
elif self._h == 0:
return "\n<this sphere has radius = 0 and the graph is a single point(%s,%s,%s)\n " % \
                (-1*self._D/2.0,-1*self._E/2.0,-1*self._F/2.0,float(sqrt(self._h)))
else :
return "\n<There is no graph for such equation "
if __name__ == "__main__":
sphere = Sphere(1,1,1,-2,-4,8,17)
print sphere.completing_the_squares()
print sphere
sphere1 = Sphere(1,1,1,10,4,2,-19)
print sphere1.completing_the_squares()
print sphere1
sphere2 = Sphere(2,2,2,-2,-3,5,-2)
print sphere2.completing_the_squares()
print sphere2
####C:\Windows\python "C:\Users\MyComputer\Documents\Pyt\Sphere7.py"
#(x² -2x + 1.0) + (y² -4y + 4.0) + (z² +8z + 16.0) = 4.0
#(x-1.0)² + (y-2.0)² + (z+4.0)² = 4.0
#<The graph of this equation is a sphere with centre (1.0,2.0,-4.0) #and radius 2.000
#(x² +10x + 25.0) + (y² +4y + 4.0) + (z² +2z + 1.0) = 49.0
#(x+5.0)² + (y+2.0)² + (z+1.0)² = 49.0
#<The graph of this equation is a sphere with centre (-5.0,-2.0,-1.0) #and radius 7.000
#(x² -1x + 0.25) + (y² -2y + 0.5625) + (z² +2z + 1.5625) = 3.375
#(x-0.5)² + (y-0.75)² + (z+1.25)² = 3.375
#<The graph of this equation is a sphere with centre (1.0,1.5,-2.5) #and radius 1.837
#################################################################
| 3.90625
| 4
|
opset/__init__.py
|
MarcDufresne/opset
| 7
|
12775244
|
<filename>opset/__init__.py
# __init__.py
# <NAME>, 2018-11-19, <NAME>, 2019-01-17
# Copyright (c) Element AI Inc. All rights not expressly granted hereunder are reserved.
from opset.configurator import BaseProcessor, config, load_logging_config, setup_config, setup_unit_test_config # noqa
from opset.utils import mock_config # noqa
| 1.164063
| 1
|
_mappers.py
|
lsils/benchmarks-date2019-permutations
| 0
|
12775245
|
import json
import networkx as nx
from pyquil.quil import Program
from pyquil.api import get_qc, LocalQVMCompiler
from pyquil.device import NxDevice
from pyquil.gates import CNOT, H
from qiskit import transpiler
from qiskit.wrapper import load_qasm_file
from qiskit.dagcircuit import DAGCircuit
def quil_compile(input, device):
name = device["name"]
g = nx.Graph()
g.add_edges_from(device["topology"])
qc = NxDevice(g)
p = Program(open(f"input/{input}.quil", "r").read())
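    # Compile to the device's native gate set via the local compiler endpoint;
    # the returned metadata reports the native gate volume and depth.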
compiler = LocalQVMCompiler("http://localhost:6000", qc)
np = compiler.quil_to_native_quil(p)
volume, depth = np.native_quil_metadata["gate_volume"], np.native_quil_metadata["gate_depth"]
with open(f"output/{input}_{name}.quil", "w") as f:
f.write(str(np))
with open(f"output/{input}_{name}.json", "w") as f:
f.write(json.dumps({'volume': volume, 'depth': depth}))
def qasm_compile(input, device):
name = device["name"]
qc = load_qasm_file(f"input/{input}.qasm")
dag = DAGCircuit.fromQuantumCircuit(qc)
try:
r = transpiler.transpile(dag, coupling_map=device["topology"])
qasm = r.qasm()
volume, depth = r.property_summary()["size"], r.property_summary()["depth"]
    except Exception:
qasm = ""
volume, depth = 0, 0
with open(f"output/{input}_{name}.qasm", "w") as f:
f.write(qasm)
with open(f"output/{input}_{name}.json", "w") as f:
f.write(json.dumps({'volume': volume, 'depth': depth}))
| 2.1875
| 2
|
run_resnet50.py
|
paulvangentcom/Bytehoven_SheetMusicRecognition
| 8
|
12775246
|
import numpy as np
from glob import glob
from scipy import ndimage
from keras import callbacks
from keras.optimizers import Adamax, SGD, RMSprop
import resnet50
def convert_to_one_hot(Y, C):
'''Converts array with labels to one-hot encoding
Keyword Arguments:
Y -- 1-dimensional numpy array containing labeled values
C -- total number of labels in Y
'''
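    # Index the identity matrix by each label to get one-hot rows, then
    # transpose so the result is shaped (C, number of samples).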
Y = np.eye(C)[Y.reshape(-1)].T
return Y
def load_dataset(datapath, composers):
'''Loads dataset into memory
Keyword Arguments:
datapath -- absolute or relative path to dataset location
composers -- list of composer names included in the dataset
'''
folders = glob('%s/*' %datapath)
X_train = []
Y_train = []
for folder in folders:
files = glob('%s\\*.jpg' %folder)
print('working on composer: %s' %(folder.split('\\')[-1]))
for f in files:
im = ndimage.imread(f, mode='L')
            im = im / 255.0  # normalize pixel values to [0, 1]
im = im.reshape(im.shape[0], im.shape[1], 1)
X_train.append(im)
Y_train.append(composers.index(folder.split('\\')[-1]))
return np.asarray(X_train), np.asarray(Y_train)
if __name__ == '__main__':
print('setting model')
    model = resnet50.ResNet50(input_shape=(70, 400, 1), classes=7)
epochs = 100
learning_rate = 0.001
lr_decay = 0.001/100
print('compiling model...')
#optimizer_instance = Adam(lr=learning_rate, decay=lr_decay)#lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=0.001)
#optimizer_instance = Adamax(lr=learning_rate, decay=lr_decay)
optimizer_instance = SGD(lr=learning_rate, decay=lr_decay)
#optimizer_instance = RMSprop(lr=learning_rate, decay=lr_decay)
model.compile(optimizer=optimizer_instance, loss='categorical_crossentropy', metrics=['acc'])
print('loading dataset......')
composers = ['Bach', 'Beethoven', 'Brahms', 'Chopin', 'Grieg', 'Liszt', 'Mozart']
datapath = 'Dataset_Train_Medium/'
X_train, Y_train = load_dataset(datapath, composers)
datapath_val = 'Dataset_Dev_Medium/'
X_test, Y_test = load_dataset(datapath_val, composers)
print('applying one-hot-encoding')
Y_train = convert_to_one_hot(Y_train, 7).T
Y_test = convert_to_one_hot(Y_test, 7).T
print('setting up callbacks...')
nancheck = callbacks.TerminateOnNaN()
filepath = 'Models/weights-improvement-{epoch:02d}-{acc:.2f}.hdf5'
saver = callbacks.ModelCheckpoint(filepath, monitor='acc', verbose=1, save_best_only=False, mode='max', period=1)
logger = callbacks.CSVLogger('model-weights/trainingresults.log')
callbacklist = [nancheck, saver, logger]
print('starting model fitting')
model.fit(X_train, Y_train, validation_data = (X_test, Y_test), epochs=epochs, batch_size=72, callbacks=callbacklist)
print('Saving model.........')
model.save('second_run.h5')
| 2.6875
| 3
|
convnwb/tests/tsettings.py
|
JacobsSU/convnwb
| 0
|
12775247
|
"""Settings for tests."""
import os
from pathlib import Path
import pkg_resources as pkg
###################################################################################################
###################################################################################################
# Set paths for test files
TEST_FILE_PATH = Path(pkg.resource_filename(__name__, 'test_files'))
| 1.882813
| 2
|
app_config.py
|
huhansan666666/flask_reddit
| 461
|
12775248
|
#!/usr/bin/env python2.7
"""
app_config.py will be storing all the module configs.
Here the db uses mysql.
"""
import os
_basedir = os.path.abspath(os.path.dirname(__file__))
DEBUG = False
ADMINS = frozenset(['<EMAIL>'])
SECRET_KEY = ''
SQLALCHEMY_DATABASE_URI = 'DATABASE://USERNAME:PASSWORD@localhost/YOUR_DB_NAME'
DATABASE_CONNECT_OPTIONS = {}
CSRF_ENABLED = True
CSRF_SESSION_KEY = ""
# Customize and add the below if you'd like to use reCAPTCHA. SSL is enabled
# by default and this is reCAPTCHA v2: users tick the "I'm not a robot"
# checkbox instead of answering a riddle.
# Please see: https://www.google.com/recaptcha
RECAPTCHA_DATA_ATTRS = {'theme': 'light'}
RECAPTCHA_PUBLIC_KEY = 'YOUR KEY HERE'
RECAPTCHA_PRIVATE_KEY = 'YOUR PRIVATE KEY HERE'
BRAND = "reddit"
DOMAIN = "YOUR_DOMAIN_HERE"
ROOT_URL = "http://YOUR_URL_HERE"
STATIC_ROOT = "/path/to/your/static/root/"
STATIC_URL = ROOT_URL + "/static/"
| 2.171875
| 2
|
sentiment_analysis/setup.py
|
syrinecheriaa/sentiment-analysis-test
| 0
|
12775249
|
from setuptools import setup
setup(name='sentiment_analysis',
packages=['sentiment_analysis'],
version='0.2.0',
description="sentiment analysis library",
author='<NAME>',
package_data={'sentiment_analysis': ['data/*'],},
include_package_data=True,
install_requires=[
'pandas==1.1.1',
'transformers==4.12.5',
'sentencepiece',
'protobuf',
'torch@https://download.pytorch.org/whl/cpu/torch-1.5.0%2Bcpu-cp37-cp37m-linux_x86_64.whl',
]
)
| 1.226563
| 1
|
openslides/poll/models.py
|
swilde/OpenSlides
| 0
|
12775250
|
<reponame>swilde/OpenSlides
from decimal import Decimal
from typing import Iterable, Optional, Tuple, Type
from django.conf import settings
from django.core.validators import MinValueValidator
from django.db import models
from ..core.config import config
from ..utils.autoupdate import inform_changed_data, inform_deleted_data
from ..utils.models import SET_NULL_AND_AUTOUPDATE
class BaseVote(models.Model):
"""
All subclasses must have option attribute with the related name "votes"
"""
weight = models.DecimalField(
default=Decimal("1"),
validators=[MinValueValidator(Decimal("-2"))],
max_digits=15,
decimal_places=6,
)
value = models.CharField(max_length=1, choices=(("Y", "Y"), ("N", "N"), ("A", "A")))
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
default=None,
null=True,
blank=True,
on_delete=SET_NULL_AND_AUTOUPDATE,
)
class Meta:
abstract = True
class BaseOption(models.Model):
"""
All subclasses must have poll attribute with the related name "options"
"""
vote_class: Optional[Type["BaseVote"]] = None
class Meta:
abstract = True
@property
def yes(self) -> Decimal:
return self.sum_weight("Y")
@property
def no(self) -> Decimal:
return self.sum_weight("N")
@property
def abstain(self) -> Decimal:
return self.sum_weight("A")
def sum_weight(self, value):
        # This could be a single .aggregate(Sum(...)) query, but that can be an
        # expensive DB query because the votes are not preloaded. Counting here
        # keeps the work in memory on the already-fetched objects.
weight_sum = Decimal(0)
for vote in self.votes.all():
if vote.value == value:
weight_sum += vote.weight
return weight_sum
@classmethod
def get_vote_class(cls):
if cls.vote_class is None:
raise NotImplementedError(
f"The option class {cls} has to have an attribute vote_class."
)
return cls.vote_class
def get_votes(self):
"""
Return a QuerySet with all vote objects related to this option.
"""
return self.get_vote_class().objects.filter(option=self)
def pseudoanonymize(self):
for vote in self.get_votes():
vote.user = None
vote.save()
def reset(self):
# Delete votes
votes = self.get_votes()
votes_id = [vote.id for vote in votes]
votes.delete()
collection = self.get_vote_class().get_collection_string()
inform_deleted_data((collection, id) for id in votes_id)
        # update self because of the changed votes relation
inform_changed_data(self)
class BasePoll(models.Model):
option_class: Optional[Type["BaseOption"]] = None
STATE_CREATED = 1
STATE_STARTED = 2
STATE_FINISHED = 3
STATE_PUBLISHED = 4
STATES = (
(STATE_CREATED, "Created"),
(STATE_STARTED, "Started"),
(STATE_FINISHED, "Finished"),
(STATE_PUBLISHED, "Published"),
)
state = models.IntegerField(choices=STATES, default=STATE_CREATED)
TYPE_ANALOG = "analog"
TYPE_NAMED = "named"
TYPE_PSEUDOANONYMOUS = "pseudoanonymous"
TYPES = (
(TYPE_ANALOG, "analog"),
(TYPE_NAMED, "nominal"),
(TYPE_PSEUDOANONYMOUS, "non-nominal"),
)
type = models.CharField(max_length=64, blank=False, null=False, choices=TYPES)
title = models.CharField(max_length=255, blank=True, null=False)
groups = models.ManyToManyField(settings.AUTH_GROUP_MODEL, blank=True)
voted = models.ManyToManyField(settings.AUTH_USER_MODEL, blank=True)
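    # For analog polls these totals are entered manually (None means unset);
    # other poll types derive them from the recorded votes.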
db_votesvalid = models.DecimalField(
null=True,
blank=True,
validators=[MinValueValidator(Decimal("-2"))],
max_digits=15,
decimal_places=6,
)
db_votesinvalid = models.DecimalField(
null=True,
blank=True,
validators=[MinValueValidator(Decimal("-2"))],
max_digits=15,
decimal_places=6,
)
db_votescast = models.DecimalField(
null=True,
blank=True,
validators=[MinValueValidator(Decimal("-2"))],
max_digits=15,
decimal_places=6,
)
PERCENT_BASE_YN = "YN"
PERCENT_BASE_YNA = "YNA"
PERCENT_BASE_VALID = "valid"
PERCENT_BASE_CAST = "cast"
PERCENT_BASE_DISABLED = "disabled"
PERCENT_BASES: Iterable[Tuple[str, str]] = (
(PERCENT_BASE_YN, "Yes/No"),
(PERCENT_BASE_YNA, "Yes/No/Abstain"),
(PERCENT_BASE_VALID, "All valid ballots"),
        (PERCENT_BASE_CAST, "All cast ballots"),
(PERCENT_BASE_DISABLED, "Disabled (no percents)"),
) # type: ignore
onehundred_percent_base = models.CharField(
max_length=8, blank=False, null=False, choices=PERCENT_BASES
)
MAJORITY_SIMPLE = "simple"
MAJORITY_TWO_THIRDS = "two_thirds"
MAJORITY_THREE_QUARTERS = "three_quarters"
MAJORITY_DISABLED = "disabled"
MAJORITY_METHODS = (
(MAJORITY_SIMPLE, "Simple majority"),
(MAJORITY_TWO_THIRDS, "Two-thirds majority"),
(MAJORITY_THREE_QUARTERS, "Three-quarters majority"),
(MAJORITY_DISABLED, "Disabled"),
)
majority_method = models.CharField(
max_length=14, blank=False, null=False, choices=MAJORITY_METHODS
)
class Meta:
abstract = True
def get_votesvalid(self):
if self.type == self.TYPE_ANALOG:
return self.db_votesvalid
else:
return Decimal(self.amount_users_voted_with_individual_weight())
def set_votesvalid(self, value):
if self.type != self.TYPE_ANALOG:
raise ValueError("Do not set votesvalid for non analog polls")
self.db_votesvalid = value
votesvalid = property(get_votesvalid, set_votesvalid)
def get_votesinvalid(self):
if self.type == self.TYPE_ANALOG:
return self.db_votesinvalid
else:
return Decimal(0)
def set_votesinvalid(self, value):
if self.type != self.TYPE_ANALOG:
raise ValueError("Do not set votesinvalid for non analog polls")
self.db_votesinvalid = value
votesinvalid = property(get_votesinvalid, set_votesinvalid)
def get_votescast(self):
if self.type == self.TYPE_ANALOG:
return self.db_votescast
else:
return Decimal(self.amount_users_voted())
def set_votescast(self, value):
if self.type != self.TYPE_ANALOG:
raise ValueError("Do not set votescast for non analog polls")
self.db_votescast = value
votescast = property(get_votescast, set_votescast)
def amount_users_voted(self):
return len(self.voted.all())
def amount_users_voted_with_individual_weight(self):
if config["users_activate_vote_weight"]:
return sum(user.vote_weight for user in self.voted.all())
else:
return self.amount_users_voted()
def create_options(self):
""" Should be called after creation of this model. """
raise NotImplementedError()
@classmethod
def get_option_class(cls):
if cls.option_class is None:
raise NotImplementedError(
f"The poll class {cls} has to have an attribute option_class."
)
return cls.option_class
def get_options(self):
"""
Returns the option objects for the poll.
"""
return self.options.all()
@classmethod
def get_vote_class(cls):
return cls.get_option_class().get_vote_class()
def get_votes(self):
"""
Return a QuerySet with all vote objects related to this poll.
"""
return self.get_vote_class().objects.filter(option__poll__id=self.id)
def pseudoanonymize(self):
for option in self.get_options():
option.pseudoanonymize()
def reset(self):
for option in self.get_options():
option.reset()
self.voted.clear()
# Reset state
self.state = BasePoll.STATE_CREATED
if self.type == self.TYPE_ANALOG:
self.votesvalid = None
self.votesinvalid = None
self.votescast = None
self.save()
| 2.140625
| 2
|