hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
52b16b71969cb385515263b871a73adfb7b77434 | 4,812 | py | Python | src/olympia/blocklist/tests/test_cron.py | CSCD01/addons-server-team02 | 053f43d00fec71d8b9d3280ba9ef1ca46aae3aa6 | [
"BSD-3-Clause"
] | 3 | 2020-03-05T18:17:14.000Z | 2020-03-09T01:24:38.000Z | src/olympia/blocklist/tests/test_cron.py | CSCD01/addons-server-team02 | 053f43d00fec71d8b9d3280ba9ef1ca46aae3aa6 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/blocklist/tests/test_cron.py | CSCD01/addons-server-team02 | 053f43d00fec71d8b9d3280ba9ef1ca46aae3aa6 | [
"BSD-3-Clause"
] | null | null | null | import datetime
import json
import os
from unittest import mock
from django.conf import settings
from django.core.files.storage import default_storage as storage
from freezegun import freeze_time
from waffle.testutils import override_switch
from olympia.amo.tests import addon_factory, TestCase, user_factory
from olympia.blocklist.cron import upload_mlbf_to_kinto
from olympia.blocklist.mlbf import MLBF
from olympia.blocklist.models import Block
from olympia.blocklist.tasks import MLBF_TIME_CONFIG_KEY
from olympia.lib.kinto import KintoServer
from olympia.zadmin.models import get_config, set_config
class TestUploadToKinto(TestCase):
    """Tests for the `upload_mlbf_to_kinto` cron task.

    Covers bloomfilter generation/upload, stash-file creation, the
    waffle kill-switch, and the skip logic when no new filter is needed.
    """
    def setUp(self):
        # One unblocked add-on plus one Block (backed by a signed
        # webextension file) so the generated MLBF has content on both
        # the blocked and not-blocked sides.
        addon_factory()
        self.block = Block.objects.create(
            addon=addon_factory(
                file_kw={'is_signed': True, 'is_webextension': True}),
            updated_by=user_factory())
    @freeze_time('2020-01-01 12:34:56')
    @override_switch('blocklist_mlbf_submit', active=True)
    @mock.patch.object(KintoServer, 'publish_attachment')
    def test_upload_mlbf_to_kinto(self, publish_mock):
        """The filter is published to kinto and written to storage."""
        upload_mlbf_to_kinto()
        # Millisecond timestamp fixed by @freeze_time above.
        generation_time = int(
            datetime.datetime(2020, 1, 1, 12, 34, 56).timestamp() * 1000)
        publish_mock.assert_called_with(
            {'key_format': MLBF.KEY_FORMAT,
             'generation_time': generation_time},
            ('filter.bin', mock.ANY, 'application/octet-stream'))
        # The generation time is persisted in config for later runs.
        assert (
            get_config(MLBF_TIME_CONFIG_KEY, json_value=True) ==
            generation_time)
        # The filter plus blocked/notblocked guid dumps are all written
        # (non-empty) under a directory named after the generation time.
        mlfb_path = os.path.join(
            settings.MLBF_STORAGE_PATH, str(generation_time), 'filter')
        assert os.path.exists(mlfb_path)
        assert os.path.getsize(mlfb_path)
        blocked_path = os.path.join(
            settings.MLBF_STORAGE_PATH, str(generation_time), 'blocked.json')
        assert os.path.exists(blocked_path)
        assert os.path.getsize(blocked_path)
        not_blocked_path = os.path.join(
            settings.MLBF_STORAGE_PATH, str(generation_time),
            'notblocked.json')
        assert os.path.exists(not_blocked_path)
        assert os.path.getsize(not_blocked_path)
    @freeze_time('2020-01-01 12:34:56')
    @override_switch('blocklist_mlbf_submit', active=True)
    @mock.patch.object(KintoServer, 'publish_attachment')
    def test_stash_file(self, publish_mock):
        """A stash of guid diffs vs. the previous filter is written."""
        # Pretend a filter was generated earlier with one (made up)
        # blocked guid, so this run produces a non-trivial diff.
        set_config(MLBF_TIME_CONFIG_KEY, 123456, json_value=True)
        prev_blocked_path = os.path.join(
            settings.MLBF_STORAGE_PATH, '123456', 'blocked.json')
        with storage.open(prev_blocked_path, 'w') as blocked_file:
            json.dump(['madeup@guid:123'], blocked_file)
        upload_mlbf_to_kinto()
        generation_time = int(
            datetime.datetime(2020, 1, 1, 12, 34, 56).timestamp() * 1000)
        stash_path = os.path.join(
            settings.MLBF_STORAGE_PATH, str(generation_time), 'stash.json')
        assert os.path.exists(stash_path)
        assert os.path.getsize(stash_path)
        # The stash lists the newly blocked guid and the previously
        # blocked guid that no longer appears.
        with open(stash_path) as stash_file:
            blocked_guid = (
                f'{self.block.guid}:'
                f'{self.block.addon.current_version.version}')
            assert json.load(stash_file) == {
                'blocked': [blocked_guid],
                'unblocked': ['madeup@guid:123']}
    @override_switch('blocklist_mlbf_submit', active=False)
    @mock.patch.object(KintoServer, 'publish_attachment')
    def test_waffle_off_disables_publishing(self, publish_mock):
        """Nothing is published when the waffle switch is off."""
        upload_mlbf_to_kinto()
        publish_mock.assert_not_called()
        assert not get_config(MLBF_TIME_CONFIG_KEY)
    @freeze_time('2020-01-01 12:34:56')
    @override_switch('blocklist_mlbf_submit', active=True)
    @mock.patch.object(KintoServer, 'publish_attachment')
    def test_no_need_for_new_mlbf(self, publish_mock):
        """No new filter is generated unless a Block changed since."""
        # This was the last time the mlbf was generated
        last_time = int(
            datetime.datetime(2020, 1, 1, 12, 34, 1).timestamp() * 1000)
        # And the Block was modified just before so would be included
        self.block.update(modified=datetime.datetime(2020, 1, 1, 12, 34, 0))
        set_config(MLBF_TIME_CONFIG_KEY, last_time, json_value=True)
        upload_mlbf_to_kinto()
        # So no need for a new bloomfilter
        publish_mock.assert_not_called()
        # But if we add a new Block a new filter is needed
        addon_factory()
        Block.objects.create(
            addon=addon_factory(
                file_kw={'is_signed': True, 'is_webextension': True}),
            updated_by=user_factory())
        upload_mlbf_to_kinto()
        publish_mock.assert_called_once()
        assert (
            get_config(MLBF_TIME_CONFIG_KEY, json_value=True) ==
            int(datetime.datetime(2020, 1, 1, 12, 34, 56).timestamp() * 1000))
| 39.768595 | 78 | 0.67207 | 4,202 | 0.873234 | 0 | 0 | 3,913 | 0.813175 | 0 | 0 | 746 | 0.155029 |
52b263d46a219a4b05582cac52569631b70e8be8 | 1,442 | py | Python | PuLP/magic_sqare.py | yunzhang599/Python3_Package_Examples | 3e479925f3f6818bf35e46123f720839acf075eb | [
"MIT"
] | 1 | 2019-11-16T05:06:01.000Z | 2019-11-16T05:06:01.000Z | PuLP/magic_sqare.py | yunzhang599/Python3_Package_Examples | 3e479925f3f6818bf35e46123f720839acf075eb | [
"MIT"
] | null | null | null | PuLP/magic_sqare.py | yunzhang599/Python3_Package_Examples | 3e479925f3f6818bf35e46123f720839acf075eb | [
"MIT"
] | null | null | null |
from pulp import *

# Solve a 3x3 magic square as a pure feasibility (constraint
# satisfaction) problem: place the numbers 1..9 so that every row,
# every column and BOTH diagonals sum to the magic constant 15.
prob = LpProblem("PULPTEST", LpMinimize)
# model variables
XCOORD = [0, 1, 2]
YCOORD = [0, 1, 2]
NUMBERS = [1, 2, 3, 4, 5, 6, 7, 8, 9]
MAGIC_SUM = 15
# variable is a 3 x 3 x 9 matrix of binary values:
# allocation[x][y][n] == 1 means number n is placed at cell (x, y)
allocation = LpVariable.dicts("square", (XCOORD, YCOORD, NUMBERS), 0, 1, LpInteger)
# target function: nothing to optimize, feasibility only
prob += 0, "Arbitrary Objective Function"
# constraint: each line with fixed x sums to the magic constant
for x in XCOORD:
    prob += lpSum([n * allocation[x][y][n] for y in YCOORD for n in NUMBERS]) == MAGIC_SUM
# constraint: each line with fixed y sums to the magic constant
for y in YCOORD:
    prob += lpSum([n * allocation[x][y][n] for x in XCOORD for n in NUMBERS]) == MAGIC_SUM
# constraint: both diagonals sum to the magic constant
# (required for a true magic square rather than a semi-magic one)
prob += lpSum([n * allocation[i][i][n] for i in XCOORD for n in NUMBERS]) == MAGIC_SUM
prob += lpSum([n * allocation[i][2 - i][n] for i in XCOORD for n in NUMBERS]) == MAGIC_SUM
# constraint: each number used exactly once
for n in NUMBERS:
    prob += lpSum([allocation[x][y][n] for x in XCOORD for y in YCOORD]) == 1
# constraint: exactly three numbers on each fixed-x line
for x in XCOORD:
    prob += lpSum([allocation[x][y][n] for y in YCOORD for n in NUMBERS]) == 3
# constraint: exactly three numbers on each fixed-y line
for y in YCOORD:
    prob += lpSum([allocation[x][y][n] for x in XCOORD for n in NUMBERS]) == 3
# constraint: 9 numbers set in total
prob += lpSum([allocation[x][y][n] for x in XCOORD for y in YCOORD for n in NUMBERS]) == 9
# run the solver
prob.solve()
print("Status:", LpStatus[prob.status])
# print the numbers that have been found
for y in YCOORD:
    for x in XCOORD:
        for n in NUMBERS:
            if value(allocation[x][y][n]) == 1:
                print(n, end=' ')
    print()
| 27.207547 | 90 | 0.633842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 406 | 0.281553 |
52b2cef2321b28d9b382b11b584f50285ddca3b9 | 482 | py | Python | api/features/exceptions.py | SolidStateGroup/Bullet-Train-API | ea47ccbdadf665a806ae4e0eff6ad1a2f1b0ba19 | [
"BSD-3-Clause"
] | 126 | 2019-12-13T18:41:43.000Z | 2020-11-10T13:33:55.000Z | api/features/exceptions.py | SolidStateGroup/Bullet-Train-API | ea47ccbdadf665a806ae4e0eff6ad1a2f1b0ba19 | [
"BSD-3-Clause"
] | 30 | 2019-12-12T16:52:01.000Z | 2020-11-09T18:55:29.000Z | api/features/exceptions.py | SolidStateGroup/Bullet-Train-API | ea47ccbdadf665a806ae4e0eff6ad1a2f1b0ba19 | [
"BSD-3-Clause"
] | 20 | 2020-02-14T21:55:36.000Z | 2020-11-03T22:29:03.000Z | from rest_framework import status
from rest_framework.exceptions import APIException
class FeatureStateVersionError(APIException):
    """Base error for invalid FeatureState version operations (HTTP 400)."""
    status_code = status.HTTP_400_BAD_REQUEST
class FeatureStateVersionAlreadyExistsError(FeatureStateVersionError):
    """Raised when creating a FeatureState version that already exists.

    The 400 status code is inherited from FeatureStateVersionError, so
    it is not re-declared here.
    """

    def __init__(self, version: int):
        super().__init__(
            f"Version {version} already exists for FeatureState."
        )
52b4503e9b65d5bfcaf5744d5b9da4e173053293 | 74,777 | py | Python | ai_economist/real_business_cycle/rbc/cuda_manager.py | tljstewart/ai-economist | 4f57accbed95522a1ad90c50f7810282fde6e52c | [
"BSD-3-Clause"
] | 1 | 2022-03-28T21:07:08.000Z | 2022-03-28T21:07:08.000Z | ai_economist/real_business_cycle/rbc/cuda_manager.py | SocioProphet/ai-economist | 4ae6a7ddb7481eb22cde8f42267024fc9a6dbc01 | [
"BSD-3-Clause"
] | null | null | null | ai_economist/real_business_cycle/rbc/cuda_manager.py | SocioProphet/ai-economist | 4ae6a7ddb7481eb22cde8f42267024fc9a6dbc01 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2021, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
import itertools
import os
import random
from pathlib import Path
import numpy as np
import pycuda
import pycuda.autoinit
import pycuda.driver as cuda_driver
import scipy
import scipy.stats
import torch
from pycuda.compiler import SourceModule
from torch.distributions import Categorical
from tqdm import tqdm
from .constants import (
consumer_state_scaling_factors,
firm_state_scaling_factors,
govt_state_scaling_factors,
)
from .networks import DeterministicPolicy, IndependentPolicyNet, PolicyNet
from .util import expand_to_digit_form, size_after_digit_expansion
# Numpy dtype used for every host-side array shared with the CUDA kernels.
_NP_DTYPE = np.float32
# the below line is 'strangely' necessary to make PyTorch work with PyCUDA
# (presumably it forces torch to initialize its CUDA context before
# PyCUDA does — TODO confirm)
pytorch_cuda_init_success = torch.cuda.FloatTensor(8)
# for opening source files within module
module_path = Path(__file__).parent
def interval_list_contains(interval_list, step):
    """Return True iff `step` falls inside any half-open [low, high) interval."""
    return any(low <= step < high for (low, high) in interval_list)
class NoOpOptimizer:
    """Optimizer stand-in whose `step` does nothing."""

    def __init__(self):
        return

    def step(self):
        return
def seed_everything(seed):
    """Seed torch, python `random` and numpy RNGs for reproducibility."""
    for seeder in (torch.manual_seed, random.seed, np.random.seed):
        seeder(seed)
def reverse_cumsum(x):
    """Cumulative sum taken from the end, along the episode dim (dim=-2).

    out[t] = sum_{s >= t} x[s], computed as x + total - forward_cumsum.
    """
    total = torch.sum(x, dim=-2, keepdims=True)
    forward = torch.cumsum(x, dim=-2)
    return x + total - forward
def discounted_returns(rewards, gamma):
    """Return r[t] + gamma * (undiscounted sum of rewards after t), dim 1.

    NOTE: unlike the textbook discounted return, the future tail sum is
    NOT recursively discounted — a single factor of gamma multiplies the
    plain sum of all later rewards (matches `reverse_cumsum` at gamma=1).
    """
    returns = torch.zeros_like(rewards)
    future_sum = 0
    last_t = rewards.shape[-2] - 1
    for t in range(last_t, -1, -1):
        step_reward = rewards[:, t, :]
        returns[:, t, :] = step_reward + gamma * future_sum
        future_sum = step_reward + future_sum
    return returns
def compute_theta_coef(hparams_dict, episode):
    """Annealing coefficient for consumer theta.

    Rises from 0 towards 1 with `episode` when annealing is enabled in
    the config; constant 1.0 otherwise. Returned as np.float32.
    """
    anneal_cfg = hparams_dict["agents"]["consumer_anneal_theta"]
    if not anneal_cfg["anneal_on"]:
        return np.float32(1.0)
    decay_len = anneal_cfg["exp_decay_length_in_steps"]
    return np.float32(1.0 - np.exp(-episode / decay_len))
def government_action_mask(hparams_dict, step):
    """Additive logit mask (-1000 on disallowed tax actions) for the govt.

    The maximum allowed tax rate grows linearly with the training step.
    Returns None when tax annealing is disabled.
    """
    action_table = hparams_dict["agents"]["government_actions_array"]
    anneal_cfg = hparams_dict["agents"]["government_anneal_taxes"]
    income_tax = torch.tensor(action_table[:, 0]).cuda()
    corporate_tax = torch.tensor(action_table[:, 1]).cuda()
    mask = torch.zeros(income_tax.shape[0]).cuda()
    if not anneal_cfg["anneal_on"]:
        return None
    tax_cap = anneal_cfg["increase_const"] * step + anneal_cfg["start"]
    disallowed = (income_tax > tax_cap) | (corporate_tax > tax_cap)
    mask[disallowed] -= 1000.0
    return mask
def firm_action_mask(hparams_dict, step):
    """Additive logit mask (-1000 on disallowed actions) for firm actions.

    Wage and price caps/floors move linearly with `step` when the
    corresponding annealing is enabled. Returns None when neither wage
    nor price annealing is on.
    """
    action_table = hparams_dict["agents"]["firm_actions_array"]
    wage_cfg = hparams_dict["agents"]["firm_anneal_wages"]
    price_cfg = hparams_dict["agents"]["firm_anneal_prices"]
    wages = torch.tensor(action_table[:, 1]).cuda()
    prices = torch.tensor(action_table[:, 0]).cuda()
    mask = torch.zeros(wages.shape[0]).cuda()
    if not (wage_cfg["anneal_on"] or price_cfg["anneal_on"]):
        return None
    if wage_cfg["anneal_on"]:
        base = wage_cfg["start"]
        wage_cap = wage_cfg["increase_const"] * step + base
        wage_floor = base - wage_cfg["decrease_const"] * step
        mask[(wages < wage_floor) | (wages > wage_cap)] -= 1000.0
    if price_cfg["anneal_on"]:
        base = price_cfg["start"]
        price_cap = price_cfg["increase_const"] * step + base
        price_floor = base - price_cfg["decrease_const"] * step
        mask[(prices < price_floor) | (prices > price_cap)] -= 1000.0
    return mask
def get_cuda_code(rel_path_to_cu_file, **preprocessor_vars_to_replace):
    """Read a .cu source file and build nvcc -D options for its macros.

    Macros in the .cu files are named M_<VARNAME>; each keyword argument
    becomes one `-D M_<KEY>=<value>` compiler option.
    """
    source_path = module_path / rel_path_to_cu_file
    with open(source_path) as cu_file:
        code_string = cu_file.read()
    nvcc_options = []
    for name, value in preprocessor_vars_to_replace.items():
        nvcc_options.append(f"-D M_{name.upper()}={value}")
    return code_string, nvcc_options
def add_penalty_for_no_ponzi(
    states, rewards, budget_offset, penalty_coef=20.0, penalty_scale=100.0
):
    """Subtract a budget-violation penalty from the final-step rewards.

    A violation is any negative budget (states[..., budget_offset] < 0);
    `rewards` is modified in place at its last time step.
    """
    budgets = states[..., budget_offset]
    violations = torch.clamp_max(budgets, 0.0).neg()
    rewards[:, -1, :] = rewards[:, -1, :] - penalty_coef * violations / penalty_scale
def update_government_rewards(
    government_rewards, consumer_rewards, firm_rewards, cfg_dict
):
    """Fill in government reward as total consumer (+ optional firm) reward.

    The CUDA step function assigns the government exactly zero reward,
    so this writes the social-welfare total in place (single government).
    """
    # govt should have been assigned exactly 0 in cuda step function
    assert (government_rewards == 0.0).all()
    total_rewards = consumer_rewards.sum(dim=-1)
    agents_cfg = cfg_dict["agents"]
    if agents_cfg["government_counts_firm_reward"] == 1:
        firm_factor = agents_cfg.get("firm_reward_for_government_factor", 1.0)
        total_rewards = total_rewards + firm_factor * firm_rewards.sum(dim=-1)
    government_rewards[..., 0] = total_rewards[:]  # one govt for now
def update_penalty_coef(
    states,
    budget_offset,
    prev_penalty_coef,
    penalty_step_size=0.01,
    penalty_scale=100.0,
):
    """Grow the no-ponzi penalty coefficient by the mean budget violation.

    Dual-ascent style update: coef += step_size * mean(violation / scale).
    """
    violations = torch.clamp_max(states[..., budget_offset], 0.0).neg()
    mean_scaled_violation = (violations / penalty_scale).mean().item()
    return prev_penalty_coef + penalty_step_size * mean_scaled_violation
def get_actions_from_inds(action_inds, agents_dict):
    """Translate sampled action indices into real-valued consumer actions.

    Last-dim layout of `action_inds`: one consumption index per firm,
    then the hours-worked index, then the which-firm choice. The output
    keeps the same layout, with lookup-table values substituted for the
    first num_firms + 1 entries; the firm choice remains an index.
    """
    indices = action_inds.cpu().to(torch.long)
    consumption_lookup = torch.tensor(
        agents_dict["consumer_consumption_actions_array"]
    )
    work_lookup = torch.tensor(agents_dict["consumer_work_actions_array"])
    num_firms = agents_dict["num_firms"]
    action_dim = agents_dict["consumer_action_dim"]
    actions_out = torch.zeros(indices.shape[:-1] + (action_dim,))
    # Per-firm consumption levels.
    for firm in range(num_firms):
        actions_out[..., firm] = consumption_lookup[
            indices[..., firm], :
        ].squeeze(dim=-1)
    # Hours worked.
    actions_out[..., num_firms] = work_lookup[
        indices[..., num_firms], :
    ].squeeze(dim=-1)
    # Which firm the consumer works for (kept as an index).
    actions_out[..., num_firms + 1] = indices[..., num_firms + 1]
    return actions_out
def anneal_entropy_coef(entropy_dict, step):
    """Exponentially decaying entropy-bonus multiplier, floored below.

    Returns 1.0 when `entropy_dict` is None or annealing is disabled.
    """
    if entropy_dict is None or not entropy_dict["anneal_on"]:
        return 1.0
    decayed = np.exp(-step / entropy_dict["exp_decay_length_in_steps"])
    return max(decayed, entropy_dict.get("coef_floor", 0.0))
def get_grad_norm(policy):
    """Sum of squared L2 gradient norms over parameters with gradients.

    NOTE: this is the squared global norm — no final square root.
    """
    params_with_grad = (p for p in policy.parameters() if p.grad is not None)
    return sum(((p.grad.data.norm(2).item()) ** 2 for p in params_with_grad), 0.0)
def get_ev(adv, returns, cutoff=-1.0):
    """Explained variance of the value baseline, clipped below at `cutoff`."""
    variance_ratio = adv.detach().var() / returns.detach().var()
    explained = (1 - variance_ratio).item()
    return max(cutoff, explained)
def consumer_ppo_step(
    policy,
    states,
    actions,
    rewards,
    optimizer,
    gamma_const,
    entropy_val=0.0,
    value_loss_weight=1.0,
    ppo_num_updates=3,
    reward_scale=1.0,
    clip_grad_norm=None,
    clip_param=0.1,
):
    """Run one PPO update for the consumer policy (several action heads).

    The policy returns a list of per-head categorical probabilities plus
    value predictions; log-probs are summed across heads, i.e. the heads
    are treated as independent. `ppo_num_updates` epochs are run on the
    same batch, with both the ratio and the value predictions clipped.

    Args:
        policy: callable mapping states -> (list of per-head probs, values).
        states: batched observations.
        actions: integer action indices; last dim indexes the heads.
        rewards: per-step rewards (divided by `reward_scale` before use).
        optimizer: optimizer over the policy parameters.
        gamma_const: discount factor passed to `discounted_returns`.
        entropy_val: entropy-bonus coefficient.
        value_loss_weight: weight of the value-function loss term.
        ppo_num_updates: number of gradient updates on this batch.
        reward_scale: reward normalization divisor.
        clip_grad_norm: optional max global gradient norm.
        clip_param: PPO clipping epsilon (for ratio and value preds).
    """
    # Get initial policy predictions
    multi_action_probs, old_value_preds = policy(states)
    old_value_preds = old_value_preds.detach()
    # Get returns
    rescaled_rewards = rewards / reward_scale
    G_discounted_returns = discounted_returns(rescaled_rewards, gamma_const)
    # Old (pre-update) summed negative log-probs across action heads,
    # frozen for the PPO ratio below.
    sum_old_log_probs = 0.0
    for action_ind, probs in enumerate(multi_action_probs):
        _CategoricalDist = Categorical(probs)
        sum_old_log_probs += -1.0 * _CategoricalDist.log_prob(actions[..., action_ind])
    sum_old_log_probs = sum_old_log_probs.detach()
    assert not G_discounted_returns.requires_grad
    assert not sum_old_log_probs.requires_grad
    assert not old_value_preds.requires_grad
    # Compute ppo loss
    for _ in range(ppo_num_updates):
        multi_action_probs, value_preds = policy(states)
        get_huber_loss = torch.nn.SmoothL1Loss()
        # Clipped value update (PPO-style value clipping).
        value_pred_clipped = old_value_preds + (value_preds - old_value_preds).clamp(
            -clip_param, clip_param
        )
        value_loss_new = get_huber_loss(
            value_preds.squeeze(dim=-1), G_discounted_returns
        )  # can use huber loss instead
        value_loss_clipped = get_huber_loss(
            value_pred_clipped.squeeze(dim=-1), G_discounted_returns
        )
        value_loss = torch.max(value_loss_new, value_loss_clipped).mean()
        # Policy loss with value function baseline.
        advantages = G_discounted_returns - value_preds.detach().squeeze(dim=-1)
        # Don't propagate through to VF network.
        assert not advantages.requires_grad
        # Trick: standardize advantages
        standardized_advantages = (advantages - advantages.mean()) / (
            advantages.std() + 1e-6
        )
        sum_mean_entropy = 0.0  # mean over batch and agents
        sum_neg_log_probs = 0.0
        for action_ind, probs in enumerate(multi_action_probs):
            _CategoricalDist = Categorical(probs)
            sum_neg_log_probs += -1.0 * _CategoricalDist.log_prob(
                actions[..., action_ind]
            )
            sum_mean_entropy += _CategoricalDist.entropy().mean()
        assert sum_neg_log_probs.requires_grad
        # note: log probs are negative, so negate again here
        ratio = torch.exp(-sum_neg_log_probs + sum_old_log_probs)
        surr1 = ratio * standardized_advantages
        surr2 = (
            torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param)
            * standardized_advantages
        )
        ppo_loss = -torch.min(surr1, surr2).mean()
        loss = (
            ppo_loss - entropy_val * sum_mean_entropy + value_loss_weight * value_loss
        )
        # Apply gradients
        optimizer.zero_grad()
        loss.backward()
        if clip_grad_norm is not None:
            torch.nn.utils.clip_grad_norm_(policy.parameters(), max_norm=clip_grad_norm)
        optimizer.step()
def ppo_step(
    policy,
    states,
    actions,
    rewards,
    optimizer,
    gamma_const,
    entropy_val=0.0,
    value_loss_weight=1.0,
    ppo_num_updates=3,
    actions_mask=None,
    reward_scale=1.0,
    clip_grad_norm=None,
    clip_param=0.1,
):
    """Run one PPO update for a single-head categorical policy.

    Same scheme as `consumer_ppo_step` but with one action head and an
    optional `actions_mask` forwarded to the policy (used to penalize
    annealed-out actions via large negative logits).
    """
    # Get initial policy predictions
    probs, old_value_preds = policy(states, actions_mask=actions_mask)
    old_value_preds = old_value_preds.detach()
    # Get returns
    rescaled_rewards = rewards / reward_scale
    G_discounted_returns = discounted_returns(rescaled_rewards, gamma_const)
    # Old (pre-update) negative log-probs, frozen for the PPO ratio.
    _CategoricalDist = Categorical(probs)
    old_log_probs = -1.0 * _CategoricalDist.log_prob(actions).detach()
    assert not G_discounted_returns.requires_grad
    assert not old_log_probs.requires_grad
    assert not old_value_preds.requires_grad
    # Compute ppo loss
    for _ in range(ppo_num_updates):
        probs, value_preds = policy(states, actions_mask=actions_mask)
        get_huber_loss = torch.nn.SmoothL1Loss()
        # Clipped value update (PPO-style value clipping).
        value_pred_clipped = old_value_preds + (value_preds - old_value_preds).clamp(
            -clip_param, clip_param
        )
        value_loss_new = get_huber_loss(
            value_preds.squeeze(dim=-1), G_discounted_returns
        )  # can use huber loss instead
        value_loss_clipped = get_huber_loss(
            value_pred_clipped.squeeze(dim=-1), G_discounted_returns
        )
        value_loss = torch.max(value_loss_new, value_loss_clipped).mean()
        # Policy loss with value function baseline.
        advantages = G_discounted_returns - value_preds.detach().squeeze(dim=-1)
        # Don't propagate through to VF network.
        assert not advantages.requires_grad
        # Trick: standardize advantages
        standardized_advantages = (advantages - advantages.mean()) / (
            advantages.std() + 1e-6
        )
        _CategoricalDist = Categorical(probs)
        neg_log_probs = -1.0 * _CategoricalDist.log_prob(actions)
        mean_entropy = _CategoricalDist.entropy().mean()
        assert neg_log_probs.requires_grad
        # note: log probs are negative, so negate again here
        ratio = torch.exp(-neg_log_probs + old_log_probs)
        surr1 = ratio * standardized_advantages
        surr2 = (
            torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param)
            * standardized_advantages
        )
        ppo_loss = -torch.min(surr1, surr2).mean()
        loss = ppo_loss - entropy_val * mean_entropy + value_loss_weight * value_loss
        # Apply gradients
        optimizer.zero_grad()
        loss.backward()
        if clip_grad_norm is not None:
            torch.nn.utils.clip_grad_norm_(policy.parameters(), max_norm=clip_grad_norm)
        optimizer.step()
def consumer_policy_gradient_step(
    policy,
    states,
    actions,
    rewards,
    optimizer,
    gamma_const,
    entropy_val=0.0,
    value_loss_weight=1.0,
    reward_scale=1.0,
    clip_grad_norm=None,
):
    """One vanilla policy-gradient (A2C-style) update, multi-head policy.

    Single gradient step: REINFORCE loss on standardized advantages with
    a value-function baseline, Huber value loss, and an entropy bonus
    summed over the per-head categorical distributions.
    """
    # Get policy and value predictions
    multi_action_probs, value_preds = policy(states)
    # Get returns
    rescaled_rewards = rewards / reward_scale
    G_discounted_returns = discounted_returns(rescaled_rewards, gamma_const)
    # Value function loss
    get_huber_loss = torch.nn.SmoothL1Loss()
    value_loss = get_huber_loss(
        value_preds.squeeze(dim=-1), G_discounted_returns
    ).mean()  # can use huber loss instead
    # Policy loss with value function baseline.
    advantages = G_discounted_returns - value_preds.detach().squeeze(dim=-1)
    # Don't propagate through to VF network.
    assert not advantages.requires_grad
    # Trick: standardize advantages
    standardized_advantages = (advantages - advantages.mean()) / (
        advantages.std() + 1e-6
    )
    # Compute policy loss
    sum_mean_entropy = 0.0  # mean over batch and agents
    sum_neg_log_probs = 0.0
    for action_ind, probs in enumerate(multi_action_probs):
        _CategoricalDist = Categorical(probs)
        sum_neg_log_probs += -1.0 * _CategoricalDist.log_prob(actions[..., action_ind])
        sum_mean_entropy += _CategoricalDist.entropy().mean()
    pg_loss = (sum_neg_log_probs * standardized_advantages).mean()
    assert sum_neg_log_probs.requires_grad
    loss = pg_loss - entropy_val * sum_mean_entropy + value_loss_weight * value_loss
    # Apply gradients
    optimizer.zero_grad()
    loss.backward()
    if clip_grad_norm is not None:
        torch.nn.utils.clip_grad_norm_(policy.parameters(), max_norm=clip_grad_norm)
    optimizer.step()
def policy_gradient_step(
    policy,
    states,
    actions,
    rewards,
    optimizer,
    gamma_const,
    entropy_val=0.0,
    value_loss_weight=1.0,
    actions_mask=None,
    reward_scale=1.0,
    clip_grad_norm=None,
):
    """One vanilla policy-gradient (A2C-style) update, single-head policy.

    Mirrors `consumer_policy_gradient_step` but with one categorical
    action head and an optional `actions_mask` forwarded to the policy.
    """
    # here, we must perform digit scaling
    # NOTE(review): the comment above looks stale — no digit scaling
    # happens in this function; confirm against the digit-expansion
    # helpers imported from .util.
    optimizer.zero_grad()
    probs, value_preds = policy(states, actions_mask=actions_mask)
    rewards = rewards / reward_scale
    G_discounted_returns = discounted_returns(rewards, gamma_const)
    get_huber_loss = torch.nn.SmoothL1Loss()
    value_loss = get_huber_loss(
        value_preds.squeeze(dim=-1), G_discounted_returns
    ).mean()  # can use huber loss instead
    advantages = G_discounted_returns - value_preds.detach().squeeze(
        dim=-1
    )  # compute advantages (don't propagate through to VF network)
    assert not advantages.requires_grad
    # mean and standardize advantages
    standardized_advantages = (advantages - advantages.mean()) / (
        advantages.std() + 1e-6
    )
    assert not standardized_advantages.requires_grad
    m = Categorical(probs)
    pg_loss = (-m.log_prob(actions) * standardized_advantages).mean()
    assert pg_loss.requires_grad
    entropy_regularize = entropy_val * m.entropy().mean()
    loss = pg_loss - entropy_regularize + value_loss_weight * value_loss
    loss.backward()
    if clip_grad_norm is not None:
        torch.nn.utils.clip_grad_norm_(policy.parameters(), max_norm=clip_grad_norm)
    optimizer.step()
def save_dense_log(
    save_dir,
    epi,
    agent_type_arrays,
    agent_action_arrays,
    agent_aux_arrays,
):
    """Dump one episode's states/actions/rewards per agent type to .npz.

    Files are written as <save_dir>/episode_<epi>_<agent_type>.npz.
    Tensors are moved to CPU before saving; a None auxiliary array is
    stored as-is.
    """
    print(f"Saving dense log at episode {epi}")
    for agent_type in ("consumer", "firm", "government"):
        states, actions, rewards = agent_type_arrays[agent_type]
        aux = agent_aux_arrays[agent_type]
        if aux is not None:
            aux = aux.cpu().numpy()
        out_path = Path(save_dir) / Path(f"episode_{epi}_{agent_type}.npz")
        np.savez(
            str(out_path),
            states=states.cpu().numpy(),
            actions=actions.cpu().numpy(),
            rewards=rewards.cpu().numpy(),
            action_array=agent_action_arrays[agent_type],
            aux_array=aux,
        )
def save_policy_parameters(
    save_dir,
    epi,
    consumer_policy,
    firm_policy,
    government_policy,
    freeze_firms,
    freeze_govt,
):
    """Checkpoint policy weights under <save_dir>/saved_models/.

    Each saved policy gets an episode-stamped file plus a
    '<name>_policy_latest.pt' copy that is overwritten on every call.
    Firm / government policies are skipped when frozen (freeze_* is
    not None).
    """
    print(f"saving model parameters at episode {epi}")
    models_dir = Path(save_dir) / Path("saved_models")

    def _checkpoint(policy, name):
        # Write both the episode-stamped file and the rolling 'latest'.
        stamped = models_dir / Path(f"{name}_policy_{epi}.pt")
        latest = models_dir / Path(f"{name}_policy_latest.pt")
        os.makedirs(stamped.parent, exist_ok=True)
        torch.save(policy.state_dict(), stamped)
        torch.save(policy.state_dict(), latest)

    _checkpoint(consumer_policy, "consumer")
    if freeze_firms is None:
        _checkpoint(firm_policy, "firm")
    if freeze_govt is None:
        _checkpoint(government_policy, "government")
class ConsumerFirmRunManagerBatchParallel:
"""
The Real Business Cycle Experiment Management Class.
"""
    def __init__(self, cfg_dict, freeze_firms=None, freeze_govt=None):
        """Set up the run manager from a full experiment config.

        Args:
            cfg_dict: experiment config with "train", "agents" and
                "world" sub-dicts.
            freeze_firms: when not None, firm policies are treated as
                frozen (not trained or saved).
            freeze_govt: same, for the government policy.
        """
        self.cfg_dict = cfg_dict
        # Cache the main config sections for convenient access.
        self.train_dict = cfg_dict["train"]
        self.agents_dict = cfg_dict["agents"]
        self.world_dict = cfg_dict["world"]
        self.save_dense_every = self.train_dict["save_dense_every"]
        self.save_dir = self.train_dict["save_dir"]
        self.freeze_firms = freeze_firms
        self.freeze_govt = freeze_govt
        # Compile CUDA kernels, then allocate device-side buffers.
        self.__init_cuda_functions()
        self.__init_cuda_data_structs()
        self.__init_torch_data()
    def __init_cuda_data_structs(self):
        """Allocate and initialize device buffers for all agent types.

        Builds host-side numpy arrays for states/actions/rewards of
        consumers, firms and the government(s), fills in the initial
        world state, then copies them to the GPU. States live as torch
        CUDA tensors (sampling is done via pytorch); rewards, action
        indices and state checkpoints live as raw PyCUDA allocations
        used by the CUDA step kernels.
        """
        __td = self.train_dict
        __ad = self.agents_dict
        __wd = self.world_dict
        batch_size = __td["batch_size"]
        num_consumers = __ad["num_consumers"]
        num_firms = __ad["num_firms"]
        num_governments = __ad["num_governments"]
        firm_action_dim = __ad["firm_action_dim"]
        government_action_dim = __ad["government_action_dim"]
        consumer_state_dim = __ad["consumer_state_dim"]
        firm_state_dim = __ad["firm_state_dim"]
        government_state_dim = __ad["government_state_dim"]
        global_state_dim = __ad["global_state_dim"]
        consumer_endowment = __wd["initial_consumer_endowment"]
        firm_endowment = __wd["initial_firm_endowment"]
        initial_stocks = __wd["initial_stocks"]
        initial_wages = __wd["initial_wages"]
        initial_prices = __wd["initial_prices"]
        consumer_theta = __wd["consumer_theta"]
        consumer_rewards = np.zeros((batch_size, num_consumers), dtype=_NP_DTYPE)
        consumer_states = np.zeros(
            (batch_size, num_consumers, consumer_state_dim), dtype=_NP_DTYPE
        )
        firm_action_indices = np.zeros((batch_size, num_firms), dtype=np.int32)
        firm_actions = np.zeros(
            (batch_size, num_firms, firm_action_dim), dtype=_NP_DTYPE
        )
        firm_rewards = np.zeros((batch_size, num_firms), dtype=_NP_DTYPE)
        firm_states = np.zeros((batch_size, num_firms, firm_state_dim), dtype=_NP_DTYPE)
        government_action_indices = np.zeros(
            (batch_size, num_governments), dtype=np.int32
        )
        government_actions = np.zeros(
            (batch_size, num_governments, government_action_dim), dtype=_NP_DTYPE
        )
        government_rewards = np.zeros((batch_size, num_governments), dtype=_NP_DTYPE)
        government_states = np.zeros(
            (batch_size, num_governments, government_state_dim), dtype=_NP_DTYPE
        )
        # initialize states to right values here
        # global state init (prices, wages, stocks, overdemand, taxes)
        # shared layout across consumers, firms, and governments
        for state_arr in [consumer_states, firm_states, government_states]:
            # per-firm prices
            state_arr[:, :, 0:num_firms] = initial_prices
            # per-firm wages
            state_arr[:, :, num_firms : (2 * num_firms)] = initial_wages
            # per-firm stocks
            state_arr[:, :, (2 * num_firms) : (3 * num_firms)] = initial_stocks
            # set goods overdemanded to 0.0
            state_arr[:, :, (3 * num_firms) : (4 * num_firms)] = 0.0
            # set taxes to 0.0
            state_arr[:, :, (4 * num_firms)] = 0.0
            state_arr[:, :, (4 * num_firms) + 1] = 0.0
        # consumer states, set theta and initial budget
        # (optionally drawing heterogeneous thetas from a Pareto quantile grid)
        if "paretoscaletheta" in __wd:
            pareto_vals = np.expand_dims(
                scipy.stats.pareto.ppf(
                    (np.arange(num_consumers) / num_consumers), __wd["paretoscaletheta"]
                ),
                axis=0,
            )
            consumer_states[:, :, consumer_state_dim - 1] = consumer_theta * (
                1.0 / pareto_vals
            )
        else:
            consumer_states[:, :, consumer_state_dim - 1] = consumer_theta
        consumer_states[:, :, global_state_dim] = consumer_endowment
        # firm states
        # capital: uniform 1.0 by default, or per-firm schedules
        if __wd.get("initial_capital", None) == "proportional":
            for i in range(num_firms):
                firm_states[:, i, global_state_dim + 1] = ((i + 1) / 10.0) * 2.0
        elif __wd.get("initial_capital", None) == "twolevel":
            for i in range(num_firms):
                if i < (num_firms // 2):
                    firm_states[:, i, global_state_dim + 1] = 5000
                else:
                    firm_states[:, i, global_state_dim + 1] = 10000
        else:
            firm_states[:, :, global_state_dim + 1] = 1.0
        # production alpha: scalar, proportional schedule, or fixed table
        if __wd["production_alpha"] == "proportional":
            half_firms = num_firms // 2
            for i in range(num_firms):
                firm_states[:, i, global_state_dim + 2] = ((i % half_firms) + 1) * 0.2
        elif __wd["production_alpha"] == "fixed_array":
            # NOTE(review): this table assumes num_firms <= 10 — an
            # IndexError would occur for more firms; confirm intent.
            alpha_arr = [0.2, 0.3, 0.4, 0.6, 0.8, 0.2, 0.3, 0.4, 0.6, 0.8]
            for i in range(num_firms):
                firm_states[:, i, global_state_dim + 2] = alpha_arr[i]
        else:
            for i in range(num_firms):
                firm_states[:, i, global_state_dim + 2] = __wd["production_alpha"]
        # set one-hot fields correctly by index for each firm
        onehot_rows = np.eye(num_firms)
        firm_states[:, :, (global_state_dim + 3) :] = onehot_rows
        firm_states[:, :, global_state_dim] = firm_endowment
        # government states
        # for now, nothing beyond global state
        self.consumer_states_gpu_tensor = torch.from_numpy(consumer_states).cuda()
        # these are now tensors bc sampling for consumers via pytorch
        self.consumer_rewards_gpu_pycuda = cuda_driver.mem_alloc(
            consumer_rewards.nbytes
        )
        self.consumer_states_checkpoint_gpu_pycuda = cuda_driver.mem_alloc(
            consumer_states.nbytes
        )
        cuda_driver.memcpy_htod(self.consumer_rewards_gpu_pycuda, consumer_rewards)
        cuda_driver.memcpy_htod(
            self.consumer_states_checkpoint_gpu_pycuda, consumer_states
        )
        self.firm_states_gpu_tensor = torch.from_numpy(firm_states).cuda()
        self.firm_action_indices_gpu_pycuda = cuda_driver.mem_alloc(
            firm_action_indices.nbytes
        )
        self.firm_actions_gpu_pycuda = cuda_driver.mem_alloc(firm_actions.nbytes)
        self.firm_rewards_gpu_pycuda = cuda_driver.mem_alloc(firm_rewards.nbytes)
        self.firm_states_checkpoint_gpu_pycuda = cuda_driver.mem_alloc(
            firm_states.nbytes
        )
        cuda_driver.memcpy_htod(
            self.firm_action_indices_gpu_pycuda, firm_action_indices
        )
        cuda_driver.memcpy_htod(self.firm_actions_gpu_pycuda, firm_actions)
        cuda_driver.memcpy_htod(self.firm_rewards_gpu_pycuda, firm_rewards)
        cuda_driver.memcpy_htod(self.firm_states_checkpoint_gpu_pycuda, firm_states)
        self.government_states_gpu_tensor = torch.from_numpy(government_states).cuda()
        self.government_action_indices_gpu_pycuda = cuda_driver.mem_alloc(
            government_action_indices.nbytes
        )
        self.government_actions_gpu_pycuda = cuda_driver.mem_alloc(
            government_actions.nbytes
        )
        self.government_rewards_gpu_pycuda = cuda_driver.mem_alloc(
            government_rewards.nbytes
        )
        self.government_states_checkpoint_gpu_pycuda = cuda_driver.mem_alloc(
            government_states.nbytes
        )
        cuda_driver.memcpy_htod(
            self.government_action_indices_gpu_pycuda, government_action_indices
        )
        cuda_driver.memcpy_htod(self.government_actions_gpu_pycuda, government_actions)
        cuda_driver.memcpy_htod(self.government_rewards_gpu_pycuda, government_rewards)
        cuda_driver.memcpy_htod(
            self.government_states_checkpoint_gpu_pycuda, government_states
        )
def __init_torch_data(self):
__td = self.train_dict
__ad = self.agents_dict
batch_size = __td["batch_size"]
num_consumers = __ad["num_consumers"]
num_firms = __ad["num_firms"]
num_governments = __ad["num_governments"]
consumer_action_dim = __ad["consumer_action_dim"]
consumer_state_dim = __ad["consumer_state_dim"]
firm_state_dim = __ad["firm_state_dim"]
government_state_dim = __ad["government_state_dim"]
num_iters = int(self.world_dict["maxtime"])
consumer_states_batch = torch.zeros(
batch_size,
num_iters,
num_consumers,
consumer_state_dim,
dtype=torch.float32,
device="cpu",
)
consumer_actions_single = torch.zeros(
batch_size,
num_consumers,
num_firms + 1 + 1,
dtype=torch.int32,
device="cpu",
)
consumer_actions_batch = torch.zeros(
batch_size,
num_iters,
num_consumers,
num_firms + 1 + 1,
dtype=torch.int32,
device="cpu",
)
# auxiliary state info that is not part of observables.
# currently just the realized consumption
consumer_aux_batch = torch.zeros(
batch_size,
num_iters,
num_consumers,
num_firms,
dtype=torch.float32,
device="cpu",
)
consumer_rewards_batch = torch.zeros(
batch_size, num_iters, num_consumers, dtype=torch.float32, device="cpu"
)
self.consumer_states_batch_gpu_tensor = consumer_states_batch.cuda()
self.consumer_actions_batch_gpu_tensor = consumer_actions_batch.cuda()
self.consumer_actions_index_single_gpu_tensor = consumer_actions_single.cuda()
self.consumer_actions_single_gpu_tensor = torch.zeros(
batch_size,
num_consumers,
consumer_action_dim,
dtype=torch.float32,
device="cpu",
).cuda()
self.consumer_rewards_batch_gpu_tensor = consumer_rewards_batch.cuda()
self.consumer_aux_batch_gpu_tensor = consumer_aux_batch.cuda()
firm_states_batch = torch.zeros(
batch_size,
num_iters,
num_firms,
firm_state_dim,
dtype=torch.float32,
device="cpu",
)
firm_actions_batch = torch.zeros(
batch_size, num_iters, num_firms, dtype=torch.int32, device="cpu"
)
firm_rewards_batch = torch.zeros(
batch_size, num_iters, num_firms, dtype=torch.float32, device="cpu"
)
firm_aux_batch = torch.zeros(
batch_size, num_iters, num_firms, dtype=torch.float32, device="cpu"
)
self.firm_states_batch = firm_states_batch.cuda()
self.firm_actions_batch = firm_actions_batch.cuda()
self.firm_rewards_batch = firm_rewards_batch.cuda()
self.firm_aux_batch = firm_aux_batch.cuda()
government_states_batch = torch.zeros(
batch_size,
num_iters,
num_governments,
government_state_dim,
dtype=torch.float32,
device="cpu",
)
government_actions_batch = torch.zeros(
batch_size, num_iters, num_governments, dtype=torch.int32, device="cpu"
)
government_rewards_batch = torch.zeros(
batch_size, num_iters, num_governments, dtype=torch.float32, device="cpu"
)
self.government_states_batch = government_states_batch.cuda()
self.government_actions_batch = government_actions_batch.cuda()
self.government_rewards_batch = government_rewards_batch.cuda()
    def __init_cuda_functions(self):
        """Compile the CUDA simulation module and bind its kernels.

        Generates kernel source from ``cuda/firm_rbc.cu`` with the world,
        agent, and training configuration passed as generation parameters,
        compiles it with PyCUDA, uploads the firm and government action
        tables into the module's device globals, and stores handles to the
        kernels used during rollout (RNG init, env reset, action sampling,
        env step, RNG free).
        """
        __td = self.train_dict
        __ad = self.agents_dict
        __wd = self.world_dict
        # A frozen firm policy contributes no firm reward to the government.
        if self.freeze_firms is not None:
            countfirmreward = 0
        else:
            countfirmreward = self.agents_dict["government_counts_firm_reward"]
        code, compiler_options = get_cuda_code(
            Path("cuda") / Path("firm_rbc.cu"),
            batchsize=__td["batch_size"],
            numconsumers=__ad["num_consumers"],
            numfirms=__ad["num_firms"],
            numgovernments=__ad["num_governments"],
            maxtime=__wd["maxtime"],
            # numactionsconsumer=__ad["consumer_num_actions"],
            # NOTE: the kernel only sees the *work* action count for consumers;
            # consumption/which-firm heads are handled on the PyTorch side.
            numactionsconsumer=__ad["consumer_num_work_actions"],
            numactionsfirm=__ad["firm_num_actions"],
            numactionsgovernment=__ad["government_num_actions"],
            interestrate=__wd["interest_rate"],
            crra_param=__wd["crra_param"],
            shouldboostfirmreward=int(__td["should_boost_firm_reward"]),
            boostfirmrewardfactor=__td["boost_firm_reward_factor"],
            countfirmreward=countfirmreward,
            importerprice=__wd["importer_price"],
            importerquantity=__wd["importer_quantity"],
            laborfloor=__wd.get("labor_floor", 0.0),
            useimporter=__wd["use_importer"],
        )
        mod = SourceModule(code, options=compiler_options, no_extern_c=True)
        self.mod = mod
        # --------------------------------------------------------------------
        # Define Consumer actions -- managed in PyTorch
        # --------------------------------------------------------------------
        self.consumption_action_tensor = torch.tensor(
            __ad["consumer_consumption_actions_array"].astype(_NP_DTYPE)
        ).cuda()
        self.work_action_tensor = torch.tensor(
            __ad["consumer_work_actions_array"].astype(_NP_DTYPE)
        ).cuda()
        # --------------------------------------------------------------------
        # Define Firm actions -- managed in CUDA
        # --------------------------------------------------------------------
        firm_index_to_action_gpu, _ = mod.get_global("kFirmIndexToAction")
        cuda_driver.memcpy_htod(
            firm_index_to_action_gpu,
            __ad["firm_actions_array"].astype(_NP_DTYPE),
        )
        # --------------------------------------------------------------------
        # Define Govt actions -- managed in CUDA
        # --------------------------------------------------------------------
        government_index_to_action_gpu, _ = mod.get_global("kGovernmentIndexToAction")
        cuda_driver.memcpy_htod(
            government_index_to_action_gpu,
            __ad["government_actions_array"].astype(_NP_DTYPE),
        )
        # --------------------------------------------------------------------
        # Get handles to CUDA methods
        # --------------------------------------------------------------------
        self.cuda_init_random = mod.get_function("CudaInitKernel")
        self.cuda_reset_env = mod.get_function("CudaResetEnv")
        self.cuda_sample_actions = mod.get_function(
            "CudaSampleFirmAndGovernmentActions"
        )
        self.cuda_step = mod.get_function("CudaStep")
        self.cuda_free_mem = mod.get_function("CudaFreeRand")
def _update_consumer_actions_inplace(self):
# call after consumer_actions_single is updated
__ad = self.agents_dict
# Add asserts when ``loading'' arrays
# assert consumption_action_array.shape == (1, 1, 1)
# assert len(consumption_action_array.shape) == 3
num_firms = __ad["num_firms"]
idx_hours = num_firms
idx_which_firm = num_firms + 1
for i in range(num_firms):
consumption_actions_at_firm_i = (
self.consumer_actions_index_single_gpu_tensor[..., i].to(torch.long)
)
self.consumer_actions_single_gpu_tensor[
..., i
] = self.consumption_action_tensor[
consumption_actions_at_firm_i, :
].squeeze(
dim=-1
)
consumer_hours_worked = self.consumer_actions_index_single_gpu_tensor[
..., idx_hours
].to(torch.long)
self.consumer_actions_single_gpu_tensor[
..., num_firms
] = self.work_action_tensor[consumer_hours_worked, :].squeeze(dim=-1)
self.consumer_actions_single_gpu_tensor[
..., num_firms + 1
] = self.consumer_actions_index_single_gpu_tensor[..., idx_which_firm]
def sample_consumer_actions_and_store(self, consumer_probs_list):
# Every consumer has A action heads, output as a list of tensors.
# Sample from each of these lists and store the results.
with torch.no_grad():
for i, probs in enumerate(consumer_probs_list):
dist = Categorical(probs)
samples = dist.sample()
self.consumer_actions_index_single_gpu_tensor[..., i] = samples
self._update_consumer_actions_inplace()
def consumers_will_train_this_episode(self, epi):
__ad = self.agents_dict
if "training_schedule_mod" in self.agents_dict:
mod_val = epi % __ad["training_schedule_mod"]
return mod_val <= __ad["consumer_mod_threshold"]
if "consumer_training_list" in self.agents_dict:
return interval_list_contains(__ad["consumer_training_list"], epi)
if "train_consumers_every" in self.agents_dict:
mod_val = epi % __ad["train_consumers_every"]
else:
mod_val = 0
return epi >= self.agents_dict.get("consumer_training_start", 0) and (
mod_val == 0
)
def firms_will_train_this_episode(self, epi):
__ad = self.agents_dict
if "training_schedule_mod" in self.agents_dict:
mod_val = epi % __ad["training_schedule_mod"]
return mod_val > __ad["consumer_mod_threshold"]
if "firm_training_list" in self.agents_dict:
return interval_list_contains(__ad["firm_training_list"], epi) and (
self.freeze_firms is None
)
if "train_firms_every" in self.agents_dict:
mod_val = epi % __ad["train_firms_every"]
else:
mod_val = 0
return (
(epi >= self.agents_dict.get("firm_training_start", 0))
and (self.freeze_firms is None)
and (mod_val == 0)
)
def governments_will_train_this_episode(self, epi):
__ad = self.agents_dict
if "government_training_list" in self.agents_dict:
return interval_list_contains(__ad["government_training_list"], epi) and (
self.freeze_govt is None
)
if "train_government_every" in self.agents_dict:
mod_val = epi % self.agents_dict["train_government_every"]
else:
mod_val = 0
return (
(epi >= self.agents_dict.get("government_training_start", 0))
and (self.freeze_govt is None)
and (mod_val == 0)
)
    def bestresponse_train(
        self, train_type, num_episodes, rollout_path, ep_str="latest", checkpoint=100
    ):
        """Fine-tune the policy of a single agent type against fixed opponents.

        Loads consumer, firm, and government policies from
        ``rollout_path / "saved_models"`` at checkpoint suffix ``ep_str``,
        rolls out full episodes on the GPU, and applies policy-gradient
        updates only to the type named by ``train_type`` ("consumer",
        "firm", or "government"). Parameters and dense logs are saved every
        ``checkpoint`` episodes under ``<save_dir>/br<train_type>``.

        Returns the list of per-episode mean rewards of the trained type.

        NOTE(review): if ``train_type`` is not one of the three known types,
        ``rewards`` stays empty and ``rewards[-1]`` below raises IndexError.
        """
        # train one single type only
        # load all policies from state dict
        # reset all the environment stuff
        __td = self.train_dict
        __ad = self.agents_dict
        num_iters = int(self.world_dict["maxtime"])
        num_consumers = __ad["num_consumers"]
        num_firms = __ad["num_firms"]
        num_governments = __ad["num_governments"]
        num_agents = num_consumers + num_firms + num_governments
        # CUDA launch shape: one thread per agent, one block per batch entry.
        block = (num_agents, 1, 1)
        grid = (__td["batch_size"], 1)
        seed_everything(__td["seed"])
        self.cuda_init_random(np.int32(__td["seed"]), block=block, grid=grid)
        # --------------------------------------------
        # Define Consumer policy + optimizers
        # --------------------------------------------
        lr = __td["lr"]
        consumer_expanded_size = size_after_digit_expansion(
            __ad["consumer_state_dim"],
            __ad["consumer_digit_dims"],
            __td["digit_representation_size"],
        )
        # One consumption head per firm, plus hours-worked and which-firm heads.
        consumer_policy = IndependentPolicyNet(
            consumer_expanded_size,
            [__ad["consumer_num_consume_actions"]] * num_firms
            + [
                __ad["consumer_num_work_actions"],
                __ad["consumer_num_whichfirm_actions"],
            ],
            norm_consts=(
                torch.zeros(consumer_expanded_size).cuda(),  # don't center for now
                consumer_state_scaling_factors(self.cfg_dict),
            ),
        ).to("cuda")
        consumer_policy.load_state_dict(
            torch.load(
                rollout_path
                / Path("saved_models")
                / Path(f"consumer_policy_{ep_str}.pt")
            )
        )
        consumer_optim = torch.optim.Adam(consumer_policy.parameters(), lr=lr)
        firm_expanded_size = size_after_digit_expansion(
            __ad["firm_state_dim"],
            __ad["firm_digit_dims"],
            __td["digit_representation_size"],
        )
        firm_policy = PolicyNet(
            firm_expanded_size,
            __ad["firm_num_actions"],
            norm_consts=(
                torch.zeros(firm_expanded_size).cuda(),
                firm_state_scaling_factors(self.cfg_dict),
            ),
        ).to("cuda")
        firm_policy.load_state_dict(
            torch.load(
                rollout_path / Path("saved_models") / Path(f"firm_policy_{ep_str}.pt")
            )
        )
        firm_optim = torch.optim.Adam(firm_policy.parameters(), lr=lr)
        government_expanded_size = size_after_digit_expansion(
            __ad["government_state_dim"],
            __ad["government_digit_dims"],
            __td["digit_representation_size"],
        )
        government_policy = PolicyNet(
            government_expanded_size,
            __ad["government_num_actions"],
            norm_consts=(
                torch.zeros(government_expanded_size).cuda(),
                govt_state_scaling_factors(self.cfg_dict),
            ),
        ).to("cuda")
        government_policy.load_state_dict(
            torch.load(
                rollout_path
                / Path("saved_models")
                / Path(f"government_policy_{ep_str}.pt")
            )
        )
        government_optim = torch.optim.Adam(government_policy.parameters(), lr=lr)
        rewards = []
        # Rollout buffers keyed by agent type, used for dense logging.
        agent_type_arrays = {
            "consumer": (
                self.consumer_states_batch_gpu_tensor,
                self.consumer_actions_batch_gpu_tensor,
                self.consumer_rewards_batch_gpu_tensor,
            ),
            "firm": (
                self.firm_states_batch,
                self.firm_actions_batch,
                self.firm_rewards_batch,
            ),
            "government": (
                self.government_states_batch,
                self.government_actions_batch,
                self.government_rewards_batch,
            ),
        }
        # NOTE(review): consumers map to the *work* action table only — confirm
        # this is the intended table for consumer dense logs.
        agent_action_arrays = {
            "consumer": __ad["consumer_work_actions_array"],
            "firm": __ad["firm_actions_array"],
            "government": __ad["government_actions_array"],
        }
        agent_aux_arrays = {
            "consumer": (self.consumer_aux_batch_gpu_tensor),
            "firm": (self.firm_aux_batch),
            "government": None,
        }
        pbar = tqdm(range(num_episodes))
        for epi in pbar:
            annealed_entropy_coef = 0.1  # later, do some computation to anneal this
            self.cuda_reset_env(
                CudaTensorHolder(self.consumer_states_gpu_tensor),
                CudaTensorHolder(self.firm_states_gpu_tensor),
                CudaTensorHolder(self.government_states_gpu_tensor),
                self.consumer_states_checkpoint_gpu_pycuda,
                self.firm_states_checkpoint_gpu_pycuda,
                self.government_states_checkpoint_gpu_pycuda,
                np.float32(1.0),
                block=block,
                grid=grid,
            )
            for _iter in range(num_iters):
                # ------------------------
                # Run policy and get probs
                # ------------------------
                with torch.no_grad():
                    # here, we must perform digit scaling
                    consumer_probs_list, _ = consumer_policy(
                        expand_to_digit_form(
                            self.consumer_states_gpu_tensor,
                            __ad["consumer_digit_dims"],
                            __td["digit_representation_size"],
                        )
                    )
                    firm_probs, _ = firm_policy(
                        expand_to_digit_form(
                            self.firm_states_gpu_tensor,
                            __ad["firm_digit_dims"],
                            __td["digit_representation_size"],
                        ),
                        actions_mask=None,
                    )
                    government_probs, _ = government_policy(
                        expand_to_digit_form(
                            self.government_states_gpu_tensor,
                            __ad["government_digit_dims"],
                            __td["digit_representation_size"],
                        ),
                        actions_mask=None,
                    )
                # ------------------------
                # Get action samples
                # ------------------------
                # Sample consumer actions using PyTorch here on GPU!
                self.sample_consumer_actions_and_store(consumer_probs_list)
                # Sample firms + govt actions using PyCUDA on GPU!
                self.cuda_sample_actions(
                    CudaTensorHolder(firm_probs),
                    self.firm_action_indices_gpu_pycuda,
                    self.firm_actions_gpu_pycuda,
                    CudaTensorHolder(government_probs),
                    self.government_action_indices_gpu_pycuda,
                    self.government_actions_gpu_pycuda,
                    block=block,
                    grid=grid,
                )
                # ------------------------
                # Step on GPU
                # ------------------------
                self.cuda_step(
                    CudaTensorHolder(
                        # size: batches x n_consumers x consumer_state float
                        self.consumer_states_gpu_tensor
                    ),
                    CudaTensorHolder(
                        # size: batches x n_consumers x consumer_action_dim float
                        self.consumer_actions_single_gpu_tensor
                    ),
                    # size: batches x n_consumers x 1 float
                    self.consumer_rewards_gpu_pycuda,
                    CudaTensorHolder(
                        self.consumer_states_batch_gpu_tensor
                    ),  # size: batches x episode x n_consumers x consumer_state float
                    CudaTensorHolder(self.consumer_rewards_batch_gpu_tensor),
                    CudaTensorHolder(self.firm_states_gpu_tensor),
                    self.firm_action_indices_gpu_pycuda,
                    self.firm_actions_gpu_pycuda,
                    self.firm_rewards_gpu_pycuda,
                    CudaTensorHolder(self.firm_states_batch),
                    CudaTensorHolder(self.firm_actions_batch),
                    CudaTensorHolder(self.firm_rewards_batch),
                    CudaTensorHolder(self.government_states_gpu_tensor),
                    self.government_action_indices_gpu_pycuda,
                    self.government_actions_gpu_pycuda,
                    self.government_rewards_gpu_pycuda,
                    CudaTensorHolder(self.government_states_batch),
                    CudaTensorHolder(self.government_actions_batch),
                    CudaTensorHolder(self.government_rewards_batch),
                    CudaTensorHolder(self.consumer_aux_batch_gpu_tensor),
                    CudaTensorHolder(self.firm_aux_batch),
                    np.int32(_iter),
                    block=block,
                    grid=grid,
                )
                # Record this step's sampled consumer action indices.
                self.consumer_actions_batch_gpu_tensor[
                    :, _iter, :, :
                ] = self.consumer_actions_index_single_gpu_tensor
            # Government reward is derived from consumer/firm rewards.
            update_government_rewards(
                self.government_rewards_batch,
                self.consumer_rewards_batch_gpu_tensor,
                self.firm_rewards_batch,
                self.cfg_dict,
            )
            if train_type == "consumer":
                consumer_reward_scale = self.agents_dict.get(
                    "consumer_reward_scale", 1.0
                )
                consumer_policy_gradient_step(
                    consumer_policy,
                    expand_to_digit_form(
                        self.consumer_states_batch_gpu_tensor,
                        __ad["consumer_digit_dims"],
                        __td["digit_representation_size"],
                    ),
                    self.consumer_actions_batch_gpu_tensor,
                    self.consumer_rewards_batch_gpu_tensor,
                    consumer_optim,
                    __td["gamma"],
                    entropy_val=annealed_entropy_coef * __td["entropy"],
                    value_loss_weight=__td["value_loss_weight"],
                    reward_scale=consumer_reward_scale,
                    clip_grad_norm=self.train_dict.get("clip_grad_norm", None),
                )
                rewards.append(self.consumer_rewards_batch_gpu_tensor.mean().item())
            elif train_type == "firm":
                firm_reward_scale = self.agents_dict.get("firm_reward_scale", 1.0)
                policy_gradient_step(
                    firm_policy,
                    expand_to_digit_form(
                        self.firm_states_batch,
                        __ad["firm_digit_dims"],
                        __td["digit_representation_size"],
                    ),
                    self.firm_actions_batch,
                    self.firm_rewards_batch,
                    firm_optim,
                    __td["gamma"],
                    entropy_val=annealed_entropy_coef * __td["entropy"],
                    value_loss_weight=__td["value_loss_weight"],
                    actions_mask=None,
                    reward_scale=firm_reward_scale,
                    clip_grad_norm=self.train_dict.get("clip_grad_norm", None),
                )
                rewards.append(self.firm_rewards_batch.mean().item())
            elif train_type == "government":
                government_reward_scale = self.agents_dict.get(
                    "government_reward_scale", 1.0
                )
                policy_gradient_step(
                    government_policy,
                    expand_to_digit_form(
                        self.government_states_batch,
                        __ad["government_digit_dims"],
                        __td["digit_representation_size"],
                    ),
                    self.government_actions_batch,
                    self.government_rewards_batch,
                    government_optim,
                    __td["gamma"],
                    entropy_val=annealed_entropy_coef * __td["entropy"],
                    value_loss_weight=__td["value_loss_weight"],
                    actions_mask=None,
                    reward_scale=government_reward_scale,
                    clip_grad_norm=self.train_dict.get("clip_grad_norm", None),
                )
                rewards.append(self.government_rewards_batch.mean().item())
            pbar.set_postfix({"reward": rewards[-1]})
            if (epi % checkpoint) == 0:
                # save policy every checkpoint steps
                save_policy_parameters(
                    str(Path(self.save_dir) / f"br{train_type}"),
                    epi,
                    consumer_policy,
                    firm_policy,
                    government_policy,
                    self.freeze_firms,
                    self.freeze_govt,
                )
                save_dense_log(
                    str(Path(self.save_dir) / f"br{train_type}"),
                    epi,
                    agent_type_arrays,
                    agent_action_arrays,
                    agent_aux_arrays,
                )
        print(
            f"{train_type}: starting reward {rewards[0]}, "
            f"ending reward {rewards[-1]}, "
            f"improvement in reward after {num_episodes}: {rewards[-1] - rewards[0]}"
        )
        self.cuda_free_mem(block=block, grid=grid)
        return rewards
    def train(self):
        """Run the full multi-agent training loop.

        Builds (or freezes, via ``freeze_firms`` / ``freeze_govt``) the
        consumer, firm, and government policies, then rolls out episodes on
        the GPU and — per the configured curricula
        (``*_will_train_this_episode``) — updates each policy with either
        PPO or a policy-gradient step. Entropy coefficients and action masks
        are annealed over episodes, and no-Ponzi penalty multipliers are
        updated Lagrangian-style every ``lagr_num_steps`` episodes. Model
        parameters and dense logs are checkpointed periodically and once
        more after the loop ends.

        NOTE(review): with ``infinite_episodes`` the loop never exits, so
        the post-training save/cleanup below is unreachable in that mode.
        """
        __td = self.train_dict
        __ad = self.agents_dict
        # Create logdir
        os.makedirs(__td["save_dir"], exist_ok=True)
        # Constants
        num_iters = int(self.world_dict["maxtime"])
        num_consumers = __ad["num_consumers"]
        num_firms = __ad["num_firms"]
        num_governments = __ad["num_governments"]
        num_agents = num_consumers + num_firms + num_governments
        # CUDA params: defines data shape on the GPU
        # (one thread per agent, one block per batch entry)
        block = (num_agents, 1, 1)
        grid = (__td["batch_size"], 1)
        # Set seeds
        seed_everything(__td["seed"])
        self.cuda_init_random(np.int32(__td["seed"]), block=block, grid=grid)
        # --------------------------------------------
        # Define Consumer policy + optimizers
        # --------------------------------------------
        lr = __td["lr"]
        consumer_expanded_size = size_after_digit_expansion(
            __ad["consumer_state_dim"],
            __ad["consumer_digit_dims"],
            __td["digit_representation_size"],
        )
        # One consumption head per firm, plus hours-worked and which-firm heads.
        consumer_policy = IndependentPolicyNet(
            consumer_expanded_size,
            [__ad["consumer_num_consume_actions"]] * num_firms
            + [
                __ad["consumer_num_work_actions"],
                __ad["consumer_num_whichfirm_actions"],
            ],
            norm_consts=(
                torch.zeros(consumer_expanded_size).cuda(),  # don't center for now
                consumer_state_scaling_factors(self.cfg_dict),
            ),
        ).to("cuda")
        consumer_optim = torch.optim.Adam(
            consumer_policy.parameters(),
            lr=lr * self.agents_dict.get("consumer_lr_multiple", 1.0),
        )
        # --------------------------------------------
        # Define Firm policy + optimizers
        # --------------------------------------------
        firm_expanded_size = size_after_digit_expansion(
            __ad["firm_state_dim"],
            __ad["firm_digit_dims"],
            __td["digit_representation_size"],
        )
        # Frozen firms use a fixed deterministic policy and a no-op optimizer.
        if self.freeze_firms is not None:
            firm_policy = DeterministicPolicy(
                firm_expanded_size,
                __ad["firm_num_actions"],
                self.freeze_firms,
            )
            firm_optim = NoOpOptimizer()
        else:
            firm_policy = PolicyNet(
                firm_expanded_size,
                __ad["firm_num_actions"],
                norm_consts=(
                    torch.zeros(firm_expanded_size).cuda(),
                    firm_state_scaling_factors(self.cfg_dict),
                ),
            ).to("cuda")
            firm_optim = torch.optim.Adam(
                firm_policy.parameters(),
                lr=lr * self.agents_dict.get("firm_lr_multiple", 1.0),
            )
        # --------------------------------------------
        # Define Government policy + optimizers
        # --------------------------------------------
        government_expanded_size = size_after_digit_expansion(
            __ad["government_state_dim"],
            __ad["government_digit_dims"],
            __td["digit_representation_size"],
        )
        if self.freeze_govt is not None:
            government_policy = DeterministicPolicy(
                government_expanded_size,
                __ad["government_num_actions"],
                self.freeze_govt,
            )
            government_optim = NoOpOptimizer()
        else:
            government_policy = PolicyNet(
                government_expanded_size,
                __ad["government_num_actions"],
                norm_consts=(
                    torch.zeros(government_expanded_size).cuda(),
                    govt_state_scaling_factors(self.cfg_dict),
                ),
            ).to("cuda")
            government_optim = torch.optim.Adam(
                government_policy.parameters(),
                lr=lr * self.agents_dict.get("government_lr_multiple", 1.0),
            )
        # --------------------------------------------
        # Logging
        # --------------------------------------------
        # For looking up GPU tensors
        # --------------------------------------------
        agent_type_arrays = {
            "consumer": (
                self.consumer_states_batch_gpu_tensor,
                self.consumer_actions_batch_gpu_tensor,
                self.consumer_rewards_batch_gpu_tensor,
            ),
            "firm": (
                self.firm_states_batch,
                self.firm_actions_batch,
                self.firm_rewards_batch,
            ),
            "government": (
                self.government_states_batch,
                self.government_actions_batch,
                self.government_rewards_batch,
            ),
        }
        # NOTE(review): consumers map to the *work* action table only — confirm
        # this is the intended table for consumer dense logs.
        agent_action_arrays = {
            "consumer": __ad["consumer_work_actions_array"],
            "firm": __ad["firm_actions_array"],
            "government": __ad["government_actions_array"],
        }
        agent_aux_arrays = {
            "consumer": (self.consumer_aux_batch_gpu_tensor),
            "firm": (self.firm_aux_batch),
            "government": None,
        }
        # --------------------------------------------
        # Training policy XYZ starts at which step?
        # --------------------------------------------
        firm_no_ponzi_coef = self.agents_dict.get("firm_noponzi_start", 0.0)
        consumer_no_ponzi_coef = self.agents_dict.get("consumer_noponzi_start", 0.0)
        lagr_num_steps = self.train_dict.get("lagr_num_steps", 1)
        firm_training_start = self.agents_dict.get("firm_training_start", 0)
        consumer_training_start = self.agents_dict.get("consumer_training_start", 0)
        government_training_start = self.agents_dict.get("government_training_start", 0)
        firm_action_start = self.agents_dict.get("firm_begin_anneal_action", 0)
        government_action_start = self.agents_dict.get(
            "government_begin_anneal_action", 0
        )
        # --------------------------------------------
        # Training loop
        # --------------------------------------------
        if self.train_dict.get("infinite_episodes", False):
            epi_iterator = itertools.count(0, 1)
        else:
            epi_iterator = range(__td["num_episodes"])
        final_epi = None
        for epi in tqdm(epi_iterator):
            # Action masks are annealed relative to each type's start episode.
            firm_actions_mask = firm_action_mask(
                self.cfg_dict,
                max(epi - firm_action_start, 0),
            )
            government_actions_mask = government_action_mask(
                self.cfg_dict,
                max(epi - government_action_start, 0),
            )
            theta_coef = compute_theta_coef(self.cfg_dict, epi)
            # Reset environment for all agents
            self.cuda_reset_env(
                CudaTensorHolder(self.consumer_states_gpu_tensor),
                CudaTensorHolder(self.firm_states_gpu_tensor),
                CudaTensorHolder(self.government_states_gpu_tensor),
                self.consumer_states_checkpoint_gpu_pycuda,
                self.firm_states_checkpoint_gpu_pycuda,
                self.government_states_checkpoint_gpu_pycuda,
                theta_coef,
                block=block,
                grid=grid,
            )
            # Learning Loop
            for _iter in range(num_iters):
                # ------------------------
                # Run policy and get probs
                # ------------------------
                with torch.no_grad():
                    # here, we must perform digit scaling
                    consumer_probs_list, _ = consumer_policy(
                        expand_to_digit_form(
                            self.consumer_states_gpu_tensor,
                            __ad["consumer_digit_dims"],
                            __td["digit_representation_size"],
                        )
                    )
                    firm_probs, _ = firm_policy(
                        expand_to_digit_form(
                            self.firm_states_gpu_tensor,
                            __ad["firm_digit_dims"],
                            __td["digit_representation_size"],
                        ),
                        actions_mask=firm_actions_mask,
                    )
                    government_probs, _ = government_policy(
                        expand_to_digit_form(
                            self.government_states_gpu_tensor,
                            __ad["government_digit_dims"],
                            __td["digit_representation_size"],
                        ),
                        actions_mask=government_actions_mask,
                    )
                # ------------------------
                # Get action samples
                # ------------------------
                # Sample consumer actions using PyTorch here on GPU!
                self.sample_consumer_actions_and_store(consumer_probs_list)
                # Sample firms + govt actions using PyCUDA on GPU!
                self.cuda_sample_actions(
                    CudaTensorHolder(firm_probs),
                    self.firm_action_indices_gpu_pycuda,
                    self.firm_actions_gpu_pycuda,
                    CudaTensorHolder(government_probs),
                    self.government_action_indices_gpu_pycuda,
                    self.government_actions_gpu_pycuda,
                    block=block,
                    grid=grid,
                )
                # ------------------------
                # Step on GPU
                # ------------------------
                self.cuda_step(
                    CudaTensorHolder(
                        # size: batches x n_consumers x consumer_state float
                        self.consumer_states_gpu_tensor
                    ),
                    CudaTensorHolder(
                        # size: batches x n_consumers x consumer_action_dim float
                        self.consumer_actions_single_gpu_tensor
                    ),
                    # size: batches x n_consumers x 1 float
                    self.consumer_rewards_gpu_pycuda,
                    CudaTensorHolder(
                        # size: batches x episode x n_consumers x consumer_state float
                        self.consumer_states_batch_gpu_tensor
                    ),
                    CudaTensorHolder(self.consumer_rewards_batch_gpu_tensor),
                    CudaTensorHolder(self.firm_states_gpu_tensor),
                    self.firm_action_indices_gpu_pycuda,
                    self.firm_actions_gpu_pycuda,
                    self.firm_rewards_gpu_pycuda,
                    CudaTensorHolder(self.firm_states_batch),
                    CudaTensorHolder(self.firm_actions_batch),
                    CudaTensorHolder(self.firm_rewards_batch),
                    CudaTensorHolder(self.government_states_gpu_tensor),
                    self.government_action_indices_gpu_pycuda,
                    self.government_actions_gpu_pycuda,
                    self.government_rewards_gpu_pycuda,
                    CudaTensorHolder(self.government_states_batch),
                    CudaTensorHolder(self.government_actions_batch),
                    CudaTensorHolder(self.government_rewards_batch),
                    CudaTensorHolder(self.consumer_aux_batch_gpu_tensor),
                    CudaTensorHolder(self.firm_aux_batch),
                    np.int32(_iter),
                    block=block,
                    grid=grid,
                )
                # Record this step's sampled consumer action indices.
                self.consumer_actions_batch_gpu_tensor[
                    :, _iter, :, :
                ] = self.consumer_actions_index_single_gpu_tensor
            # ------------------------
            # Add penalty for no-Ponzi
            # ------------------------
            add_penalty_for_no_ponzi(
                self.firm_states_gpu_tensor,
                self.firm_rewards_batch,
                __ad["global_state_dim"],
                penalty_coef=firm_no_ponzi_coef,
            )
            add_penalty_for_no_ponzi(
                self.consumer_states_gpu_tensor,
                self.consumer_rewards_batch_gpu_tensor,
                __ad["global_state_dim"],
                penalty_coef=consumer_no_ponzi_coef,
                penalty_scale=__ad["consumer_penalty_scale"],
            )
            # add government rewards -- sum of consumer rewards
            update_government_rewards(
                self.government_rewards_batch,
                self.consumer_rewards_batch_gpu_tensor,
                self.firm_rewards_batch,
                self.cfg_dict,
            )
            # Save dense logs
            # ------------------------
            if (epi % __td["save_model_every"]) == 0:
                save_policy_parameters(
                    self.save_dir,
                    epi,
                    consumer_policy,
                    firm_policy,
                    government_policy,
                    self.freeze_firms,
                    self.freeze_govt,
                )
            if (epi % self.save_dense_every) == 0:
                save_dense_log(
                    self.save_dir,
                    epi,
                    agent_type_arrays,
                    agent_action_arrays,
                    agent_aux_arrays,
                )
            # --------------------------------
            # Curriculum: Train Consumers
            # --------------------------------
            if self.consumers_will_train_this_episode(epi):
                consumer_entropy_coef = anneal_entropy_coef(
                    self.agents_dict.get("consumer_anneal_entropy", None),
                    epi - consumer_training_start,
                )
                consumer_reward_scale = self.agents_dict.get(
                    "consumer_reward_scale", 1.0
                )
                if __td["use_ppo"]:
                    consumer_ppo_step(
                        consumer_policy,
                        expand_to_digit_form(
                            self.consumer_states_batch_gpu_tensor,
                            __ad["consumer_digit_dims"],
                            __td["digit_representation_size"],
                        ),
                        self.consumer_actions_batch_gpu_tensor,
                        self.consumer_rewards_batch_gpu_tensor,
                        consumer_optim,
                        __td["gamma"],
                        entropy_val=consumer_entropy_coef * __td["entropy"],
                        value_loss_weight=__td["value_loss_weight"],
                        reward_scale=consumer_reward_scale,
                        ppo_num_updates=__td["ppo_num_updates"],
                        clip_param=__td["ppo_clip_param"],
                        clip_grad_norm=self.train_dict.get("clip_grad_norm", None),
                    )
                else:
                    consumer_policy_gradient_step(
                        consumer_policy,
                        expand_to_digit_form(
                            self.consumer_states_batch_gpu_tensor,
                            __ad["consumer_digit_dims"],
                            __td["digit_representation_size"],
                        ),
                        self.consumer_actions_batch_gpu_tensor,
                        self.consumer_rewards_batch_gpu_tensor,
                        consumer_optim,
                        __td["gamma"],
                        entropy_val=consumer_entropy_coef * __td["entropy"],
                        value_loss_weight=__td["value_loss_weight"],
                        reward_scale=consumer_reward_scale,
                        clip_grad_norm=self.train_dict.get("clip_grad_norm", None),
                    )
                # Lagrangian update of the no-Ponzi penalty multiplier.
                if (epi % lagr_num_steps) == 0:
                    consumer_no_ponzi_coef = update_penalty_coef(
                        self.consumer_states_gpu_tensor,
                        __ad["global_state_dim"],
                        consumer_no_ponzi_coef,
                        penalty_step_size=__ad["consumer_noponzi_eta"],
                        penalty_scale=__ad["consumer_penalty_scale"],
                    )
            else:
                pass
            # --------------------------------
            # Curriculum: Train Firms
            # --------------------------------
            if self.firms_will_train_this_episode(epi):
                firm_entropy_coef = anneal_entropy_coef(
                    self.agents_dict.get("firm_anneal_entropy", None),
                    epi - firm_training_start,
                )
                firm_reward_scale = self.agents_dict.get("firm_reward_scale", 1.0)
                if __td["use_ppo"]:
                    ppo_step(
                        firm_policy,
                        expand_to_digit_form(
                            self.firm_states_batch,
                            __ad["firm_digit_dims"],
                            __td["digit_representation_size"],
                        ),
                        self.firm_actions_batch,
                        self.firm_rewards_batch,
                        firm_optim,
                        __td["gamma"],
                        entropy_val=firm_entropy_coef * __td["entropy"],
                        value_loss_weight=__td["value_loss_weight"],
                        actions_mask=firm_actions_mask,
                        reward_scale=firm_reward_scale,
                        ppo_num_updates=__td["ppo_num_updates"],
                        clip_param=__td["ppo_clip_param"],
                        clip_grad_norm=self.train_dict.get("clip_grad_norm", None),
                    )
                else:
                    policy_gradient_step(
                        firm_policy,
                        expand_to_digit_form(
                            self.firm_states_batch,
                            __ad["firm_digit_dims"],
                            __td["digit_representation_size"],
                        ),
                        self.firm_actions_batch,
                        self.firm_rewards_batch,
                        firm_optim,
                        __td["gamma"],
                        entropy_val=firm_entropy_coef * __td["entropy"],
                        value_loss_weight=__td["value_loss_weight"],
                        actions_mask=firm_actions_mask,
                        reward_scale=firm_reward_scale,
                        clip_grad_norm=self.train_dict.get("clip_grad_norm", None),
                    )
                # Lagrangian update of the no-Ponzi penalty multiplier.
                if (epi % lagr_num_steps) == 0:
                    firm_no_ponzi_coef = update_penalty_coef(
                        self.firm_states_gpu_tensor,
                        __ad["global_state_dim"],
                        firm_no_ponzi_coef,
                        penalty_step_size=__ad["firm_noponzi_eta"],
                    )
            else:
                pass
            # --------------------------------
            # Curriculum: Train Governments
            # --------------------------------
            if self.governments_will_train_this_episode(epi):
                government_entropy_coef = anneal_entropy_coef(
                    self.agents_dict.get("govt_anneal_entropy", None),
                    epi - government_training_start,
                )
                government_reward_scale = self.agents_dict.get(
                    "government_reward_scale", 1.0
                )
                if __td["use_ppo"]:
                    ppo_step(
                        government_policy,
                        expand_to_digit_form(
                            self.government_states_batch,
                            __ad["government_digit_dims"],
                            __td["digit_representation_size"],
                        ),
                        self.government_actions_batch,
                        self.government_rewards_batch,
                        government_optim,
                        __td["gamma"],
                        entropy_val=government_entropy_coef * __td["entropy"],
                        value_loss_weight=__td["value_loss_weight"],
                        actions_mask=government_actions_mask,
                        reward_scale=government_reward_scale,
                        ppo_num_updates=__td["ppo_num_updates"],
                        clip_param=__td["ppo_clip_param"],
                        clip_grad_norm=self.train_dict.get("clip_grad_norm", None),
                    )
                else:
                    policy_gradient_step(
                        government_policy,
                        expand_to_digit_form(
                            self.government_states_batch,
                            __ad["government_digit_dims"],
                            __td["digit_representation_size"],
                        ),
                        self.government_actions_batch,
                        self.government_rewards_batch,
                        government_optim,
                        __td["gamma"],
                        entropy_val=government_entropy_coef * __td["entropy"],
                        value_loss_weight=__td["value_loss_weight"],
                        actions_mask=government_actions_mask,
                        reward_scale=government_reward_scale,
                        clip_grad_norm=self.train_dict.get("clip_grad_norm", None),
                    )
            else:
                pass
            # Store the value of the final episode
            final_epi = epi
        # ------------------------------------------------------------------
        # Post-Training (may not reach this with an infinite training loop!)
        # Save FINAL dense log.
        # ------------------------------------------------------------------
        save_dense_log(
            self.save_dir,
            "final",
            agent_type_arrays,
            agent_action_arrays,
            agent_aux_arrays,
        )
        save_policy_parameters(
            self.save_dir,
            final_epi,
            consumer_policy,
            firm_policy,
            government_policy,
            self.freeze_firms,
            self.freeze_govt,
        )
        # ------------------------------------------------------------------
        # Clean up
        # ------------------------------------------------------------------
        self.cuda_free_mem(block=block, grid=grid)
class CudaTensorHolder(pycuda.driver.PointerHolderBase):
    """
    Exposes a torch tensor's device address so PyCUDA kernel launches can
    treat the tensor as a raw device pointer argument.
    """

    def __init__(self, t):
        super().__init__()
        # Keep a reference so the tensor (and its device memory) stays alive
        # for the lifetime of this wrapper.
        self.t = t
        self.gpudata = self.get_pointer()

    def get_pointer(self):
        return self.t.data_ptr()
| 38.724495 | 88 | 0.567527 | 55,896 | 0.747503 | 0 | 0 | 0 | 0 | 0 | 0 | 13,173 | 0.176164 |
52b53fc840acc1bf09eb244812212a0134629fbd | 11,030 | py | Python | tests/test_transforms/test_encoders/test_categorical_transform.py | Pacman1984/etna | 9b3ccb980e576d56858f14aca2e06ce2957b0fa9 | [
"Apache-2.0"
] | 96 | 2021-09-05T06:29:34.000Z | 2021-11-07T15:22:54.000Z | tests/test_transforms/test_encoders/test_categorical_transform.py | Pacman1984/etna | 9b3ccb980e576d56858f14aca2e06ce2957b0fa9 | [
"Apache-2.0"
] | 188 | 2021-09-06T15:59:58.000Z | 2021-11-17T09:34:16.000Z | tests/test_transforms/test_encoders/test_categorical_transform.py | Pacman1984/etna | 9b3ccb980e576d56858f14aca2e06ce2957b0fa9 | [
"Apache-2.0"
] | 8 | 2021-09-06T09:18:35.000Z | 2021-11-11T21:18:39.000Z | import numpy as np
import pandas as pd
import pytest
from etna.datasets import TSDataset
from etna.datasets import generate_ar_df
from etna.datasets import generate_const_df
from etna.datasets import generate_periodic_df
from etna.metrics import R2
from etna.models import LinearPerSegmentModel
from etna.transforms import FilterFeaturesTransform
from etna.transforms.encoders.categorical import LabelEncoderTransform
from etna.transforms.encoders.categorical import OneHotEncoderTransform
@pytest.fixture
def two_df_with_new_values():
    """Two 2-segment wide frames over the same dates.

    ``df2`` contains categories in ``regressor_0`` that ``df1`` does not
    (9 for segment_0, 0 for segment_1) — used to exercise encoder behavior
    on values unseen at fit time.
    """
    d = {
        "timestamp": list(pd.date_range(start="2021-01-01", end="2021-01-03"))
        + list(pd.date_range(start="2021-01-01", end="2021-01-03")),
        "segment": ["segment_0", "segment_0", "segment_0", "segment_1", "segment_1", "segment_1"],
        "regressor_0": [5, 8, 5, 9, 5, 9],
        "target": [1, 2, 3, 4, 5, 6],
    }
    df1 = TSDataset.to_dataset(pd.DataFrame(d))
    d = {
        "timestamp": list(pd.date_range(start="2021-01-01", end="2021-01-03"))
        + list(pd.date_range(start="2021-01-01", end="2021-01-03")),
        "segment": ["segment_0", "segment_0", "segment_0", "segment_1", "segment_1", "segment_1"],
        "regressor_0": [5, 8, 9, 5, 0, 0],  # 9 and 0 are new vs. df1
        "target": [1, 2, 3, 4, 5, 6],
    }
    df2 = TSDataset.to_dataset(pd.DataFrame(d))
    return df1, df2
@pytest.fixture
def df_for_ohe_encoding():
    """Wide frame with four categorical regressors plus the frames expected
    after one-hot encoding regressor_0..regressor_2 into ``test_*`` columns.

    Each expected column is the float indicator of one distinct value
    (values taken in ascending order), stored with category dtype.
    """
    df_to_forecast = generate_ar_df(10, start_time="2021-01-01", n_segments=1)
    d = {
        "timestamp": pd.date_range(start="2021-01-01", end="2021-01-12"),
        "regressor_0": [5, 8, 5, 8, 5, 8, 5, 8, 5, 8, 5, 8],
        "regressor_1": [9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5],
        "regressor_2": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        "regressor_3": [1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7],
    }
    df_regressors = pd.DataFrame(d)
    df_regressors["segment"] = "segment_0"
    df_to_forecast = TSDataset.to_dataset(df_to_forecast)
    df_regressors = TSDataset.to_dataset(df_regressors)
    tsdataset = TSDataset(df=df_to_forecast, freq="D", df_exog=df_regressors)
    # regressor_0 has two values {5, 8} -> two indicator columns.
    answer_on_regressor_0 = tsdataset.df.copy()["segment_0"]
    answer_on_regressor_0["test_0"] = answer_on_regressor_0["regressor_0"].apply(lambda x: float(x == 5))
    answer_on_regressor_0["test_1"] = answer_on_regressor_0["regressor_0"].apply(lambda x: float(x == 8))
    answer_on_regressor_0["test_0"] = answer_on_regressor_0["test_0"].astype("category")
    answer_on_regressor_0["test_1"] = answer_on_regressor_0["test_1"].astype("category")
    # regressor_1 has two values {5, 9}.
    answer_on_regressor_1 = tsdataset.df.copy()["segment_0"]
    answer_on_regressor_1["test_0"] = answer_on_regressor_1["regressor_1"].apply(lambda x: float(x == 5))
    answer_on_regressor_1["test_1"] = answer_on_regressor_1["regressor_1"].apply(lambda x: float(x == 9))
    answer_on_regressor_1["test_0"] = answer_on_regressor_1["test_0"].astype("category")
    answer_on_regressor_1["test_1"] = answer_on_regressor_1["test_1"].astype("category")
    # regressor_2 is constant (all 0) -> a single indicator column.
    answer_on_regressor_2 = tsdataset.df.copy()["segment_0"]
    answer_on_regressor_2["test_0"] = answer_on_regressor_2["regressor_2"].apply(lambda x: float(x == 0))
    answer_on_regressor_2["test_0"] = answer_on_regressor_2["test_0"].astype("category")
    return tsdataset.df, (answer_on_regressor_0, answer_on_regressor_1, answer_on_regressor_2)
@pytest.fixture
def df_for_label_encoding():
    """Wide frame with categorical regressors plus the frames expected after
    label-encoding regressor_0..regressor_2 into a single ``test`` column.

    Each expected ``test`` column maps the smaller category to 0.0 and the
    larger one to 1.0 (category dtype).
    """
    df_to_forecast = generate_ar_df(10, start_time="2021-01-01", n_segments=1)
    d = {
        "timestamp": pd.date_range(start="2021-01-01", end="2021-01-12"),
        "regressor_0": [5, 8, 5, 8, 5, 8, 5, 8, 5, 8, 5, 8],
        "regressor_1": [9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5],
        "regressor_2": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        "regressor_3": [1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7],
    }
    df_regressors = pd.DataFrame(d)
    df_regressors["segment"] = "segment_0"
    df_to_forecast = TSDataset.to_dataset(df_to_forecast)
    df_regressors = TSDataset.to_dataset(df_regressors)
    tsdataset = TSDataset(df=df_to_forecast, freq="D", df_exog=df_regressors)
    answer_on_regressor_0 = tsdataset.df.copy()["segment_0"]
    answer_on_regressor_0["test"] = answer_on_regressor_0["regressor_0"].apply(lambda x: float(x == 8))
    answer_on_regressor_0["test"] = answer_on_regressor_0["test"].astype("category")
    answer_on_regressor_1 = tsdataset.df.copy()["segment_0"]
    answer_on_regressor_1["test"] = answer_on_regressor_1["regressor_1"].apply(lambda x: float(x == 9))
    answer_on_regressor_1["test"] = answer_on_regressor_1["test"].astype("category")
    # regressor_2 is constant 0 -> its single category encodes to 0.0
    # (float(x == 1) is 0.0 for every row here).
    answer_on_regressor_2 = tsdataset.df.copy()["segment_0"]
    answer_on_regressor_2["test"] = answer_on_regressor_2["regressor_2"].apply(lambda x: float(x == 1))
    answer_on_regressor_2["test"] = answer_on_regressor_2["test"].astype("category")
    return tsdataset.df, (answer_on_regressor_0, answer_on_regressor_1, answer_on_regressor_2)
@pytest.fixture
def df_for_naming():
    """Frame with two exogenous columns named ``regressor_1`` and ``2`` —
    used to check how encoders derive output column names (the ``regressor_``
    prefix should only be kept for columns that already carry it).
    """
    df_to_forecast = generate_ar_df(10, start_time="2021-01-01", n_segments=1)
    df_regressors = generate_periodic_df(12, start_time="2021-01-01", scale=10, period=2, n_segments=2)
    df_regressors = df_regressors.pivot(index="timestamp", columns="segment").reset_index()
    df_regressors.columns = ["timestamp"] + ["regressor_1", "2"]
    df_regressors["segment"] = "segment_0"
    df_to_forecast = TSDataset.to_dataset(df_to_forecast)
    df_regressors = TSDataset.to_dataset(df_regressors)
    tsdataset = TSDataset(df=df_to_forecast, freq="D", df_exog=df_regressors)
    return tsdataset.df
def test_label_encoder_simple(df_for_label_encoding):
    """Test that LabelEncoderTransform works correctly in simple cases.

    For each of the three regressor columns, fit the encoder and compare
    the transformed frame against the precomputed expected frame.
    """
    df, answers = df_for_label_encoding
    for i in range(3):
        le = LabelEncoderTransform(in_column=f"regressor_{i}", out_column="test")
        le.fit(df)
        # Transform once and reuse the result; the original called
        # transform() twice per iteration for no reason.
        transformed = le.transform(df)["segment_0"]
        cols = transformed.columns
        assert transformed[cols].equals(answers[i][cols])
def test_ohe_encoder_simple(df_for_ohe_encoding):
    """Test that OneHotEncoderTransform works correctly in a simple case.

    For each of the three regressor columns, fit the encoder and compare
    the transformed frame against the precomputed expected frame.
    """
    df, answers = df_for_ohe_encoding
    for i in range(3):
        ohe = OneHotEncoderTransform(in_column=f"regressor_{i}", out_column="test")
        ohe.fit(df)
        # Transform once and reuse the result; the original called
        # transform() twice per iteration for no reason.
        transformed = ohe.transform(df)["segment_0"]
        cols = transformed.columns
        assert transformed[cols].equals(answers[i][cols])
def test_value_error_label_encoder(df_for_label_encoding):
    """LabelEncoderTransform must reject an unknown imputation strategy."""
    df, _ = df_for_label_encoding
    with pytest.raises(ValueError, match="The strategy"):
        encoder = LabelEncoderTransform(in_column="target", strategy="new_vlue")
        encoder.fit(df)
        encoder.transform(df)
# Expected matrices are (timestamp x [regressor_0, encoded, target] per
# segment); unseen categories (9 in segment_0, 0 in segment_1) are imputed
# per strategy: "new_value" -> -1, "none" -> NaN, "mean" -> mean of the
# encodings seen at fit time.
@pytest.mark.parametrize(
    "strategy, expected_values",
    [
        ("new_value", np.array([[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, -1, 5], [9, -1, 3, 0, -1, 6]])),
        ("none", np.array([[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, np.nan, 5], [9, np.nan, 3, 0, np.nan, 6]])),
        ("mean", np.array([[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, 0, 5], [9, 0.5, 3, 0, 0, 6]])),
    ],
)
def test_new_value_label_encoder(two_df_with_new_values, strategy, expected_values):
    """Test LabelEncoderTransform correct works with unknown values."""
    df1, df2 = two_df_with_new_values
    le = LabelEncoderTransform(in_column="regressor_0", strategy=strategy)
    le.fit(df1)  # fit on df1, transform df2 which contains new categories
    np.testing.assert_array_almost_equal(le.transform(df2).values, expected_values)
def test_new_value_ohe_encoder(two_df_with_new_values):
    """Test OneHotEncoderTransform correct works with unknown values.

    Rows holding categories unseen at fit time must get all-zero indicator
    columns (rows 2 and 3 below).
    """
    expected_values = np.array(
        [
            [5.0, 1.0, 1.0, 0.0, 5.0, 4.0, 1.0, 0.0],
            [8.0, 2.0, 0.0, 1.0, 0.0, 5.0, 0.0, 0.0],
            [9.0, 3.0, 0.0, 0.0, 0.0, 6.0, 0.0, 0.0],
        ]
    )
    df1, df2 = two_df_with_new_values
    ohe = OneHotEncoderTransform(in_column="regressor_0", out_column="targets")
    ohe.fit(df1)  # fit on df1, transform df2 which contains new categories
    np.testing.assert_array_almost_equal(ohe.transform(df2).values, expected_values)
def test_naming_ohe_encoder(two_df_with_new_values):
    """OneHotEncoderTransform should produce the expected set of columns."""
    df1, df2 = two_df_with_new_values
    encoder = OneHotEncoderTransform(in_column="regressor_0", out_column="targets")
    encoder.fit(df1)
    features = ["target", "targets_0", "targets_1", "regressor_0"]
    expected = {(seg, feat) for seg in ("segment_0", "segment_1") for feat in features}
    assert expected == set(encoder.transform(df2).columns.values)
@pytest.mark.parametrize(
    "in_column, prefix",
    [("2", ""), ("regressor_1", "regressor_")],
)
def test_naming_ohe_encoder_no_out_column(df_for_naming, in_column, prefix):
    """Test OneHotEncoderTransform gives the correct columns with no out_column.

    Without an explicit out_column the generated names are derived from the
    transform's repr; the ``regressor_`` prefix is kept only when the input
    column already had it.
    """
    df = df_for_naming
    ohe = OneHotEncoderTransform(in_column=in_column)
    ohe.fit(df)
    # Idiom fix: repr(ohe) replaces the redundant str(ohe.__repr__()).
    answer = set(list(df["segment_0"].columns) + [prefix + repr(ohe) + "_0", prefix + repr(ohe) + "_1"])
    assert answer == set(ohe.transform(df)["segment_0"].columns.values)
@pytest.mark.parametrize(
    "in_column, prefix",
    [("2", ""), ("regressor_1", "regressor_")],
)
def test_naming_label_encoder_no_out_column(df_for_naming, in_column, prefix):
    """Test LabelEncoderTransform gives the correct columns with no out_column.

    Without an explicit out_column the generated name is derived from the
    transform's repr; the ``regressor_`` prefix is kept only when the input
    column already had it.
    """
    df = df_for_naming
    le = LabelEncoderTransform(in_column=in_column)
    le.fit(df)
    # Idiom fix: repr(le) replaces the redundant str(le.__repr__()).
    answer = set(list(df["segment_0"].columns) + [prefix + repr(le)])
    assert answer == set(le.transform(df)["segment_0"].columns.values)
@pytest.fixture
def ts_for_ohe_sanity():
    """Dataset whose target is a deterministic function (square plus tiny
    noise) of a periodic categorical regressor — a one-hot encoding of that
    regressor should make the series almost perfectly learnable.
    """
    df_to_forecast = generate_const_df(periods=100, start_time="2021-01-01", scale=0, n_segments=1)
    # Regressor extends 20 periods beyond the target so a future frame exists.
    df_regressors = generate_periodic_df(periods=120, start_time="2021-01-01", scale=10, period=4, n_segments=1)
    df_regressors = df_regressors.pivot(index="timestamp", columns="segment").reset_index()
    df_regressors.columns = ["timestamp"] + [f"regressor_{i}" for i in range(1)]
    df_regressors["segment"] = "segment_0"
    df_to_forecast = TSDataset.to_dataset(df_to_forecast)
    df_regressors = TSDataset.to_dataset(df_regressors)
    rng = np.random.default_rng(12345)
    def f(x):
        # target = regressor^2 + small gaussian noise (seeded, reproducible)
        return x ** 2 + rng.normal(0, 0.01)
    df_to_forecast["segment_0", "target"] = df_regressors["segment_0"]["regressor_0"][:100].apply(f)
    ts = TSDataset(df=df_to_forecast, freq="D", df_exog=df_regressors)
    return ts
def test_ohe_sanity(ts_for_ohe_sanity):
    """End-to-end sanity check: with the one-hot feature the linear model
    should fit the deterministic target almost perfectly."""
    horizon = 10
    train_ts, test_ts = ts_for_ohe_sanity.train_test_split(test_size=horizon)
    transforms = [
        OneHotEncoderTransform(in_column="regressor_0"),
        FilterFeaturesTransform(exclude=["regressor_0"]),
    ]
    train_ts.fit_transform(transforms)
    model = LinearPerSegmentModel()
    model.fit(train_ts)
    forecast_ts = model.forecast(train_ts.make_future(horizon))
    score = R2()(test_ts, forecast_ts)["segment_0"]
    assert 1 - score < 1e-5
| 44.837398 | 114 | 0.685766 | 0 | 0 | 0 | 0 | 7,718 | 0.699728 | 0 | 0 | 2,294 | 0.207978 |
52b58fbb3b5abd32caee769dd685ed00d95121c9 | 1,467 | py | Python | platform/core/polyaxon/db/migrations/0017_auto_20190104_2032.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | platform/core/polyaxon/db/migrations/0017_auto_20190104_2032.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | platform/core/polyaxon/db/migrations/0017_auto_20190104_2032.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.1.3 on 2019-01-04 20:32
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import libs.spec_validation
class Migration(migrations.Migration):
    """Auto-generated by Django 2.1.3.

    Adds JSONB ``persistence`` fields to BuildJob and Project (validated by
    ``validate_persistence_config``) and relaxes NodeGPU ``memory``/``name``/
    ``serial`` to be blank/nullable.
    """
    dependencies = [
        ('db', '0016_experimentjob_sequence_and_deleted_flag_tpu_resources'),
    ]
    operations = [
        migrations.AddField(
            model_name='buildjob',
            name='persistence',
            field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, help_text='The persistence definition.', null=True, validators=[libs.spec_validation.validate_persistence_config]),
        ),
        migrations.AddField(
            model_name='project',
            name='persistence',
            field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, help_text='The persistence definition.', null=True, validators=[libs.spec_validation.validate_persistence_config]),
        ),
        migrations.AlterField(
            model_name='nodegpu',
            name='memory',
            field=models.BigIntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='nodegpu',
            name='name',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AlterField(
            model_name='nodegpu',
            name='serial',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
    ]
| 35.780488 | 192 | 0.641445 | 1,302 | 0.887526 | 0 | 0 | 0 | 0 | 0 | 0 | 263 | 0.179277 |
52b64d6a3cc20fe5b367848db8ee78806a620b35 | 1,591 | py | Python | validate.py | AVturbine/mapmaker | c70d748c340158a9be08df088e21cc70f0b84753 | [
"MIT"
] | 3 | 2018-01-14T21:24:11.000Z | 2018-07-26T14:11:01.000Z | validate.py | AVturbine/mapmaker | c70d748c340158a9be08df088e21cc70f0b84753 | [
"MIT"
] | 3 | 2018-01-14T02:12:12.000Z | 2018-01-25T16:02:55.000Z | validate.py | AVturbine/mapmaker | c70d748c340158a9be08df088e21cc70f0b84753 | [
"MIT"
] | 3 | 2018-01-13T04:30:27.000Z | 2018-01-19T00:10:16.000Z | MAP_HEIGHT_MIN = 20
# Map dimension bounds (cells).
MAP_HEIGHT_MAX = 50
MAP_WIDTH_MIN = 20
MAP_WIDTH_MAX = 50
# Per-cell karbonite bounds on the map.
MAP_KARBONITE_MIN = 0
MAP_KARBONITE_MAX = 50
# Allowed spacing (in rounds) between consecutive asteroid strikes.
ASTEROID_ROUND_MIN = 10
ASTEROID_ROUND_MAX = 20
# Allowed karbonite payload per asteroid.
ASTEROID_KARB_MIN = 20
ASTEROID_KARB_MAX = 100
# Allowed orbital flight-time band (center +/- amplitude must stay inside).
ORBIT_FLIGHT_MIN = 50
ORBIT_FLIGHT_MAX = 200
# Total number of rounds in a game.
ROUND_LIMIT = 1000
def validate_map_dims(h, w):
    """Return True when both map dimensions fall inside the allowed bounds."""
    height_ok = MAP_HEIGHT_MIN <= h <= MAP_HEIGHT_MAX
    width_ok = MAP_WIDTH_MIN <= w <= MAP_WIDTH_MAX
    return height_ok and width_ok
def validate_num_bots(bot_list):
    """A roster is valid when it holds between 1 and 6 bots and the count is even."""
    count = len(bot_list)
    return count % 2 == 0 and 0 < count <= 6
def validate_asteroid_pattern(asteroid_list):
    """Validate an asteroid schedule of (round, karbonite) pairs.

    Every entry must land within the game and carry an allowed payload; after
    sorting, the first/last strikes must be close enough to the game's start
    and end, and consecutive strikes must be spaced within the allowed band.
    Prints a short reason and returns False on the first violation.
    """
    round_numbers = [entry[0] for entry in asteroid_list]
    karb_amounts = [entry[1] for entry in asteroid_list]
    for round_no, karb in zip(round_numbers, karb_amounts):
        if not 1 <= round_no <= ROUND_LIMIT:
            print("fail round limit check")
            return False
        if not ASTEROID_KARB_MIN <= karb <= ASTEROID_KARB_MAX:
            print("fail karb limit check")
            return False
    round_numbers.sort()
    if round_numbers[0] > ASTEROID_ROUND_MAX:
        print("fail minimum round")
        return False
    if ROUND_LIMIT - round_numbers[-1] > ASTEROID_ROUND_MAX:
        print("fail maximum round")
        return False
    for earlier, later in zip(round_numbers, round_numbers[1:]):
        diff = later - earlier
        if not ASTEROID_ROUND_MIN <= diff <= ASTEROID_ROUND_MAX:
            print("fail diff {}".format(diff))
            return False
    return True
def validate_orbital_pattern(amplitude, period, center):
    """The orbit flight time (center +/- amplitude) must stay inside the
    [ORBIT_FLIGHT_MIN, ORBIT_FLIGHT_MAX] band. ``period`` is accepted but
    not constrained here."""
    lowest = center - amplitude
    highest = center + amplitude
    return ORBIT_FLIGHT_MIN <= lowest and highest <= ORBIT_FLIGHT_MAX
52b68564244ef7d53bf8d8e23a3a806ccdf7449e | 544 | py | Python | webserver/testserver/main/urls.py | frankovacevich/aleph | 9b01dcabf3c074e8617e50fffd35c9ee1960eab6 | [
"MIT"
] | null | null | null | webserver/testserver/main/urls.py | frankovacevich/aleph | 9b01dcabf3c074e8617e50fffd35c9ee1960eab6 | [
"MIT"
] | null | null | null | webserver/testserver/main/urls.py | frankovacevich/aleph | 9b01dcabf3c074e8617e50fffd35c9ee1960eab6 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
# Route table for the app: index/home pages, auth views, and the explorer.
# Commented-out entries are intentionally disabled routes kept for reference.
urlpatterns = [
    path('', views.index, name='index'),
    path('home', views.home, name='home'),
    path('login', views.ulogin, name='login'),
    path('logout', views.ulogout, name='logout'),
    path('password_change', views.password_change, name='password_change'),
    # path('users', views.users, name='users'),
    path('explorer', views.explorer, name='explorer'),
    # path('reports/base_report', views.resources, name='reports'),
    # path('docs', views.docs, name='docs'),
]
52b72cd9290f8164c698fae5869224cfa6d4ff36 | 8,603 | py | Python | exporter.py | mrDoctorWho/VK-Exporter | ecdcc818b2b375b2a42f0b106706c62cf833e1c4 | [
"MIT"
] | 9 | 2015-01-16T09:24:47.000Z | 2021-04-29T15:41:56.000Z | exporter.py | mrDoctorWho/VK-Exporter | ecdcc818b2b375b2a42f0b106706c62cf833e1c4 | [
"MIT"
] | 5 | 2018-04-02T05:50:20.000Z | 2018-06-05T23:40:35.000Z | exporter.py | mrDoctorWho/VK-Exporter | ecdcc818b2b375b2a42f0b106706c62cf833e1c4 | [
"MIT"
] | 1 | 2019-10-23T05:29:20.000Z | 2019-10-23T05:29:20.000Z | #!/usr/bin/env python2
# coding: utf-8
# based on the vk4xmpp gateway, v2.25
# © simpleApps, 2013 — 2014.
# Program published under MIT license.
import gc
import json
import logging
import os
import re
import signal
import sys
import threading
import time
import urllib
# Resolve the entry script's location and run relative to it so the bundled
# "library" modules and data paths resolve regardless of the caller's CWD.
core = getattr(sys.modules["__main__"], "__file__", None)
if core:
	core = os.path.abspath(core)
	root = os.path.dirname(core)
	if root:
		os.chdir(root)
sys.path.insert(0, "library")
# Python 2 only: force UTF-8 for implicit str<->unicode coercions.
reload(sys).setdefaultencoding("utf-8")
from datetime import datetime
from webtools import *
from writer import *
from stext import *
from stext import _
setVars("ru", root)  # localization: Russian strings, rooted at the script dir
Semaphore = threading.Semaphore()
LOG_LEVEL = logging.DEBUG
EXTENSIONS = []  # handler callables registered by extension scripts
MAXIMUM_FORWARD_DEPTH = 100
pidFile = "pidFile.txt"
logFile = "vk4xmpp.log"
crashDir = "crash"
PhotoSize = "photo_100"
# File logger for the whole exporter.
logger = logging.getLogger("vk4xmpp")
logger.setLevel(LOG_LEVEL)
loggerHandler = logging.FileHandler(logFile)
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s %(message)s",
	"[%d.%m.%Y %H:%M:%S]")
loggerHandler.setFormatter(formatter)
logger.addHandler(loggerHandler)
import vkapi as api
## Escaping xmpp non-allowed chars
badChars = [x for x in xrange(32) if x not in (9, 10, 13)] + [57003, 65535]
escape = re.compile("|".join(unichr(x) for x in badChars), re.IGNORECASE | re.UNICODE | re.DOTALL).sub
# cmp-style comparator: orders message dicts by their "mid", ascending.
sortMsg = lambda msgOne, msgTwo: msgOne.get("mid", 0) - msgTwo.get("mid", 0)
# True when an extension module with the given name ships with the exporter.
require = lambda name: os.path.exists("extensions/%s.py" % name)
def registerHandler(type, handler):
	# Registers an extension message handler.
	# NOTE(review): the "type" argument is accepted but never used (and it
	# shadows the builtin) — every handler is appended regardless; confirm
	# whether per-type dispatch was intended.
	EXTENSIONS.append(handler)
def loadExtensions(dir):
	"""
	Execute every non-hidden file in ``dir`` in this module's globals,
	letting extension scripts register their handlers.
	"""
	for entry in os.listdir(dir):
		if entry.startswith("."):
			continue  # skip hidden files such as .gitignore
		execfile("%s/%s" % (dir, entry), globals())
def execute(handler, list=()):
	"""
	Call handler(*list) and map outcomes to sentinel results:
	the handler's return value on success, 1 on SystemExit,
	-1 (after writing a crash log) on any other exception.
	"""
	try:
		return handler(*list)
	except SystemExit:
		return 1
	except Exception:
		crashLog(handler.func_name)
		return -1
def apply(instance, args=()):
	"""
	Call instance(*args); return its result, or None when it raises.
	NOTE: intentionally shadows the Python 2 builtin apply().
	"""
	try:
		return instance(*args)
	except Exception:
		return None
def runThread(func, args=(), name=None):
	"""
	Run func(*args) (wrapped by execute) in a new thread.
	If the thread cannot be started, write a crash log.
	"""
	thr = threading.Thread(target=execute, args=(func, args), name=name or func.func_name)
	try:
		thr.start()
	except threading.ThreadError:
		# Bug fix: was crashlog(...) (lowercase), which raised NameError
		# instead of recording the failure — the helper is named crashLog
		# (see its use in execute/VK.auth).
		crashLog("runThread.%s" % name)
class VK(object):
	"""
	The base class that contains most of the functions to work with VK.
	Holds the API binding (self.engine), the online flag, and the cached
	user id; all requests funnel through self.method().
	"""
	def __init__(self):
		self.online = False
		self.userID = 0
		self.friends_fields = set(["screen_name"])  # default extra fields for friends.get/users.get
		logger.debug("VK.__init__")
	# Shortcut to the engine's current access token.
	getToken = lambda self: self.engine.token
	def checkData(self):
		"""
		Validates the stored token.
		Raises api.TokenError if the token is invalid or missing.
		"""
		logger.debug("VK: checking data")
		if self.engine.token:
			logger.debug("VK.checkData: trying to use token")
			if not self.checkToken():
				logger.error("VK.checkData: token invalid: %s" % self.engine.token)
				raise api.TokenError("Token is invalid: %s" % (self.engine.token))
		else:
			# NOTE(review): self.source is not set anywhere in this file —
			# this error path would itself raise AttributeError; confirm.
			raise api.TokenError("%s, Where the hell is your token?" % self.source)
	def checkToken(self):
		"""
		Checks the api token by calling isAppUser; any API error or a
		non-numeric response means the token is unusable.
		"""
		try:
			int(self.method("isAppUser", force=True))
		except (api.VkApiError, TypeError):
			return False
		return True
	def auth(self, token=None, raise_exc=False):
		"""
		Initializes the self.engine API binding with the given token and
		validates it via checkData(). Returns True and sets self.online on
		success; returns False (or re-raises AuthError when raise_exc is
		set) on failure.
		"""
		logger.debug("VK.auth %s token" % ("with" if token else "without"))
		self.engine = api.APIBinding(token=token)
		try:
			self.checkData()
		except api.AuthError as e:
			logger.error("VK.auth failed with error %s" % (e.message))
			if raise_exc:
				raise
			return False
		except Exception:
			crashLog("VK.auth")
			return False
		logger.debug("VK.auth completed")
		self.online = True
		return True
	def method(self, method, args=None, nodecode=False, force=False):
		"""
		This is a duplicate function of self.engine.method
		Needed to handle errors properly exactly in __main__
		Parameters:
			method: obviously VK API method
			args: method aruments
			nodecode: decode flag (make json.loads or not)
			force: says that method will be executed even the captcha and not online
		See library/vkapi.py for more information about exceptions
		Returns method result (an empty dict when skipped or failed)
		"""
		args = args or {}
		result = {}
		# Skip the call while a captcha is pending or the session is offline
		# (unless force is set).
		if not self.engine.captcha and (self.online or force):
			try:
				result = self.engine.method(method, args, nodecode)
			except api.InternalServerError as e:
				logger.error("VK: internal server error occurred while executing method(%s) (%s)" % (method, e.message))
			except api.NetworkNotFound:
				logger.critical("VK: network is unavailable. Is vk down or you have network problems?")
				self.online = False
			except api.VkApiError as e:
				logger.error("VK: apiError %s" % (e.message))
				self.online = False
		return result
	def disconnect(self):
		"""
		Marks the session offline and fires account.setOffline in a
		background thread (forced, so it runs despite the offline flag).
		"""
		logger.debug("VK: user has left")
		self.online = False
		runThread(self.method, ("account.setOffline", None, True, True))
	def getFriends(self, fields=None):
		"""
		Executes friends.get and formats it in key-values style
		Example: {1: {"name": "Pavel Durov", "online": False}}
		Parameter fields is needed to receive advanced fields which will be added in result values
		"""
		fields = fields or self.friends_fields
		# chr(44) is "," — joins the requested field names.
		raw = self.method("friends.get", {"fields": str.join(chr(44), fields)}) or ()
		friends = {}
		for friend in raw:
			uid = friend["uid"]
			online = friend["online"]
			# chr(32) is " " — full name, with xmpp-forbidden chars stripped.
			name = escape("", str.join(chr(32), (friend["first_name"], friend["last_name"])))
			friends[uid] = {"name": name, "online": online}
			for key in fields:
				if key != "screen_name": # screen_name is default
					friends[uid][key] = friend.get(key)
		return friends
	def getMessages(self, count=5, mid=0):
		"""
		Gets the last `count` incoming unread messages; when mid is given,
		fetches messages after that id instead.
		"""
		values = {"out": 0, "filters": 1, "count": count}
		if mid:
			del values["count"], values["filters"]
			values["last_message_id"] = mid
		return self.method("messages.get", values)
	def getUserID(self):
		"""
		Fetches the current user's id and caches it on self.userID.
		"""
		self.userID = self.method("execute.getUserID")
		return self.userID
	def getUserData(self, uid, fields=None):
		"""
		Gets user data. Such as name, photo, etc
		Will request method users.get
		Default fields is ["screen_name"]
		Falls back to "None" placeholders when the API returns nothing.
		"""
		if not fields:
			fields = self.friends_fields
		data = self.method("users.get", {"fields": ",".join(fields), "user_ids": uid}) or {}
		if not data:
			data = {"name": "None"}
			for key in fields:
				data[key] = "None"
		else:
			data = data.pop()
			data["name"] = escape("", str.join(chr(32), (data.pop("first_name"), data.pop("last_name"))))
		return data
	def getMessageHistory(self, count, uid, rev=0, start=0):
		"""
		Gets up to `count` messages of the dialog with user `uid`.
		NOTE(review): "start" is passed straight through to the API — confirm
		it matches the expected parameter name (e.g. start_message_id).
		"""
		values = {"count": count, "user_id": uid, "rev": rev, "start": start}
		return self.method("messages.getHistory", values)
# Log-line template filled from the export loop's locals (date, name, body)
# via vars(). NOTE: the name shadows the builtin format().
format = "[%(date)s] <%(name)s> %(body)s\n"
if not os.path.exists("logs"):
	os.makedirs("logs")
loadExtensions("extensions")
# https://oauth.vk.com/authorize?scope=69638&redirect_uri=https%3A%2F%2Foauth.vk.com%2Fblank.html&display=mobile&client_id=3789129&response_type=token
print "\nYou can get token over there: http://jabberon.ru/vk4xmpp.html"
token = raw_input("\nToken: ")
class User:
	"""
	A compatibility layer for vk4xmpp extensions: handlers expect an object
	exposing a ``vk`` attribute, which the gateway normally provides per user.
	"""
	vk = VK()  # the single VK session used by the export loop below
user = User()
user.vk.auth(token)
user.vk.friends = user.vk.getFriends()
# Export the full message history with every friend into logs/<uid>.txt,
# paging 200 messages at a time.
for friend in user.vk.friends.keys():
	file = open("logs/%d.txt" % friend, "w")
	start = 0  # id of the last message written; 0 means "from the beginning"
	while True:
		count = 200
		rev = 0
		# [1:] drops the leading total-count element of the API response;
		# sortMsg orders the page by message id ascending.
		messages = sorted(user.vk.getMessageHistory(count, friend, rev, start)[1:], sortMsg)
		print "receiving messages for %d" % friend
		if not messages or not messages[0]:
			# Empty dialog: remove the file we just created and move on.
			print "no messages for %d" % friend
			file.close()
			os.remove("logs/%d.txt" % friend)
			break
		last = messages[0]["mid"]
		if last == start:
			# Same page came back — history exhausted for this friend.
			start = 0
			break
		start = last
		for message in messages:
			body = uHTML(message["body"])
			# Let extensions append to the body (attachments, forwards, ...).
			# A handler returning None aborts enrichment; the remaining
			# handlers are still invoked (best-effort) via apply().
			iter = EXTENSIONS.__iter__()
			for func in iter:
				try:
					result = func(user, message)
				except Exception:
					result = None
					crashLog("handle.%s" % func.__name__)
				if result is None:
					for func in iter:
						apply(func, (user, message))
					break
				else:
					body += result
			date = datetime.fromtimestamp(message["date"]).strftime("%d.%m.%Y %H:%M:%S")
			name = user.vk.getUserData(message["from_id"])["name"]
			# vars() supplies date/name/body to the "format" template.
			file.write(format % vars())
print "Done. Check out the \"logs\" directory"
52b76fc4164fe4af1de4e58e8df86ebc7ba9d189 | 160 | py | Python | rmidi/math/functions.py | rushike/rmidipy | 7d80dc2cc584cb2e8b8df0eeedc34e9e11ab0de7 | [
"MIT"
] | 5 | 2019-11-30T11:12:14.000Z | 2021-08-15T00:47:23.000Z | rmidi/math/functions.py | rushike/rmidipy | 7d80dc2cc584cb2e8b8df0eeedc34e9e11ab0de7 | [
"MIT"
] | 3 | 2020-03-14T04:45:38.000Z | 2020-05-31T15:07:13.000Z | rmidi/math/functions.py | rushike/rmidipy | 7d80dc2cc584cb2e8b8df0eeedc34e9e11ab0de7 | [
"MIT"
] | null | null | null | import math
def fibonacci(nth):
return int(1 / math.sqrt(5) * (math.pow((1 + math.sqrt(5)) / 2, nth + 1) - math.pow((1 - math.sqrt(5)) / 2, nth + 1)))
| 32 | 122 | 0.55 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
52b94f6a46ab67c738007f61f91f1b32ae3ae2ba | 2,577 | py | Python | bot.py | Olegt0rr/calendar-telegram | 099e2c856fc6fbfeffe35bcb666e2800e41634ee | [
"MIT"
] | 4 | 2019-03-03T21:45:42.000Z | 2021-06-27T12:35:29.000Z | bot.py | Pestov/calendar-telegram | 099e2c856fc6fbfeffe35bcb666e2800e41634ee | [
"MIT"
] | null | null | null | bot.py | Pestov/calendar-telegram | 099e2c856fc6fbfeffe35bcb666e2800e41634ee | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import telebot
from telebot import types
import datetime
from telegramcalendar import create_calendar
bot = telebot.TeleBot("")  # NOTE(review): bot token is empty — must be filled in to run
# chat_id -> (year, month) currently displayed by that chat's calendar keyboard
current_shown_dates={}
@bot.message_handler(commands=['calendar'])
def get_calendar(message):
    """Reply to /calendar with an inline calendar for the current month."""
    today = datetime.datetime.now()
    # Remember which (year, month) this chat is currently looking at.
    current_shown_dates[message.chat.id] = (today.year, today.month)
    keyboard = create_calendar(today.year, today.month)
    bot.send_message(message.chat.id, "Please, choose a date", reply_markup=keyboard)
@bot.callback_query_handler(func=lambda call: call.data[0:13] == 'calendar-day-')
def get_day(call):
    """Handle a tap on a day button: send back the full selected date."""
    chat_id = call.message.chat.id
    shown = current_shown_dates.get(chat_id)
    if shown is None:
        # No month stored for this chat — silently ignore (original behavior).
        return
    year, month = shown
    day = call.data[13:]  # strip the 'calendar-day-' prefix
    chosen = datetime.datetime(int(year), int(month), int(day), 0, 0, 0)
    bot.send_message(chat_id, str(chosen))
    bot.answer_callback_query(call.id, text="")
@bot.callback_query_handler(func=lambda call: call.data == 'next-month')
def next_month(call):
    """Advance the calendar shown in this chat by one month and redraw it."""
    chat_id = call.message.chat.id
    shown = current_shown_dates.get(chat_id)
    if shown is None:
        return  # nothing displayed for this chat — ignore, as before
    year, month = shown
    if month == 12:
        year, month = year + 1, 1  # wrap December into January of next year
    else:
        month += 1
    current_shown_dates[chat_id] = (year, month)
    keyboard = create_calendar(year, month)
    bot.edit_message_text("Please, choose a date", call.from_user.id, call.message.message_id, reply_markup=keyboard)
    bot.answer_callback_query(call.id, text="")
@bot.callback_query_handler(func=lambda call: call.data == 'previous-month')
def previous_month(call):
    """Move the calendar shown in this chat back one month and redraw it."""
    chat_id = call.message.chat.id
    shown = current_shown_dates.get(chat_id)
    if shown is None:
        return  # nothing displayed for this chat — ignore, as before
    year, month = shown
    if month == 1:
        year, month = year - 1, 12  # wrap January into December of previous year
    else:
        month -= 1
    current_shown_dates[chat_id] = (year, month)
    keyboard = create_calendar(year, month)
    bot.edit_message_text("Please, choose a date", call.from_user.id, call.message.message_id, reply_markup=keyboard)
    bot.answer_callback_query(call.id, text="")
@bot.callback_query_handler(func=lambda call: call.data == 'ignore')
def ignore(call):
    # Filler buttons (day-name headers, empty cells) carry data 'ignore':
    # answer the callback with nothing so the client's spinner goes away.
    bot.answer_callback_query(call.id, text="")
# Start long-polling for updates (blocks forever).
bot.polling()
| 33.467532 | 119 | 0.676756 | 0 | 0 | 0 | 0 | 2,380 | 0.923555 | 0 | 0 | 313 | 0.121459 |
52b9ce2bf8d9de0c43c56e76507be9b3c2b1b317 | 5,094 | py | Python | Src/models/model2_3L.py | OferMon/Covid19-DeepLearning-XAI | 34c390755aafcc376c2a27a283af8e96ea7ce318 | [
"Apache-2.0"
] | 2 | 2021-11-14T11:59:46.000Z | 2022-03-31T03:40:14.000Z | Src/models/model2_3L.py | OferMon/Covid19-DeepLearning-XAI | 34c390755aafcc376c2a27a283af8e96ea7ce318 | [
"Apache-2.0"
] | null | null | null | Src/models/model2_3L.py | OferMon/Covid19-DeepLearning-XAI | 34c390755aafcc376c2a27a283af8e96ea7ce318 | [
"Apache-2.0"
] | null | null | null | from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score, confusion_matrix
from keras.callbacks import ModelCheckpoint
import seaborn as sns
from keras.optimizers import Adam
import pickle
import matplotlib.pyplot as plt
import lime
import lime.lime_tabular
from lime.lime_tabular import LimeTabularExplainer
import os
# fix random seed for reproducibility
np.random.seed(7)
# load dataset (first row is the CSV header, dropped below)
dataset = np.genfromtxt("covid_filtered_1-5_allMin3.csv", delimiter=",", encoding="utf8")
dataset = dataset[1:, :]
np.random.shuffle(dataset)
# split into input and output variables
# Column 23 holds the raw 1..5 severity; collapse into 3 severity classes.
df_label = dataset[:, 23]
label = []
for lab in df_label:
    if lab == 1:
        label.append([0]) # class 1
    elif lab == 2 or lab == 3:
        label.append([1]) # class 23
    elif lab == 4 or lab == 5:
        label.append([2]) # class 45
    else:
        print("DATA ERROR")
# Feature column indices used as model input (see `fn` below for names).
inputColumns = [0, 2, 3, 4, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
label = np.array(label)
# Stratified 70/30 split keeps the class proportions in both sets.
xFit, xTest, yFit, yTest = train_test_split(dataset[:, inputColumns], label, test_size=0.3, random_state=42,
                                            stratify=label)
# Disabled (kept as a string literal): per-class test subsets for class-wise
# evaluation of the neural network further below.
'''
# test:
xTest_c1 = []
yTest_c1 = []
xTest_c23 = []
yTest_c23 = []
xTest_c45 = []
yTest_c45 = []
for i in range(len(yTest)):
    if yTest[i][0] == 1: # class 1
        xTest_c1.append(xTest[i])
        yTest_c1.append(yTest[i])
    elif yTest[i][1] == 1: # class 2-3
        xTest_c23.append(xTest[i])
        yTest_c23.append(yTest[i])
    elif yTest[i][2] == 1: # class 4-5
        xTest_c45.append(xTest[i])
        yTest_c45.append(yTest[i])
xTest_c1 = numpy.array(xTest_c1)
yTest_c1 = numpy.array(yTest_c1)
xTest_c23 = numpy.array(xTest_c23)
yTest_c23 = numpy.array(yTest_c23)
xTest_c45 = numpy.array(xTest_c45)
yTest_c45 = numpy.array(yTest_c45)
'''
# Random-forest hyperparameters.
parameters = {'bootstrap': True,
              'min_samples_leaf': 3,
              'n_estimators': 50,
              'min_samples_split': 10,
              'max_features': 'sqrt',
              'max_depth': 6,
              'max_leaf_nodes': None}
RF_model = RandomForestClassifier(**parameters)
yFit = np.array(yFit).ravel()  # flatten [[c], ...] to [c, ...] as sklearn expects
RF_model.fit(xFit, yFit)
RF_predictions = RF_model.predict(xTest)
score = accuracy_score(yTest, RF_predictions)
print(score)
from sklearn import tree
import matplotlib.pyplot as plt
# Feature and class names for the tree visualization below.
fn = ['sex', 'HSD', 'entry_month', 'symptoms_month', 'pneumonia', 'age_group', 'pregnancy', 'diabetes',
      'copd', 'asthma', 'immsupr', 'hypertension', 'other_disease', 'cardiovascular', 'obesity',
      'renal_chronic', 'tobacco', 'contact_other_covid']
cn = ['Low', 'Middle', 'High']
# Render the first tree of the forest to a PNG.
fig = plt.figure(figsize=(35, 6), dpi=900)
tree.plot_tree(RF_model.estimators_[0],
               feature_names=fn,
               class_names=cn,
               filled=True,
               rounded=True,
               precision=2,
               fontsize=4)
fig.savefig('rf_individualtree.png')
'''
# Get and reshape confusion matrix data
matrix = confusion_matrix(yTest, RF_predictions)
matrix = matrix.astype('float') / matrix.sum(axis=1)[:, np.newaxis]
# Build the plot
plt.figure(figsize=(16, 7))
sns.set(font_scale=1.4)
sns.heatmap(matrix, annot=True, annot_kws={'size': 10},
cmap=plt.cm.Greens, linewidths=0.2)
# Add labels to the plot
class_names = ['Low severity', 'Medium severity', 'High severity']
tick_marks = np.arange(len(class_names))
tick_marks2 = tick_marks + 0.5
plt.xticks(tick_marks, class_names, rotation=25)
plt.yticks(tick_marks2, class_names, rotation=0)
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.title('Confusion Matrix for Random Forest Model')
plt.show()
# create model
model = Sequential()
model.add(Dense(729, input_dim=len(inputColumns), activation='sigmoid'))
model.add(Dense(243, activation='sigmoid'))
model.add(Dense(81, activation='sigmoid'))
model.add(Dense(27, activation='sigmoid'))
model.add(Dense(9, activation='sigmoid'))
model.add(Dense(3, activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.002), metrics=['accuracy'])
# Fit the model (train the model)
model.fit(xFit, yFit, epochs=1000, batch_size=50)
# evaluate the model
print("\n-------------------------------------------------------")
print("\ntotal(%i):" % len(xTest))
scores = model.evaluate(xTest, yTest)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
# test:
print("\nclass1(%i):" % len(xTest_c1))
scores = model.evaluate(xTest_c1, yTest_c1)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
print("\nclass23(%i):" % len(xTest_c23))
scores = model.evaluate(xTest_c23, yTest_c23)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
print("\nclass45(%i):" % len(xTest_c45))
scores = model.evaluate(xTest_c45, yTest_c45)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
'''
| 32.240506 | 108 | 0.669808 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,128 | 0.614056 |
52b9e9f5160628ec72547ecc1f6e6284bb31babb | 14,275 | py | Python | models/FSSD_SE.py | cenchaojun/TDFSSD | 4c151fa1f26dd599849311bbdaee4f500c576f66 | [
"MIT"
] | null | null | null | models/FSSD_SE.py | cenchaojun/TDFSSD | 4c151fa1f26dd599849311bbdaee4f500c576f66 | [
"MIT"
] | null | null | null | models/FSSD_SE.py | cenchaojun/TDFSSD | 4c151fa1f26dd599849311bbdaee4f500c576f66 | [
"MIT"
] | null | null | null | '''
Micro Object Detector Net
the author:Luis
date : 11.25
'''
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from layers import *
from models.base_models import vgg, vgg_base
from ptflops import get_model_complexity_info
class BasicConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True,
bn=False, bias=True, up_size=0):
super(BasicConv, self).__init__()
self.out_channels = out_planes
self.in_channels = in_planes
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
self.relu = nn.ReLU(inplace=True) if relu else None
self.up_size = up_size
self.up_sample = nn.Upsample(size=(up_size, up_size), mode='bilinear') if up_size != 0 else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
if self.up_size > 0:
x = self.up_sample(x)
return x
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y
class MOD(nn.Module):
def __init__(self, base, extras, upper, upper2, head, num_classes, size):
super(MOD, self).__init__()
self.num_classes = num_classes
self.extras = nn.ModuleList(extras)
self.size = size
self.base = nn.ModuleList(base)
# self.L2Norm = nn.ModuleList(extras)
self.upper = nn.ModuleList(upper)
self.upper2 = nn.ModuleList(upper2)
self.loc = nn.ModuleList(head[0])
self.conf = nn.ModuleList(head[1])
self.softmax = nn.Softmax()
self.predict1 = nn.ModuleList(extra_predict1(self.size))
self.predict2 = nn.ModuleList(extra_predict2(self.size))
def forward(self, x, test=False):
scale_source = []
upper_source = []
loc = []
conf = []
mid_trans = []
# get the F.T of conv4
for k in range(23):
x = self.base[k](x)
scale_source.append(x)
for k in range(23, len(self.base)):
x = self.base[k](x)
scale_source.append(x)
for k, v in enumerate(self.extras):
x = F.relu(v(x), inplace=True)
if k % 2 == 1:
scale_source.append(x)
upper_source = scale_source
lenscale = len(scale_source)
orgin = x
for k in range(len(self.upper) - 1):
# bn = nn.BatchNorm2d(self.upper[lenscale-k-2].in_channels,affine=True)
# print(self.upper[lenscale-k-2].in_channels)
# print(self.upper[lenscale-k-1].out_channels)
# print(scale_source[lenscale-k-2].size())
se = SELayer(self.upper[lenscale - k - 1].out_channels, 16)
upper_source[0] = upper_source[0] + se(self.upper[lenscale - k - 1](upper_source[lenscale - k - 1]))
# upper_source[0] =upper_source[0]+ self.upper[lenscale-k-1](upper_source[lenscale-k-1])
for k in range(len(self.upper) - 2):
se = SELayer(self.upper2[lenscale - k - 1].out_channels, 16)
upper_source[1] = upper_source[1] + se(self.upper2[lenscale - k - 1](upper_source[lenscale - k - 1]))
# upper_source[1] = upper_source[1] + self.upper2[lenscale-k-1](upper_source[lenscale-k-1])
bn = nn.BatchNorm2d(512, affine=True)
upper_source[0] = bn(upper_source[0])
# bn1 = nn.BatchNorm2d(1024,affine = True)
# upper_source[1] = bn1(upper_source[1])
predict_layer1 = []
predict_layer1.append(upper_source[0])
origin_fea = upper_source[0]
# print('origin_fea',origin_fea.size())
for k, v in enumerate(self.predict1):
origin_fea = v(origin_fea)
# print('ori',origin_fea.size())
predict_layer1.append(origin_fea)
bn = nn.BatchNorm2d(2048, affine=True)
# print(predict_layer1[1].size())
# print(upper_source[1].size())
# predict_layer1[1] = bn(torch.cat([predict_layer1[1],upper_source[1]],1))
predict_layer1[1] = predict_layer1[1] + upper_source[1]
origin_fea2 = upper_source[1]
for k, v in enumerate(self.predict2):
origin_fea2 = v(origin_fea2)
# predict_layer2.append(origin_fea2)
# bn = nn.BatchNorm2d(v.out_channels*2,affine=True)
# if not k==len(self.predict2)-1:
# predict_layer1[k+2] = bn(torch.cat([predict_layer1[k+2],origin_fea2],1))
# else:
# predict_layer1[k+2] = torch.cat([predict_layer1[k+2],origin_fea2],1)
predict_layer1[k + 2] = predict_layer1[k + 2] + origin_fea2
for (x, l, c) in zip(predict_layer1, self.loc, self.conf):
loc.append(l(x).permute(0, 2, 3, 1).contiguous())
conf.append(c(x).permute(0, 2, 3, 1).contiguous())
# for (x, l, c) in zip(upper_source, self.loc, self.conf):
# loc.append(l(x).permute(0, 2, 3, 1).contiguous())
# conf.append(c(x).permute(0, 2, 3, 1).contiguous())
loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
# print(loc.size())
# print(conf.size())
if test:
output = (
loc.view(loc.size(0), -1, 4), # loc preds
self.softmax(conf.view(-1, self.num_classes)), # conf preds
)
else:
output = (
loc.view(loc.size(0), -1, 4),
conf.view(conf.size(0), -1, self.num_classes),
)
# print(loc.size())
# print(conf.size())
return output
def low_pooling(vgg, extracts, size):
if size == 300:
up_size = layer_size('300')[k]
elif size == 512:
up_size = layer_size('512')[k]
layers = []
def extra_predict1(size):
if size == 300:
layers = [BasicConv(512, 1024, kernel_size=3, stride=2, padding=1),
BasicConv(1024, 512, kernel_size=3, stride=2, padding=1), \
BasicConv(512, 256, kernel_size=3, stride=2, padding=1),
BasicConv(256, 256, kernel_size=3, stride=2, padding=1), \
BasicConv(256, 256, kernel_size=3, stride=1, padding=0)]
elif size == 512:
layers = [BasicConv(512, 1024, kernel_size=3, stride=2, padding=1),
BasicConv(1024, 512, kernel_size=3, stride=2, padding=1), \
BasicConv(512, 256, kernel_size=3, stride=2, padding=1),
BasicConv(256, 256, kernel_size=3, stride=2, padding=1), \
BasicConv(256, 256, kernel_size=3, stride=2, padding=1),
BasicConv(256, 256, kernel_size=4, padding=1, stride=1)]
return layers
def extra_predict2(size):
if size == 300:
layers = [BasicConv(1024, 512, kernel_size=3, stride=2, padding=1), \
BasicConv(512, 256, kernel_size=3, stride=2, padding=1),
BasicConv(256, 256, kernel_size=3, stride=2, padding=1), \
BasicConv(256, 256, kernel_size=3, stride=1, padding=0)]
elif size == 512:
layers = [BasicConv(1024, 512, kernel_size=3, stride=2, padding=1), \
BasicConv(512, 256, kernel_size=3, stride=2, padding=1),
BasicConv(256, 256, kernel_size=3, stride=2, padding=1), \
BasicConv(256, 256, kernel_size=3, stride=2, padding=1),
BasicConv(256, 256, kernel_size=4, padding=1, stride=1)]
return layers
def upper_deconv(vgg, extracts, size):
layers = []
layers2 = []
if size == 300:
layers.append(BasicConv(512, 128 * 4, kernel_size=1, padding=0))
layers += [(BasicConv(vgg[-2].out_channels, 512, kernel_size=1, padding=0, up_size=38))]
layers.append(BasicConv(extracts[1].out_channels, 512, kernel_size=1, padding=0, up_size=38))
layers.append(BasicConv(extracts[3].out_channels, 512, kernel_size=1, padding=0, up_size=38))
layers.append(BasicConv(extracts[5].out_channels, 512, kernel_size=1, padding=0, up_size=38))
layers.append(BasicConv(extracts[7].out_channels, 512, kernel_size=1, padding=0, up_size=38))
layers2.append(BasicConv(512, 128 * 4, kernel_size=1, padding=0))
layers2 += [(BasicConv(vgg[-2].out_channels, 1024, kernel_size=1, padding=0, up_size=19))]
layers2.append(BasicConv(extracts[1].out_channels, 1024, kernel_size=1, padding=0, up_size=19))
layers2.append(BasicConv(extracts[3].out_channels, 1024, kernel_size=1, padding=0, up_size=19))
layers2.append(BasicConv(extracts[5].out_channels, 1024, kernel_size=1, padding=0, up_size=19))
layers2.append(BasicConv(extracts[7].out_channels, 1024, kernel_size=1, padding=0, up_size=19))
elif size == 512:
layers.append(BasicConv(512, 128 * 4, kernel_size=1, padding=0))
layers.append(BasicConv(vgg[-2].out_channels, 512, kernel_size=1, padding=0, up_size=64))
layers.append(BasicConv(extracts[1].out_channels, 512, kernel_size=1, padding=0, up_size=64))
layers.append(BasicConv(extracts[3].out_channels, 512, kernel_size=1, padding=0, up_size=64))
layers.append(BasicConv(extracts[5].out_channels, 512, kernel_size=1, padding=0, up_size=64))
layers.append(BasicConv(extracts[7].out_channels, 512, kernel_size=1, padding=0, up_size=64))
layers.append(BasicConv(extracts[9].out_channels, 512, kernel_size=1, padding=0, up_size=64))
layers2.append(BasicConv(512, 128 * 4, kernel_size=1, padding=0))
layers2.append(BasicConv(vgg[-2].out_channels, 1024, kernel_size=1, padding=0, up_size=32))
layers2.append(BasicConv(extracts[1].out_channels, 1024, kernel_size=1, padding=0, up_size=32))
layers2.append(BasicConv(extracts[3].out_channels, 1024, kernel_size=1, padding=0, up_size=32))
layers2.append(BasicConv(extracts[5].out_channels, 1024, kernel_size=1, padding=0, up_size=32))
layers2.append(BasicConv(extracts[7].out_channels, 1024, kernel_size=1, padding=0, up_size=32))
layers2.append(BasicConv(extracts[9].out_channels, 1024, kernel_size=1, padding=0, up_size=32))
return vgg, extracts, layers, layers2
def add_extras(cfg, i, batch_norm=False, size=300):
# Extra layers added to VGG for feature scaling
layers = []
in_channels = i
flag = False
for k, v in enumerate(cfg):
if in_channels != 'S':
if v == 'S':
layers += [nn.Conv2d(in_channels, cfg[k + 1],
kernel_size=(1, 3)[flag], stride=2, padding=1)]
else:
layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]
flag = not flag
in_channels = v
if size == 512:
layers.append(nn.Conv2d(in_channels, 128, kernel_size=1, stride=1))
layers.append(nn.Conv2d(128, 256, kernel_size=4, stride=1, padding=1))
# print(len(layers))
return layers
def multibox(vgg, extra_layers, upper, upper2, cfg, num_classes):
loc_layers = []
conf_layers = []
vgg_source = [24, -2]
loc_layers += [nn.Conv2d(upper[0].out_channels, cfg[0] * 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(upper[0].out_channels, cfg[0] * num_classes, kernel_size=3, padding=1)]
for k, v in enumerate(upper):
if k == 0:
continue
loc_layers += [nn.Conv2d(v.in_channels, cfg[k] * 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(v.in_channels, cfg[k] * num_classes, kernel_size=3, padding=1)]
'''
for k, v in enumerate(vgg_source):
loc_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * num_classes, kernel_size=3, padding=1)]
for k, v in enumerate(extra_layers[1::2], 2):
loc_layers += [nn.Conv2d(v.out_channels, cfg[k]
* 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(v.out_channels, cfg[k]
* num_classes, kernel_size=3, padding=1)]
'''
return vgg, extra_layers, upper, upper2, (loc_layers, conf_layers)
layer_size = {
'300': [38, 19, 10, 5, 3, 1],
'512': [64, 32, 16, 8, 4, 2, 1],
}
extras = {
'300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],
'512': [256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256],
}
mbox = {
'300': [6, 6, 6, 6, 4, 4], # number of boxes per feature map location
'512': [6, 6, 6, 6, 6, 4, 4],
}
def build_net(size=300, num_classes=21):
if size != 300 and size != 512:
print("Error: Sorry only SSD300 and SSD512 is supported currently!")
return
return MOD(*multibox(*upper_deconv(vgg(vgg_base[str(size)], 3),
add_extras(extras[str(size)], 1024, size=size), size),
mbox[str(size)], num_classes), num_classes=num_classes, size=size)
if __name__ == '__main__':
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
with torch.no_grad():
model = build_net(size=512,num_classes=2)
print(model)
# x = torch.randn(16, 3, 300, 300)
model.cuda()
macs,params = get_model_complexity_info(model,(3,512,512),as_strings=True,print_per_layer_stat=True,verbose=True)
print('MACs: {0}'.format(macs))
print('Params: {0}'.format(params)) | 44.058642 | 121 | 0.596077 | 6,152 | 0.430963 | 0 | 0 | 0 | 0 | 0 | 0 | 2,291 | 0.16049 |
52b9eddf6573e3f16266c439e6b72ad18cdb718c | 24 | py | Python | __init__.py | yuta-hi/chainer_extensions | f0dbc898623c25251aa0820bfda8417e9edced6a | [
"MIT"
] | 1 | 2020-01-23T10:15:53.000Z | 2020-01-23T10:15:53.000Z | __init__.py | yuta-hi/chainer_extensions | f0dbc898623c25251aa0820bfda8417e9edced6a | [
"MIT"
] | null | null | null | __init__.py | yuta-hi/chainer_extensions | f0dbc898623c25251aa0820bfda8417e9edced6a | [
"MIT"
] | null | null | null | from . import extensions | 24 | 24 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
52b9f9e9f171e8a8b809496821ff4343dbc31d1c | 412 | py | Python | change_wall.py | masoodmomin/Python | 1545cb1f5ae08e9df8682b893079c8ce776944eb | [
"MIT"
] | null | null | null | change_wall.py | masoodmomin/Python | 1545cb1f5ae08e9df8682b893079c8ce776944eb | [
"MIT"
] | null | null | null | change_wall.py | masoodmomin/Python | 1545cb1f5ae08e9df8682b893079c8ce776944eb | [
"MIT"
] | null | null | null | import os
import ctypes
import time
PATH = 'C:/Users/acer/Downloads/wall/'
SPI=20
def changeBG(path):
"""Change background depending on bit size"""
count=0
for x in os.listdir(PATH):
while True:
ctypes.windll.user32.SystemParametersInfoW(SPI, 0, PATH+x, 3)
time.sleep(1)
count += 1
break
changeBG(PATH)
changeBG(PATH)
| 20.6 | 74 | 0.580097 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.184466 |
52ba39cbc7fb572147ab2b78903b1a54e6357b9c | 11,085 | py | Python | rump/rule.py | bninja/rump | 3b6c4ff29974b3c04a260d8275567beebb296e5d | [
"0BSD"
] | 6 | 2015-07-27T09:02:36.000Z | 2018-07-18T11:11:33.000Z | rump/rule.py | bninja/rump | 3b6c4ff29974b3c04a260d8275567beebb296e5d | [
"0BSD"
] | null | null | null | rump/rule.py | bninja/rump | 3b6c4ff29974b3c04a260d8275567beebb296e5d | [
"0BSD"
] | null | null | null | import collections
import logging
import StringIO
from . import exc, Request, Expression
logger = logging.getLogger(__name__)
class CompiledRule(object):
"""
Compiled version of a routing rule.
`symbols`
A `rump.fields.Symbols` table used to store symbolic information used
in the compiled rule.
`expression`
The `rump.fields.Expression` which has been compiled.
`compiled`
Byte code for evaluating a `rump.fields.Expression`.
`upstream`
The `rump.Upstream` to be returned on a match.
You don't usually need to create these directly. Instead grab them from
the source `rump.Rule` like:
my_compiled_rule = rump.parser.for_rule()("my-rule-string").compile()
"""
def __init__(self, expression, upstream, symbols=None):
self.expression = expression
self.upstream = upstream
self.symbols = (
self.expression.symbols() if symbols is None else symbols
)
self.compiled = compile(
expression.compile(self.symbols), '<string>', 'eval'
)
def match_context(self, request_context):
"""
Determines whether a request represented by a context matches this rule.
:param context: A `rump.RequestContext`.
:return rump.Upstream:
If the request matches this rule then the associated upstream is
returned, otherwise None.
"""
matched = eval(self.compiled, None, request_context)
return self.upstream if matched else None
def match(self, request):
"""
Determines whether a request matches this rule.
:param request: A `rump.Request`.
:return rump.Upstream:
If the request matches this rule then the associated upstream is
returned, otherwise None.
"""
matched = self.match_context(request.context(self.symbols))
return self.upstream if matched else None
def __str__(self):
return '{0} => {1}'.format(str(self.expression), str(self.upstream))
def __eq__(self, other):
return (
isinstance(other, (Rule, CompiledRule)) and
self.expression == other.expression and
self.upstream == other.upstream
)
def __ne__(self, other):
return not self.__eq__(other)
class Rule(object):
"""
Represents a "routing" rule used to match requests to an upstream.
`expression`
A `rump.fields.Expression`.
`upstream`
The `rump.Upstream` to be returned on a match.
There are two ways to express a Rule:
- A `rump.fields.Expression`.
- String, see `rump.rule.grammar` for the grammar.
If you have a string just do something like:
rule = rump.parser.for_rule()("my-rule-string")
"""
compiled_type = CompiledRule
def __init__(self, expression, upstream):
self.expression = expression
self.upstream = upstream
def match(self, request):
"""
Determines whether a request matches this rule.
:param request: The rump.Request to evaluate for a match.
:param cache: Optional map used to cache request field lookups.
:return rump.Upstream:
If the request matches this rule then the associated upstream is
returned, otherwise None.
"""
matched = self.expression(request)
return self.upstream if matched else None
def compile(self, symbols=None):
"""
Compiles this rule.
:param symbols:
A `rump.fields.Symbols` table used to store symbolic information
used in the compiled rule.
:return CompiledRule: The equivalent compiled rule.
"""
return CompiledRule(self.expression, self.upstream, symbols)
def __str__(self):
return '{0} => {1}'.format(self.expression, self.upstream)
def __eq__(self, other):
return (
isinstance(other, (Rule, CompiledRule)) and
self.expression == other.expression and
self.upstream == other.upstream
)
def __ne__(self, other):
return not self.__eq__(other)
class Rules(collections.MutableSequence):
"""
A collection of "routing" rules used to match requests to an upstream.
`request_type`
Specification of the requests these rules will be matching. Defaults
to `rump.Request`.
`compile`
Flag determining whether added rules are compiled.
`strict`
Flag determining whether added rules are compiled.
`auto_disable`
Flag determining whether to auto disable a rule that generates and
error when attempting to match a request.
"""
def __init__(self, *rules, **options):
self._parse_rule = None
self.symbols = None
self._compile = False
self.disabled = set()
self._rules = []
if len(rules) == 1 and isinstance(rules[0], list):
rules = rules[0]
for rule in rules:
self._rules.append(rule)
# options
self.request_type = options.pop('request_type', Request)
self.compile = options.pop('compile', False)
self.strict = options.pop('strict', True)
self.auto_disable = options.pop('auto_disable', False)
if options:
raise TypeError(
'Unexpected keyword argument {0}'.format(options.keys()[0])
)
@property
def compile(self):
return self._compile
@compile.setter
def compile(self, value):
if value == self._compile:
return
self._compile = value
if self._compile:
self.symbols = Expression.symbols()
for i in xrange(len(self)):
self[i] = Rule(
self[i].expression, self[i].upstream
).compile(self.symbols)
else:
self.symbols = None
for i in xrange(len(self)):
self[i] = Rule(self[i].expression, self[i].upstream)
@property
def parse_rule(self):
from . import parser
if not self._parse_rule:
self._parse_rule = parser.for_rule(self.request_type)
return self._parse_rule
def load(self, io, strict=None):
strict = self.strict if strict is None else strict
for i, line in enumerate(io):
line = line.strip()
if not line or line.startswith('#'):
continue
try:
rule = self.parse_rule(line)
except exc.ParseException, ex:
if strict:
raise
logger.warning(
'%s, line %s, unable to parse rule - %s, skipping',
getattr(io, 'name', '<memory>'), i, ex,
)
continue
self.append(rule)
return self
def loads(self, s, strict=None):
io = StringIO.StringIO(s)
return self.load(io, strict=strict)
def dump(self, io):
for rule in self:
io.write(str(rule))
io.write('\n')
def dumps(self):
io = StringIO.StringIO()
self.dump(io)
return io.getvalue()
def disable(self, i):
self.disabled.add(self[i])
def disable_all(self):
self.disabled = set(self)
def enable(self, i):
self.disabled.remove(self[i])
def enable_all(self):
self.disabled.clear()
def match(self, request, error=None):
if error is None:
error = 'suppress' if self.auto_disable is False else 'disable'
if error not in ('raise', 'disable', 'suppress'):
raise ValueError('error={0} invalid'.format(error))
return (
self._match_compiled if self.compile else self._match
)(request, error)
def _match_compiled(self, request, error):
i, count, request_ctx = 0, len(self), request.context(self.symbols)
while True:
try:
while i != count:
if self[i] not in self.disabled:
upstream = self[i].match_context(request_ctx)
if upstream:
return upstream
i += 1
break
except StandardError:
raise
except Exception as ex:
if error == 'raise':
raise
logger.exception('[%s] %s match failed - %s\n', i, self[i], ex)
if error == 'disable':
self.disabled.add(self[i])
i += 1
def _match(self, request, error):
i, count = 0, len(self)
while True:
try:
while i != count:
if self[i] not in self.disabled:
upstream = self[i].match(request)
if upstream:
return upstream
i += 1
break
except StandardError:
raise
except Exception as ex:
if error == 'raise':
raise
logger.exception('[%s] %s match failed - %s\n', self[i], i, ex)
if error == 'disable':
self.disabled.add(self[i])
i += 1
def __str__(self):
return str(self._rules)
def __eq__(self, other):
return (
(isinstance(other, Rules) and self._rules == other._rules) or
(isinstance(other, list) and self._rules == other)
)
def __ne__(self, other):
return not self.__eq__(other)
# collections.MutableSequence
def __getitem__(self, key):
return self._rules[key]
def __setitem__(self, key, value):
if isinstance(value, basestring):
rule = self.parse_rule(value)
elif isinstance(value, Rule):
rule = value
elif isinstance(value, Rule.compiled_type):
rule = Rule(value.expression, value.upstream)
else:
raise TypeError(
'{0} is not a string, Rule or CompiledRule'.format(value)
)
if self.compile:
rule = rule.compile(self.symbols)
self._rules[key] = rule
def __delitem__(self, key):
self.disabled.difference_update(self.__getitem__(key))
self._rules.__delitem__(key)
def __len__(self):
return len(self._rules)
def insert(self, key, value):
if isinstance(value, basestring):
rule = self.parse_rule(value)
elif isinstance(value, Rule):
rule = value
elif isinstance(value, Rule.compiled_type):
rule = Rule(value.expression, value.upstream)
else:
raise ValueError(
'{0} is not a string, Rule or CompiledRule'.format(value)
)
if self.compile:
rule = rule.compile(self.symbols)
self._rules.insert(key, rule)
| 29.959459 | 80 | 0.564727 | 10,947 | 0.987551 | 0 | 0 | 784 | 0.070726 | 0 | 0 | 3,167 | 0.285701 |
52bde9994c231b7dab0757f34ebf002f12e33370 | 951 | py | Python | tests/__init__.py | adelosa/cardutil | fa31223aaac1f0749d50368bb639a311d98e279a | [
"MIT"
] | null | null | null | tests/__init__.py | adelosa/cardutil | fa31223aaac1f0749d50368bb639a311d98e279a | [
"MIT"
] | 1 | 2022-03-25T20:15:24.000Z | 2022-03-30T09:20:34.000Z | tests/__init__.py | adelosa/cardutil | fa31223aaac1f0749d50368bb639a311d98e279a | [
"MIT"
] | null | null | null | import binascii
def print_stream(stream, description):
stream.seek(0)
data = stream.read()
print('***' + description + '***')
print(data)
stream.seek(0)
def test_message(encoding='ascii', hex_bitmap=False):
binary_bitmap = b'\xF0\x10\x05\x42\x84\x61\x80\x02\x02\x00\x00\x04\x00\x00\x00\x00'
bitmap = binary_bitmap
if hex_bitmap:
bitmap = binascii.hexlify(binary_bitmap)
return (
'1144'.encode(encoding) +
bitmap +
('164444555544445555111111000000009999150815171500123456789012333123423579957991200000'
'012306120612345612345657994211111111145BIG BOBS\\80 KERNDALE ST\\DANERLEY\\3103 VIC'
'AUS0080001001Y99901600000000000000011234567806999999').encode(encoding))
message_ascii_raw = test_message()
message_ebcdic_raw = test_message('cp500')
message_ascii_raw_hex = test_message(hex_bitmap=True)
message_ebcdic_raw_hex = test_message('cp500', hex_bitmap=True)
| 32.793103 | 95 | 0.729758 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 330 | 0.347003 |
52be6c2a30d69c057fc9279336d0f7c1bee4db5d | 16,265 | py | Python | bot/discordbot.py | DuckMcFuddle/forum-sweats | 56a6df2bcb7beb630cc375638e6a31d4d234050b | [
"MIT"
] | 2 | 2020-12-05T07:56:59.000Z | 2021-05-18T19:34:27.000Z | bot/discordbot.py | antisynth/forum-sweats | b3d5352cfdb993c592085420ade2ed9c58b47b08 | [
"MIT"
] | null | null | null | bot/discordbot.py | antisynth/forum-sweats | b3d5352cfdb993c592085420ade2ed9c58b47b08 | [
"MIT"
] | null | null | null | from .betterbot import BetterBot
from . import commands
from datetime import datetime, timedelta
import importlib
import discord
import asyncio
import modbot
import forums
import base64
import json
import time
import os
import db
# Gateway intents: member and presence events are privileged, but the
# moderation handlers below depend on them.
intents = discord.Intents.default()
intents.members = True
intents.presences = True

prefix = '!'
token = os.getenv('token')
is_dev = os.getenv('dev') == 'true'

# A Discord token's first dot-separated segment is the bot's user id,
# base64-encoded; fall back to 0 when no token is configured.
if token:
	_bot_id = int(base64.b64decode(token.split('.')[0]))
else:
	_bot_id = 0

betterbot = BetterBot(
	prefix=prefix,
	bot_id=_bot_id
)

# Per-guild role name -> role id configuration.
with open('roles.json', 'r') as f:
	roles = json.load(f)
def get_role_id(guild_id, role_name):
	"""Return the configured role id for a guild's named role, or None.

	Role ids come from roles.json, keyed first by guild id (as a string)
	and then by role name.
	"""
	guild_config = roles.get(str(guild_id), {})
	return guild_config.get(role_name)
def has_role(member_id, guild_id, role_name):
	"""Check whether a guild member has the role named in roles.json.

	Always returns True in dev mode so permission checks never block
	local testing.

	Fix over the original: if the guild or member cannot be resolved from
	the client cache (e.g. the member has left), this now returns False
	instead of raising AttributeError on None.
	"""
	if is_dev:
		return True
	guild = client.get_guild(guild_id)
	if guild is None:
		return False
	member = guild.get_member(member_id)
	if member is None:
		return False
	role_id = get_role_id(guild_id, role_name)
	# Generator expression instead of the original throwaway list in any().
	# role_id may be None for unconfigured roles; no role has a None id,
	# so that case correctly yields False.
	return any(role.id == role_id for role in member.roles)
# The Discord client; member/presence intents are enabled at module setup.
client = discord.Client(intents=intents)
async def start_bot():
	"""Log in and run the Discord client until it disconnects.

	Blocks for the bot's whole lifetime; the token comes from the ``token``
	environment variable read at module import.
	"""
	print('starting bot pog')
	await client.start(token)
# Snapshot of the main guild's invites; refreshed in on_ready and
# on_member_join so the join handler can work out (by comparing use
# counts) which invite a new member used.
cached_invites = []
async def check_dead_chat():
	"""Background loop: nag #general when nobody has talked for 5 minutes.

	Polls every 5 seconds.  ``last_general_message`` is a module-level
	timestamp updated by the message handler defined elsewhere in this
	file.
	"""
	DEAD_CHAT_SECONDS = 60 * 5
	channel = client.get_guild(717904501692170260).get_channel(719579620931797002)
	while True:
		await asyncio.sleep(5)
		idle_for = time.time() - last_general_message
		if idle_for > DEAD_CHAT_SECONDS:
			await channel.send('dead chat xD')
async def give_hourly_bobux():
	"""Background loop: on the top of every hour, award bobux for activity.

	Members who sent at least 20 messages in the past hour get 10 bobux,
	at least 10 messages get 5, and at least 1 message gets 1.
	"""
	# (minimum messages, bobux awarded), checked from highest tier down.
	REWARD_TIERS = ((20, 10), (10, 5), (1, 1))
	while True:
		# Sleep until the next wall-clock hour boundary.
		await asyncio.sleep(3600 - (time.time() % 3600))
		for member_data in await db.get_active_members_from_past_hour(1):
			messages_sent = member_data['hourly_messages']
			reward = 0
			for minimum, bobux in REWARD_TIERS:
				if messages_sent >= minimum:
					reward = bobux
					break
			await db.change_bobux(member_data['discord'], reward)
@client.event
async def on_ready():
	"""Runs when the gateway connection is ready: logs into the forums,
	initialises command modules, re-arms active mutes and starts the
	background loops.

	NOTE(review): discord.py can fire on_ready again after a reconnect,
	which would schedule duplicate background tasks — confirm.
	"""
	global cached_invites
	print('ready')
	# Forum credentials come from environment variables.
	await forums.login(os.getenv('forumemail'), os.getenv('forumpassword'))
	# `command_modules` is defined elsewhere in this file; each module may
	# expose an optional async init() hook.
	for module in command_modules:
		if hasattr(module, 'init'):
			await module.init()
	await client.change_presence(
		activity=discord.Game(name='e')
	)
	if not is_dev:
		# Re-arm every mute that was still active when the bot went down.
		# unmute_user is defined elsewhere in this file; presumably the
		# second argument means "wait until the mute expires" — TODO confirm.
		active_mutes = await db.get_active_mutes()
		for muted_id in active_mutes:
			asyncio.ensure_future(unmute_user(muted_id, True))
	# Prime the invite cache used by on_member_join to detect which invite
	# a new member used.
	guild = client.get_guild(717904501692170260)
	cached_invites = await guild.invites()
	asyncio.ensure_future(check_dead_chat())
	asyncio.ensure_future(give_hourly_bobux())
@client.event
async def on_member_join(member):
	"""Handles a member joining: logs which invite they used, bans
	blacklisted usernames, re-applies still-active mutes, and grants the
	member role otherwise."""
	if is_dev: return
	global cached_invites
	# Invite uses as of the previous snapshot, keyed by invite code.
	cached_invites_dict = {invite.code: invite for invite in cached_invites}
	guild = client.get_guild(717904501692170260)
	new_invites = await guild.invites()
	# The invite whose use count increased since the snapshot is (almost
	# certainly) the one this member used.  NOTE(review): joins via the
	# vanity URL, or via an invite created after the snapshot was taken,
	# won't be matched and fall into the "unknown invite" branch.
	used_invite = None
	for invite in new_invites:
		if invite.code in cached_invites_dict:
			invite_uses_before = cached_invites_dict[invite.code].uses
		else:
			invite_uses_before = 0
		invite_uses_now = invite.uses
		if invite_uses_now > invite_uses_before:
			used_invite = invite
	# Report the join (and the inviter, when known) to the bot-logs channel.
	bot_logs_channel = client.get_channel(718107452960145519)
	if used_invite:
		await bot_logs_channel.send(embed=discord.Embed(
			description=f'<@{member.id}> joined using discord.gg/{used_invite.code} (created by <@{used_invite.inviter.id}>)'
		))
	else:
		await bot_logs_channel.send(embed=discord.Embed(
			description=f'<@{member.id}> joined using an unknown invite'
		))
	# Refresh the snapshot for the next join.
	cached_invites = await guild.invites()
	# Instantly ban accounts whose username contains a blacklisted phrase.
	if 'ban speedrun' in member.name.lower() or 'forum sweats nsfw' in member.name.lower():
		return await member.ban(reason='has blacklisted phrase in name')
	# Re-apply a mute that was still active when the member left/rejoined;
	# otherwise hand out the member role.
	mute_end = await db.get_mute_end(member.id)
	is_muted = mute_end and mute_end > time.time()
	if is_muted:
		mute_remaining = mute_end - time.time()
		# mute_user is defined elsewhere in this file.
		await mute_user(member, mute_remaining, member.guild.id, gulag_message=False)
		await asyncio.sleep(1)
		member_role_id = get_role_id(member.guild.id, 'member')
		member_role = member.guild.get_role(member_role_id)
		# presumably another bot (MEE6) auto-assigns the member role on
		# join; strip it so muted members stay restricted — TODO confirm.
		await member.remove_roles(member_role, reason='mee6 cringe')
	else:
		# Disabled account-linking/verification flow, kept for reference:
		# is_member = await db.get_is_member(member.id)
		member_role_id = get_role_id(member.guild.id, 'member')
		member_role = member.guild.get_role(member_role_id)
		await member.add_roles(member_role, reason='Member joined')
		# if is_member:
		# 	await member.add_roles(member_role, reason='Linked member rejoined')
		# else:
		# 	if datetime.now() - member.created_at > timedelta(days=365):
		# 		await member.add_roles(member_role, reason='Account is older than a year')
		# 	else:
		# 		await member.send('Hello! Please verify your Minecraft account by doing !link <your username>. (You must set your Discord in your Hypixel settings)')
def is_close_to_everyone(name):
	"""Return True when *name*'s first word is an @everyone/@here lookalike.

	A nickname like "@everyone" or "here we go" could be abused to
	impersonate Discord's mass mentions.  Leading/trailing "@" characters
	are stripped (case-insensitively) before comparing the first
	whitespace-separated word.

	Fix over the original: names that are empty after stripping "@"
	(e.g. "@", "   ") previously raised IndexError on ``split()[0]``;
	they now return False.  Always returns a bool (callers only test
	truthiness, so this stays backward-compatible).
	"""
	if not name:
		return False
	words = name.lower().strip('@').split()
	return bool(words) and words[0] in ('everyone', 'here')
@client.event
async def on_member_update(before, after):
	"""Moderates nickname changes: reverts names containing layout-breaking
	characters or @everyone/@here lookalikes."""
	# nick update
	# Glyphs known to break chat layout (oversized/combining characters).
	wacky_characters = ['𒈙', 'ٴٴ', '˞˞˞˞˞˞˞˞˞˞˞˞˞˞˞˞˞˞T', '﷽']
	if after.nick:
		# `c in after.nick or ''` coerces a False result to '' (both
		# falsy), so any() behaves the same either way.
		if any([c in after.nick or '' for c in wacky_characters]):
			return await after.edit(nick=before.nick)
	else:
		# No nick set: check the username instead.  With no previous nick
		# to fall back to, assign a mocking placeholder suffixed with the
		# last 5 digits of the user id (for uniqueness).
		if any([c in after.display_name or '' for c in wacky_characters]):
			return await after.edit(nick='i am a poopoo head ' + str(after.id)[-5:])
	if is_close_to_everyone(after.nick):
		if not is_close_to_everyone(before.nick):
			# Previous nick was fine: revert to it.
			return await after.edit(nick=before.nick)
		elif not is_close_to_everyone(after.name):
			# Previous nick was also bad; fall back to the username.
			return await after.edit(nick=after.name)
		else:
			# Username is bad too; assign the placeholder nick.
			return await after.edit(nick='i am a poopoo head ' + str(after.id)[-5:])
	await asyncio.sleep(5)
	after = after.guild.get_member(after.id)
	# NOTE(review): nothing uses the refreshed `after` past this point;
	# the role-sync code below is disabled (kept for reference).
	# role update
	# if before.roles != after.roles:
	# 	muted_role = after.guild.get_role(719354039778803813)
	# 	member_role = after.guild.get_role(718090644148584518)
	# 	if muted_role in after.roles and member_role in after.roles:
	# 		await after.remove_roles(member_role, reason='Member manually muted')
	# 		print('manual mute')
	# 		asyncio.ensure_future(mute_user(after, 60 * 60 * 24))
	# 	elif muted_role in before.roles and muted_role not in after.roles and member_role not in after.roles:
	# 		print('manual unmute')
	# 		await after.add_roles(member_role, reason='Member manually unmuted')
	# 		await unmute_user(after.id, wait=False)
# Id of the last message whose number was accepted; used by on_message_delete
# to repost the count if that message is deleted.
most_recent_counting_message_id = None


async def process_counting_channel(message):
    """Validate a message posted to the counting channel.

    Accepts the message only if it is exactly the previous counter value plus
    one, rewarding the author 1 bobux; a wrong number resets the counter to 0
    and mutes the author for an hour.  Messages outside the counting channel,
    or sent by the bot itself, are ignored.
    """
    global most_recent_counting_message_id
    if message.channel.id != 738449805218676737:
        # if the message wasn't in the counting channel, you can ignore all of this
        return
    if message.author.id == 719348452491919401:
        # if the message was sent by forum sweats, ignore it
        return
    old_number = await db.get_counter(message.guild.id)
    # Allow thousands separators, e.g. "1,000" -> "1000".
    content = message.content.replace(',', '')
    try:
        new_number = float(content)
    except (ValueError, TypeError):
        # Non-numeric content: 0 never equals old_number + 1, so it is
        # treated as a wrong answer below.  (Previously a bare `except`,
        # which also swallowed KeyboardInterrupt/SystemExit.)
        new_number = 0
    if old_number == 0 and new_number != 1:
        # Fresh run: anything but 1 is deleted instead of punished.
        await message.delete()
        await message.channel.send(f'<@{message.author.id}>, please start at 1', delete_after=10)
    elif new_number == old_number + 1:
        await db.set_counter(message.guild.id, int(new_number))
        most_recent_counting_message_id = message.id
        # give 1 bobux every time you count
        await db.change_bobux(message.author.id, 1)
    else:
        # Wrong number: reset the counter and gulag the author for an hour.
        await db.set_counter(message.guild.id, 0)
        await message.channel.send(f"<@{message.author.id}> put an invalid number and ruined it for everyone. (Ended at {old_number})")
        asyncio.ensure_future(mute_user(message.author, 60 * 60))
# Timestamp of the last message seen in #general (updated by on_message).
last_general_message = time.time()
async def process_suggestion(message):
    """Seed a suggestion post with the agree/disagree voting reactions."""
    agree_emoji = client.get_emoji(719235230958878822)
    disagree_emoji = client.get_emoji(719235358029512814)
    await message.add_reaction(agree_emoji)
    await message.add_reaction(disagree_emoji)
@client.event
async def on_message(message):
    """Top-level message dispatcher: per-channel behaviour, stat tracking,
    then counting/command/moderation processing."""
    global last_general_message
    if message.channel.id == 738937428378779659:  # skyblock-updates
        # Announcement channel: publish so follower servers receive it.
        await message.publish()
    if message.channel.id == 719579620931797002:  # general
        last_general_message = time.time()
    if message.channel.id == 718114140119629847:  # suggestions
        await process_suggestion(message)
    if message.channel.id == 763088127287361586:  # spam
        # Echo back an "uwuized" copy of non-command, non-bot messages,
        # with @ stripped so the echo can't ping anyone.
        if message.content and message.content[0] != '!' and not message.author.bot:
            uwuized_message = message.content\
                .replace('@', '')\
                .replace('r', 'w')\
                .replace('l', 'w')\
                .replace('R', 'W')\
                .replace('L', 'W')\
                .replace('<!642466378254647296>', '<@642466378254647296>')
            await message.channel.send(uwuized_message)
    # Fire-and-forget the per-user message counter.
    asyncio.ensure_future(db.add_message(message.author.id))
    await process_counting_channel(message)
    await betterbot.process_commands(message)
    await modbot.process_messsage(message)
@client.event
async def on_message_delete(message):
    """Log deletions; if the latest accepted count was deleted, repost it."""
    print('deleted:', message.author, message.content)
    if message.id == most_recent_counting_message_id:
        # Keep the counting channel's current value visible even if the
        # author deletes their (already accepted) number.
        counter = await db.get_counter(message.guild.id)
        await message.channel.send(str(counter))
@client.event
async def on_message_edit(before, after):
    """Delete edits in the counting channel and re-run moderation checks."""
    if after.channel.id == 738449805218676737:
        # Editing an accepted number would desync the count: just delete it.
        await after.delete()
    # Re-moderate the edited content without issuing a fresh warning.
    # NOTE(review): `process_messsage` spelling matches the call in
    # on_message -- presumably the modbot API really is named this way.
    await modbot.process_messsage(after, warn=False)
async def mute_user(member, length, guild_id=None, gulag_message=True):
    """Mute *member* for *length* seconds.

    Swaps the member role for the muted role, stashes whether the member held
    the sweat/og vanity roles so they can be restored later, records the mute
    end in the DB, optionally announces it in the gulag channel, and finally
    awaits unmute_user (which sleeps until the mute expires).  Extending an
    existing mute reuses the previously stashed role data.
    """
    # Default to the main guild.
    guild_id = guild_id if guild_id else 717904501692170260
    guild = client.get_guild(guild_id)
    muted_role_id = get_role_id(guild_id, 'muted')
    muted_role = guild.get_role(muted_role_id)
    if not muted_role: return print('muted role not found')
    member_role_id = get_role_id(guild_id, 'member')
    member_role = guild.get_role(member_role_id)
    sweat_role_id = get_role_id(guild_id, 'sweat')
    sweat_role = guild.get_role(sweat_role_id)
    print(sweat_role, 'sweat_role')  # debug output
    og_role_id = get_role_id(guild_id, 'og')
    og_role = guild.get_role(og_role_id)
    print('muted_role', muted_role)  # debug output
    print()
    await member.add_roles(muted_role)
    await member.remove_roles(member_role)
    # A mute end in the past means the member is not currently muted.
    unmute_time = await db.get_mute_end(member.id)
    unmute_in = unmute_time - time.time()
    muted_before = False
    if unmute_in < 0:
        # First mute: remember which vanity roles to give back on unmute.
        extra_data = {
            'sweat': sweat_role in member.roles,
            'og': og_role in member.roles,
        }
    else:
        # Mute extension: keep the data recorded when the mute started.
        extra_data = await db.get_mute_data(member.id)
        muted_before = True
    await db.set_mute_end(
        member.id,
        time.time() + length,
        extra_data
    )
    if sweat_role in member.roles:
        await member.remove_roles(sweat_role)
    if og_role in member.roles:
        await member.remove_roles(og_role)
    gulag = client.get_channel(720073985412562975)
    if gulag_message:
        if not muted_before:
            await gulag.send(f'Welcome to gulag, <@{member.id}>.')
        else:
            # Extension: humanize the new mute length for the announcement.
            mute_remaining = int(length)
            mute_remaining_minutes = int(mute_remaining // 60)
            mute_remaining_hours = int(mute_remaining_minutes // 60)
            if mute_remaining_hours >= 2:
                mute_str = f'{mute_remaining_hours} hours'
            elif mute_remaining_hours == 1:
                mute_str = f'one hour'
            elif mute_remaining_minutes >= 2:
                mute_str = f'{mute_remaining_minutes} minutes'
            elif mute_remaining_minutes == 1:
                mute_str = f'one minute'
            elif mute_remaining == 1:
                mute_str = f'one second'
            else:
                mute_str = f'{mute_remaining} seconds'
            await gulag.send(f'<@{member.id}>, your mute is now {mute_str}')
    # Blocks until the mute is over, then restores roles.
    await unmute_user(member.id, wait=True)
async def unmute_user(user_id, wait=False, gulag_message=True, reason=None):
    """Unmute *user_id*, optionally first sleeping until the recorded mute end.

    When *wait* is True, sleeps until db.get_mute_end(user_id); if the end
    time changed while sleeping the mute was extended and this call aborts
    (the extending mute_user call owns the eventual unmute).  Restores the
    member role and any stashed sweat/og vanity roles in every guild the bot
    shares with the user, then marks the mute as over in the DB.
    """
    if wait:
        print('unmuting in...')
        unmute_time = await db.get_mute_end(user_id)
        unmute_in = unmute_time - time.time()
        print('unmute_in', unmute_in)
        await asyncio.sleep(unmute_in)
        if (await db.get_mute_end(user_id) != unmute_time):
            # Mute was extended while we slept; the newer task will unmute.
            return print('Mute seems to have been extended.')
    print('now unmuting')
    mute_data = await db.get_mute_data(user_id)
    for guild in client.guilds:
        member = guild.get_member(user_id)
        if not member: continue
        muted_role_id = get_role_id(guild.id, 'muted')
        muted_role = guild.get_role(muted_role_id)
        member_role_id = get_role_id(guild.id, 'member')
        member_role = guild.get_role(member_role_id)
        await member.add_roles(member_role, reason=reason)
        await member.remove_roles(muted_role, reason=reason)
        sweat_role_id = get_role_id(guild.id, 'sweat')
        sweat_role = guild.get_role(sweat_role_id)
        og_role_id = get_role_id(guild.id, 'og')
        og_role = guild.get_role(og_role_id)
        # Give back the vanity roles stashed when the mute started.
        if mute_data.get('sweat'):
            await member.add_roles(sweat_role)
        if mute_data.get('og'):
            await member.add_roles(og_role)
    # Record the mute as finished now.
    await db.set_mute_end(user_id, time.time())
    if gulag_message:
        gulag = client.get_channel(720073985412562975)
        await gulag.send(f'<@{user_id}> has left gulag.')
@client.event
async def on_raw_reaction_add(payload):
    """Police reactions on a handful of hard-coded poll/announcement
    messages in channel 720258155900305488."""
    # ignore reactions from mat
    # if payload.user_id == 224588823898619905:
    #     return
    if payload.message_id == 732551899374551171:
        # Poll that only accepts the "disagree" emoji: wipe anything else.
        if str(payload.emoji.name).lower() != 'disagree':
            message = await client.get_channel(720258155900305488).fetch_message(732551899374551171)
            print('removed reaction')
            await message.clear_reaction(payload.emoji)
    # Messages in this pair get every reaction cleared immediately.
    # NOTE(review): this early return makes the two branches below
    # unreachable -- payload.message_id is guaranteed to be in this set,
    # so it can never equal 741806331484438549 or 756691321917276223.
    # Dead code; confirm intended behavior before restructuring.
    if payload.message_id not in {732552573806051328, 732552579531407422}: return
    message = await client.get_channel(720258155900305488).fetch_message(payload.message_id)
    await message.clear_reaction(payload.emoji)
    if payload.message_id == 741806331484438549:
        if str(payload.emoji.name).lower() != 'disagree':
            message = await client.get_channel(720258155900305488).fetch_message(741806331484438549)
            await message.remove_reaction(payload.emoji, payload.member)
            print('removed reaction!')
    elif payload.message_id == 756691321917276223:  # Blurrzy art
        print(payload.emoji.name)
        if str(payload.emoji.name).lower() != 'agree':
            message = await client.get_channel(720258155900305488).fetch_message(756691321917276223)
            await message.remove_reaction(payload.emoji, payload.member)
            print('removed reaction!')
            await payload.member.send("Hey, you're a dum dum. If you disagree, please do `!gulag 15m` in <#718076311150788649>. Thanks!")
    # elif payload.message_id == : # react for role poll notifications
    #     get_role_id(payload.guild_id, 'pollnotifications')
    #     if
def api_get_members():
    """Snapshot the main guild: total member count plus comma-joined staff
    rosters keyed by role (owner, coowner, admin, mod, helper,
    party_planner)."""
    guild_id = 717904501692170260
    guild = client.get_guild(guild_id)
    total_member_count = guild.member_count
    # Role ids in priority order: a member is listed only under the first
    # role they hold.
    role_ids = [
        ('owner', 717906079572295750),
        ('coowner', 717906242026340373),
        ('admin', 740985921389723709),
        ('mod', get_role_id(guild_id, 'mod')),
        ('helper', get_role_id(guild_id, 'helper')),
        ('party_planner', 733695759425208401),
    ]
    roles = [(key, guild.get_role(role_id)) for key, role_id in role_ids]
    rosters = {key: [] for key, _ in roles}
    for member in guild.members:
        for key, role in roles:
            if role in member.roles:
                rosters[key].append(member.name)
                break
    return {
        'member_count': total_member_count,
        'roles': {key: ', '.join(names) for key, names in rosters.items()},
    }
# Auto-discover command modules: every .py file in bot/commands (except
# __init__.py) is imported and its `run` coroutine registered with betterbot
# under the module's declared name/aliases.  Optional module attributes
# (aliases, bot_channel, pad_none) fall back to sensible defaults.
command_modules = []
for module_filename in os.listdir('./bot/commands'):
    if module_filename == '__init__.py' or module_filename[-3:] != '.py':
        continue
    module = importlib.import_module('bot.commands.' + module_filename[:-3])
    command_modules.append(module)
    betterbot.command(
        module.name,
        aliases=getattr(module, 'aliases', []),
        bot_channel=getattr(module, 'bot_channel', True),
        pad_none=getattr(module, 'pad_none', True),
    )(module.run)
    print('Registered command from file', module_filename)
| 31.582524 | 155 | 0.745958 | 0 | 0 | 0 | 0 | 7,100 | 0.43585 | 12,894 | 0.791529 | 3,332 | 0.204543 |
52bf9e552405d6d0c21e5f6ccdaebea54d983579 | 2,089 | py | Python | management/commands/start_taskforce.py | mallipeddi/django-taskforce | dd7aa1eae508b15428dd0fe9c2be969a75038890 | [
"BSD-3-Clause"
] | 3 | 2015-11-05T06:07:22.000Z | 2021-11-08T11:21:51.000Z | management/commands/start_taskforce.py | mallipeddi/django-taskforce | dd7aa1eae508b15428dd0fe9c2be969a75038890 | [
"BSD-3-Clause"
] | null | null | null | management/commands/start_taskforce.py | mallipeddi/django-taskforce | dd7aa1eae508b15428dd0fe9c2be969a75038890 | [
"BSD-3-Clause"
] | null | null | null | import sys
from optparse import make_option
from django.core.management.base import BaseCommand
from django.conf import settings
import taskforce
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--verbose', action='store_true', dest='verbose',
help = 'Verbose mode for you control freaks'),
make_option('--foreground', action='store_true', dest='foreground',
help = 'Run the server in the foreground.'),
)
help = """Start taskforce server."""
args = "[thread-pool-size]"
def _log(self, msg, error=False):
if self._verbose or error:
print msg
def handle(self, *args, **options):
# handle command-line options
self._verbose = options.get('verbose', False)
self._foreground = options.get('foreground', False)
if len(args) == 0:
pool_size = 5
elif len(args) == 1:
pool_size = int(args[0])
else:
self._log("ERROR - Takes in exactly 1 optional arg. %d were supplied." % len(args), error=True)
sys.exit(1)
address, port = taskforce.utils.get_server_loc()
available_tasks = []
for app_name in settings.INSTALLED_APPS:
app_mod = __import__(app_name, {}, {}, ['tasks'])
if hasattr(app_mod, 'tasks'):
for k in app_mod.tasks.__dict__.values():
if isinstance(k, type) and issubclass(k, taskforce.BaseTask):
available_tasks.append(k)
self._log("Starting Taskforce server...")
if self._foreground:
from taskforce.http import runserver
runserver(available_tasks, pool_size, (address, port))
else:
from taskforce.http import TaskforceDaemon
TaskforceDaemon(
"/tmp/taskforce.pid",
stdout="/tmp/taskforce.out.log",
stderr="/tmp/taskforce.err.log",
).start(available_tasks, pool_size, (address,port))
| 36.017241 | 107 | 0.581618 | 1,939 | 0.928195 | 0 | 0 | 0 | 0 | 0 | 0 | 413 | 0.197702 |
52c00979320f1e4a0f34214d8d935b0833088713 | 521 | py | Python | profiles/migrations/0004_profile_smapply_user_data.py | umarmughal824/bootcamp-ecommerce | 681bcc788a66867b8f240790c0ed33680b73932b | [
"BSD-3-Clause"
] | 2 | 2018-06-20T19:37:03.000Z | 2021-01-06T09:51:40.000Z | profiles/migrations/0004_profile_smapply_user_data.py | mitodl/bootcamp-ecommerce | ba7d6aefe56c6481ae2a5afc84cdd644538b6d50 | [
"BSD-3-Clause"
] | 1,226 | 2017-02-23T14:52:28.000Z | 2022-03-29T13:19:54.000Z | profiles/migrations/0004_profile_smapply_user_data.py | umarmughal824/bootcamp-ecommerce | 681bcc788a66867b8f240790c0ed33680b73932b | [
"BSD-3-Clause"
] | 3 | 2017-03-20T03:51:27.000Z | 2021-03-19T15:54:31.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-09-13 18:42
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("profiles", "0003_profile_smapply_id")]
operations = [
migrations.AddField(
model_name="profile",
name="smapply_user_data",
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
)
]
| 26.05 | 88 | 0.677543 | 326 | 0.62572 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.259117 |
52c0834ff93f1ed8d51ad80ac5952cb23df17269 | 250 | py | Python | Lists/list_of_beggars.py | petel3/Softuni_education | 4fd80f8c6ce6c3d6a838edecdb091dda2ed1084c | [
"MIT"
] | 2 | 2022-03-05T13:17:12.000Z | 2022-03-05T13:17:16.000Z | Lists/list_of_beggars.py | petel3/Softuni_education | 4fd80f8c6ce6c3d6a838edecdb091dda2ed1084c | [
"MIT"
] | null | null | null | Lists/list_of_beggars.py | petel3/Softuni_education | 4fd80f8c6ce6c3d6a838edecdb091dda2ed1084c | [
"MIT"
# Distribute comma-separated values round-robin among `beggars` collectors:
# collector x receives items x, x+beggars, x+2*beggars, ...; print the total
# collected by each.  (Rewritten: the old version mutated list slots to
# convert to int one by one, and shadowed the stdlib `string` name.)
values = input().split(", ")
beggars = int(input())
beggars_list = [sum(int(item) for item in values[x::beggars]) for x in range(beggars)]
print(beggars_list)
52c2f493c965a91dab85655d544b65075ea9b225 | 1,978 | py | Python | python/locate_2D_array1.py | leewalter/coding | 2afd9dbfc1ecb94def35b953f4195a310d6953c9 | [
"Apache-2.0"
] | null | null | null | python/locate_2D_array1.py | leewalter/coding | 2afd9dbfc1ecb94def35b953f4195a310d6953c9 | [
"Apache-2.0"
] | null | null | null | python/locate_2D_array1.py | leewalter/coding | 2afd9dbfc1ecb94def35b953f4195a310d6953c9 | [
"Apache-2.0"
] | 1 | 2020-08-29T17:12:52.000Z | 2020-08-29T17:12:52.000Z | # python to locate 1 in a 2D array
#below save it into a dictionary
# python to locate 1 in a 2D array
def check_zero(array1):
    """Return {row_index: [column indices holding 0]} for the 2-D list
    *array1*, printing a header first."""
    print("array index zeros at:")
    return {
        row: [col for col, val in enumerate(values) if val == 0]
        for row, values in enumerate(array1)
    }
array1 = [
[1, 1, 0, 0],
[0, 0, 1, 1],
[0, 1, 0, 1]
]
array2 = [
[1, 1, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 0, 1, 0, 1]
]
print(check_zero(array1))
print(check_zero(array2))
'''
array index zeros at:
{0: [2, 3], 1: [0, 1], 2: [0, 2]}
array index zeros at:
{0: [2, 3, 4], 1: [0, 1, 4], 2: [0, 1, 3]}
'''
# below is a function
def check_zero(array1):
    """Print the zero-column indices of every row of *array1* and return
    the (row_index, zero_columns) pair of the last row."""
    print("array index zeros at:")
    for row_idx, row in enumerate(array1):
        zero_cols = [col for col, val in enumerate(row) if val == 0]
        print(row_idx, zero_cols)
    return (row_idx, zero_cols)
array1 = [
[1, 1, 0, 0],
[0, 0, 1, 1],
[0, 1, 0, 1]
]
array2 = [
[1, 1, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 0, 1, 0, 1]
]
check_zero(array1)
check_zero(array2)
'''
array index zeros at:
0 [2, 3]
1 [0, 1]
2 [0, 2]
array index zeros at:
0 [2, 3, 4]
1 [0, 1, 4]
2 [0, 1, 3]
'''
array1 = [
[1, 1, 0, 0],
[0, 0, 1, 1],
[0, 1, 0, 1]
]
for i in range(len(array1)):
index = [ k for k,v in enumerate(array1[i]) if v ==0 ]
print(i, index)
'''
outputs
0 [2, 3]
1 [0, 1]
2 [0, 2]
'''
''' very primitive below,
for i in range(0,3):
for j in range(0,4):
if (array1[i][j] == 1):
print(i,j)
'''
'''
# https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.nonzero.html
# not done yet
# import numpy
array2 = numpy.array([
[1, 1, 0, 0],
[0, 0, 1, 1],
[0, 0, 0, 0]])
print(numpy.nonzero(array2))
'''
# https://stackoverflow.com/questions/27175400/how-to-find-the-index-of-a-value-in-2d-array-in-python
'''
outputs from for i,j loop
D:\Go-workspace\walter\coding\python>python locate_zero.py
0 0
0 1
1 2
1 3
2 1
2 3
'''
| 14.87218 | 101 | 0.532356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,050 | 0.530839 |
52c38d1cc6e6ce5d847e7873d6a974fa56d65e99 | 163 | py | Python | tests/__init__.py | RonenTRA/faster-than-requests | 237a57cf2607e0694c87fea8e313461bf9a462e7 | [
"MIT"
] | 857 | 2018-11-18T17:55:01.000Z | 2022-03-31T23:39:10.000Z | tests/__init__.py | RonenTRA/faster-than-requests | 237a57cf2607e0694c87fea8e313461bf9a462e7 | [
"MIT"
] | 181 | 2018-12-08T18:31:05.000Z | 2022-03-29T01:40:02.000Z | tests/__init__.py | RonenTRA/faster-than-requests | 237a57cf2607e0694c87fea8e313461bf9a462e7 | [
"MIT"
] | 92 | 2018-11-22T03:53:31.000Z | 2022-03-21T10:54:24.000Z | # Allow tests/ directory to see faster_than_requests/ package on PYTHONPATH
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent))
| 32.6 | 75 | 0.815951 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.460123 |
52c49ba6e0f3c37a7b4d0399f526ea22dc7636df | 7,711 | py | Python | fib_optimizer/fib_optimizer.py | dbarrosop/sir_tools | 0c325eb8ea9667ecdc9da4f524ef2224b098fc81 | [
"Apache-2.0"
] | 9 | 2015-09-13T20:00:52.000Z | 2018-04-04T09:07:55.000Z | fib_optimizer/fib_optimizer.py | dbarrosop/sir_tools | 0c325eb8ea9667ecdc9da4f524ef2224b098fc81 | [
"Apache-2.0"
] | 2 | 2015-09-28T14:12:41.000Z | 2017-03-02T16:29:48.000Z | fib_optimizer/fib_optimizer.py | dbarrosop/sir_tools | 0c325eb8ea9667ecdc9da4f524ef2224b098fc81 | [
"Apache-2.0"
] | 5 | 2015-09-28T13:54:02.000Z | 2016-04-19T23:50:34.000Z | #!/usr/bin/env python
from pySIR.pySIR import pySIR
import argparse
import datetime
import json
import os
import shlex
import subprocess
import sys
import time
import logging
logger = logging.getLogger('fib_optimizer')
log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.DEBUG, format=log_format)
'''
def _split_tables(s):
lem = list()
lpm = list()
for p in s:
if p.split('/')[1] == '24':
lem.append(p)
else:
lpm.append(p)
return lem, lpm
def get_bgp_prefix_lists():
bgp_p = sir.get_bgp_prefixes(date=end_time).result
p = list()
for router, prefix_list in bgp_p.iteritems():
for prefix in prefix_list:
p.append(prefix)
return _split_tables(p)
def inc_exc_prefixes():
i_lem, i_lpm = _split_tables(conf['include_prefixes'])
e_lem, e_lpm = _split_tables(conf['exclude_prefixes'])
return i_lem, i_lpm, e_lem, e_lpm
def complete_prefix_list():
def _complete_pl(pl, bgp_pl, num):
if len(pl) < num:
num = num - len(pl)
for prefix in bgp_pl:
if prefix not in pl:
pl.append(prefix)
num -= 1
if num == 0:
break
return pl
else:
return pl
lem_pl = _complete_pl(lem_prefixes, bgp_lem, conf['max_lem_prefixes'])
lpm_pl = _complete_pl(lpm_prefixes, bgp_lpm, conf['max_lpm_prefixes'])
return lem_pl, lpm_pl
'''
def get_variables():
    """Fetch this app's configuration blob from SIR and return it as a dict.

    Reads the 'apps'/'fib_optimizer' variable and JSON-decodes its content.
    (The payload is now parsed once instead of twice.)
    """
    logger.debug('Getting variables from SIR')
    v = sir.get_variables_by_category_and_name('apps', 'fib_optimizer').result[0]
    config = json.loads(v['content'])
    logger.debug('Configuration: {}'.format(config))
    return config
def get_date_range():
    """Pick the flow-data window: the last conf['age'] available dates.

    Raises if the newest available data is too old, so prefix lists are
    never built from stale traffic data.
    """
    # These are dates for which we have flows. We want to "calculate" the range we want to use
    # to calculate the topN prefixes
    logger.debug('Getting available dates')
    dates = sir.get_available_dates().result
    if len(dates) < conf['age']:
        # Fewer samples than requested: start from the oldest we have.
        sd = dates[0]
    else:
        sd = dates[-conf['age']]
    ed = dates[-1]
    logger.debug("Date range: {} - {}".format(sd, ed))
    time_delta = datetime.datetime.now() - datetime.datetime.strptime(ed, '%Y-%m-%dT%H:%M:%S')
    # NOTE(review): `days > 2` actually tolerates up to ~72h, not the 48h
    # the error message claims -- confirm intended threshold.
    if time_delta.days > 2:
        msg = 'Data is more than 48 hours old: {}'.format(ed)
        logger.error(msg)
        raise Exception(msg)
    return sd, ed
def get_top_prefixes():
    """Query SIR for the busiest IPv4 prefixes over the chosen window.

    Returns (lem, lpm): exact-match candidates (masks listed in
    conf['lem_prefixes']) and longest-prefix-match candidates (every other
    mask, selected via exclude_net_masks).
    """
    logger.debug('Getting top prefixes')
    # limit_lem = int(conf['max_lem_prefixes']) - len(inc_lem) + len(exc_lem)
    limit_lem = int(conf['max_lem_prefixes'])
    lem = [p['key'] for p in sir.get_top_prefixes(
        start_time=start_time,
        end_time=end_time,
        limit_prefixes=limit_lem,
        net_masks=conf['lem_prefixes'],
        filter_proto=4,).result]
    # limit_lpm = int(conf['max_lpm_prefixes']) - len(inc_lpm) + len(exc_lpm)
    limit_lpm = int(conf['max_lpm_prefixes'])
    # NOTE(review): reuses conf['lem_prefixes'] with exclude_net_masks=1 to
    # select the complement set of masks -- presumably intentional; confirm.
    lpm = [p['key'] for p in sir.get_top_prefixes(
        start_time=start_time,
        end_time=end_time,
        limit_prefixes=limit_lpm,
        net_masks=conf['lem_prefixes'],
        filter_proto=4,
        exclude_net_masks=1,).result]
    return lem, lpm
def build_prefix_lists():
    """Write the lem/lpm prefix lists to conf['path'] in prefix-list syntax
    ("seq N permit PREFIX" per line)."""
    logger.debug('Storing prefix lists in disk')
    def _build_pl(name, prefixes):
        # `prefixes` maps sequence number -> "net/mask".
        pl = ''
        for s, p in prefixes.iteritems():  # NOTE: Python 2 dict API
            prefix, mask = p.split('/')
            # Skip host routes (/32) and the default route (/0).
            if mask == '32' or (prefix == '' and mask == '0'):
                continue
            pl += 'seq {} permit {}\n'.format(s, p)
        with open('{}/{}'.format(conf['path'], name), "w") as f:
            f.write(pl)
    _build_pl('fib_optimizer_lpm_v4', lpm_prefixes)
    _build_pl('fib_optimizer_lem_v4', lem_prefixes)
def install_prefix_lists():
    """Ask the switch (via FastCli in the default netns) to refresh both
    on-disk prefix lists, staggered by 30 seconds."""
    logger.debug('Installing the prefix-lists in the system')
    # NOTE(review): the .format(conf['path']) calls are no-ops -- the
    # command strings contain no {} placeholder.  Confirm before removing.
    cli_lpm = shlex.split('printf "refresh ip prefix-list fib_optimizer_lpm_v4"'.format(
        conf['path']))
    cli_lem = shlex.split('printf "refresh ip prefix-list fib_optimizer_lem_v4"'.format(
        conf['path']))
    cli = shlex.split('sudo ip netns exec default FastCli -p 15 -A')
    # Pipe the printf output into FastCli's stdin for each refresh.
    p_lpm = subprocess.Popen(cli_lpm, stdout=subprocess.PIPE)
    p_cli = subprocess.Popen(cli, stdin=p_lpm.stdout, stdout=subprocess.PIPE)
    time.sleep(30)
    p_lem = subprocess.Popen(cli_lem, stdout=subprocess.PIPE)
    p_cli = subprocess.Popen(cli, stdin=p_lem.stdout, stdout=subprocess.PIPE)
def merge_pl():
    """Merge the freshly computed prefix lists with the ones already on disk.

    Sequence numbers of prefixes that survive are kept stable so the device
    only sees incremental changes; new prefixes take the lowest free sequence
    numbers.  Aborts if the new list shrank by more than 25%, which would
    suggest bad/missing flow data rather than a real traffic shift.
    Returns (lem, lpm) dicts mapping sequence number -> prefix.
    """
    logger.debug('Merging new prefix-list with existing ones')
    def _merge_pl(pl, pl_file, max_p):
        if os.path.isfile(pl_file):
            logger.debug('Prefix list {} already exists. Merging'.format(pl_file))
            with open(pl_file, 'r') as f:
                # Parse "seq N permit PREFIX" lines into {prefix: seq}.
                original_pl = dict()
                for line in f.readlines():
                    _, seq, permit, prefix = line.split(' ')
                    original_pl[prefix.rstrip()] = int(seq)
            # Sanity check: refuse suspiciously large shrinkage.
            if len(original_pl) * 0.75 > len(pl):
                msg = 'New prefix list ({}) is more than 25%% smaller than the new one ({})'.format(len(original_pl),
                                                                                                    len(pl))
                logger.error(msg)
                raise Exception(msg)
            new_prefixes = set(pl) - set(original_pl.keys())
            existing_prefixes = set(pl) & set(original_pl.keys())
            # Keep surviving prefixes at their old sequence numbers.
            new_pl = dict()
            for p in existing_prefixes:
                new_pl[original_pl[p]] = p
            # Slot new prefixes into unused sequence numbers, lowest first.
            empty_pos = sorted(list(set(xrange(1, int(max_p) + 1)) - set(original_pl.values())))  # NOTE: Python 2 xrange
            for p in new_prefixes:
                new_pl[empty_pos.pop(0)] = p
            return new_pl
        else:
            logger.debug('Prefix list {} does not exist'.format(pl_file))
            # First run: number the prefixes sequentially from 1.
            i = 1
            new = dict()
            for p in pl:
                new[i] = p
                i += 1
            return new
    lem = _merge_pl(lem_prefixes, '{}/fib_optimizer_lem_v4'.format(conf['path']), conf['max_lem_prefixes'])
    lpm = _merge_pl(lpm_prefixes, '{}/fib_optimizer_lpm_v4'.format(conf['path']), conf['max_lpm_prefixes'])
    return lem, lpm
def purge_old_data():
    """Delete BGP and flow data older than conf['purge_older_than'] hours."""
    logger.debug('Purging old data')
    date = datetime.datetime.now() - datetime.timedelta(hours=conf['purge_older_than'])
    date_text = date.strftime('%Y-%m-%dT%H:%M:%S')
    logger.debug('Deleting BGP data older than: {}'.format(date_text))
    sir.purge_bgp(older_than=date_text)
    logger.debug('Deleting flow data older than: {}'.format(date_text))
    sir.purge_flows(older_than=date_text)
if __name__ == "__main__":
    # Entry point: <script> <base_url>.  NOTE: Python 2 print statements.
    if len(sys.argv) < 2:
        print 'You have to specify the base URL. For example: {} http://127.0.0.1:5000'.format(sys.argv[0])
        sys.exit(0)
    elif sys.argv[1] == '-h' or sys.argv[1] == '--help':
        print 'You have to specify the base URL. For example: {} http://127.0.0.1:5000'.format(sys.argv[0])
        sys.exit(1)
    logger.info('Starting fib_optimizer')
    # Module-level names below (sir, conf, start_time, ...) are read by the
    # helper functions above as globals.
    sir = pySIR(sys.argv[1], verify_ssl=False)
    # We get the configuration for our application
    conf = get_variables()
    # The time range we want to process
    start_time, end_time = get_date_range()
    # We get the Top prefixes. Included and excluded prefixes are merged as well
    lem_prefixes, lpm_prefixes = get_top_prefixes()
    # If the prefix list exists already we merge the data
    lem_prefixes, lpm_prefixes = merge_pl()
    # We build the files with the prefix lists
    build_prefix_lists()
    install_prefix_lists()
    purge_old_data()
    logger.info('End fib_optimizer')
52c4acd2d2a5426d20652531bc70630c947940a9 | 247 | py | Python | Py_Print_Function_With_Loop.py | ZeroHero77/PythonChallenges | 516f02d6dcc65b47563642de9707bb32ac392f21 | [
"MIT"
] | null | null | null | Py_Print_Function_With_Loop.py | ZeroHero77/PythonChallenges | 516f02d6dcc65b47563642de9707bb32ac392f21 | [
"MIT"
] | null | null | null | Py_Print_Function_With_Loop.py | ZeroHero77/PythonChallenges | 516f02d6dcc65b47563642de9707bb32ac392f21 | [
"MIT"
] | null | null | null | import re
n = int(input())
def loopai(n):
x = [i+1 for i in range(n)]
z = x
print(re.sub(' ','',re.sub('\,','',re.sub('\]','',re.sub('\[','',str(z))))))
def main():
loopai(n)
if __name__ == '__main__':
main()
| 16.466667 | 81 | 0.441296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.133603 |
52c51b70ee280d26ac396fd1baa601910de8efa3 | 343 | py | Python | good_spot/common/views.py | jasmine92122/NightClubBackend | 7f59129b78baaba0e0c25de2b493033b858f1b00 | [
"MIT"
] | null | null | null | good_spot/common/views.py | jasmine92122/NightClubBackend | 7f59129b78baaba0e0c25de2b493033b858f1b00 | [
"MIT"
] | 5 | 2020-02-12T03:13:11.000Z | 2022-01-13T01:41:14.000Z | good_spot/common/views.py | jasmine92122/NightClubBackend | 7f59129b78baaba0e0c25de2b493033b858f1b00 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.conf import settings
from django.http.response import HttpResponse
from django.views.generic import View
class HealthCheckView(View):
def get(self, request, *args, **kwargs):
return HttpResponse(settings.HEALTH_CHECK_BODY, status=200)
| 28.583333 | 67 | 0.769679 | 141 | 0.411079 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.067055 |
52c72675260b9c9944a7f437f0e277d4c24506aa | 691 | py | Python | year2020/day21/test_reader.py | Sebaestschjin/advent-of-code | 5fd708efa355483fc0ccddf7548b62682662bcc8 | [
"MIT"
] | null | null | null | year2020/day21/test_reader.py | Sebaestschjin/advent-of-code | 5fd708efa355483fc0ccddf7548b62682662bcc8 | [
"MIT"
] | null | null | null | year2020/day21/test_reader.py | Sebaestschjin/advent-of-code | 5fd708efa355483fc0ccddf7548b62682662bcc8 | [
"MIT"
] | null | null | null | from assertpy import assert_that
import year2020.day21.reader as reader
def test_example():
lines = ['mxmxvkd kfcds sqjhc nhms (contains dairy, fish)\n',
'trh fvjkl sbzzf mxmxvkd (contains dairy)\n',
'sqjhc fvjkl (contains soy)\n',
'sqjhc mxmxvkd sbzzf (contains fish)\n']
result = reader.read_lines(lines)
assert_that(result).is_equal_to([(['mxmxvkd', 'kfcds', 'sqjhc', 'nhms'], ['dairy', 'fish']),
(['trh', 'fvjkl', 'sbzzf', 'mxmxvkd'], ['dairy']),
(['sqjhc', 'fvjkl'], ['soy']),
(['sqjhc', 'mxmxvkd', 'sbzzf'], ['fish'])])
| 43.1875 | 96 | 0.507959 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 289 | 0.418234 |
52c7bf2510e8384dd01a5738c1a369e2f22577eb | 4,109 | py | Python | realtime_hand_3d/segmentation/utils/data_utils.py | NeelayS/realtime_hand | 219c772b9b7df60c390edac7da23f9cdddebca4d | [
"MIT"
] | null | null | null | realtime_hand_3d/segmentation/utils/data_utils.py | NeelayS/realtime_hand | 219c772b9b7df60c390edac7da23f9cdddebca4d | [
"MIT"
] | null | null | null | realtime_hand_3d/segmentation/utils/data_utils.py | NeelayS/realtime_hand | 219c772b9b7df60c390edac7da23f9cdddebca4d | [
"MIT"
] | null | null | null | import cv2 as cv
import numpy as np
import os
import shutil
import torch
from torchvision import io
def resize_image(
    image, expected_size, pad_value, ret_params=True, mode=cv.INTER_LINEAR
):
    """
    image (ndarray) with either shape of [H,W,3] for RGB or [H,W] for grayscale.
    Padding is added so that the content of image is in the center.

    The longer side is resized to `expected_size`, the shorter side scaled to
    preserve aspect ratio, then the result is padded with `pad_value` into a
    square expected_size x expected_size image.  When ret_params is True,
    returns (image, pad_up, pad_left, h_new, w_new) so predictions can be
    mapped back to original coordinates; otherwise returns just the image.
    """
    h, w = image.shape[:2]
    if w > h:
        # Landscape: width becomes expected_size; pad top/bottom.
        w_new = int(expected_size)
        h_new = int(h * w_new / w)
        image = cv.resize(image, (w_new, h_new), interpolation=mode)
        pad_up = (w_new - h_new) // 2
        pad_down = w_new - h_new - pad_up
        # Pad only the height axis; color images keep channels untouched.
        if len(image.shape) == 3:
            pad_width = ((pad_up, pad_down), (0, 0), (0, 0))
            constant_values = ((pad_value, pad_value), (0, 0), (0, 0))
        elif len(image.shape) == 2:
            pad_width = ((pad_up, pad_down), (0, 0))
            constant_values = ((pad_value, pad_value), (0, 0))
        image = np.pad(
            image, pad_width=pad_width, mode="constant", constant_values=constant_values
        )
        if ret_params:
            return image, pad_up, 0, h_new, w_new
        else:
            return image
    elif w < h:
        # Portrait: height becomes expected_size; pad left/right.
        h_new = int(expected_size)
        w_new = int(w * h_new / h)
        image = cv.resize(image, (w_new, h_new), interpolation=mode)
        pad_left = (h_new - w_new) // 2
        pad_right = h_new - w_new - pad_left
        if len(image.shape) == 3:
            pad_width = ((0, 0), (pad_left, pad_right), (0, 0))
            constant_values = ((0, 0), (pad_value, pad_value), (0, 0))
        elif len(image.shape) == 2:
            pad_width = ((0, 0), (pad_left, pad_right))
            constant_values = ((0, 0), (pad_value, pad_value))
        image = np.pad(
            image, pad_width=pad_width, mode="constant", constant_values=constant_values
        )
        if ret_params:
            return image, 0, pad_left, h_new, w_new
        else:
            return image
    else:
        # Already square: plain resize, no padding needed.
        image = cv.resize(image, (expected_size, expected_size), interpolation=mode)
        if ret_params:
            return image, 0, 0, expected_size, expected_size
        else:
            return image
def preprocessing(image, expected_size=224, pad_value=0):
    """
    Pre-processing steps to use pre-trained model on images

    Letterbox-resizes to expected_size, normalizes with ImageNet statistics,
    and returns (X, pad_up, pad_left, h_new, w_new) where X is a 1xCxHxW
    float32 torch tensor and the remaining values locate the resized content
    inside the padded square.
    """
    # ImageNet normalization constants, broadcast over H and W.
    # NOTE(review): assumes the input is RGB-ordered -- confirm at callers.
    imgnet_mean = np.array([0.485, 0.456, 0.406])[None, None, :]
    imgnet_std = np.array([0.229, 0.224, 0.225])[None, None, :]
    image, pad_up, pad_left, h_new, w_new = resize_image(
        image, expected_size, pad_value, ret_params=True
    )
    # Scale to [0, 1] then standardize per channel.
    image = image.astype(np.float32) / 255.0
    image = (image - imgnet_mean) / imgnet_std
    # HWC -> CHW, then add the batch dimension.
    X = np.transpose(image, axes=(2, 0, 1))
    X = np.expand_dims(X, axis=0)
    X = torch.tensor(X, dtype=torch.float32)
    return X, pad_up, pad_left, h_new, w_new
def gen_e2h_eval_masks(root_dir):
    """Flatten eval_* sequence directories into imgs/<seq> and masks/<seq>,
    combining the per-hand masks into a single mask image per frame
    (left-hand pixels -> 127, right-hand pixels -> 255, background 0).

    NOTE(review): seq_dir names from os.listdir(root_dir) are used as
    relative paths below, so this only works when root_dir is the current
    working directory -- confirm.
    """
    for seq_dir in os.listdir(root_dir):
        if seq_dir[:4] != "eval":
            continue
        print("Processing directory", seq_dir)
        # Directory names look like "eval_seqNN": keep the NN part.
        seq = seq_dir.split("_")[1][3:]
        os.makedirs(os.path.join("imgs", seq), exist_ok=True)
        os.makedirs(os.path.join("masks", seq), exist_ok=True)
        for img_path in os.listdir(seq_dir):
            # Skip the per-hand masks and segmentation outputs; only base
            # frames drive the loop.
            if (
                img_path.split(".")[0][-4:] == "_e_l"
                or img_path.split(".")[0][-4:] == "_e_r"
                or img_path.split(".")[0][-4:] == "_seg"
            ):
                continue
            # NOTE(review): `img` is read but never used -- leftover?
            img = io.read_image(os.path.join(seq_dir, img_path))
            shutil.copy(
                os.path.join(seq_dir, img_path), os.path.join("imgs", seq, img_path)
            )
            l_mask = io.read_image(
                os.path.join(seq_dir, img_path.replace(".png", "_e_l.png"))
            )
            r_mask = io.read_image(
                os.path.join(seq_dir, img_path.replace(".png", "_e_r.png"))
            )
            # Right hand wins where both masks overlap.
            mask = torch.zeros_like(l_mask)
            mask[l_mask == 255] = 127
            mask[r_mask == 255] = 255
            io.write_png(mask, os.path.join("masks", seq, img_path))
52c9891e030597761f62c3109bb03c2ff213dd4d | 281 | py | Python | intro/hello.py | ANU-WALD/acawsi | f6f340b59de8814173ab7dc8195c0fced53e652e | [
"MIT"
] | 1 | 2020-02-03T03:05:44.000Z | 2020-02-03T03:05:44.000Z | intro/hello.py | ANU-WALD/acawsi | f6f340b59de8814173ab7dc8195c0fced53e652e | [
"MIT"
] | null | null | null | intro/hello.py | ANU-WALD/acawsi | f6f340b59de8814173ab7dc8195c0fced53e652e | [
"MIT"
] | null | null | null |
# A few basic values about our heroine, printed as a short bio.
name = "Sharalanda"
age = 10
hobbies = ["draw", "swim", "dance"]
address = {"city": "Sebastopol", "Post Code": 1234, "country": "Enchantia"}
print(f"My name is {name}")
print(f"I am {age} years old")
print(f"My favourite hobbie is {hobbies[0]}")
print(f"I live in {address['city']}")
| 25.545455 | 75 | 0.637011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 150 | 0.533808 |
52ca2cad175cb01038bfad01648331f9f620e47e | 537 | py | Python | python/array/1550_three_consecutive_odds.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | 6 | 2019-07-15T13:23:57.000Z | 2020-01-22T03:12:01.000Z | python/array/1550_three_consecutive_odds.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | null | null | null | python/array/1550_three_consecutive_odds.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | 1 | 2019-07-24T02:15:31.000Z | 2019-07-24T02:15:31.000Z | class Solution(object):
def threeConsecutiveOdds(self, arr):
"""
:type arr: List[int]
:rtype: bool
"""
odds = 0
for a in arr:
if a % 2 == 1:
odds += 1
if odds >= 3:
return True
else:
odds = 0
return False
def test_three_consecutive_odds():
    """Exercise Solution.threeConsecutiveOdds on both outcomes."""
    s = Solution()
    # No run of three odd values.
    assert s.threeConsecutiveOdds([2, 6, 4, 1]) is False
    # 5, 7, 23 are three consecutive odds; pin the exact boolean result
    # (the original assert only checked truthiness, inconsistent with the
    # ``is False`` check above).
    assert s.threeConsecutiveOdds([1, 2, 34, 3, 4, 5, 7, 23, 12]) is True
| 23.347826 | 65 | 0.463687 | 356 | 0.662942 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.121043 |
52cbf168ddc176a88ff38b6b38497de610416b77 | 20,279 | py | Python | src/third_party/wiredtiger/tools/wtperf_stats/3rdparty/nvd3/NVD3Chart.py | rueckstiess/mongo | 2a4111960fee25453ed0974ee9eae95ec25bd556 | [
"Apache-2.0"
] | null | null | null | src/third_party/wiredtiger/tools/wtperf_stats/3rdparty/nvd3/NVD3Chart.py | rueckstiess/mongo | 2a4111960fee25453ed0974ee9eae95ec25bd556 | [
"Apache-2.0"
] | null | null | null | src/third_party/wiredtiger/tools/wtperf_stats/3rdparty/nvd3/NVD3Chart.py | rueckstiess/mongo | 2a4111960fee25453ed0974ee9eae95ec25bd556 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Python-nvd3 is a Python wrapper for NVD3 graph library.
NVD3 is an attempt to build re-usable charts and chart components
for d3.js without taking away the power that d3.js gives you.
Project location : https://github.com/areski/python-nvd3
"""
from optparse import OptionParser
from string import Template
import json
# string.Template body shared by both render modes: the chart's <div>
# container followed by the generated <script> block.  $container and
# $jschart are substituted later by NVD3Chart.buildcontent()/buildhtml().
template_content_nvd3 = """
$container
$jschart
"""
# Full standalone HTML page wrapping the same content template.  The %s is
# filled in immediately at import time; $header/$container/$jschart stay as
# string.Template placeholders to be substituted at render time.
template_page_nvd3 = """
<!DOCTYPE html>
<html lang="en">
<head>
$header
</head>
<body>
%s
</body>
""" % template_content_nvd3
def stab(tab=1):
    """Return *tab* levels of indentation, four spaces per level."""
    return " " * (4 * tab)
class NVD3Chart:
    """
    NVD3Chart Base class
    **Attributes**:
        * ``axislist`` - All X, Y axis list
        * ``charttooltip_dateformat`` - date format for tooltip if x-axis is in date format
        * ``charttooltip`` - Custom tooltip string
        * ``color_category`` - Define color category (eg. category10, category20, category20c)
        * ``color_list`` - used by pieChart (eg. ['red', 'blue', 'orange'])
        * ``container`` - Place for graph
        * ``containerheader`` - Header for javascript code
        * ``count`` - chart count
        * ``custom_tooltip_flag`` - False / True
        * ``d3_select_extra`` -
        * ``date_flag`` - x-axis contain date format or not
        * ``dateformat`` - see https://github.com/mbostock/d3/wiki/Time-Formatting
        * ``header_css`` - False / True
        * ``header_js`` - Custom tooltip string
        * ``height`` - Set graph height
        * ``htmlcontent`` - Contain the htmloutput
        * ``htmlheader`` - Contain the html header
        * ``jschart`` - Javascript code as string
        * ``margin_bottom`` - set the bottom margin
        * ``margin_left`` - set the left margin
        * ``margin_right`` - set the right margin
        * ``margin_top`` - set the top margin
        * ``model`` - set the model (ex. pieChart, LineWithFocusChart, MultiBarChart)
        * ``resize`` - False / True
        * ``series`` - Series are list of data that will be plotted
        * ``stacked`` - False / True
        * ``style`` - Special style
        * ``template_page_nvd3`` - template variable
        * ``width`` - Set graph width
        * ``x_axis_date`` - False / True
        * ``show_legend`` - False / True
        * ``show_labels`` - False / True
        * ``assets_directory`` directory holding the assets (./bower_components/)
    """
    # Class-level defaults.  NOTE(review): ``series`` and ``axislist`` are
    # mutable class attributes, but __init__ rebinds both to fresh objects
    # per instance, so instances do not share them in practice.
    count = 0
    dateformat = '%x'
    series = []
    axislist = {}
    style = ''
    htmlcontent = ''
    htmlheader = ''
    height = None
    width = None
    margin_bottom = None
    margin_left = None
    margin_right = None
    margin_top = None
    model = ''
    d3_select_extra = ''
    x_axis_date = False
    resize = False
    stacked = False
    template_page_nvd3 = None
    container = None
    containerheader = ''
    jschart = None
    custom_tooltip_flag = False
    date_flag = False
    charttooltip = ''
    tooltip_condition_string = ''
    color_category = 'category10'  # category10, category20, category20c
    color_list = []  # for pie chart
    tag_script_js = True
    charttooltip_dateformat = None
    x_axis_format = ''
    show_legend = True
    show_labels = True
    assets_directory = './bower_components/'
    def __init__(self, **kwargs):
        """
        Constructor

        All options are passed as keyword arguments; unknown keys are
        silently ignored (each value is read with ``kwargs.get``).
        """
        #set the model
        self.model = self.__class__.__name__
        #Init Data
        self.series = []
        self.axislist = {}
        self.template_page_nvd3 = Template(template_page_nvd3)
        self.template_content_nvd3 = Template(template_content_nvd3)
        self.charttooltip_dateformat = '%d %b %Y'
        self.name = kwargs.get('name', self.model)
        self.jquery_on_ready = kwargs.get('jquery_on_ready', False)
        self.color_category = kwargs.get('color_category', None)
        self.color_list = kwargs.get('color_list', None)
        self.margin_bottom = kwargs.get('margin_bottom', 20)
        self.margin_left = kwargs.get('margin_left', 60)
        self.margin_right = kwargs.get('margin_right', 60)
        self.margin_top = kwargs.get('margin_top', 30)
        self.stacked = kwargs.get('stacked', False)
        self.resize = kwargs.get('resize', False)
        self.show_legend = kwargs.get('show_legend', True)
        self.show_labels = kwargs.get('show_labels', True)
        self.tag_script_js = kwargs.get('tag_script_js', True)
        self.chart_attr = kwargs.get("chart_attr", {})
        self.assets_directory = kwargs.get('assets_directory', './bower_components/')
        #CDN http://cdnjs.com/libraries/nvd3/ needs to make sure it's up to date
        self.header_css = [
            '<link href="%s" rel="stylesheet">\n' % h for h in
            (
                self.assets_directory + 'nvd3/src/nv.d3.css',
            )
        ]
        self.header_js = [
            '<script src="%s"></script>\n' % h for h in
            (
                self.assets_directory + 'd3/d3.min.js',
                self.assets_directory + 'nvd3/nv.d3.min.js'
            )
        ]
    def add_serie(self, y, x, name=None, extra={}, **kwargs):
        """
        add serie - Series are list of data that will be plotted
        y {1, 2, 3, 4, 5} / x {1, 2, 3, 4, 5}
        **Attributes**:
            * ``name`` - set Serie name
            * ``x`` - x-axis data
            * ``y`` - y-axis data
        kwargs:
            * ``shape`` - for scatterChart, you can set different shapes (circle, triangle etc...)
            * ``size`` - for scatterChart, you can set size of different shapes
            * ``type`` - for multiChart, type should be bar
            * ``bar`` - to display bars in Chart
            * ``color_list`` - define list of colors which will be used by pieChart
            * ``color`` - set axis color
            * ``disabled`` -
        extra:
            * ``tooltip`` - set tooltip flag
            * ``date_format`` - set date_format for tooltip if x-axis is in date format
        """
        # NOTE(review): ``extra`` has a mutable default; it is only read in
        # this method, so the shared default is harmless as written.
        if not name:
            name = "Serie %d" % (len(self.series) + 1)
        # For scatterChart shape & size fields are added in serie
        if 'shape' in kwargs or 'size' in kwargs:
            csize = kwargs.get('size', 1)
            cshape = kwargs.get('shape', 'circle')
            serie = [{
                'x': x[i],
                'y': y,
                'shape': cshape,
                'size': csize[i] if isinstance(csize, list) else csize
            } for i, y in enumerate(y)]
        else:
            # Each model expects a slightly different per-point layout.
            if self.model == 'pieChart':
                serie = [{'label': x[i], 'value': y} for i, y in enumerate(y)]
            elif self.model == 'linePlusBarWithFocusChart':
                serie = [[x[i], y] for i, y in enumerate(y)]
            else:
                serie = [{'x': x[i], 'y': y} for i, y in enumerate(y)]
        data_keyvalue = {'values': serie, 'key': name}
        #multiChart
        #Histogram type='bar' for the series
        if 'type' in kwargs and kwargs['type']:
            data_keyvalue['type'] = kwargs['type']
        if self.model == 'pieChart':
            if 'color_list' in extra and extra['color_list']:
                self.color_list = extra['color_list']
        #Define on which Y axis the serie is related
        #a chart can have 2 Y axis, left and right, by default only one Y Axis is used
        if 'yaxis' in kwargs and kwargs['yaxis']:
            data_keyvalue['yAxis'] = kwargs['yaxis']
        else:
            if self.model != 'pieChart' and self.model != 'linePlusBarWithFocusChart':
                data_keyvalue['yAxis'] = '1'
        if 'bar' in kwargs and kwargs['bar']:
            data_keyvalue['bar'] = 'true'
        if 'disabled' in kwargs and kwargs['disabled']:
            data_keyvalue['disabled'] = 'true'
        if 'color' in extra and extra['color']:
            data_keyvalue['color'] = extra['color']
        if extra.get('date_format'):
            self.charttooltip_dateformat = extra['date_format']
        # Build the per-serie JavaScript tooltip snippet; the strings are
        # accumulated in tooltip_condition_string and later embedded by
        # build_custom_tooltip().
        if extra.get('tooltip'):
            self.custom_tooltip_flag = True
            if self.model != 'pieChart':
                _start = extra['tooltip']['y_start']
                _end = extra['tooltip']['y_end']
                _start = ("'" + str(_start) + "' + ") if _start else ''
                _end = (" + '" + str(_end) + "'") if _end else ''
                if self.model == 'linePlusBarChart' or self.model == 'linePlusBarWithFocusChart':
                    self.tooltip_condition_string += stab(3) + "if(key.indexOf('" + name + "') > -1 ){\n" +\
                        stab(4) + "var y = " + _start + " String(graph.point.y) " + _end + ";\n" +\
                        stab(3) + "}\n"
                elif self.model == 'cumulativeLineChart':
                    self.tooltip_condition_string += stab(3) + "if(key == '" + name + "'){\n" +\
                        stab(4) + "var y = " + _start + " String(e) " + _end + ";\n" +\
                        stab(3) + "}\n"
                else:
                    self.tooltip_condition_string += stab(3) + "if(key == '" + name + "'){\n" +\
                        stab(4) + "var y = " + _start + " String(graph.point.y) " + _end + ";\n" +\
                        stab(3) + "}\n"
            if self.model == 'pieChart':
                _start = extra['tooltip']['y_start']
                _end = extra['tooltip']['y_end']
                _start = ("'" + str(_start) + "' + ") if _start else ''
                _end = (" + '" + str(_end) + "'") if _end else ''
                self.tooltip_condition_string += \
                    "var y = " + _start + " String(y) " + _end + ";\n"
        self.series.append(data_keyvalue)
    def set_graph_height(self, height):
        """Set Graph height"""
        self.height = str(height)
    def set_graph_width(self, width):
        """Set Graph width"""
        self.width = str(width)
    def set_containerheader(self, containerheader):
        """Set containerheader"""
        self.containerheader = containerheader
    def set_date_flag(self, date_flag=False):
        """Set date flag"""
        self.date_flag = date_flag
    def set_custom_tooltip_flag(self, custom_tooltip_flag):
        """Set custom_tooltip_flag & date_flag"""
        self.custom_tooltip_flag = custom_tooltip_flag
    def __str__(self):
        """Render the full HTML page and return it (see buildhtml)."""
        self.buildhtml()
        return self.htmlcontent
    def buildcontent(self):
        """Build HTML content only, no header or body tags. To be useful this
        will usually require the attribute `jquery_on_ready` to be set which
        will wrap the js in $(function(){<regular_js>};)
        """
        self.buildcontainer()
        self.buildjschart()
        self.htmlcontent = self.template_content_nvd3.substitute(container=self.container,
                                                                 jschart=self.jschart)
    def buildhtml(self):
        """Build the HTML page
        Create the htmlheader with css / js
        Create html page
        Add Js code for nvd3
        """
        self.buildhtmlheader()
        self.buildcontainer()
        self.buildjschart()
        self.htmlcontent = self.template_page_nvd3.substitute(header=self.htmlheader,
                                                              container=self.container,
                                                              jschart=self.jschart)
    def buildhtmlheader(self):
        """generate HTML header content (the <link>/<script> asset tags)"""
        self.htmlheader = ''
        for css in self.header_css:
            self.htmlheader += css
        for js in self.header_js:
            self.htmlheader += js
    def buildcontainer(self):
        """generate HTML div holding the chart's <svg> element"""
        self.container = self.containerheader
        #Create SVG div with style
        # A trailing '%' means a relative size; anything else is pixels.
        if self.width:
            if self.width[-1] != '%':
                self.style += 'width:%spx;' % self.width
            else:
                self.style += 'width:%s;' % self.width
        if self.height:
            if self.height[-1] != '%':
                self.style += 'height:%spx;' % self.height
            else:
                self.style += 'height:%s;' % self.height
        if self.style:
            self.style = 'style="%s"' % self.style
        self.container += '<div id="%s"><svg %s></svg></div>\n' % (self.name, self.style)
    def build_custom_tooltip(self):
        """generate custom tooltip JS for the chart, embedding the snippets
        accumulated in tooltip_condition_string by add_serie()"""
        if self.custom_tooltip_flag:
            if not self.date_flag:
                if self.model == 'pieChart':
                    self.charttooltip = stab(2) + "chart.tooltipContent(function(key, y, e, graph) {\n" + \
                        stab(3) + "var x = String(key);\n" +\
                        stab(3) + self.tooltip_condition_string +\
                        stab(3) + "tooltip_str = '<center><b>'+x+'</b></center>' + y;\n" +\
                        stab(3) + "return tooltip_str;\n" + \
                        stab(2) + "});\n"
                else:
                    self.charttooltip = stab(2) + "chart.tooltipContent(function(key, y, e, graph) {\n" + \
                        stab(3) + "var x = String(graph.point.x);\n" +\
                        stab(3) + "var y = String(graph.point.y);\n" +\
                        self.tooltip_condition_string +\
                        stab(3) + "tooltip_str = '<center><b>'+key+'</b></center>' + y + ' at ' + x;\n" +\
                        stab(3) + "return tooltip_str;\n" + \
                        stab(2) + "});\n"
            else:
                # Date x-axis: format the epoch-millisecond x value in JS.
                self.charttooltip = stab(2) + "chart.tooltipContent(function(key, y, e, graph) {\n" + \
                    stab(3) + "var x = d3.time.format('%s')(new Date(parseInt(graph.point.x)));\n" \
                    % self.charttooltip_dateformat +\
                    stab(3) + "var y = String(graph.point.y);\n" +\
                    self.tooltip_condition_string +\
                    stab(3) + "tooltip_str = '<center><b>'+key+'</b></center>' + y + ' on ' + x;\n" +\
                    stab(3) + "return tooltip_str;\n" + \
                    stab(2) + "});\n"
    def buildjschart(self):
        """generate the javascript code for the chart into self.jschart"""
        self.jschart = ''
        if self.tag_script_js:
            self.jschart += '\n<script>\n'
        self.jschart += stab()
        if self.jquery_on_ready:
            self.jschart += '$(function(){'
        self.jschart += 'nv.addGraph(function() {\n'
        self.jschart += stab(2) + 'var chart = nv.models.%s();\n' % self.model
        if self.model != 'pieChart' and not self.color_list:
            if self.color_category:
                self.jschart += stab(2) + 'chart.color(d3.scale.%s().range());\n' % self.color_category
        if self.stacked:
            self.jschart += stab(2) + "chart.stacked(true);"
        self.jschart += stab(2) + \
            'chart.margin({top: %s, right: %s, bottom: %s, left: %s})\n' % \
            (self.margin_top, self.margin_right, \
            self.margin_bottom, self.margin_left)
        """
        We want now to loop through all the defined Axis and add:
        chart.y2Axis
            .tickFormat(function(d) { return '$' + d3.format(',.2f')(d) });
        """
        if self.model != 'pieChart':
            for axis_name, a in list(self.axislist.items()):
                self.jschart += stab(2) + "chart.%s\n" % axis_name
                for attr, value in list(a.items()):
                    self.jschart += stab(3) + ".%s(%s);\n" % (attr, value)
        if self.width:
            self.d3_select_extra += ".attr('width', %s)\n" % self.width
        if self.height:
            self.d3_select_extra += ".attr('height', %s)\n" % self.height
        if self.model == 'pieChart':
            datum = "data_%s[0].values" % self.name
        else:
            datum = "data_%s" % self.name
        # add custom tooltip string in jschart
        # default condition (if build_custom_tooltip is not called explicitly with date_flag=True)
        if self.tooltip_condition_string == '':
            self.tooltip_condition_string = 'var y = String(graph.point.y);\n'
        self.build_custom_tooltip()
        self.jschart += self.charttooltip
        # the shape attribute in kwargs is not applied when
        # not allowing other shapes to be rendered
        if self.model == 'scatterChart':
            self.jschart += 'chart.scatter.onlyCircles(false);'
        if self.model != 'discreteBarChart':
            if self.show_legend:
                self.jschart += stab(2) + "chart.showLegend(true);\n"
            else:
                self.jschart += stab(2) + "chart.showLegend(false);\n"
        #showLabels only supported in pieChart
        if self.model == 'pieChart':
            if self.show_labels:
                self.jschart += stab(2) + "chart.showLabels(true);\n"
            else:
                self.jschart += stab(2) + "chart.showLabels(false);\n"
        # add custom chart attributes
        for attr, value in self.chart_attr.items():
            self.jschart += stab(2) + "chart.%s(%s);\n" % (attr, value)
        #Inject data to D3
        self.jschart += stab(2) + "d3.select('#%s svg')\n" % self.name + \
            stab(3) + ".datum(%s)\n" % datum + \
            stab(3) + ".transition().duration(500)\n" + \
            stab(3) + self.d3_select_extra + \
            stab(3) + ".call(chart);\n\n"
        if self.resize:
            self.jschart += stab(1) + "nv.utils.windowResize(chart.update);\n"
        self.jschart += stab(1) + "return chart;\n});"
        if self.jquery_on_ready:
            self.jschart += "\n});"
        #Include data
        series_js = json.dumps(self.series)
        if self.model == 'linePlusBarWithFocusChart':
            # This model expects [x, y] pairs remapped to {x:, y:} objects.
            append_to_data = ".map(function(series) {" + \
                "series.values = series.values.map(function(d) { return {x: d[0], y: d[1] } });" + \
                "return series; })"
            self.jschart += """data_%s=%s%s;\n""" % (self.name, series_js, append_to_data)
        else:
            self.jschart += """data_%s=%s;\n""" % (self.name, series_js)
        if self.tag_script_js:
            self.jschart += "</script>"
    def create_x_axis(self, name, label=None, format=None, date=False, custom_format=False):
        """
        Create X-axis

        ``custom_format=True`` treats ``format`` as a literal JS function;
        otherwise it is wrapped with d3.format / d3.time.format.
        """
        axis = {}
        if custom_format and format:
            axis['tickFormat'] = format
        else:
            if format:
                if format == 'AM_PM':
                    axis['tickFormat'] = "function(d) { return get_am_pm(parseInt(d)); }"
                else:
                    axis['tickFormat'] = "d3.format(',%s')" % format
        if label:
            axis['axisLabel'] = label
        #date format : see https://github.com/mbostock/d3/wiki/Time-Formatting
        if date:
            self.dateformat = format
            axis['tickFormat'] = "function(d) { return d3.time.format('%s')(new Date(parseInt(d))) }\n" % self.dateformat
        #flag is the x Axis is a date
        if name[0] == 'x':
            self.x_axis_date = True
        #Add new axis to list of axis
        self.axislist[name] = axis
    def create_y_axis(self, name, label=None, format=None, custom_format=False):
        """
        Create Y-axis
        """
        axis = {}
        if custom_format and format:
            axis['tickFormat'] = format
        else:
            if format:
                axis['tickFormat'] = "d3.format(',%s')" % format
        if label:
            axis['axisLabel'] = label
        #Add new axis to list of axis
        self.axislist[name] = axis
def _main():
    """
    Parse options and process commands

    Command-line entry point: builds the OptionParser and consumes
    sys.argv; currently no further processing is done with the result.
    """
    cli = OptionParser(
        usage="usage: nvd3.py [options]",
        version="python-nvd3 - Charts generator with nvd3.js and d3.js")
    cli.add_option("-q", "--quiet",
                   action="store_false", dest="verbose", default=True,
                   help="don't print messages to stdout")
    (options, args) = cli.parse_args()
# Allow running this module directly as a script.
if __name__ == '__main__':
    _main()
| 37.005474 | 121 | 0.529612 | 19,146 | 0.944129 | 0 | 0 | 0 | 0 | 0 | 0 | 8,332 | 0.410868 |
52ccbe1d1ecd046246c4b0fe1da5ae87b3459108 | 5,733 | py | Python | pycaz/lib/s3_objects.py | jmunhoz/pycaz | 5e2bbd954657403992d0c2bf668b9ca0bde7e740 | [
"MIT"
] | null | null | null | pycaz/lib/s3_objects.py | jmunhoz/pycaz | 5e2bbd954657403992d0c2bf668b9ca0bde7e740 | [
"MIT"
] | null | null | null | pycaz/lib/s3_objects.py | jmunhoz/pycaz | 5e2bbd954657403992d0c2bf668b9ca0bde7e740 | [
"MIT"
] | null | null | null | # Copyright (c) 2018 Javier M. Mellid <jmunhoz@igalia.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class S3Owner:
    """Owner of an S3 entry: a local row id plus the S3 display name and id."""

    def __init__(self, xid, display_name, id):
        # ``id`` shadows the builtin, but the parameter name is part of the
        # public constructor signature, so it is kept.
        self.xid = xid
        self.display_name = display_name
        self.id = id

    def __str__(self):
        fields = (self.xid, self.display_name, self.id)
        return "{} - {} - {}".format(*fields)
# Marker base class for listed S3 entries (object versions, delete markers).
class S3Object:
    pass
class S3Version(S3Object):
    """One object version from an S3 ``list-object-versions`` response.

    Mirrors a single entry of the ``"Versions"`` array::

        "Versions": [
            {
                "LastModified": "2018-06-22T10:55:36.186Z",
                "VersionId": "zWwD51VVELeSCN-mgn61yevKf4ETZB.",
                "ETag": "\\"b9c85244be9733bc79eca588db7bf306\\"",
                "StorageClass": "STANDARD",
                "Key": "test-key-1",
                "Owner": {
                    "DisplayName": "Test User",
                    "ID": "testuser1"
                },
                "IsLatest": true,
                "Size": 151024
            },
    """

    # Class-level defaults, kept for backward compatibility with any code
    # that reads these attributes before/without calling __init__.
    xid = 0
    last_modified = ''
    version_id = ''
    etag = ''
    storage_class = ''
    key = ''
    owner = None          # an S3Owner instance
    is_latest = True
    size = 0

    def __init__(self, xid, last_modified, version_id, etag, storage_class, key, owner, is_latest, size):
        self.xid = xid
        self.last_modified = last_modified
        self.version_id = version_id
        self.etag = etag
        self.storage_class = storage_class
        self.key = key
        self.owner = owner
        self.is_latest = is_latest
        self.size = size

    def __str__(self):
        """Colon-separated dump of every field, including the owner's."""
        return "{}:{}:{}:{}:{}:{}:{}:{}:{}:{}:{}".format(self.xid,
                                                         self.last_modified,
                                                         self.version_id,
                                                         self.etag,
                                                         self.storage_class,
                                                         self.key,
                                                         self.owner.xid,
                                                         self.owner.display_name,
                                                         self.owner.id,
                                                         self.is_latest,
                                                         self.size)

    def pretty_str(self):
        """Human-oriented line: 'V', a +/- latest marker, then the fields."""
        # BUG FIX: the original used ``self.is_latest is 1`` which is False
        # for the boolean True (identity, not equality), so current versions
        # were printed with '-'.  Plain truthiness matches the sibling
        # S3DeleteMarker.pretty_str() and handles both True and 1.
        return "{}:{}:{}:{}:{}:{}:{}:{}:{}:{}".format('V',
                                                      '+' if self.is_latest else '-',
                                                      self.last_modified,
                                                      self.version_id,
                                                      self.key,
                                                      self.owner.display_name,
                                                      self.owner.id,
                                                      self.size,
                                                      self.etag,
                                                      self.storage_class)
class S3DeleteMarker(S3Version):
    """A delete marker from an S3 ``list-object-versions`` response.

    Mirrors a single entry of the ``"DeleteMarkers"`` array::

        "DeleteMarkers": [
            {
                "Owner": {
                    "DisplayName": "Test User",
                    "ID": "testuser1"
                },
                "IsLatest": true,
                "VersionId": "0eU0xYMb3G2UiZVji4l8jhX-nL1tXqm",
                "Key": "test-key-1",
                "LastModified": "2018-06-22T11:22:31.955Z"
            },

    Delete markers carry no etag/storage-class/size, so only a subset of
    the inherited fields is populated.
    """

    def __init__(self, xid, last_modified, version_id, key, owner, is_latest):
        self.xid = xid
        self.last_modified = last_modified
        self.version_id = version_id
        self.key = key
        self.owner = owner
        self.is_latest = is_latest

    def __str__(self):
        """Colon-separated dump of every populated field."""
        fields = (self.xid, self.last_modified, self.version_id, self.key,
                  self.owner.xid, self.owner.display_name, self.owner.id,
                  self.is_latest)
        return "{}:{}:{}:{}:{}:{}:{}:{}".format(*fields)

    def pretty_str(self):
        """Human-oriented line: 'D', a +/- latest marker, then the fields."""
        latest_mark = '+' if self.is_latest else '-'
        fields = (latest_mark, self.last_modified, self.version_id,
                  self.key, self.owner.display_name, self.owner.id)
        return "{}:{}:{}:{}:{}:{}:{}".format('D', *fields)
| 41.846715 | 105 | 0.431013 | 4,609 | 0.803942 | 0 | 0 | 0 | 0 | 0 | 0 | 2,076 | 0.362114 |
52ccdc49aef97ed8b47fad9ada0088091eb98d80 | 1,822 | py | Python | mmdet/models/losses/dice_loss.py | liuyanyi/mmdetection | d2003536af6f08cb9bd7a75e0444eef03ace4bb3 | [
"Apache-2.0"
] | null | null | null | mmdet/models/losses/dice_loss.py | liuyanyi/mmdetection | d2003536af6f08cb9bd7a75e0444eef03ace4bb3 | [
"Apache-2.0"
] | null | null | null | mmdet/models/losses/dice_loss.py | liuyanyi/mmdetection | d2003536af6f08cb9bd7a75e0444eef03ace4bb3 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
@LOSSES.register_module()
class DiceLoss(nn.Module):

    def __init__(self, use_sigmoid=True, loss_weight=1.0):
        """`Dice Loss <https://arxiv.org/abs/1912.04488>`

        Args:
            use_sigmoid (bool, optional): Whether to the prediction is
                used for sigmoid or softmax. Defaults to True.
            loss_weight (float, optional): Weight of loss. Defaults to 1.0.
        """
        super(DiceLoss, self).__init__()
        self.use_sigmoid = use_sigmoid
        self.loss_weight = loss_weight

    def forward(self, pred, target):
        """Forward function.

        Args:
            pred (torch.Tensor or tuple):
                The prediction, if torch.Tensor, shape (n, h, w)
                if tuple, each param is torch.Tensor with shape (n, w, h)
            target (torch.Tensor): The learning label of the prediction,
                shape (n, h, w).

        Returns:
            torch.Tensor: The calculated per-sample loss, shape (n,).
        """
        assert isinstance(pred, (torch.Tensor, tuple))
        if isinstance(pred, tuple):
            assert len(pred) == 2
            if self.use_sigmoid:
                # torch.sigmoid replaces the deprecated F.sigmoid.
                pred = torch.sigmoid(pred[0]) * torch.sigmoid(pred[1])
            else:
                pred = pred[0] + pred[1]
        elif self.use_sigmoid:
            pred = torch.sigmoid(pred)
        # Flatten each sample so the Dice terms reduce over a single dim.
        flat_pred = pred.contiguous().view(pred.size()[0], -1)
        flat_target = target.contiguous().view(target.size()[0], -1).float()
        intersection = torch.sum(flat_pred * flat_target, 1)
        # 1e-3 stabilizes the ratio when both prediction and target are empty.
        pred_norm = torch.sum(flat_pred * flat_pred, 1) + 1e-3
        target_norm = torch.sum(flat_target * flat_target, 1) + 1e-3
        dice = (2 * intersection) / (pred_norm + target_norm)
        loss_cls = self.loss_weight * (1 - dice)
        return loss_cls
52cecdcf1301a05a1abc0b2231ff2258f8575da0 | 2,676 | py | Python | model/contact.py | Keith234/python_training | ff476423cda06d1846761500cc7bd7acb9edf16c | [
"Apache-2.0"
] | null | null | null | model/contact.py | Keith234/python_training | ff476423cda06d1846761500cc7bd7acb9edf16c | [
"Apache-2.0"
] | null | null | null | model/contact.py | Keith234/python_training | ff476423cda06d1846761500cc7bd7acb9edf16c | [
"Apache-2.0"
] | null | null | null | from sys import maxsize
class Contact:
    """Plain data holder for one address-book contact (test-data model)."""

    # Fields consulted by __eq__; a None on either side acts as a wildcard
    # so partially-filled contacts can match fully-filled ones.
    _COMPARED_FIELDS = ("id", "first_name", "lastname", "address", "home",
                        "mobile", "work", "fax", "email", "email2", "email3")

    def __init__(self, first_name=None, middlename=None, lastname=None, nicknam=None, title=None, company=None,
                 address=None, home=None, mobile=None, work=None, fax=None,
                 email=None, email2=None, email3=None, secondaryphone=None, id=None, all_phones_from_home_page=None,
                 all_emails_from_home_page=None, all_phones_from_db=None, all_emails_from_db=None):
        self.first_name = first_name
        self.middlename = middlename
        self.lastname = lastname
        self.nicknam = nicknam
        self.title = title
        self.company = company
        self.address = address
        self.home = home
        self.mobile = mobile
        self.work = work
        self.fax = fax
        self.secondaryphone = secondaryphone
        self.all_phones_from_home_page = all_phones_from_home_page
        self.all_emails_from_home_page = all_emails_from_home_page
        self.email = email
        self.email2 = email2
        self.email3 = email3
        self.id = id
        self.all_phones_from_db = all_phones_from_db
        self.all_emails_from_db = all_emails_from_db

    def __repr__(self):
        values = (self.id, self.lastname, self.first_name, self.address,
                  self.home, self.mobile, self.work, self.fax,
                  self.email, self.email2, self.email3)
        return "%s : %s, %s, %s, %s, %s, %s, %s, %s, %s, %s" % values

    def __eq__(self, other):
        # Two contacts match when every compared field is either equal or
        # unset (None) on at least one side.
        return all(
            mine is None or theirs is None or mine == theirs
            for mine, theirs in (
                (getattr(self, field), getattr(other, field))
                for field in self._COMPARED_FIELDS
            )
        )

    def id_or_max(self):
        """Return the numeric id, or sys.maxsize when no id is set (for sorting)."""
        if self.id:
            return int(self.id)
        return maxsize
| 49.555556 | 116 | 0.613602 | 2,649 | 0.98991 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.016816 |
52cf7e995d7f53da62cf8abc38f6b4412b3b46df | 717 | py | Python | test/manager_drmaa_test.py | jmchilton/pulsar | 783b90cf0bce893a11c347fcaf6778b98e0bb062 | [
"Apache-2.0"
] | null | null | null | test/manager_drmaa_test.py | jmchilton/pulsar | 783b90cf0bce893a11c347fcaf6778b98e0bb062 | [
"Apache-2.0"
] | null | null | null | test/manager_drmaa_test.py | jmchilton/pulsar | 783b90cf0bce893a11c347fcaf6778b98e0bb062 | [
"Apache-2.0"
] | null | null | null | from .test_utils import (
BaseManagerTestCase,
skip_unless_module
)
from pulsar.managers.queued_drmaa import DrmaaQueueManager
class DrmaaManagerTest(BaseManagerTestCase):
    """Test case exercising DrmaaQueueManager via the shared base-case helpers."""

    def setUp(self):
        super(DrmaaManagerTest, self).setUp()
        self._set_manager()

    def tearDown(self):
        # BUG FIX: this previously called super().setUp(), re-running test
        # setup during teardown and skipping the base-class cleanup.
        super(DrmaaManagerTest, self).tearDown()
        self.manager.shutdown()

    def _set_manager(self, **kwds):
        # Build the manager under test against the base case's app fixture.
        self.manager = DrmaaQueueManager('_default_', self.app, **kwds)

    @skip_unless_module("drmaa")
    def test_simple_execution(self):
        self._test_simple_execution(self.manager)

    @skip_unless_module("drmaa")
    def test_cancel(self):
        self._test_cancelling(self.manager)
| 24.724138 | 71 | 0.701534 | 578 | 0.806137 | 0 | 0 | 214 | 0.298466 | 0 | 0 | 25 | 0.034868 |
52d07595fd7ad4acc1310734ea1028229e6c89c6 | 2,235 | py | Python | nerddiary/user/user.py | mishamsk/nerddiary | 2d0981c5034460f353c2994347fb95a5c94a55bd | [
"Apache-2.0"
] | null | null | null | nerddiary/user/user.py | mishamsk/nerddiary | 2d0981c5034460f353c2994347fb95a5c94a55bd | [
"Apache-2.0"
] | 5 | 2022-02-20T06:10:28.000Z | 2022-03-28T03:22:41.000Z | nerddiary/user/user.py | mishamsk/nerddiary | 2d0981c5034460f353c2994347fb95a5c94a55bd | [
"Apache-2.0"
] | null | null | null | """ User model """
from __future__ import annotations
from datetime import tzinfo
import pytz
from pydantic import BaseModel, PrivateAttr, validator
from pydantic.fields import Field
from ..poll.poll import Poll
from ..primitive.timezone import TimeZone
from ..report.report import Report
from typing import Dict, List, Optional
class User(BaseModel):
    """Pydantic model describing one nerddiary user and their polls/reports."""
    id: str = Field(description="This user id", regex=r"^\w{1,64}$")
    username: str | None = Field(default=None, description="Optional user name")
    lang_code: str = Field(
        default="en", min_length=2, max_length=2, description="User preferred language (2 letter code)"
    )
    # Default timezone when the user does not provide one.
    timezone: TimeZone = pytz.timezone("US/Eastern")  # type: ignore
    polls: Optional[List[Poll]] = Field(min_items=1)
    reports: Optional[List[Report]] = Field(min_items=1)
    # Private (non-serialized) lookup table rebuilt in __init__ below.
    _polls_dict: Dict[str, Poll] = PrivateAttr(default={})
    """ Dictionary of polls for workflow convinience
    """
    class Config:
        title = "User Configuration"
        extra = "forbid"
        # Serialize any tzinfo value via str() when dumping to JSON.
        json_encoders = {tzinfo: lambda t: str(t)}
    def __init__(self, **data) -> None:
        super().__init__(**data)
        # convert_reminder_times_to_local_if_set
        if self.polls:
            # Create help mappings for workflow processing
            self._polls_dict = {}
            for poll in self.polls:
                self._polls_dict |= {poll.poll_name: poll}
                if poll.reminder_time:
                    # NOTE(review): replace(tzinfo=...) with a pytz timezone
                    # bypasses pytz's localize() normalization — confirm this
                    # is the intended conversion.
                    poll.reminder_time = poll.reminder_time.replace(tzinfo=self.timezone)
    @validator("polls")
    def poll_names_must_be_unique(cls, v: List[Poll]):
        # Reject configurations where two polls share a name.
        if v:
            poll_names = [p.poll_name for p in v]
            poll_names_set = set(poll_names)
            if len(poll_names_set) != len(poll_names):
                raise ValueError("Poll names must be unique")
        return v
    @validator("polls")
    def poll_commands_must_be_unique(cls, v: List[Poll]):
        # Reject configurations where two polls share a (non-None) command.
        if v:
            poll_commands = [p.command for p in v if p.command is not None]
            poll_commands_set = set(poll_commands)
            if len(poll_commands_set) != len(poll_commands):
                raise ValueError("Poll commands must be unique")
        return v
| 32.867647 | 103 | 0.640716 | 1,898 | 0.849217 | 0 | 0 | 678 | 0.303356 | 0 | 0 | 377 | 0.16868 |
52d1423501559358e6f7024a091cf36f17c291a8 | 5,264 | py | Python | Testes/Trabalho/album.py | matheusmenezs/com220 | d699f00892df1259249ae012aa2a02f63ae0f06f | [
"MIT"
] | null | null | null | Testes/Trabalho/album.py | matheusmenezs/com220 | d699f00892df1259249ae012aa2a02f63ae0f06f | [
"MIT"
] | null | null | null | Testes/Trabalho/album.py | matheusmenezs/com220 | d699f00892df1259249ae012aa2a02f63ae0f06f | [
"MIT"
] | null | null | null | import tkinter as tk
from tkinter import ttk
from tkinter import simpledialog, messagebox
import os.path
import pickle
class Album():
    """Record of an album: title, artist, year and list of Musica tracks.

    Attribute names are kept unchanged because instances are persisted with
    pickle elsewhere in this module.
    """

    def __init__(self, nome, artista, ano, faixas):
        # Double-underscore names keep the fields private (name-mangled).
        self.__nome, self.__artista = nome, artista
        self.__ano, self.__faixas = ano, faixas

    def getNome(self):
        """Return the album title."""
        return self.__nome

    def getArtista(self):
        """Return the artist name."""
        return self.__artista

    def getAno(self):
        """Return the release year."""
        return self.__ano

    def getFaixas(self):
        """Return the track list."""
        return self.__faixas
class Musica:
    """A single track, identified only by its title."""

    def __init__(self, titulo):
        # Name-mangled to _Musica__titulo; exposed via getTitulo().
        self.__titulo = titulo

    def getTitulo(self):
        """Return the track title."""
        return self.__titulo
class LimiteCadastraAlbum(tk.Toplevel):
    """Toplevel window for registering an album: title/year entries, an
    artist combobox, and buttons wired to the controller's handlers."""
    def __init__(self, controle, lista):
        # ``controle``: the ControleAlbum owning this window;
        # ``lista``: artist names shown in the combobox.
        tk.Toplevel.__init__(self)
        self.geometry('300x250')
        self.title("Album")
        self.controle = controle
        # One frame per row of widgets, packed top-to-bottom.
        self.frameNome = tk.Frame(self)
        self.frameAno = tk.Frame(self)
        self.frameArtista = tk.Frame(self)
        self.frameButton = tk.Frame(self)
        self.frameNome.pack()
        self.frameAno.pack()
        self.frameArtista.pack()
        self.frameButton.pack()
        self.labelNome = tk.Label(self.frameNome,text="Titulo:")
        self.labelNome.pack(side="left")
        self.labelAno = tk.Label(self.frameAno, text="Ano:")
        self.labelAno.pack(side='left')
        self.inputNome = tk.Entry(self.frameNome, width=20)
        self.inputNome.pack(side="left")
        self.inputAno = tk.Entry(self.frameAno, width=20)
        self.inputAno.pack(side="left")
        # NOTE(review): self.buttonSubmit is assigned twice; the first
        # ("Inserir Música") button stays on screen but its reference is
        # overwritten by the "Enter" button below.
        self.buttonSubmit = tk.Button(self.frameButton ,text="Inserir Música")
        self.buttonSubmit.pack(side="top")
        self.buttonSubmit.bind("<Button>", controle.inserirFaixas)
        self.buttonSubmit = tk.Button(self.frameButton ,text="Enter")
        self.buttonSubmit.pack(side="left")
        self.buttonSubmit.bind("<Button>", controle.enterHandler)
        self.buttonClear = tk.Button(self.frameButton ,text="Clear")
        self.buttonClear.pack(side="left")
        self.buttonClear.bind("<Button>", controle.clearHandler)
        self.buttonFecha = tk.Button(self.frameButton ,text="Concluído")
        self.buttonFecha.pack(side="left")
        self.buttonFecha.bind("<Button>", controle.fechaHandler)
        self.labelArt = tk.Label(self.frameArtista,text="Escolha o artista: ")
        self.labelArt.pack(side="left")
        # Combobox selection is read by the controller via escolhaCombo.
        self.escolhaCombo = tk.StringVar()
        self.combobox = ttk.Combobox(self.frameArtista, width = 15 , textvariable = self.escolhaCombo)
        self.combobox.pack(side="left")
        self.combobox['values'] = lista
class LimiteConsultaAlbum():
    """Dialog pair used when looking up an album by title."""
    def __init__(self):
        # Prompt immediately for the album title; None if the user cancels.
        self.answer = simpledialog.askstring('Consultar Album', 'Digite o Titulo: ')
    def mostraJanela(self, titulo, msg):
        # Display the lookup result in an info message box.
        messagebox.showinfo(titulo, msg)
class ControleAlbum():
    """Controller for album CRUD: keeps self.listaAlb in memory and persists
    it to 'album.pickle' in the working directory."""
    def __init__(self, ctrlPrincipal):
        self.ctrlPrincipal = ctrlPrincipal
        # Load previously saved albums, or start with an empty list.
        # NOTE(review): pickle.load on an on-disk file is only safe while the
        # file is trusted (written by salvaAlbuns below).
        if not os.path.isfile("album.pickle"):
            self.listaAlb = []
        else:
            with open("album.pickle", "rb") as f:
                self.listaAlb = pickle.load(f)
    def salvaAlbuns(self):
        # Persist the album list; an empty list is deliberately not saved.
        if len(self.listaAlb) != 0:
            with open("album.pickle","wb") as f:
                pickle.dump(self.listaAlb, f)
    def cadastrarAlbum(self):
        """Open the registration window and reset the pending track list."""
        listaArt = self.ctrlPrincipal.ctrlArtista.getListaNomeArt()
        self.limiteCad = LimiteCadastraAlbum(self, listaArt)
        self.listaFaixas = []
        self.nroFaixas = 0
    def enterHandler(self, event):
        """'Enter' button: build an Album from the form and store it."""
        titulo = self.limiteCad.inputNome.get()
        nomeArt = self.limiteCad.escolhaCombo.get()
        ano = self.limiteCad.inputAno.get()
        album = Album(titulo, nomeArt, ano, self.listaFaixas)
        self.listaAlb.append(album)
        messagebox.showinfo('Cadastro', 'Album cadastrado com sucesso')
        self.clearHandler(event)
    def inserirFaixas(self, event):
        """'Inserir Música' button: prompt for one track title and queue it."""
        faixa = simpledialog.askstring('Inserir Músicas', 'Digite o Nome: ')
        music = Musica(faixa)
        self.listaFaixas.append(music)
    def clearHandler(self, event):
        # Clear the title entry field.
        self.limiteCad.inputNome.delete(0, len(self.limiteCad.inputNome.get()))
    def fechaHandler(self, event):
        # Close the registration window.
        self.limiteCad.destroy()
    def consultarAlbum(self):
        """Prompt for a title and show the matching album's track list."""
        consultaAlb = LimiteConsultaAlbum()
        answer = consultaAlb.answer
        for album in self.listaAlb:
            self.msg = 'Album: '
            if answer == str(album.getNome()):
                self.msg += album.getNome() + '\n'
                self.msg += '\nMúsicas:'
                for faixas in album.getFaixas():
                    self.msg +='\n' + faixas.getTitulo()
                consultaAlb.mostraJanela('Busca', self.msg)
                break
        else:
            # for/else: runs only when no album matched (no break above).
            self.msg = 'Album não cadastrado'
            consultaAlb.mostraJanela('Busca', self.msg)
    def getAlbum(self, nomeArt):
        """Return the list of albums whose artist equals *nomeArt*."""
        albRet = []
        for alb in self.listaAlb:
            if alb.getArtista() == nomeArt:
                albRet.append(alb)
        return albRet
| 30.604651 | 102 | 0.601254 | 5,110 | 0.969823 | 0 | 0 | 0 | 0 | 0 | 0 | 430 | 0.081609 |
52d15598b3845713de68c7a188bed8ef0fab4e10 | 7,191 | py | Python | src/otest/aus/app.py | rohe/otest | 8983db8abfa63eda4e8a35bbe193ac80793c14bb | [
"Apache-2.0"
] | 2 | 2016-08-26T07:42:19.000Z | 2017-09-06T02:13:02.000Z | src/otest/aus/app.py | rohe/otest | 8983db8abfa63eda4e8a35bbe193ac80793c14bb | [
"Apache-2.0"
] | 3 | 2017-06-15T06:07:18.000Z | 2018-06-28T07:43:21.000Z | src/otest/aus/app.py | rohe/otest | 8983db8abfa63eda4e8a35bbe193ac80793c14bb | [
"Apache-2.0"
] | 5 | 2016-07-22T21:38:40.000Z | 2019-04-05T19:20:23.000Z | import logging
import os
from oic.utils.http_util import BadRequest
from oic.utils.http_util import SeeOther
from otest.events import EV_HTTP_ARGS
from otest.result import safe_url
__author__ = 'roland'
logger = logging.getLogger(__name__)
class WebApplication(object):
    """WSGI front controller for the otest test runner.

    Dispatches incoming requests to static resources, log/result views and
    the web tester, keeping per-visitor state in the beaker session under
    the 'session_info' key.
    """
    def __init__(self, sessionhandler, webio, webtester, check, webenv,
                 pick_grp, path=''):
        # Factories/config injected by the entry point; `path` is an optional
        # URL prefix that is stripped before routing.
        self.sessionhandler = sessionhandler
        self.webio = webio
        self.webtester = webtester
        self.check = check
        self.webenv = webenv
        self.pick_grp = pick_grp
        self.path = path
    def application(self, environ, start_response):
        """WSGI entry point: route on PATH_INFO and return the response body."""
        logger.info("Connection from: %s" % environ["REMOTE_ADDR"])
        session = environ['beaker.session']
        path = environ.get('PATH_INFO', '').lstrip('/')
        logger.info("path: %s" % path)
        # Restore (or lazily create) the per-visitor session state.
        try:
            sh = session['session_info']
        except KeyError:
            sh = self.sessionhandler(**self.webenv)
            sh.session_init()
            session['session_info'] = sh
        info = self.webio(session=sh, **self.webenv)
        info.environ = environ
        info.start_response = start_response
        tester = self.webtester(info, sh, **self.webenv)
        tester.check_factory = self.check.factory
        # Static resources are served before any prefix handling.
        if path == "robots.txt":
            return info.static("static/robots.txt")
        elif path == "favicon.ico":
            return info.static("static/favicon.ico")
        elif path.startswith("static/"):
            return info.static(path)
        elif path.startswith("jwks/"):
            return info.static(path)
        elif path.startswith("export/"):
            return info.static(path)
        # Strip the configured URL prefix (plus the following '/') if present.
        if self.path and path.startswith(self.path):
            _path = path[len(self.path)+1:]
        else:
            _path = path
        if _path == "":  # list
            return tester.display_test_list()
        if _path == "logs":
            return info.display_log("log", issuer="", profile="", testid="")
        elif _path.startswith("log"):
            if _path == "log" or _path == "log/":
                # No explicit issuer in the URL: derive it from configuration.
                try:
                    _iss = self.webenv['client_info']["provider_info"]["issuer"]
                except KeyError:
                    _iss = self.webenv['tool_conf']['issuer']
                parts = [safe_url(_iss)]
            else:
                # Split the remaining path into components, leftmost first.
                parts = []
                while _path != "log":
                    head, tail = os.path.split(_path)
                    # tail = tail.replace(":", "%3A")
                    # if tail.endswith("%2F"):
                    #     tail = tail[:-3]
                    parts.insert(0, tail)
                    _path = head
            return info.display_log("log", *parts)
        elif _path.startswith("tar"):
            _path = _path.replace(":", "%3A")
            return info.static(_path)
        if _path == "reset":
            sh.reset_session()
            return info.flow_list()
        elif _path == "pedit":
            try:
                return info.profile_edit()
            except Exception as err:
                return info.err_response("pedit", err)
        elif _path == "profile":
            return tester.set_profile(environ)
        elif _path.startswith("test_info"):
            p = _path.split("/")
            try:
                return info.test_info(p[1])
            except KeyError:
                return info.not_found()
        elif _path == "continue":
            # Resume a paused test run; fall back to the result display page.
            resp = tester.cont(environ, self.webenv)
            session['session_info'] = info.session
            if resp:
                return resp
            else:
                resp = SeeOther(
                    "{}display#{}".format(self.webenv['base_url'],
                                          self.pick_grp(sh['conv'].test_id)))
            return resp(environ, start_response)
        elif _path == 'display':
            return info.flow_list()
        elif _path == "opresult":
            resp = SeeOther(
                "{}display#{}".format(self.webenv['base_url'],
                                      self.pick_grp(sh['conv'].test_id)))
            return resp(environ, start_response)
        # expected _path format: /<testid>[/<endpoint>]
        elif _path in sh["tests"]:
            resp = tester.run(_path, **self.webenv)
            session['session_info'] = info.session
            if resp is False or resp is True:
                pass
            elif isinstance(resp, list):
                return resp
            try:
                # return info.flow_list()
                resp = SeeOther(
                    "{}display#{}".format(
                        self.webenv['client_info']['base_url'],
                        self.pick_grp(sh['conv'].test_id)))
                return resp(environ, start_response)
            except Exception as err:
                logger.error(err)
                raise
        elif _path in ["authz_cb", "authz_post"]:
            # OAuth2/OIDC authorization responses land here.
            if _path == "authz_cb":
                _conv = sh["conv"]
                try:
                    response_mode = _conv.req.req_args["response_mode"]
                except KeyError:
                    response_mode = ""
                # Check if fragment encoded
                if response_mode == "form_post":
                    pass
                else:
                    try:
                        response_type = _conv.req.req_args["response_type"]
                    except KeyError:
                        response_type = [""]
                    if response_type == [""]:  # expect anything
                        if environ["QUERY_STRING"]:
                            pass
                        else:
                            return info.opresult_fragment()
                    elif response_type != ["code"]:
                        # but what if it's all returned as a query anyway ?
                        try:
                            qs = environ["QUERY_STRING"]
                        except KeyError:
                            pass
                        else:
                            _conv.events.store(EV_HTTP_ARGS, qs)
                            _conv.query_component = qs
                        return info.opresult_fragment()
            try:
                resp = tester.async_response(self.webenv["conf"])
            except Exception as err:
                return info.err_response("authz_cb", err)
            else:
                if resp is False or resp is True:
                    pass
                elif not isinstance(resp, int):
                    return resp
                try:
                    # return info.flow_list()
                    resp = SeeOther(
                        "{}display#{}".format(
                            self.webenv['client_info']['base_url'],
                            self.pick_grp(sh['conv'].test_id)))
                    return resp(environ, start_response)
                except Exception as err:
                    logger.error(err)
                    raise
        else:
            # Anything unrecognized is a client error.
            resp = BadRequest()
            return resp(environ, start_response)
| 35.955 | 80 | 0.478932 | 6,944 | 0.965652 | 0 | 0 | 0 | 0 | 0 | 0 | 985 | 0.136977 |
52d370ee2b072b19dce1e99ad9c3e3612bd78c77 | 4,821 | py | Python | my_weather_api/handlers.py | swingthrough/weather-api-task-django | b7de39fef470ed5eb36c28fc7ea23f3780d0535c | [
"MIT"
] | null | null | null | my_weather_api/handlers.py | swingthrough/weather-api-task-django | b7de39fef470ed5eb36c28fc7ea23f3780d0535c | [
"MIT"
] | null | null | null | my_weather_api/handlers.py | swingthrough/weather-api-task-django | b7de39fef470ed5eb36c28fc7ea23f3780d0535c | [
"MIT"
] | null | null | null | from .serializers import WeatherForecastDaySerializer
from .models import WeatherForecastDay
from datetime import datetime, timedelta
import requests
from decouple import config
import json
DATE_FORMAT = '%Y-%m-%d'
COUNTRY_CODES_TO_CAPITAL = {
'CZ': 'Prague',
'UK': 'London',
'SK': 'Bratislava',
}
WEATHER_API_KEY = config('WEATHER_API_KEY', default='')
WEATHER_API_URL = "http://api.weatherapi.com/v1/forecast.json?key={weather_api_key}&q={city_name}&dt={date}&days=1&aqi=no&alerts=no"
def validateDateFormat(date_text):
    """Return True iff *date_text* is a canonical YYYY-MM-DD date string.

    Round-tripping through strptime/strftime rejects both unparsable input
    and non-canonical spellings such as '2020-1-1'.
    """
    try:
        roundtrip = datetime.strptime(date_text, DATE_FORMAT).strftime(DATE_FORMAT)
    except ValueError:
        return False
    return roundtrip == date_text
def getForecastSummaryFromTempC(temp_c):
    """Map an average temperature in Celsius to a coarse summary label.

    > 20 -> "good"; 10..20 inclusive -> "soso"; < 10 -> "bad".
    """
    if temp_c <= 20:
        return "soso" if temp_c >= 10 else "bad"
    return "good"
def getWeatherForecast(country_code, for_day, force_api_call=False):
    """Return a forecast summary dict for *country_code*'s capital on *for_day*.

    Result shapes:
      {'status': 'OK', 'data': {'forecast': <good|soso|bad>, 'source': ...}}
      {'status': 'BAD_REQUEST' | 'API_ERR', 'message': <reason>}

    Cached WeatherForecastDay rows are used unless force_api_call is True;
    fresh API results are written back to the database cache.
    """
    ret_json_valid = {
        'status': 'OK',
        'data': {}
    }
    ret_json_bad_request = {
        'status': 'BAD_REQUEST',
        'message': '',
    }
    ret_json_api_err = {
        'status': 'API_ERR',
        'message': '',
    }
    # --- input validation -------------------------------------------------
    if country_code not in COUNTRY_CODES_TO_CAPITAL.keys():
        ret_json_bad_request['message'] = f"Value '{country_code}' for query parameter 'country_code' is invalid. Allowed values are {', '.join(COUNTRY_CODES_TO_CAPITAL.keys())}"
        return ret_json_bad_request
    if validateDateFormat(for_day) == False:
        ret_json_bad_request['message'] = f"Value '{for_day}' for query parameter 'date' is invalid. Must be a valid date in format YYYY-MM-DD"
        return ret_json_bad_request
    query_date = datetime.strptime(for_day, DATE_FORMAT).date()
    today = datetime.today().date()
    # The upstream weather API serves at most 10 days of forecast.
    todayPlusTen = today + timedelta(days=10)
    if query_date < today:
        ret_json_bad_request['message'] = f"Value '{for_day}' for query parameter 'date' must be greater or equal to current date: {today.strftime(DATE_FORMAT)}"
        return ret_json_bad_request
    if query_date > todayPlusTen:
        ret_json_bad_request['message'] = f"Value '{for_day}' for query parameter 'date' cannot be larger than 10 days from now - furthest possible date for today is: {todayPlusTen.strftime(DATE_FORMAT)}"
        return ret_json_bad_request
    weatherForecastDayQuerySet = WeatherForecastDay.objects.filter(country_code=country_code, for_day=for_day)
    if force_api_call == False and len(weatherForecastDayQuerySet) > 0:
        # Cache hit: answer from the stored average temperature.
        data = WeatherForecastDaySerializer(weatherForecastDayQuerySet[0]).data
        avgtemp_c = float(data.get('average_temp_c'))
        forecast_json = {
            'forecast': getForecastSummaryFromTempC(avgtemp_c),
            'source': 'DB'
        }
        ret_json_valid['data'] = forecast_json
        return ret_json_valid
    else:
        # Cache miss or forced refresh: call the external weather API.
        url = WEATHER_API_URL.format(weather_api_key=WEATHER_API_KEY, city_name=COUNTRY_CODES_TO_CAPITAL.get(country_code), date=for_day)
        # TODO: handle potential exceptions of this API call
        try:
            response = requests.get(url)
            # 400/401/403 carry a structured error payload; relay its message.
            if response.status_code in [400, 401, 403]:
                json_response = response.json()
                ret_json_api_err['message'] = json_response['error']['message']
                return ret_json_api_err
            elif response.status_code >= 400:
                ret_json_api_err['message'] = 'Api Error'
                return ret_json_api_err
            json_response = response.json()
            avgtemp_c = json_response['forecast']['forecastday'][0]['day']['avgtemp_c']
            if len(weatherForecastDayQuerySet) > 0:
                forecast_json = {
                    'forecast': getForecastSummaryFromTempC(avgtemp_c),
                    'source': 'forced API call'
                }
                # update entry
                existingWeatherForecastDay = weatherForecastDayQuerySet[0]
                existingWeatherForecastDay.average_temp_c = avgtemp_c
                existingWeatherForecastDay.save(update_fields=['average_temp_c',])
            else:
                forecast_json = {
                    'forecast': getForecastSummaryFromTempC(avgtemp_c),
                    'source': 'API call'
                }
                # create entry
                newWeatherForecastDay = WeatherForecastDay(country_code=country_code, for_day=query_date, average_temp_c=avgtemp_c)
                newWeatherForecastDay.save()
            ret_json_valid['data'] = forecast_json
            return ret_json_valid
        except requests.exceptions.RequestException:
            ret_json_api_err['message'] = 'Api Error'
            return ret_json_api_err
| 37.664063 | 204 | 0.644472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,181 | 0.24497 |
52d3d72868c077690bde8ce4b9a24f77c6b48f81 | 134 | py | Python | app/blueprints/printing/__init__.py | OrigamiCranes/PrintingPortal | e25f9f683dca3a0dcf4c90ae50515d7693447cb8 | [
"MIT",
"Unlicense"
] | null | null | null | app/blueprints/printing/__init__.py | OrigamiCranes/PrintingPortal | e25f9f683dca3a0dcf4c90ae50515d7693447cb8 | [
"MIT",
"Unlicense"
] | null | null | null | app/blueprints/printing/__init__.py | OrigamiCranes/PrintingPortal | e25f9f683dca3a0dcf4c90ae50515d7693447cb8 | [
"MIT",
"Unlicense"
] | null | null | null | from flask import Blueprint, url_for
bp = Blueprint('printing', __name__,template_folder='templates')
from . import routes, forms
| 16.75 | 64 | 0.768657 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.156716 |
52d4e85bc2d0512c2b40fb706b7936fa8d8f91d2 | 5,239 | py | Python | csbiginteger/BigIntegerNet.py | NeoResearch/csBigInteger.py | 209046f6539a00b6dae9f5a58dda0fdba0ef20a2 | [
"MIT"
] | 2 | 2019-07-12T17:09:16.000Z | 2019-07-26T03:17:56.000Z | csbiginteger/BigIntegerNet.py | NeoResearch/csbiginteger-py | 209046f6539a00b6dae9f5a58dda0fdba0ef20a2 | [
"MIT"
] | 9 | 2019-07-15T06:48:19.000Z | 2019-07-16T13:56:15.000Z | csbiginteger/BigIntegerNet.py | NeoResearch/csbiginteger-py | 209046f6539a00b6dae9f5a58dda0fdba0ef20a2 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from csbiginteger.BigInteger import BigInteger
from functools import total_ordering
# requires: pip install msl-loadlib pycparser pythonnet
from msl.loadlib import LoadLibrary
# remember to execute first: cd csbiginteger/dotnet && dotnet build -c Release
# Load the compiled C# assembly through pythonnet; the 'net' argument selects
# msl-loadlib's .NET loader.  The DLL must have been built beforehand (above).
net = LoadLibrary('csbiginteger/dotnet/bin/Release/netstandard2.0/publish/csbiginteger.dll', 'net')
biglib = net.lib.csbiglib.BigIntegerLib()
# Module-level smoke test: exercise the round-trips and print the results.
z = biglib.zero()
print(bytearray(z.ToByteArray()))
m1 = biglib.from_int32(-1)
print(bytearray(m1.ToByteArray()))
i255 = biglib.from_int32(255)
print(bytearray(i255.ToByteArray()))
# bytes -> BigInteger -> int32 round-trip
b2 = biglib.from_bytes(bytearray(i255.ToByteArray()))
print(biglib.to_int32(b2))
# string parsing with an explicit base, then conversion back to several forms
b3 = biglib.from_string("0xff", 16)
print(biglib.to_int32(b3))
print(biglib.to_string(b3, 16))
print(biglib.to_string(b3, 10))
print(bytearray(biglib.to_bytes(b3)))
@total_ordering
class BigIntegerNet(BigInteger):
    """BigInteger backed by the C# implementation, accessed via pythonnet.

    Constructor accepts: int, bytearray/bytes (little-endian, the same layout
    to_bytearray() produces) or a string parsed with *base*.
    """

    def __init__(self, param=0, base=10):
        # Integers are routed through their base-10 string representation.
        if type(param) is int:
            base = 10
            param = str(param)
        # bytearray is normalized to bytes so one branch handles both.
        if type(param) is bytearray:
            param = bytes(param)
        if type(param) is bytes:
            self._big = biglib.from_bytes(bytearray(param))
        if type(param) is str:
            self._big = biglib.from_string(param, base)

    @staticmethod
    def _wrap(value):
        """Coerce a plain int into a BigIntegerNet; other values pass through."""
        return BigIntegerNet(value) if type(value) is int else value

    def to_int(self):
        """Value as a signed 32-bit int (overflow handled on the .NET side)."""
        return biglib.to_int32(self._big)

    def to_long(self):
        """Value as a signed 64-bit int (overflow handled on the .NET side)."""
        return biglib.to_int64(self._big)

    def to_bytearray(self):
        """Little-endian byte representation, as produced by the C# library."""
        return bytearray(biglib.to_bytes(self._big))

    def to_str(self, base=16):
        return str(biglib.to_string(self._big, base))

    def add(self, other):
        out = BigIntegerNet()
        out._big = self._big.Add(self._big, self._wrap(other)._big)
        return out

    def sub(self, other):
        out = BigIntegerNet()
        out._big = self._big.Subtract(self._big, self._wrap(other)._big)
        return out

    def mul(self, other):
        out = BigIntegerNet()
        out._big = self._big.Multiply(self._big, self._wrap(other)._big)
        return out

    def div(self, other):
        out = BigIntegerNet()
        out._big = self._big.Divide(self._big, self._wrap(other)._big)
        return out

    def mod(self, other):
        # NOTE(review): .NET BigInteger.DivRem returns the quotient and yields
        # the remainder via an out parameter -- confirm pythonnet maps this to
        # the remainder as the method name suggests.
        out = BigIntegerNet()
        out._big = self._big.DivRem(self._big, self._wrap(other)._big)
        return out

    def shl(self, other):
        # NOTE(review): op_LeftShift/op_RightShift are static in .NET and take
        # (BigInteger, int); confirm the instance-style single-argument call.
        out = BigIntegerNet()
        out._big = self._big.op_LeftShift(self._wrap(other)._big)
        return out

    def shr(self, other):
        out = BigIntegerNet()
        out._big = self._big.op_RightShift(self._wrap(other)._big)
        return out

    def eq(self, other):
        return self._big.op_Equality(self._wrap(other)._big)

    def lt(self, other):
        return self._big.op_LessThan(self._wrap(other)._big)

    def __repr__(self):
        return str(self)

    def __str__(self):
        return self.to_str(10)

    def __len__(self):
        return len(self.to_bytearray())

    # ------------------------------------------------------------------
    # operator protocol -- everything delegates to the methods above
    # ------------------------------------------------------------------
    def __add__(self, other):
        return self.add(other)

    def __sub__(self, other):
        return self.sub(other)

    def __mul__(self, other):
        return self.mul(other)

    # Division truncates toward zero (C/C++/Java style), unlike Python ints
    # which floor: BigIntegerNet(-5) // 2 == -2, not -3.  "floordiv" is thus a
    # floor only for non-negative results; the name comes from the protocol.
    def __floordiv__(self, other):
        return self.div(other)

    # True division behaves the same as floor division here.
    def __truediv__(self, other):
        return self.div(other)

    def __mod__(self, other):
        return self.mod(other)

    def __rshift__(self, other):
        return self.shr(other)

    def __lshift__(self, other):
        return self.shl(other)

    # @total_ordering derives the remaining comparisons from __eq__/__lt__.
    def __eq__(self, other):
        return self.eq(other)

    def __lt__(self, other):
        return self.lt(other)
| 27.719577 | 134 | 0.620538 | 4,366 | 0.833365 | 0 | 0 | 4,382 | 0.836419 | 0 | 0 | 1,160 | 0.221416 |
52d5e881d194a755ed3fbfaa3392720a723678a2 | 8,841 | py | Python | cdci_spiacs_plugin/spiacs_lightcurve_query.py | andreatramacere/cdci_spiacs_plugin | 44227a4132b118b4f1f41d76b5aa7be23a1c66dd | [
"MIT"
] | null | null | null | cdci_spiacs_plugin/spiacs_lightcurve_query.py | andreatramacere/cdci_spiacs_plugin | 44227a4132b118b4f1f41d76b5aa7be23a1c66dd | [
"MIT"
] | null | null | null | cdci_spiacs_plugin/spiacs_lightcurve_query.py | andreatramacere/cdci_spiacs_plugin | 44227a4132b118b4f1f41d76b5aa7be23a1c66dd | [
"MIT"
] | null | null | null | """
Overview
--------
general info about this module
Classes and Inheritance Structure
----------------------------------------------
.. inheritance-diagram::
Summary
---------
.. autosummary::
list of the module you want
Module API
----------
"""
from __future__ import absolute_import, division, print_function
from builtins import (bytes, str, open, super, range,
zip, round, input, int, pow, object, map, zip)
__author__ = "Andrea Tramacere"
# Standard library
# eg copy
# absolute import rg:from copy import deepcopy
import os
# Dependencies
# eg numpy
# absolute import eg: import numpy as np
# Project
# relative import eg: from .mod import f
import ddosaclient as dc
# Project
# relative import eg: from .mod import f
import numpy as np
import pandas as pd
from astropy.table import Table
from astropy import time
from pathlib import Path
from astropy.io import fits as pf
from cdci_data_analysis.analysis.io_helper import FitsFile
from cdci_data_analysis.analysis.queries import LightCurveQuery
from cdci_data_analysis.analysis.products import LightCurveProduct,QueryProductList,QueryOutput
from cdci_data_analysis.analysis.io_helper import FilePath
from oda_api.data_products import NumpyDataProduct,NumpyDataUnit,BinaryData
from .spiacs_dataserver_dispatcher import SpiacsDispatcher
from .spiacs_dataserver_dispatcher import SpiacsAnalysisException
class SpicasLigthtCurve(LightCurveProduct):
    # NOTE: class name keeps its historical typo ("Ligtht") because other code
    # (e.g. SpiacsLightCurveQuery) references it by this spelling.
    """SPI-ACS light-curve product parsed from the dispatcher's text response."""

    def __init__(self, name, file_name, data, header, prod_prefix=None,
                 out_dir=None, src_name=None, meta_data=None):
        """Wrap *data* (a NumpyDataProduct) as a light-curve product.

        BUG FIX: the original used a mutable ``meta_data={}`` default argument;
        it is normalized from None here instead.
        """
        if meta_data is None:
            meta_data = {}
        if meta_data == {}:
            self.meta_data = {'product': 'spiacs_lc', 'instrument': 'spiacs', 'src_name': src_name}
        else:
            self.meta_data = meta_data
        self.meta_data['time'] = 'time'
        self.meta_data['rate'] = 'rate'
        self.meta_data['rate_err'] = 'rate_err'
        # NOTE(review): super(LightCurveProduct, self) skips
        # LightCurveProduct.__init__ and calls its base class directly, and the
        # base receives the *caller-supplied* meta_data rather than the
        # normalized self.meta_data -- both kept as in the original; confirm.
        super(LightCurveProduct, self).__init__(name=name,
                                                data=data,
                                                name_prefix=prod_prefix,
                                                file_dir=out_dir,
                                                file_name=file_name,
                                                meta_data=meta_data)

    @classmethod
    def build_from_res(cls,
                       res,
                       src_name='',
                       prod_prefix='spiacs_lc',
                       out_dir=None,
                       delta_t=None):
        """Build a one-element list of light-curve products from a response.

        The payload is expected to have two header lines and one trailer line;
        each data row reads "<time> <rate> <extra>".
        """
        lc_list = []
        if out_dir is None:
            out_dir = './'
        if prod_prefix is None:
            prod_prefix = ''
        file_name = src_name + '.fits'
        print('file name', file_name)
        meta_data = {}
        meta_data['src_name'] = src_name
        meta_data['time_bin'] = delta_t
        try:
            raw = res.content
            # requests exposes .content as bytes; the float() parsing below
            # needs text, so decode defensively.
            if isinstance(raw, bytes):
                raw = raw.decode()
            df = raw.splitlines()
            # BUG FIX: the original called np.zeros(len(df-3)) (TypeError on a
            # list), overwrote the whole 'rate'/'time' columns on every loop
            # iteration, and then indexed the line list with string keys --
            # every call therefore ended in the except branch.  df[2:-1] holds
            # exactly len(df)-3 data rows.
            data = np.zeros(len(df) - 3, dtype=[('rate', '<f8'), ('rate_err', '<f8'), ('time', '<f8')])
            for ID, d in enumerate(df[2:-1]):
                t, r, _ = d.split()
                data['time'][ID] = float(t)
                data['rate'][ID] = float(r)
            # Poisson-style errors: sqrt of the rate, column-wise.
            data['rate_err'] = np.sqrt(data['rate'])
            npd = NumpyDataProduct(data_unit=NumpyDataUnit(data=data,
                                                           hdu_type='table'), meta_data=meta_data)
            lc = cls(name=src_name, data=npd, header=None, file_name=file_name, out_dir=out_dir,
                     prod_prefix=prod_prefix,
                     src_name=src_name, meta_data=meta_data)
            lc_list.append(lc)
        except Exception as e:
            raise SpiacsAnalysisException(message='spiacs light curve failed: %s' % e.__repr__(), debug_message=str(e))
        return lc_list
class SpiacsLightCurveQuery(LightCurveQuery):
    """Light-curve query implementation for the SPI-ACS instrument plugin."""

    def __init__(self, name):
        super(SpiacsLightCurveQuery, self).__init__(name)
    def build_product_list(self, instrument, res, out_dir, prod_prefix='polar_lc',api=False):
        """Turn the raw dispatcher response *res* into a list of LC products."""
        src_name = 'spi_acs_lc '
        # Time bin is carried only as metadata on the product.
        delta_t = instrument.get_par_by_name('time_bin')._astropy_time_delta.sec
        prod_list = SpicasLigthtCurve.build_from_res(res,
                                                     src_name=src_name,
                                                     prod_prefix=prod_prefix,
                                                     out_dir=out_dir,
                                                     delta_t=delta_t)
        # print('spectrum_list',spectrum_list)
        return prod_list
    def get_data_server_query(self, instrument,
                              config=None):
        """Build the SpiacsDispatcher request from the instrument parameters.

        The backend expects a center time (T_ref) plus a time span in seconds,
        derived from the T1/T2 window.
        """
        src_name = 'spiacs_lc'
        T1=instrument.get_par_by_name('T1')._astropy_time
        T2=instrument.get_par_by_name('T2')._astropy_time
        delta_t=T2-T1
        # midpoint of the [T1, T2] window
        T_ref=(T2+T1)*.5
        t_bin = instrument.get_par_by_name('time_bin')._astropy_time_delta.sec
        param_dict=self.set_instr_dictionaries(T_ref.iso,delta_t.sec)
        print ('build here',config,instrument)
        q = SpiacsDispatcher(instrument=instrument,config=config,param_dict=param_dict)
        return q
    def set_instr_dictionaries(self, T_ref,delta_t):
        """Return the form fields posted to the SPI-ACS backend service."""
        return dict(
            requeststring='%s %s'%(T_ref,delta_t),
            submit="Submit",
            generate='ipnlc',
        )
    def process_product_method(self, instrument, prod_list,api=False):
        """Write the products to disk and assemble the QueryOutput.

        With api=False an HTML plot and file names for the web UI are produced;
        with api=True the numpy/binary payloads for oda_api are returned.
        """
        _names = []
        _lc_path = []
        _root_path=[]
        _html_fig = []
        _data_list=[]
        _binary_data_list=[]
        for query_lc in prod_list.prod_list:
            print('->name',query_lc.name)
            query_lc.write()
            if api == False:
                _names.append(query_lc.name)
                _lc_path.append(str(query_lc.file_path.name))
                #x_label='MJD-%d (days)' % mjdref,y_label='Rate (cts/s)'
                _html_fig.append(query_lc.get_html_draw(x=query_lc.data.data_unit[0].data['time'],
                                                        y=query_lc.data.data_unit[0].data['rate'],
                                                        dy=query_lc.data.data_unit[0].data['rate_err'],
                                                        title='Start Time: %s'%instrument.get_par_by_name('T1')._astropy_time.utc.value,
                                                        x_label='Time (s)',
                                                        y_label='Rate (cts/s)'))
            if api==True:
                _data_list.append(query_lc.data)
                #try:
                #    open(root_file_path.path, "wb").write(BinaryData().decode(res_json['root_file_b64']))
                #    lc.root_file_path = root_file_path
                #except:
                #    pass
                # NOTE(review): query_lc.root_file_path is not set anywhere in
                # this plugin's product class -- confirm it exists at runtime.
                _d,md=BinaryData(str(query_lc.root_file_path)).encode()
                _binary_data_list.append(_d)
        query_out = QueryOutput()
        if api == True:
            query_out.prod_dictionary['numpy_data_product_list'] = _data_list
            query_out.prod_dictionary['binary_data_product_list'] = _binary_data_list
        else:
            # NOTE(review): _root_path is always empty here.
            query_out.prod_dictionary['name'] = _names
            query_out.prod_dictionary['file_name'] = _lc_path
            query_out.prod_dictionary['root_file_name'] = _root_path
            query_out.prod_dictionary['image'] =_html_fig
            query_out.prod_dictionary['download_file_name'] = 'light_curves.tar.gz'
        query_out.prod_dictionary['prod_process_message'] = ''
        return query_out
    def get_dummy_products(self, instrument, config, out_dir='./'):
        """Not implemented for SPI-ACS; dummy-cache support was never ported."""
        raise RuntimeError('method to implement')
        # src_name = instrument.get_par_by_name('src_name').value
        #
        # dummy_cache = config.dummy_cache
        # delta_t = instrument.get_par_by_name('time_bin')._astropy_time_delta.sec
        # print('delta_t is sec', delta_t)
        # query_lc = LightCurveProduct.from_fits_file(inf_file='%s/query_lc.fits' % dummy_cache,
        #                                             out_file_name='query_lc.fits',
        #                                             prod_name='isgri_lc',
        #                                             ext=1,
        #                                             file_dir=out_dir)
        # print('name', query_lc.header['NAME'])
        # query_lc.name=query_lc.header['NAME']
        # #if src_name is not None:
        # #    if query_lc.header['NAME'] != src_name:
        # #        query_lc.data = None
        #
        # prod_list = QueryProductList(prod_list=[query_lc])
        #
        # return prod_list
| 30.912587 | 136 | 0.552766 | 7,418 | 0.839045 | 0 | 0 | 1,656 | 0.187309 | 0 | 0 | 2,207 | 0.249632 |
52d62a879cc5245c35a6c1ccbaf5bd54273348f1 | 334 | py | Python | addons/basic.py | dico-api/dicobot | 4471c4d2f20cb7890c2ae818d87d852d1a442471 | [
"MIT"
] | 1 | 2021-09-18T13:26:18.000Z | 2021-09-18T13:26:18.000Z | addons/basic.py | dico-api/dicobot | 4471c4d2f20cb7890c2ae818d87d852d1a442471 | [
"MIT"
] | null | null | null | addons/basic.py | dico-api/dicobot | 4471c4d2f20cb7890c2ae818d87d852d1a442471 | [
"MIT"
] | null | null | null | import dico_command
class Basic(dico_command.Addon):
    """Addon exposing basic utility commands for the bot."""
    @dico_command.command("ping")
    async def ping(self, ctx: dico_command.Context):
        # bot.ping is in seconds; report the gateway latency in milliseconds.
        await ctx.reply(f"Pong! {round(self.bot.ping*1000)}ms")
def load(bot: dico_command.Bot):
    """Hook called by dico_command when this addon module is loaded."""
    bot.load_addons(Basic)
def unload(bot: dico_command.Bot):
    """Hook called by dico_command when this addon module is unloaded."""
    bot.unload_addons(Basic)
| 20.875 | 63 | 0.712575 | 183 | 0.547904 | 0 | 0 | 146 | 0.437126 | 112 | 0.335329 | 44 | 0.131737 |
52d6a9a85d70a1ba3fa92ec4b665bd33d6738e19 | 11,643 | py | Python | lambda_code/lambda_function.py | ReadingPlus/aurora-gfs-snapshot-tool | 1e5394a55bc28f6dfa0c5c573adc0de7d594b57d | [
"MIT"
] | 2 | 2018-12-12T15:41:05.000Z | 2019-10-16T13:05:00.000Z | lambda_code/lambda_function.py | ReadingPlus/aurora-gfs-snapshot-tool | 1e5394a55bc28f6dfa0c5c573adc0de7d594b57d | [
"MIT"
] | null | null | null | lambda_code/lambda_function.py | ReadingPlus/aurora-gfs-snapshot-tool | 1e5394a55bc28f6dfa0c5c573adc0de7d594b57d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from datetime import datetime, timezone, date
import os
import sys
import boto3
import logging
import json
#setup global logger
logger = logging.getLogger("SnapTool")
#set log level
LOGLEVEL = os.environ['LogLevel'].strip()
logger.setLevel(LOGLEVEL.upper())
logging.getLogger("botocore").setLevel(logging.ERROR)
#setup global RDS client
rds = boto3.client("rds")
#rds snapshot tool tag name
toolTagKey="SnapTool"
def json_serial(obj):
    """Fallback serializer for json.dump: ISO-format datetimes and dates.

    Raises TypeError for any other type, as the json module requires of a
    ``default=`` hook.
    """
    if not isinstance(obj, (datetime, date)):
        raise TypeError("Type %s not serializable" % type(obj))
    return obj.isoformat()
def startTool(timeNow):
    """Run one snapshot-management pass as of *timeNow*.

    For every configured Aurora cluster: create a snapshot when any enabled
    retention period (yearly/monthly/weekly) matches today's date, then delete
    the snapshots that exceed each period's retention count.  A period with
    zero retention gets all of its snapshots removed.
    """
    dbClusters=[]
    # "ALL" means every cluster in the account/region; otherwise a CSV list.
    if os.environ['DatabaseNames'] == "ALL":
        resp=rds.describe_db_clusters()
        for db in resp['DBClusters']:
            dbClusters.append(db['DBClusterIdentifier'])
    else:
        dbClusters=os.environ['DatabaseNames'].split(",")
    # make all lowercase (cluster identifiers are lowercase in RDS)
    dbClusters=[x.lower() for x in dbClusters]
    verifyClusters(dbClusters)
    backupConfig=[]
    backupConfig.append({
        "timePeriod": "yearly",
        "retention": int(os.environ['YearlyRetention'])
    })
    backupConfig.append({
        "timePeriod": "monthly",
        "retention": int(os.environ['MonthlyRetention'])
    })
    backupConfig.append({
        "timePeriod": "weekly",
        "retention": int(os.environ['WeeklyRetention'])
    })
    for db in dbClusters:
        logger.info("Analyzing snapshot status for DB:" + db)
        newSnapPeriod = []
        snapsToDelete = {}
        for period in backupConfig:
            if(period['retention']> 0):
                if (validBackupTime(timeNow, period['timePeriod'])):
                    newSnapPeriod.append(period['timePeriod'])
                    # a new snapshot will be created below, so keep one fewer
                    # of the existing ones (retention - 1)
                    snapsToDelete[period['timePeriod']] = checkDeleteNeeded(db, period['timePeriod'], period['retention']-1)
                else:
                    # no new snapshot today; just enforce the retention count
                    snapsToDelete[period['timePeriod']] = checkDeleteNeeded(db, period['timePeriod'], period['retention'])
            else:
                logger.info("No " + period['timePeriod'] + " retention specified.")
                # retention disabled: delete any snaps if present
                deleteAllSnaps(db, period['timePeriod'])
        # One snapshot carries all matching period tags (e.g. "yearly monthly").
        if(newSnapPeriod != []):
            createSnap(db, newSnapPeriod)
        else:
            logger.info("No snapshot needed today.")
        # delete snaps if needed
        for timePeriod in snapsToDelete.keys():
            for snap in snapsToDelete[timePeriod]:
                deleteSnap(snap, timePeriod)
def validBackupTime(timeNow, timePeriod):
    """Return True when *timeNow* falls on the configured snapshot day.

    yearly:  BackupDate day-of-month and BackupMonth must both match.
    monthly: BackupDate day-of-month must match.
    weekly:  WeeklyBackupDay weekday name must match.
    Any other *timePeriod* aborts the run via sys.exit(1).

    NOTE(review): strftime("%B")/"%A" are locale dependent; this assumes the
    Lambda runtime uses the default (English) locale.
    """
    backupDate = int(os.environ['BackupDate'])
    backupMonth = os.environ['BackupMonth']
    weeklyBackupDay = os.environ['WeeklyBackupDay']
    logger.debug("Checking if " + timePeriod + " retention policy is satisfied.")
    if timePeriod == "yearly":
        matches = timeNow.day == backupDate and timeNow.strftime("%B") == backupMonth
    elif timePeriod == "monthly":
        matches = timeNow.day == backupDate
    elif timePeriod == "weekly":
        matches = timeNow.strftime("%A") == weeklyBackupDay
    else:
        logger.error("Invalid time period. Exiting")
        sys.exit(1)
    if matches:
        logger.debug("Backup date matches specifications")
        return True
    logger.debug("Backup date does not match specifications. Skipping snapshot")
    return False
def checkDeleteNeeded(db, timePeriod, retention):
    """Return the snapshots of *db*/*timePeriod* that exceed *retention*.

    getSnaps() returns snapshots oldest-first (or None); everything except the
    newest *retention* entries is returned for deletion.

    BUG FIX: the original returned ``snaps[:-retention]``, which for
    retention == 0 evaluates to ``snaps[:0]`` (empty) and therefore never
    deleted anything.  retention is 0 whenever a period keeps a single
    snapshot and a new one is about to be created (startTool passes
    retention - 1 in that case), so old snapshots silently accumulated.
    """
    snaps = getSnaps(db, timePeriod)
    if snaps is not None and len(snaps) >= retention:
        # Works for retention == 0 as well: every snapshot is returned.
        return snaps[:len(snaps) - retention]
    return []
def deleteAllSnaps(db, timePeriod):
    """Delete every *timePeriod* snapshot of *db* (used when retention is 0)."""
    snaps = getSnaps(db, timePeriod)
    if snaps is None:
        return
    logger.info("Removing any old " + timePeriod + " snapshots.")
    for snap in snaps:
        deleteSnap(snap, timePeriod)
def getSnaps(db, timePeriod):
    """Return snapshots of cluster *db* tagged with *timePeriod*, oldest first.

    Reads from the JSON debug store when dateSimulationDebugFile is set,
    otherwise from the RDS API (manual cluster snapshots whose SnapTool tag
    value contains *timePeriod*).  Returns None when nothing matches.
    """
    validSnaps = []
    if ("dateSimulationDebugFile" in os.environ):
        # snapshot info is stored in local file for debugging
        snapStore = {}
        try:
            with open(os.environ['dateSimulationDebugFile'], 'r') as fp:
                snapStore = json.load(fp)
        except Exception:
            logger.exception("Failed to load snapshot store file. Failing")
            sys.exit(1)
        for snap in snapStore[db]:
            # NOTE: substring containment, so a period name that is a substring
            # of another tag value would also match.
            if (timePeriod in snap['Tag']):
                # time period matches
                # convert date strings to timezone-aware datetime objects
                snap['SnapshotCreateTime'] = datetime.strptime(snap['SnapshotCreateTime'],
                                                               "%Y-%m-%dT%H:%M:%S.%f+00:00").replace(tzinfo=timezone.utc)
                validSnaps.append(snap)
    else:
        # Only manual snapshots are considered; automated ones are RDS-managed.
        snaps = rds.describe_db_cluster_snapshots(
            DBClusterIdentifier=db,
            SnapshotType="manual"
        )
        for s in snaps['DBClusterSnapshots']:
            tags = rds.list_tags_for_resource(ResourceName=s['DBClusterSnapshotArn'])
            for t in tags['TagList']:
                if t['Key'] == toolTagKey and timePeriod in t['Value']:
                    validSnaps.append(s)
    if (len(validSnaps) > 0):
        # sort snaps by date, ascending (oldest first)
        sortedArray = sorted(
            validSnaps,
            key=lambda x: x['SnapshotCreateTime'],
            reverse=False
        )
        return sortedArray
    else:
        return None
def createSnap(db, tags):
    """Create a manual snapshot of cluster *db* tagged with the periods *tags*.

    In debug mode a record with the simulated date (debugDate env var) is
    appended to the JSON store; otherwise a real RDS cluster snapshot named
    <db>-<YYYY-MM-DD> is created carrying the SnapTool tag with all period
    names space-joined (e.g. "yearly monthly weekly").
    """
    logger.info("Creating snapshot on DB:" + db + " with tags:" + str(tags))
    if ("dateSimulationDebugFile" in os.environ):
        # snapshot info is stored in local file for debugging
        # get simulated date from env var
        simDate = datetime.strptime(os.environ['debugDate'],
                                    "%Y-%m-%dT%H:%M:%S.%f+00:00").replace(tzinfo=timezone.utc)
        snap = {
            "Tag": " ".join(tags),
            "SnapshotCreateTime": simDate,
            "DBClusterIdentifier" : db
        }
        try:
            # read-modify-write of the whole JSON store
            with open(os.environ['dateSimulationDebugFile'], 'r') as json_data:
                snapJson= json.load(json_data)
            snapJson[db].append(snap)
            with open(os.environ['dateSimulationDebugFile'], 'w') as json_data:
                json.dump(snapJson, json_data, default=json_serial)
        except Exception:
            logger.exception("Failed to read or write snapshot store file. Failing")
            sys.exit(1)
    else:
        # Date-based name: one snapshot per cluster per day covers all periods.
        snapshotName=db + "-" + datetime.now().strftime('%Y-%m-%d')
        rds.create_db_cluster_snapshot(
            DBClusterSnapshotIdentifier=snapshotName,
            DBClusterIdentifier=db,
            Tags=[
                {
                    "Key": toolTagKey,
                    "Value": " ".join(tags)
                }
            ])
def deleteSnap(snapToDelete, timePeriod):
    """Drop the *timePeriod* retention tag from a snapshot, deleting the
    snapshot itself only when that was its last remaining period.

    A snapshot can serve several retention periods at once (tag value like
    "yearly monthly weekly"); it is physically removed only when the final
    period is dropped.
    """
    logger.debug("Received a delete request for the " + timePeriod + " time period.")
    if ("dateSimulationDebugFile" in os.environ):
        # snapshot info is stored in local file for debugging
        # read local file
        snapJson={}
        try:
            with open(os.environ['dateSimulationDebugFile'], 'r') as json_data:
                snapJson = json.load(json_data)
        except Exception:
            logger.exception("Failed to read snapshot store file. Failing")
            sys.exit(1)
        # rebuild the cluster's snapshot list, skipping the deleted entry
        newSnapList=[]
        for snap in snapJson[snapToDelete['DBClusterIdentifier']]:
            # convert date strings to datetime objects
            snap['SnapshotCreateTime'] = datetime.strptime(snap['SnapshotCreateTime'], "%Y-%m-%dT%H:%M:%S.%f+00:00").replace(tzinfo=timezone.utc)
            # snapshots are matched by calendar date, not exact timestamp
            if (snap['SnapshotCreateTime'].date() == snapToDelete['SnapshotCreateTime'].date()):
                # found snap with correct date
                tags = snap['Tag'].split(" ")
                if(len(tags) ==1 and tags[0]==timePeriod):
                    # last remaining period: drop the whole record
                    logger.info("Deleting " + timePeriod + " snap from test file")
                    continue
                else:
                    # update tag to remove this time period only
                    tags.remove(timePeriod)
                    snap['Tag']=" ".join(tags)
            # if we are NOT deleting the snap we add its info to a new list
            newSnapList.append(snap)
        snapJson[snapToDelete['DBClusterIdentifier']]=newSnapList
        try:
            # write the updated store back to file
            with open(os.environ['dateSimulationDebugFile'], 'w') as json_data:
                json.dump(snapJson, json_data, default=json_serial)
        except Exception:
            logger.exception("Failed to write snapshot store file. Failing")
            sys.exit(1)
    else:
        # using RDS information for snapshots
        # check tags on snapshot
        tags = rds.list_tags_for_resource(ResourceName=snapToDelete['DBClusterSnapshotArn'])
        for t in tags['TagList']:
            if t['Key'] == toolTagKey:
                tags = t['Value'].split(" ")
                if (len(tags) == 1 and tags[0] == timePeriod):
                    # if the time period specified is the only remaining timeperiod we can delete it
                    logger.info("Deleting snapshot: " + snapToDelete['DBClusterSnapshotIdentifier'] + " from RDS.")
                    # NOTE(review): the ARN is passed where an identifier is
                    # expected -- confirm the RDS API accepts ARNs here.
                    rds.delete_db_cluster_snapshot(DBClusterSnapshotIdentifier=snapToDelete['DBClusterSnapshotArn'])
                else:
                    # update tag to remove this time period only
                    logger.info("Removing time period tag:" + timePeriod + " from snapshot:" + snapToDelete['DBClusterSnapshotIdentifier'])
                    tags.remove(timePeriod)
                    # overwrite the SnapTool tag with the reduced period list
                    t['Value']= " ".join(tags)
                    rds.add_tags_to_resource(ResourceName=snapToDelete['DBClusterSnapshotArn'], Tags=[t])
                break
def verifyClusters(dbClusters):
    """Exit the process (status 1) if any name in *dbClusters* is not an
    existing Aurora cluster in this account/region."""
    known = {cluster['DBClusterIdentifier']
             for cluster in rds.describe_db_clusters()['DBClusters']}
    for name in dbClusters:
        logger.debug("Checking if DB:" + name + " is an existing Aurora Cluster.")
        if name not in known:
            logger.error("DB:" + name + " is NOT a valid cluster. Failing")
            sys.exit(1)
        logger.debug("DB:" + name + " is a valid cluster.")
def lambda_handler(event, context):
    """AWS Lambda entry point for the Aurora Snapshot Generator.

    Logs the environment, validates that every required configuration
    variable is present (exiting with status 1 otherwise), then hands the
    current UTC time to startTool().

    :param event: Lambda event payload (unused).
    :param context: Lambda runtime context (unused).
    """
    logger.info("Starting Aurora Snapshot Generator tool")
    logger.debug("Environment Variables:")
    for key in os.environ:
        logger.debug("Found {}={}".format(key, os.environ[key]))
    logger.debug("Checking for required env vars.")
    requiredEnvVars = ['DatabaseNames', 'WeeklyRetention', 'MonthlyRetention', 'YearlyRetention','WeeklyBackupDay', 'BackupDate', 'BackupMonth']
    for r in requiredEnvVars:
        # Idiom fix: test membership on os.environ directly instead of .keys().
        if r not in os.environ:
            logger.error("Required variable:" + r + " not found. Exiting.")
            sys.exit(1)
    timeNow = datetime.now(timezone.utc)
    logger.debug("Month:" + str(timeNow.strftime("%B")) + " Day:" + str(timeNow.day) + " DOW:" + str(timeNow.strftime("%A")))
    startTool(timeNow)
    logger.info("End of Aurora Snapshot Generator tool")
| 37.079618 | 145 | 0.592201 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,671 | 0.315297 |
52d6e3a13bb4f93454f95d8e2e0f6d4354044c0f | 4,176 | py | Python | bench/models.py | jareddk/bench | 2e6726a830bbe94a4daab1f892818bde0e3b9888 | [
"MIT-0"
] | null | null | null | bench/models.py | jareddk/bench | 2e6726a830bbe94a4daab1f892818bde0e3b9888 | [
"MIT-0"
] | null | null | null | bench/models.py | jareddk/bench | 2e6726a830bbe94a4daab1f892818bde0e3b9888 | [
"MIT-0"
] | null | null | null | import torch
from fielder import FieldClass
import yaml
class ModelBase(FieldClass, torch.nn.Module):
    """Base class for all models in this module: combines FieldClass
    (declarative field handling from `fielder` — presumably supplies the
    `__post_init__` hook used by subclasses; confirm in `fielder`) with
    `torch.nn.Module`."""
class FCNet(ModelBase):
    """Fully-connected ReLU network: d_in -> H -> (n_hidden hidden layers of
    width H) -> D_out.

    Input is taken from x["input"] and the result is returned as {"y": ...}.
    """
    # Field defaults (FieldClass-style declarations):
    d_in: int = 10      # input feature dimension
    H: int = 100        # hidden width
    n_hidden: int = 1   # number of hidden (middle) layers
    D_out: int = 1      # output dimension
    def __post_init__(self):
        super().__post_init__()
        # Input projection, n_hidden middle layers, and the output head.
        self.input_linear = torch.nn.Linear(self.d_in, self.H)
        self.middle_linears = torch.nn.ModuleList(
            [torch.nn.Linear(self.H, self.H) for _ in range(self.n_hidden)]
        )
        self.output_linear = torch.nn.Linear(self.H, self.D_out)
    def layer_n_activation(self, x, n_layer=-1):
        """Return the activation after layer *n_layer*: 0 = raw input,
        1 = after the input projection, i+2 = after middle layer i.
        The default -1 matches no index, so the final hidden activation
        is returned."""
        x = x["input"]
        if n_layer == 0:
            return x
        x = self.input_linear(x).clamp(min=0)  # clamp(min=0) == ReLU
        if n_layer == 1:
            return x
        for i, layer in enumerate(self.middle_linears):
            x = layer(x).clamp(min=0)
            if i + 2 == n_layer:
                return x
        return x
    def forward(self, x):
        """Map {"input": tensor} to {"y": tensor} through the ReLU MLP."""
        return {"y": self.output_linear(self.layer_n_activation(x))}
class SimpleTransformer(ModelBase):
    """
    DON'T USE THIS AS A REFERENCE IMPLEMENTATION
    It's BiDir, ie it doesn't have any attention masking,
    and it hasn't been debugged carefully

    Stacks n_layer SimpleTransformerLayer blocks over a (n_ctx, d_model)
    sequence, then flattens and applies one linear head to d_out.
    Note: unlike FCNet, forward() takes and returns plain tensors, not dicts.
    """
    d_model: int = 16  # embedding width
    m_mlp: int = 2     # MLP expansion factor (passed to each layer)
    n_head: int = 1    # attention heads; must divide d_model (asserted)
    n_ctx: int = 2     # context length
    n_layer: int = 2   # number of transformer layers
    d_out: int = 1     # output dimension of the final linear head
    def __post_init__(self):
        super().__post_init__()
        assert self.n_head * self.d_head == self.d_model
        self.layers = torch.nn.ModuleList(
            SimpleTransformerLayer(
                d_model=self.d_model,
                m_mlp=self.m_mlp,
                n_head=self.n_head,
                n_ctx=self.n_ctx,
            )
            for _ in range(self.n_layer)
        )
        # Head maps the flattened (n_ctx * d_model) torso output to d_out.
        self.output_linear = torch.nn.Linear(self.d_model * self.n_ctx, self.d_out)
    @property
    def d_head(self):
        # Per-head dimension; integer because of the assert in __post_init__.
        return self.d_model // self.n_head
    @property
    def D_in(self):
        # Flattened sequence size: context length times embedding width.
        return self.n_ctx * self.d_model
    def torso(self, x):
        """Reshape to (batch, n_ctx, d_model) and run the layer stack."""
        x = x.reshape(-1, self.n_ctx, self.d_model)
        for layer in self.layers:
            x = layer(x)
        return x
    def forward(self, x):
        """Flatten the torso output and project to (batch, d_out)."""
        x = self.torso(x).reshape(-1, self.D_in)
        return self.output_linear(x)
class SimpleTransformerLayer(ModelBase):
    """One transformer block: layer-norm, multi-head self-attention with a
    residual connection, another layer-norm, then a 2-layer ReLU MLP with a
    residual connection. No attention masking (bidirectional)."""
    d_model: int = 32  # embedding width
    m_mlp: int = 4     # MLP hidden size = m_mlp * d_model
    n_head: int = 1    # attention heads; must divide d_model (asserted)
    n_ctx: int = 4     # context length (declared but not used in this layer)
    def __post_init__(self):
        super().__post_init__()
        assert self.n_head * self.d_head == self.d_model
        # MLP branch.
        self.mlp_linear1 = torch.nn.Linear(self.d_model, self.m_mlp * self.d_model)
        self.mlp_linear2 = torch.nn.Linear(self.m_mlp * self.d_model, self.d_model)
        # Attention projections: q/k have no bias; v and the output do.
        self.query = torch.nn.Linear(self.d_model, self.d_model, bias=False)
        self.key = torch.nn.Linear(self.d_model, self.d_model, bias=False)
        self.value = torch.nn.Linear(self.d_model, self.d_model)
        self.dense = torch.nn.Linear(self.d_model, self.d_model)
    @property
    def d_head(self):
        # Per-head dimension.
        return self.d_model // self.n_head
    def reshape_as_heads(self, x):
        # (..., d_model) -> (..., n_head, d_head)
        new_shape = x.shape[:-1] + (self.n_head, self.d_head)
        return x.reshape(*new_shape)
    def reshape_as_d_model(self, x):
        # (..., n_head, d_head) -> (..., d_model)
        new_shape = x.shape[:-2] + (self.d_model,)
        return x.reshape(*new_shape)
    def attn(self, x):
        """Scaled dot-product self-attention over the full context (no mask).
        einsum subscripts: b=batch, s/t=positions, h=head, i=d_head."""
        q = self.reshape_as_heads(self.query(x))
        k = self.reshape_as_heads(self.key(x))
        v = self.reshape_as_heads(self.value(x))
        attn_logits = torch.einsum("bshi,bthi->bhst", q, k) / torch.sqrt(
            torch.tensor(self.d_head, dtype=torch.float)
        )
        attn_weights = torch.nn.Softmax(dim=-1)(attn_logits)
        attention_result = torch.einsum("bhst,bthi->bshi", attn_weights, v)
        result = self.reshape_as_d_model(attention_result)
        return self.dense(result)
    def mlp(self, x):
        # Two-layer MLP; clamp(0) is ReLU.
        m = self.mlp_linear1(x).clamp(0)
        return self.mlp_linear2(m)
    def forward(self, x):
        # NOTE(review): layer_norm uses normalized_shape=x.shape[1:], i.e. it
        # normalizes jointly over positions AND features rather than per-token
        # — confirm this is intended (matches the class's own warning above).
        x = torch.layer_norm(x, normalized_shape=x.shape[1:])
        a = self.attn(x)
        x = torch.layer_norm(x + a, normalized_shape=x.shape[1:])
        m = self.mlp(x)
        return x + m
| 28.8 | 83 | 0.597462 | 4,108 | 0.983716 | 0 | 0 | 218 | 0.052203 | 0 | 0 | 226 | 0.054119 |
52d7092eed9829703d91d03c000f7d21e7f84106 | 5,138 | py | Python | pytope/demo.py | XuShenLZ/pytope | b6f01582ff43250467fa953f3ec31d520f77716e | [
"MIT"
] | null | null | null | pytope/demo.py | XuShenLZ/pytope | b6f01582ff43250467fa953f3ec31d520f77716e | [
"MIT"
] | null | null | null | pytope/demo.py | XuShenLZ/pytope | b6f01582ff43250467fa953f3ec31d520f77716e | [
"MIT"
] | null | null | null | import numpy as np
from pytope import Polytope
import matplotlib.pyplot as plt
np.random.seed(1)
# Create a polytope in R^2 with -1 <= x1 <= 4, -2 <= x2 <= 3
lower_bound1 = (-1, -2) # [-1, -2]' <= x
upper_bound1 = (4, 3) # x <= [4, 3]'
P1 = Polytope(lb=lower_bound1, ub=upper_bound1)
# Print the halfspace representation A*x <= b and H = [A b]
print('P1: ', repr(P1))
print('A =\n', P1.A)
print('b =\n', P1.b)
print('H =\n', P1.H)
# Create a square polytope in R^2 from specifying the four vertices
V2 = np.array([[1, 0], [0, -1], [-1, 0], [0, 1]])
P2 = Polytope(V2)
# Print the array of vertices:
print('P2: ', repr(P2))
print('V =\n', P2.V)
# Create a triangle in R^2 from specifying three half spaces (inequalities)
A3 = [[1, 0], [0, 1], [-1, -1]]
b3 = (2, 1, -1.5)
P3 = Polytope(A3, b3)
# Print the halfspace representation A*x <= b and H = [A b]
print('P3: ', repr(P3))
print('A =\n', P3.A)
print('b =\n', P3.b)
print('H =\n', P3.H)
# Determine and print the vertices:
print('V =\n', P3.V)
# P4: P3 shifted by a point p4
p4 = (1.4, 0.7)
P4 = P3 + p4
# P5: P4 shifted by a point p5 (in negative direction)
p5 = [0.4, 2]
P5 = P4 - p5
# P6: P2 scaled by s6 and shifted by p6
s6 = 0.2
p6 = -np.array([[0.4], [1.6]])
P6 = s6 * P2 + p6
# P7: P2 rotated 20 degrees (both clockwise and counter-clockwise)
rot7 = np.pi / 9.0
rot_mat7 = np.array([[np.cos(rot7), -np.sin(rot7)],
[np.sin(rot7), np.cos(rot7)]])
P7 = rot_mat7 * P2
P7_inv = P2 * rot_mat7
# P8: -P6
P8 = -P6
# P9: The convex hull of a set of 30 random points in [1, 2]' <= x [2, 3]'
V9 = np.random.uniform((1, 2), (2, 3), (30, 2))
P9 = Polytope(V9)
P9.minimize_V_rep()
# P10: the Minkowski sum of two squares (one large and one rotated and smaller)
P10_1 = Polytope(lb=(-0.6, -0.6), ub=(0.6, 0.6))
P10_2 = rot_mat7 * Polytope(lb=(-0.3, -0.3), ub=(0.3, 0.3))
P10 = P10_1 + P10_2
# pytope demo, part 2 — Figure 1: every construction from part 1 on one axes.
# Plot all of the polytopes.
# See the matplotlib.patches.Polygon documentation for a list of valid kwargs
fig1, ax1 = plt.subplots(num=1)
plt.grid()
plt.axis([-1.5, 4.5, -2.5, 3.5])
P1.plot(ax1, fill=False, edgecolor='r', linewidth=2)
P2.plot(ax1, facecolor='g', edgecolor=(0, 0, 0), linewidth=1)
P3.plot(ax1, facecolor='b', edgecolor='k', linewidth=2, alpha=0.5)
P4.plot(ax1, facecolor='lightsalmon')
plt.scatter(P4.V[:, 0], P4.V[:, 1], c='k', marker='x') # the vertices of P4
# Polytope implements an additional keyword edgealpha:
P5.plot(ax1, fill=False, edgecolor='b', linewidth=8, edgealpha=0.2)
plt.plot(P5.centroid[0], P5.centroid[1], 'o') # the centroid of P5
P6.plot(ax1, facecolor='g', edgecolor=(0, 0, 0), linewidth=1)
P7.plot(ax1, facecolor='g', edgecolor=(0, 0, 0), alpha=0.3,
        linewidth=1, edgealpha=0.3)
P7_inv.plot(ax1, facecolor='g', edgecolor=(0, 0, 0), alpha=0.3,
            linewidth=1, edgealpha=0.3, linestyle='--')
P8.plot(ax1, facecolor='g', edgecolor=(0, 0, 0), alpha=0.3,
        linewidth=1, edgealpha=0.3)
P9.plot(ax1, facecolor='gray', alpha=0.6, edgecolor='k')
plt.plot(V9[:, 0], V9[:, 1], 'or', marker='o', markersize=2) # random points
plt.plot(P9.V[:, 0], P9.V[:, 1], 'og', marker='o', markersize=1) # P9's vertices
plt.title('Demonstration of various polytope operations')
# Figure 2: the Minkowski sum P10 of the two squares, with the smaller square
# re-plotted at each vertex of the larger one to illustrate the sum.
# Plot the Minkowski sum of two squares
fig2, ax2 = plt.subplots(num=2)
plt.grid()
plt.axis([-2.5, 2.5, -2.5, 2.5])
P10_1.plot(ax2, fill=False, edgecolor=(1, 0, 0))
P10_2.plot(ax2, fill=False, edgecolor=(0, 0, 1))
P10.plot(ax2, fill=False,
         edgecolor=(1, 0, 1), linestyle='--', linewidth=2)
for p in P10_1.V: # the smaller square + each of the vertices of the larger one
    (P10_2 + p).plot(ax2, facecolor='grey', alpha=0.4,
                     edgecolor='k', linewidth=0.5)
ax2.legend((r'$P$', r'$Q$', r'$P \oplus Q$'))
plt.title('Minkowski sum of two polytopes')
# pytope demo, part 3 — Figure 3: intersection (&) of two rotated rectangles.
# Plot two rotated rectangles and their intersection
rot1 = -np.pi / 18.0
rot_mat1 = np.array([[np.cos(rot1), -np.sin(rot1)],
                     [np.sin(rot1), np.cos(rot1)]])
rot2 = np.pi / 18.0
rot_mat2 = np.array([[np.cos(rot2), -np.sin(rot2)],
                     [np.sin(rot2), np.cos(rot2)]])
P_i1 = rot_mat1 * Polytope(lb=(-2, -1), ub=(1, 1))
P_i2 = rot_mat2 * Polytope(lb=(0, 0), ub=(2, 2))
P_i = P_i1 & P_i2 # intersection
fig3, ax3 = plt.subplots(num=3)
plt.grid()
plt.axis([-3.5, 3.5, -3.5, 3.5])
P_i1.plot(fill=False, edgecolor=(1, 0, 0), linestyle='--')
P_i2.plot(fill=False, edgecolor=(0, 0, 1), linestyle='--')
P_i.plot(fill=False,
         edgecolor=(1, 0, 1), linestyle='-', linewidth=2)
ax3.legend((r'$P$', r'$Q$', r'$P \cap Q$'))
plt.title('Intersection of two polytopes')
# Figure 4: Pontryagin (erosion) difference P ⊖ Q via the - operator.
# Plot two polytopes and their Pontryagin difference
P_m1 = Polytope(lb=(-3, -3), ub=(3, 3))
P_m2 = Polytope([[1, 0], [0, -1], [-1, 0], [0, 1]])
P_diff = P_m1 - P_m2
fig4, ax4 = plt.subplots(num=4)
plt.grid()
plt.axis([-3.5, 3.5, -3.5, 3.5])
P_m1.plot(fill=False, edgecolor=(1, 0, 0))
P_m2.plot(fill=False, edgecolor=(0, 0, 1))
P_diff.plot(fill=False,
            edgecolor=(1, 0, 1), linestyle='--', linewidth=2)
ax4.legend((r'$P$', r'$Q$', r'$P \ominus Q$'))
plt.title('Pontryagin difference of two polytopes')
# Common axis labels for all four figures.
plt.setp([ax1, ax2, ax3, ax4], xlabel=r'$x_1$', ylabel=r'$x_2$')
52d7acd4e38632fe10b9ea332f1e4413f361d8b4 | 3,458 | py | Python | apetools/commands/ifconfig.py | rsnakamura/oldape | b4d1c77e1d611fe2b30768b42bdc7493afb0ea95 | [
"Apache-2.0"
] | null | null | null | apetools/commands/ifconfig.py | rsnakamura/oldape | b4d1c77e1d611fe2b30768b42bdc7493afb0ea95 | [
"Apache-2.0"
] | null | null | null | apetools/commands/ifconfig.py | rsnakamura/oldape | b4d1c77e1d611fe2b30768b42bdc7493afb0ea95 | [
"Apache-2.0"
] | null | null | null |
#python libraries
import re
import os
# this package
from apetools.baseclass import BaseClass
from apetools.commons import enumerations
from apetools.commons import expressions
from apetools.commons.errors import ConfigurationError
MAC_UNAVAILABLE = "MAC Unavailable (use `netcfg`)"
class IfconfigError(ConfigurationError):
    """
    Raised when a user/configuration error is detected while
    interpreting ifconfig output.
    """
# end class Ifconfig error
class IfconfigCommand(BaseClass):
    """
    The IfconfigCommand runs `ifconfig` on a device (via its connection)
    and extracts the IP and MAC address of a single interface.
    """
    def __init__(self, connection, interface, operating_system=None):
        """
        :param:

         - `connection`: A connection to the device
         - `interface`: The interface to check
         - `operating_system` : The operating system on the devices.
           When None it is read lazily from `connection.os` on first use.
        """
        super(IfconfigCommand, self).__init__()
        self.connection = connection
        self.interface = interface
        self._operating_system = operating_system
        self._ip_address = None
        self._mac_address = None
        self._output = None
        self._ip_expression = None
        return
    @property
    def operating_system(self):
        """
        :return: the operating system for the device to query
            (lazily fetched from the connection and cached)
        """
        if self._operating_system is None:
            self._operating_system = self.connection.os
        return self._operating_system
    @property
    def ip_address(self):
        """
        :return: The IP Address of the interface (or None if not matched)

        NOTE(review): not cached (self._ip_address is never assigned) and
        each access re-runs ifconfig via `output` — confirm this is intended.
        """
        return self._match(self.ip_expression,
                           expressions.IP_ADDRESS_NAME)
    @property
    def ip_expression(self):
        """
        :return: a compiled expression to get the ip address (cached)

        NOTE(review): `expression` is only assigned for linux/android; any
        other OS value raises UnboundLocalError at the re.compile call.
        """
        if self._ip_expression is None:
            if self.operating_system == enumerations.OperatingSystem.linux:
                expression = expressions.LINUX_IP
            elif self.operating_system == enumerations.OperatingSystem.android:
                expression = expressions.ANDROID_IP
            self._ip_expression = re.compile(expression)
        return self._ip_expression
    @property
    def mac_address(self):
        """
        :return: MAC Address of the interface (cached after the first lookup);
            the MAC_UNAVAILABLE placeholder is returned on android.
        """
        if self._mac_address is None:
            if self.operating_system == enumerations.OperatingSystem.linux:
                expression = expressions.LINUX_MAC
            elif self.operating_system == enumerations.OperatingSystem.android:
                self._mac_address = MAC_UNAVAILABLE
                return self._mac_address
            self._mac_address = self._match(re.compile(expression),
                                            expressions.MAC_ADDRESS_NAME)
        return self._mac_address
    @property
    def output(self):
        """
        :return: The output of the ifconfig command on the device
            (re-executed on every access; self._output is never used)
        """
        return self.connection.ifconfig(self.interface)
    def _match(self, expression, name):
        """
        Search the ifconfig stdout lines for the named regex group.

        :param:

         - `expression`: The (compiled) regular expression to match
         - `name`: The group name to pull the match out of the line

        :return: The named-group that matched or None (stderr lines are
            logged as errors when nothing matched)
        """
        for line in self.output.output:
            match = expression.search(line)
            if match:
                return match.group(name)
        for line in self.output.error:
            self.logger.error(line)
        return
# end class IfconfigCommand
52da868b9fee80215c6c514d746db382c4f63313 | 2,471 | py | Python | neat-paralle-sonic.py | healthpotionstudios/MarioWorldAI-NEAT | 41df8ebe92702244883354dd65cbe0227bca97d0 | [
"MIT"
] | 7 | 2021-01-05T18:04:48.000Z | 2022-01-08T23:57:37.000Z | neat-paralle-sonic.py | healthpotionstudios/MarioWorldAI-NEAT | 41df8ebe92702244883354dd65cbe0227bca97d0 | [
"MIT"
] | null | null | null | neat-paralle-sonic.py | healthpotionstudios/MarioWorldAI-NEAT | 41df8ebe92702244883354dd65cbe0227bca97d0 | [
"MIT"
] | 3 | 2020-11-20T18:48:13.000Z | 2021-02-09T17:52:35.000Z | import retro # pip install gym-retro
import numpy as np # pip install numpy
import cv2 # pip install opencv-python
import neat # pip install neat-python
import pickle # pip install cloudpickle
class Worker(object):
    """Evaluates one NEAT genome by playing an episode of Sonic
    (Green Hill Zone Act 1) in the gym-retro emulator and scoring
    progress along the level's x axis."""
    def __init__(self, genome, config):
        # The genome to evaluate and the NEAT config used to build its net.
        self.genome = genome
        self.config = config
    def work(self):
        """Run one episode and return the genome's fitness.

        Each frame is downscaled by 8x, grayscaled, flattened and rescaled
        to [-1, 1] before being fed to the feed-forward network. Fitness is
        +1 for every new maximum x-position; the episode ends after 250
        frames without progress, or with a +100000 bonus when the end of
        the level is reached.
        """
        self.env = retro.make('SonicTheHedgehog-Genesis', 'GreenHillZone.Act1')
        self.env.reset()
        # Take one random action to obtain an initial observation frame.
        ob, _, _, _ = self.env.step(self.env.action_space.sample())
        inx = int(ob.shape[0]/8)
        iny = int(ob.shape[1]/8)
        done = False
        net = neat.nn.FeedForwardNetwork.create(self.genome, self.config)
        fitness = 0
        xpos = 0
        xpos_max = 0
        counter = 0  # frames since the last new max x-position
        imgarray = []
        while not done:
            # self.env.render()
            ob = cv2.resize(ob, (inx, iny))
            ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
            ob = np.reshape(ob, (inx, iny))
            imgarray = np.ndarray.flatten(ob)
            imgarray = np.interp(imgarray, (0, 254), (-1, +1))  # pixels -> [-1, 1]
            actions = net.activate(imgarray)
            ob, rew, done, info = self.env.step(actions)
            xpos = info['x']
            if xpos > xpos_max:
                xpos_max = xpos
                counter = 0
                fitness += 1
            else:
                counter += 1
            if counter > 250:
                # No progress for 250 frames: give up on this genome.
                done = True
            if xpos == info['screen_x_end'] and xpos > 500:
                # Reached the end of the level: large completion bonus.
                fitness += 100000
                done = True
        print(fitness)
        return fitness
def eval_genomes(genome, config):
    """NEAT fitness callback: run one emulator episode for *genome* and
    return its fitness score."""
    return Worker(genome, config).work()
# --- NEAT driver: configure, restore from checkpoint, run in parallel ---
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     'config-feedforward')
p = neat.Population(config)
# NOTE(review): the fresh population above is immediately replaced by the
# checkpoint restore below; it only matters if the restore line is removed.
p = neat.Checkpointer.restore_checkpoint('neat-checkpoint-13')
p.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
p.add_reporter(stats)
p.add_reporter(neat.Checkpointer(10))  # checkpoint every 10 generations
# Evaluate genomes on 10 parallel worker processes.
pe = neat.ParallelEvaluator(10, eval_genomes)
winner = p.run(pe.evaluate)
# Persist the best genome for later replay.
with open('winner.pkl', 'wb') as output:
    pickle.dump(winner, output, 1)
| 27.764045 | 79 | 0.533387 | 1,581 | 0.639822 | 0 | 0 | 0 | 0 | 0 | 0 | 257 | 0.104006 |
52dbaf4703b5efff66916435a9a7afda3fc69e88 | 9,420 | py | Python | shapespace/eval_shapespace.py | Chumbyte/DiGS | e2226c1bf294ee901970b617c3d6aff34220c169 | [
"MIT"
] | null | null | null | shapespace/eval_shapespace.py | Chumbyte/DiGS | e2226c1bf294ee901970b617c3d6aff34220c169 | [
"MIT"
] | null | null | null | shapespace/eval_shapespace.py | Chumbyte/DiGS | e2226c1bf294ee901970b617c3d6aff34220c169 | [
"MIT"
] | null | null | null | # Yizhak Ben-Shabat (Itzik) <sitzikbs@gmail.com>
# Chamin Hewa Koneputugodage <chamin.hewa@anu.edu.au>
import os, sys, time
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from shapespace.dfaust_dataset import DFaustDataSet
import torch
import utils.visualizations as vis
import numpy as np
import models.DiGS as DiGS
import torch.nn.parallel
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import utils.utils as utils
import shapespace.shapespace_dfaust_args as shapespace_dfaust_args
# from shapespace.shapespace_utils import logging_print, mkdir_ifnotexists
import trimesh
def optimize_latent(num_latent_iters, mnfld_points, nonmnfld_points, normals, network, latent_size, lr=1.0e-3):
    """Optimize a per-shape latent code with Adam so the network fits the
    given manifold / non-manifold samples; only the latent is updated (the
    optimizer holds just [latent], not the network parameters). Returns the
    detached latent of shape (latent_size,).

    NOTE(review): reads the module-level globals `criterion` and `weights`,
    which are defined later in this script — the function can only run after
    that setup has executed.
    """
    # mnfld_points: (1, pnts, 3), nonmnfld_points: (1, pnts', 3)
    assert len(mnfld_points.shape)==3 and len(nonmnfld_points.shape)==3, (mnfld_points.shape, nonmnfld_points.shape)
    assert mnfld_points.shape[0]==1 and nonmnfld_points.shape[0]==1, (mnfld_points.shape, nonmnfld_points.shape)
    assert mnfld_points.shape[2]==3 and nonmnfld_points.shape[2]==3, (mnfld_points.shape, nonmnfld_points.shape)
    network.train()
    mnfld_points.requires_grad_(); nonmnfld_points.requires_grad_()
    # Random init: N(0, 1/latent_size) entries.
    latent = torch.ones(latent_size).normal_(0, 1 / latent_size).to(mnfld_points.device) # (ls,)
    latent.requires_grad = True
    optimizer = torch.optim.Adam([latent], lr=lr)
    import time  # shadows the module-level `time` import within this function
    t0 = time.time()
    for i in range(num_latent_iters):
        # Concatenate the (broadcast) latent onto every sample point.
        mnfld_input = torch.cat([mnfld_points, latent.unsqueeze(0).unsqueeze(0).repeat(1,mnfld_points.shape[1],1)], dim=-1) # (1,pnts, 259)
        nonmnfld_input = torch.cat([nonmnfld_points, latent.unsqueeze(0).unsqueeze(0).repeat(1,nonmnfld_points.shape[1],1)], dim=-1) # (1,pnts', 259)
        output_pred = network(nonmnfld_input, mnfld_input)
        loss_dict, _ = criterion(output_pred, mnfld_points, nonmnfld_points, normals) # dict, mnfld_grad: (8, pnts, 3)
        optimizer.zero_grad()
        loss_dict["loss"].backward()
        optimizer.step()
        if i % 50 == 0 or i == num_latent_iters - 1:
            # First print shows the weighted loss terms, second shows them raw.
            print('Epoch: {} [{:4d}/{} ({:.0f}%)] Loss: {:.5f} = L_Mnfld: {:.5f} + '
                  'L_NonMnfld: {:.5f} + L_Nrml: {:.5f}+ L_Eknl: {:.5f} + L_Div: {:.5f} + L_Reg: {:.5f}'.format(
                0, i, num_latent_iters, 100. * i / num_latent_iters,
                loss_dict["loss"].item(), weights[0]*loss_dict["sdf_term"].item(), weights[1]*loss_dict["inter_term"].item(),
                weights[2]*loss_dict["normals_loss"].item(), weights[3]*loss_dict["eikonal_term"].item(),
                weights[4]*loss_dict["div_loss"].item(), weights[5]*loss_dict["latent_reg_term"].item()))
            print('Epoch: {} [{:4d}/{} ({:.0f}%)] Loss: {:.5f} = L_Mnfld: {:.5f} + '
                  'L_NonMnfld: {:.5f} + L_Nrml: {:.5f}+ L_Eknl: {:.5f} + L_Div: {:.5f} + L_Reg: {:.5f}'.format(
                0, i, num_latent_iters, 100. * i / num_latent_iters,
                loss_dict["loss"].item(), loss_dict["sdf_term"].item(), loss_dict["inter_term"].item(),
                loss_dict["normals_loss"].item(), loss_dict["eikonal_term"].item(),
                loss_dict["div_loss"].item(), loss_dict["latent_reg_term"].item()))
    print(("Time for latent opt", time.time()-t0))
    network.eval()
    return latent.detach()
# --- Script setup: parse args, build the network, dataset and dataloader ---
digs_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) #DiGS/
args = shapespace_dfaust_args.get_args()
print(args)
print()
gpu_idx, nl, n_points, batch_size, effective_batch_size, latent_size, num_epochs, logdir, \
model_name, n_loss_type, normalize_normal_loss, unsigned_n, unsigned_d, loss_type, seed, encoder_type,\
model_dirpath, inter_loss_type =\
    args.gpu_idx, args.nl, args.n_points, args.batch_size, args.effective_batch_size, args.latent_size, \
    args.num_epochs, args.logdir, args.model_name, args.n_loss_type, \
    args.normalize_normal_loss, args.unsigned_n, args.unsigned_d, args.loss_type, args.seed, args.encoder_type, \
    args.model_dirpath, args.inter_loss_type
# Evaluate at args.num_epochs
epoch = args.num_epochs
print("NL: {}, bs: {} ({}), latent size: {}, num epochs: {} ".format(nl, batch_size, effective_batch_size, latent_size, num_epochs))
assert effective_batch_size % batch_size == 0, (batch_size, effective_batch_size)
print("Loss Type ", loss_type, 'div decay', (args.div_decay, args.div_decay_params))
# get data loaders
# Note: seeds are set twice; the second pair (using args.seed) wins.
torch.manual_seed(0) #change random seed for training set (so it will be different from test set
np.random.seed(0)
torch.manual_seed(seed)
np.random.seed(seed)
# test_set = dataset.ReconDataset(file_path, n_points, n_samples, args.grid_res, args.nonmnfld_sample_type, requires_dist=args.requires_dist)
# test_dataloader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=4,
#                                               pin_memory=True)
# get model
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_idx)
device = torch.device("cuda:" + str(gpu_idx) if (torch.cuda.is_available()) else "cpu")
DiGSNet = DiGS.DiGSNetwork(latent_size=latent_size, in_dim=3, decoder_hidden_dim=args.decoder_hidden_dim,
                           nl=args.nl, encoder_type=args.encoder_type,
                           decoder_n_hidden_layers=args.decoder_n_hidden_layers, init_type=args.init_type)
DiGSNet.to(device)
if args.parallel:
    if (device.type == 'cuda'):
        DiGSNet = torch.nn.DataParallel(DiGSNet)
# Main eval arguments
test_split_path = args.test_split_path
split_name = os.path.basename(test_split_path)
weights = args.test_loss_weights
num_latent_iters = args.num_latent_iters
resolution = args.test_res
criterion = DiGS.DiGSLoss(weights=args.weights, loss_type=loss_type, div_decay=args.div_decay,
                          div_type=args.div_type, div_clamp=args.div_clamp)
batch_size = 1 # For eval, always have bs=1
dataset = DFaustDataSet(args.dataset_path, test_split_path, gt_path=args.gt_path, scan_path=args.scan_path, \
                        with_normals=True, points_batch=n_points)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                         # shuffle=True,
                                         shuffle=False,
                                         num_workers=args.threads,
                                         pin_memory=True)
# Load the trained model checkpoint for the chosen epoch and switch to eval.
model_path = os.path.join(logdir, "trained_models" , 'model_{}.pkl'.format(epoch))
print("Loading from {}".format(model_path))
DiGSNet.load_state_dict(torch.load(model_path))
DiGSNet.eval()
for batch_idx, data in enumerate(dataloader):
mnfld_points, nonmnfld_points, normals, indices = data # (bs, pnts, 3), (bs, pnts*9/8, 3), (bs, pnts, 3), (bs,)
mnfld_points, nonmnfld_points, normals = mnfld_points.cuda(), nonmnfld_points.cuda(), normals.cuda()
assert len(indices) == 1
index = indices[0]
info = dataset.get_info(index)
shapename = str.join('_', info)
pc_path = os.path.join(*info)
gt_mesh_filename = dataset.gt_files[index]
normalization_params_filename = dataset.normalization_files[index]
normalization_params = np.load(normalization_params_filename,allow_pickle=True)
scale = normalization_params.item()['scale']
center = normalization_params.item()['center']
latent = optimize_latent(num_latent_iters, mnfld_points, nonmnfld_points, normals, DiGSNet, latent_size)
mnfld_points = mnfld_points.detach().squeeze() # (pnts, 3)
with torch.no_grad():
print('before digs implicit2mesh'); t0 = time.time()
bbox = np.array([mnfld_points.min(axis=0)[0].cpu().numpy(), mnfld_points.max(axis=0)[0].cpu().numpy()]).transpose()
# bbox = np.array([[-1,1], [-1,1], [-1,1]])*2
# bbox = np.array([[-10,10], [-10,10], [-10,10]])
gt_points = trimesh.sample.sample_surface(trimesh.load(gt_mesh_filename),30000)[0]
try:
mesh_dict = utils.implicit2mesh(decoder=DiGSNet.decoder, latent=latent.cpu(), grid_res=resolution, translate=-center, scale=scale,
get_mesh=True, device=next(DiGSNet.parameters()).device, bbox=bbox)
results_points = trimesh.sample.sample_surface(mesh_dict["mesh_obj"],30000)[0]
print('after digs implicit2mesh', time.time()-t0); t0 = time.time()
out_dir = "{}/vis_results/epoch_{}".format(logdir, epoch)
if not os.path.exists(out_dir):
os.makedirs(out_dir, exist_ok=True)
vis.plot_mesh(mesh_dict["mesh_trace"], mesh=mesh_dict["mesh_obj"],
output_ply_path="{}/{}_{}.ply".format(out_dir,split_name,shapename), show_ax=False,
title_txt=shapename, show=False)
print('after digs plot_mesh', time.time()-t0); t0 = time.time()
# chamfer, hausdorff, one_sided_results, pod_data, cdp_data, malcv_data = utils.recon_metrics(results_points*scale+center, gt_points)
chamfer, hausdorff, one_sided_results, pod_data, cdp_data, malcv_data = utils.recon_metrics(results_points, gt_points)
print(chamfer, hausdorff, *one_sided_results)
print('after digs res', time.time()-t0); t0 = time.time()
except ValueError as e:
print(e)
| 52.625698 | 149 | 0.670276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,007 | 0.213057 |
52dbfba445e18389a6ca13cbc5580b99d308ec2f | 208 | py | Python | html/semantics/scripting-1/the-script-element/module/resources/delayed-modulescript.py | meyerweb/wpt | f04261533819893c71289614c03434c06856c13e | [
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | html/semantics/scripting-1/the-script-element/module/resources/delayed-modulescript.py | meyerweb/wpt | f04261533819893c71289614c03434c06856c13e | [
"BSD-3-Clause"
] | 7,642 | 2018-05-28T09:38:03.000Z | 2022-03-31T20:55:48.000Z | html/semantics/scripting-1/the-script-element/module/resources/delayed-modulescript.py | meyerweb/wpt | f04261533819893c71289614c03434c06856c13e | [
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | import time
def main(request, response):
    """wptserve handler: pause for the requested number of milliseconds
    (query param ``ms``, default 500), then serve a tiny JS module that
    sets ``delayedLoaded``."""
    milliseconds = float(request.GET.first(b"ms", 500))
    time.sleep(milliseconds / 1000.0)
    headers = [(b"Content-type", b"text/javascript")]
    return headers, u"export let delayedLoaded = true;"
| 26 | 87 | 0.673077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.350962 |
52df7f048377d6e209717eb8a5895ca66dae1ca8 | 12,533 | py | Python | ingestion/src/metadata/utils/sql_lineage.py | TeddyCr/OpenMetadata | 9e11ad54d99a3317c27c101269768e7aa2c6eb5f | [
"Apache-2.0"
] | null | null | null | ingestion/src/metadata/utils/sql_lineage.py | TeddyCr/OpenMetadata | 9e11ad54d99a3317c27c101269768e7aa2c6eb5f | [
"Apache-2.0"
] | null | null | null | ingestion/src/metadata/utils/sql_lineage.py | TeddyCr/OpenMetadata | 9e11ad54d99a3317c27c101269768e7aa2c6eb5f | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper functions to handle SQL lineage operations
"""
import traceback
from logging.config import DictConfigurator
from typing import Any, Iterable, List, Optional
from metadata.generated.schema.api.lineage.addLineage import AddLineageRequest
from metadata.generated.schema.entity.data.table import Table
from metadata.generated.schema.type.entityLineage import (
ColumnLineage,
EntitiesEdge,
LineageDetails,
)
from metadata.generated.schema.type.entityReference import EntityReference
from metadata.ingestion.ometa.client import APIError
from metadata.ingestion.ometa.ometa_api import OpenMetadata
from metadata.utils import fqn
from metadata.utils.helpers import get_formatted_entity_name
from metadata.utils.logger import utils_logger
logger = utils_logger()
column_lineage_map = {}
def split_raw_table_name(database: str, raw_name: str) -> dict:
    """Split a raw table reference into its database / schema / table parts.

    For dotted names (``schema.table`` or ``db.schema.table``) the last two
    FQN components are used as schema and table; a ``<default>`` schema is
    normalized to None. A plain (undotted) name is the table itself.

    Bug fix: the original left ``table`` unbound (NameError) when
    *raw_name* contained no dot.

    :param database: database name reported by the source system
    :param raw_name: raw table reference extracted from a query
    :return: dict with keys 'database', 'database_schema', 'table'
    """
    database_schema = None
    table = raw_name  # undotted name: the whole string is the table name
    if "." in raw_name:
        database_schema, table = fqn.split(raw_name)[-2:]
        if database_schema == "<default>":
            database_schema = None
    return {"database": database, "database_schema": database_schema, "table": table}
def get_column_fqn(table_entity: Table, column: str) -> Optional[str]:
    """Return the fully qualified name of *column* within *table_entity*,
    matching case-insensitively; None when the table is missing or the
    column is not found."""
    if not table_entity:
        return None
    wanted = column.lower()
    for candidate in table_entity.columns:
        if candidate.name.__root__.lower() == wanted:
            return candidate.fullyQualifiedName.__root__
    return None
def search_table_entities(
    metadata: OpenMetadata,
    service_name: str,
    database: Optional[str],
    database_schema: Optional[str],
    table: str,
) -> Optional[List[Table]]:
    """
    Method to get table entity from database, database_schema & table name.
    It uses ES to build the FQN if we miss some info and will run
    a request against the API to find the Entity.

    Returns a (possibly empty) list of matching Table entities, or None when
    the lookup raised — any exception is logged and swallowed here.
    """
    try:
        # fqn.build may return several candidate FQNs when some parts
        # (database/schema) are unknown and ES has multiple matches.
        table_fqns = fqn.build(
            metadata,
            entity_type=Table,
            service_name=service_name,
            database_name=database,
            schema_name=database_schema,
            table_name=table,
            fetch_multiple_entities=True,
        )
        table_entities: Optional[List[Table]] = []
        for table_fqn in table_fqns or []:
            # Confirm each candidate FQN against the API; keep only real hits.
            table_entity: Table = metadata.get_by_name(Table, fqn=table_fqn)
            if table_entity:
                table_entities.append(table_entity)
        return table_entities
    except Exception as err:
        logger.debug(traceback.format_exc())
        logger.error(err)
        # Implicitly returns None on failure.
def get_table_entities_from_query(
    metadata: OpenMetadata,
    service_name: str,
    database_name: str,
    database_schema: str,
    table_name: str,
) -> List[Table]:
    """
    Fetch data from API and ES with a fallback strategy.
    If the sys data is incorrect, use the table name ingredients.
    :param metadata: OpenMetadata client
    :param service_name: Service being ingested.
    :param database_name: Name of the database informed on db sys results
    :param database_schema: Name of the schema informed on db sys results
    :param table_name: Table name extracted from query. Can be `table`, `schema.table` or `db.schema.table`
    :return: List of tables matching the criteria
        (implicitly None when both lookups come back empty)
    """
    # First try to find the data from the given db and schema
    # Otherwise, pick it up from the table_name str
    # Finally, try with upper case
    split_table = table_name.split(".")
    empty_list: List[Any] = [None]  # Otherwise, there's a typing error in the concat
    # Left-pad with None so the 3-way unpack works for `table`,
    # `schema.table` and `db.schema.table` alike.
    database_query, schema_query, table = (
        empty_list * (3 - len(split_table))
    ) + split_table
    # Attempt 1: trust the db/schema reported by the source system.
    table_entities = search_table_entities(
        metadata=metadata,
        service_name=service_name,
        database=database_name,
        database_schema=database_schema,
        table=table,
    )
    if table_entities:
        return table_entities
    # Attempt 2: fall back to the db/schema parsed out of the query string.
    table_entities = search_table_entities(
        metadata=metadata,
        service_name=service_name,
        database=database_query,
        database_schema=schema_query,
        table=table,
    )
    if table_entities:
        return table_entities
def get_column_lineage(
    to_entity: Table,
    from_entity: Table,
    to_table_raw_name: str,
    from_table_raw_name: str,
) -> List[ColumnLineage]:
    """
    Build the column-level lineage between two tables from the module-level
    ``column_lineage_map`` populated while parsing the query.

    :param to_entity: Target table entity
    :param from_entity: Source table entity
    :param to_table_raw_name: Raw target table name (map key)
    :param from_table_raw_name: Raw source table name (map key)
    :return: One ColumnLineage per resolvable (target, source) column pair
    """
    lineage: List[ColumnLineage] = []
    target_map = column_lineage_map.get(to_table_raw_name) or {}
    column_pairs = target_map.get(from_table_raw_name) or []
    for target_col, source_col in column_pairs:
        target_fqn = get_column_fqn(to_entity, target_col)
        source_fqn = get_column_fqn(from_entity, source_col)
        # Skip pairs where either column could not be resolved to a FQN.
        if target_fqn and source_fqn:
            lineage.append(
                ColumnLineage(fromColumns=[source_fqn], toColumn=target_fqn)
            )
    return lineage
def _create_lineage_by_table_name(
    metadata: OpenMetadata,
    from_table: str,
    to_table: str,
    service_name: str,
    database_name: Optional[str],
    schema_name: Optional[str],
    query: str,
) -> Optional[Iterable[AddLineageRequest]]:
    """
    Yield a lineage edge between every table entity matching ``from_table``
    and every one matching ``to_table``.

    Column-level lineage is attached when the module-level
    ``column_lineage_map`` holds entries for the raw table-name pair.

    :param metadata: OpenMetadata client
    :param from_table: Raw source table name extracted from the query
    :param to_table: Raw target table name extracted from the query
    :param service_name: Service being ingested
    :param database_name: Database reported by the source system, if any
    :param schema_name: Schema reported by the source system, if any
    :param query: Query that produced the lineage
    """
    try:
        from_table_entities = get_table_entities_from_query(
            metadata=metadata,
            service_name=service_name,
            database_name=database_name,
            database_schema=schema_name,
            table_name=from_table,
        )
        to_table_entities = get_table_entities_from_query(
            metadata=metadata,
            service_name=service_name,
            database_name=database_name,
            database_schema=schema_name,
            table_name=to_table,
        )
        # Cross product: a raw table name can resolve to multiple entities.
        for from_entity in from_table_entities or []:
            for to_entity in to_table_entities or []:
                col_lineage = get_column_lineage(
                    to_entity=to_entity,
                    to_table_raw_name=str(to_table),
                    from_entity=from_entity,
                    from_table_raw_name=str(from_table),
                )
                lineage_details = None
                if col_lineage:
                    lineage_details = LineageDetails(
                        sqlQuery=query, columnsLineage=col_lineage
                    )
                if from_entity and to_entity:
                    lineage = AddLineageRequest(
                        edge=EntitiesEdge(
                            fromEntity=EntityReference(
                                id=from_entity.id.__root__,
                                type="table",
                            ),
                            toEntity=EntityReference(
                                id=to_entity.id.__root__,
                                type="table",
                            ),
                        )
                    )
                    if lineage_details:
                        lineage.edge.lineageDetails = lineage_details
                    yield lineage
    except Exception as err:
        # Fix: the original logged the full traceback twice (debug AND error)
        # and never surfaced `err`. Keep the trace at debug; emit a concise
        # error message with the actual exception.
        logger.debug(traceback.format_exc())
        logger.error(
            "Failed to create lineage [%s] -> [%s]: %s", from_table, to_table, err
        )
def populate_column_lineage_map(raw_column_lineage):
    """
    Turn the raw (source, target) column pairs produced by sqllineage into a
    nested map: ``{target table: {source table: [(target col, source col)]}}``.

    :param raw_column_lineage: Iterable of (source, target) column objects,
        each exposing ``parent`` and ``raw_name``
    :return: Nested dict keyed by target table, then source table
    """
    lineage_map = {}
    # Bail out on empty input or when the entries are not (source, target) pairs.
    if not raw_column_lineage or len(raw_column_lineage[0]) != 2:
        return lineage_map
    for source, target in raw_column_lineage:
        by_source = lineage_map.setdefault(str(target.parent), {})
        by_source.setdefault(str(source.parent), []).append(
            (target.raw_name, source.raw_name)
        )
    return lineage_map
def get_lineage_by_query(
    metadata: OpenMetadata,
    service_name: str,
    database_name: Optional[str],
    schema_name: Optional[str],
    query: str,
) -> Optional[Iterable[AddLineageRequest]]:
    """
    Parse the query to get source, target and intermediate table names and
    yield an AddLineageRequest for each (source, target) combination found.

    :param metadata: OpenMetadata client
    :param service_name: Service being ingested
    :param database_name: Database reported by the source system, if any
    :param schema_name: Schema reported by the source system, if any
    :param query: Query to parse for lineage
    """
    # Prevent sqllineage from modifying the logger config:
    # disable the DictConfigurator.configure method while importing LineageRunner
    configure = DictConfigurator.configure
    DictConfigurator.configure = lambda _: None
    from sqllineage.runner import LineageRunner

    # Reverting changes after import is done
    DictConfigurator.configure = configure
    column_lineage_map.clear()
    try:
        result = LineageRunner(query)
        raw_column_lineage = result.get_column_lineage()
        column_lineage_map.update(populate_column_lineage_map(raw_column_lineage))
        # When the query goes through intermediate (e.g. temp) tables, link
        # source -> intermediate and intermediate -> target separately.
        for intermediate_table in result.intermediate_tables:
            for source_table in result.source_tables:
                yield from _create_lineage_by_table_name(
                    metadata,
                    from_table=str(source_table),
                    to_table=str(intermediate_table),
                    service_name=service_name,
                    database_name=database_name,
                    schema_name=schema_name,
                    query=query,
                )
            for target_table in result.target_tables:
                yield from _create_lineage_by_table_name(
                    metadata,
                    from_table=str(intermediate_table),
                    to_table=str(target_table),
                    service_name=service_name,
                    database_name=database_name,
                    schema_name=schema_name,
                    query=query,
                )
        if not result.intermediate_tables:
            for target_table in result.target_tables:
                for source_table in result.source_tables:
                    yield from _create_lineage_by_table_name(
                        metadata,
                        from_table=str(source_table),
                        to_table=str(target_table),
                        service_name=service_name,
                        database_name=database_name,
                        schema_name=schema_name,
                        query=query,
                    )
    except Exception as err:
        # Fix: the original warning was an f-string with no placeholder and
        # dropped the actual error. Log the trace at debug and the error
        # detail at warning with lazy %-formatting.
        logger.debug(traceback.format_exc())
        logger.warning("Ingesting lineage failed: %s", err)
def get_lineage_via_table_entity(
    metadata: OpenMetadata,
    table_entity: Table,
    database_name: str,
    schema_name: str,
    service_name: str,
    query: str,
) -> Optional[Iterable[AddLineageRequest]]:
    """
    Yield lineage requests from every source table found in ``query`` to the
    given ``table_entity`` (e.g. a view and its defining query).

    :param metadata: OpenMetadata client
    :param table_entity: Target table/view entity
    :param database_name: Database the entity belongs to
    :param schema_name: Schema the entity belongs to
    :param service_name: Service being ingested
    :param query: Query defining the entity
    """
    # Prevent sqllineage from modifying the logger config:
    # disable the DictConfigurator.configure method while importing LineageRunner
    configure = DictConfigurator.configure
    DictConfigurator.configure = lambda _: None
    from sqllineage.runner import LineageRunner

    # Reverting changes after import is done
    DictConfigurator.configure = configure
    column_lineage_map.clear()
    try:
        parser = LineageRunner(query)
        to_table_name = table_entity.name.__root__
        for from_table_name in parser.source_tables:
            # Fix: the original did `yield from gen or []`; a generator object
            # is always truthy, so the `or []` branch was dead code.
            yield from _create_lineage_by_table_name(
                metadata,
                from_table=str(from_table_name),
                to_table=f"{schema_name}.{to_table_name}",
                service_name=service_name,
                database_name=database_name,
                schema_name=schema_name,
                query=query,
            )
    except Exception as err:
        # Fix: surface the captured exception (the original bound it to `e`
        # but never logged it).
        logger.error("Failed to create view lineage: %s", err)
        logger.debug(f"Query : {query}")
        logger.debug(traceback.format_exc())
| 35.403955 | 107 | 0.629698 | 0 | 0 | 6,185 | 0.493497 | 0 | 0 | 0 | 0 | 2,379 | 0.189819 |
52dff2e2e050102f09392ebcaa10f06cee060760 | 8,695 | py | Python | dcgan/utils/train.py | Ontheway361/ak47GAN | 7a1b4b3c2159ab0e2662f3d0a2cedccb30e62f3c | [
"MIT"
] | null | null | null | dcgan/utils/train.py | Ontheway361/ak47GAN | 7a1b4b3c2159ab0e2662f3d0a2cedccb30e62f3c | [
"MIT"
] | null | null | null | dcgan/utils/train.py | Ontheway361/ak47GAN | 7a1b4b3c2159ab0e2662f3d0a2cedccb30e62f3c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
import torch
import numpy as np
import torchvision
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from utils import *
from IPython import embed
class DCGAN(object):
    """DCGAN training driver.

    Wires up the generator/discriminator pair, the data loader and the
    adversarial training loop; records per-iteration losses and periodic
    fake-image samples for later visualization.
    """

    def __init__(self, args):
        """Store CLI args and prepare empty registries for models/data/results."""
        self.args = args
        self.model = dict()
        self.data = dict()
        self.rescache = dict()
        # True only when GPU use is requested AND CUDA is actually available.
        self.device = args.use_gpu and torch.cuda.is_available()

    def _report_settings(self):
        ''' Report the settings '''
        # NOTE(review): the local name `str` shadows the builtin; harmless here
        # but worth renaming.
        str = '-' * 16
        print('%sEnvironment Versions%s' % (str, str))
        print("- Python : {}".format(sys.version.strip().split('|')[0]))
        print("- PyTorch : {}".format(torch.__version__))
        print("- TorchVison: {}".format(torchvision.__version__))
        print("- USE_GPU : {}".format(self.device))
        print('-' * 52)

    def _model_loader(self):
        """Instantiate the GAN parts, move them to GPU (optionally DataParallel)
        and, if a resume path is given, restore weights from that checkpoint."""
        self.model['generator'] = Generator(self.args.in_dim, self.args.gchannels)
        self.model['discriminator'] = Discriminator(self.args.dchannels)
        self.model['criterion'] = nn.BCELoss()
        self.model['opti_gene'] = optim.Adam(self.model['generator'].parameters(), \
                                      lr=self.args.base_lr, betas=(self.args.beta, 0.999))
        self.model['opti_disc'] = optim.Adam(self.model['discriminator'].parameters(), \
                                      lr=self.args.base_lr, betas=(self.args.beta, 0.999))
        # self.model['scheduler'] = torch.optim.lr_scheduler.MultiStepLR(
        #                              self.model['optimizer'], milestones=[12, 20, 30, 45], gamma=self.args.gamma)
        if self.device:
            self.model['generator'] = self.model['generator'].cuda()
            self.model['discriminator'] = self.model['discriminator'].cuda()
            if len(self.args.gpu_ids) > 1:
                self.model['generator'] = torch.nn.DataParallel(self.model['generator'], device_ids=self.args.gpu_ids)
                self.model['discriminator'] = torch.nn.DataParallel(self.model['discriminator'], device_ids=self.args.gpu_ids)
                torch.backends.cudnn.benchmark = True
                print('Parallel mode was going ...')
            else:
                print('Single-gpu mode was going ...')
        else:
            print('CPU mode was going ...')
        if len(self.args.resume) > 2:
            # map_location keeps checkpoint tensors on CPU so resume works on any device.
            checkpoint = torch.load(self.args.resume, map_location=lambda storage, loc: storage)
            self.args.start = checkpoint['epoch']
            self.model['generator'].load_state_dict(checkpoint['generator'])
            self.model['discriminator'].load_state_dict(checkpoint['discriminator'])
            print('Resuming the train process at %3d epoches ...' % self.args.start)
        print('Model loading was finished ...')

    def _data_loader(self):
        """Build the training DataLoader, a fixed noise batch for monitoring,
        and empty loss/sample caches."""
        self.data['train_loader'] = DataLoader(
            CelebA(args=self.args),
            batch_size = self.args.batch_size, \
            shuffle = True,\
            num_workers= self.args.workers)
        # Fixed noise: the same 64 latent vectors are reused so successive
        # generator samples are directly comparable across training.
        self.data['fixed_noise'] = torch.randn(64, self.args.in_dim ,1, 1)
        if self.device:
            self.data['fixed_noise'] = self.data['fixed_noise'].cuda()
        self.rescache['gloss'] = []
        self.rescache['dloss'] = []
        self.rescache['fake'] = []
        print('Data loading was finished ...')

    def _model_train(self, epoch = 0):
        """Run one epoch of adversarial training.

        Returns (total_dloss, total_gloss): the summed discriminator and
        generator losses over the epoch's iterations.
        """
        total_dloss, total_gloss = 0, 0
        for idx, imgs in enumerate(self.data['train_loader']):
            # update discriminator
            self.model['discriminator'].train()
            self.model['generator'].eval()
            imgs.requires_grad = False
            if self.device:
                imgs = imgs.cuda()
            b_size = imgs.size(0)
            self.model['discriminator'].zero_grad()
            # Real images are labelled 1 for the discriminator pass.
            gty = torch.full((b_size,), 1)
            if self.device:
                gty = gty.cuda()
            predy = self.model['discriminator'](imgs).view(-1)
            dloss_real = self.model['criterion'](predy, gty)
            dloss_real.backward()
            noise = torch.randn(b_size, self.args.in_dim, 1, 1)
            if self.device:
                noise = noise.cuda()
            fake = self.model['generator'](noise)
            # Reuse the label tensor as 0 (fake) for the second pass.
            gty.fill_(0) # TODO
            # detach() keeps generator gradients out of the discriminator step.
            predy = self.model['discriminator'](fake.detach()).view(-1)
            dloss_fake = self.model['criterion'](predy, gty)
            dloss_fake.backward()
            self.model['opti_disc'].step()
            d_loss_real = dloss_real.mean().item()
            d_loss_fake = dloss_fake.mean().item()
            d_loss = d_loss_real + d_loss_fake
            self.rescache['dloss'].append(d_loss)
            total_dloss += d_loss
            # update generator
            self.model['generator'].train()
            self.model['discriminator'].eval()
            self.model['generator'].zero_grad()
            # Generator wants fakes classified as real (label 1).
            gty.fill_(1) # TODO
            predy = self.model['discriminator'](fake).view(-1)
            gloss = self.model['criterion'](predy, gty)
            gloss.backward()
            self.model['opti_gene'].step()
            g_loss = gloss.mean().item()
            self.rescache['gloss'].append(g_loss)
            total_gloss += g_loss
            if (idx + 1) % self.args.print_freq == 0:
                print('epoch : %2d|%2d, iter : %4d|%4d, dloss : %.4f, gloss : %.4f' % \
                    (epoch, self.args.epoches, idx+1, len(self.data['train_loader']), \
                    np.mean(self.rescache['dloss']), np.mean(self.rescache['gloss'])))
            if (idx + 1) % self.args.monitor_freq == 0:
                # Periodically sample from the fixed noise for visual monitoring.
                with torch.no_grad():
                    self.model['generator'].eval()
                    fake = self.model['generator'](self.data['fixed_noise']).detach().cpu()
                    self.rescache['fake'].append(fake)
        return total_dloss, total_gloss

    def _main_loop(self):
        """Epoch loop: train, track the best (lowest) combined loss, and save
        both the running-best and periodic checkpoints (skipped in debug mode)."""
        min_loss = 1e3
        for epoch in range(self.args.start, self.args.epoches + 1):
            start_time = time.time()
            dloss, gloss = self._model_train(epoch)
            train_loss = dloss + gloss
            # self.model['scheduler'].step()
            end_time = time.time()
            print('Single epoch cost time : %.2f mins' % ((end_time - start_time)/60))
            if not os.path.exists(self.args.save_to):
                os.mkdir(self.args.save_to)
            if (min_loss > train_loss) and (not self.args.is_debug):
                print('%snew SOTA was found%s' % ('*'*16, '*'*16))
                min_loss = train_loss
                filename = os.path.join(self.args.save_to, 'sota.pth.tar')
                torch.save({
                    'epoch' : epoch,
                    'generator' : self.model['generator'].state_dict(),
                    'discriminator' : self.model['discriminator'].state_dict(),
                    'loss' : min_loss,
                }, filename)
            if (epoch % self.args.save_freq == 0) and (not self.args.is_debug):
                filename = os.path.join(self.args.save_to, 'epoch_'+str(epoch)+'.pth.tar')
                torch.save({
                    'epoch' : epoch,
                    'generator' : self.model['generator'].state_dict(),
                    'discriminator' : self.model['discriminator'].state_dict(),
                    'loss' : train_loss,
                }, filename)
            if self.args.is_debug:
                break

    def _visual_res(self):
        ''' Visual the training process '''
        # gloss and dloss
        plt.figure(figsize=(10,5))
        plt.title("Generator and Discriminator Loss During Training")
        plt.plot(self.rescache['gloss'], label="gloss")
        plt.plot(self.rescache['dloss'], label="dloss")
        plt.xlabel("iterations")
        plt.ylabel("loss")
        plt.legend()
        plt.savefig('loss.jpg', dpi=400)
        # save the fake-images
        np.save('fake.npy', self.rescache['fake'])

    def train_runner(self):
        """Top-level entry point: report settings, build everything, train, plot."""
        self._report_settings()
        self._model_loader()
        self._data_loader()
        self._main_loop()
        self._visual_res()
if __name__ == "__main__":
    # Entry point: build the trainer from the CLI options and start training.
    trainer = DCGAN(training_args())
    trainer.train_runner()
| 37.317597 | 126 | 0.53272 | 8,297 | 0.954227 | 0 | 0 | 0 | 0 | 0 | 0 | 1,732 | 0.199195 |
52e00f970cfb2937e6c8fd35dd1fbfe0b22df7e8 | 8,940 | py | Python | testscripts/RDKB/component/sysutil/TS_SANITY_CheckLanMode_AfterReboot.py | cablelabs/tools-tdkb | 1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69 | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/sysutil/TS_SANITY_CheckLanMode_AfterReboot.py | cablelabs/tools-tdkb | 1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69 | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/sysutil/TS_SANITY_CheckLanMode_AfterReboot.py | cablelabs/tools-tdkb | 1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69 | [
"Apache-2.0"
] | null | null | null | ##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2018 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?>
<xml>
<id/>
<version>4</version>
<name>TS_SANITY_CheckLanMode_AfterReboot</name>
<primitive_test_id/>
<primitive_test_name>ExecuteCmd</primitive_test_name>
<primitive_test_version>1</primitive_test_version>
<status>FREE</status>
<synopsis>Set bridge mode and reboot the device and check whether bridge mode is persistent after reboot.</synopsis>
<groups_id/>
<execution_time>10</execution_time>
<long_duration>false</long_duration>
<advanced_script>false</advanced_script>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
<box_type>Emulator</box_type>
<box_type>RPI</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_SYSUTIL_10</test_case_id>
<test_objective>Set bridge mode and reboot the device and check whether bridge mode is persistent after reboot.</test_objective>
<test_type>Positive</test_type>
<test_setup>Emulator,Broadband,RPI</test_setup>
<pre_requisite>TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>None</api_or_interface_used>
<input_parameters>"Device.X_CISCO_COM_DeviceControl.LanManagementEntry.1.LanMode"</input_parameters>
<automation_approch>1. Get the current lanMode
2. Set the lanmode to bridge-static
3. Reboot the device
4. Get the current lanmode again
5. Revert the lanmode to original value</automation_approch>
<except_output>The lanmode should persists</except_output>
<priority>High</priority>
<test_stub_interface>sysutil</test_stub_interface>
<test_script>TS_SANITY_CheckLanMode_AfterReboot</test_script>
<skipped>No</skipped>
<release_version>M59</release_version>
<remarks/>
</test_cases>
<script_tags>
<script_tag>BASIC</script_tag>
</script_tags>
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from time import sleep;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("sysutil","1");
pamObj = tdklib.TDKScriptingLibrary("pam","RDKB");
#IP and Port of box, No need to change,
#This will be replaced with correspoing Box Ip and port while executing script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_SANITY_CheckLanMode_AfterReboot');
pamObj.configureTestCase(ip,port,'TS_SANITY_CheckLanMode_AfterReboot');
#Get the result of connection with test component and STB
loadmodulestatus1 =obj.getLoadModuleResult();
loadmodulestatus2 =pamObj.getLoadModuleResult();
if "SUCCESS" in loadmodulestatus1.upper() and loadmodulestatus2.upper:
#Set the result status of execution
obj.setLoadModuleStatus("SUCCESS");
pamObj.setLoadModuleStatus("SUCCESS");
#Get the current Lan mode
tdkTestObj = pamObj.createTestStep('pam_GetParameterValues');
tdkTestObj.addParameter("ParamName","Device.X_CISCO_COM_DeviceControl.LanManagementEntry.1.LanMode")
expectedresult="SUCCESS";
#Execute the test case in STB
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
lanMode = tdkTestObj.getResultDetails().strip();
if expectedresult in actualresult and lanMode:
tdkTestObj.setResultStatus("SUCCESS");
#Set the result status of execution
print "TEST STEP 2: Get the current lanMode"
print "EXPECTED RESULT 2: Should get the current lanMode"
print "ACTUAL RESULT 2: Current lanMode is %s" %lanMode;
print "[TEST EXECUTION RESULT] : SUCCESS";
#Set the lanMode to bridge-static
tdkTestObj = pamObj.createTestStep('pam_SetParameterValues');
tdkTestObj.addParameter("ParamName","Device.X_CISCO_COM_DeviceControl.LanManagementEntry.1.LanMode")
tdkTestObj.addParameter("ParamValue","bridge-static");
tdkTestObj.addParameter("Type","string");
expectedresult="SUCCESS";
#Execute the test case in STB
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1: Set the lanMode to bridge-static";
print "EXPECTED RESULT 1: Should set the lanMode to bridge-static";
print "ACTUAL RESULT 1: %s" %details;
print "[TEST EXECUTION RESULT] : SUCCESS" ;
#rebooting the device
obj.initiateReboot();
sleep(300);
#Check if the lanMode persists
tdkTestObj = pamObj.createTestStep('pam_GetParameterValues');
tdkTestObj.addParameter("ParamName","Device.X_CISCO_COM_DeviceControl.LanManagementEntry.1.LanMode")
expectedresult="SUCCESS";
#Execute the test case in STB
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
lanMode1 = tdkTestObj.getResultDetails().strip();
if expectedresult in actualresult and lanMode1 == "bridge-static":
tdkTestObj.setResultStatus("SUCCESS");
#Set the result status of execution
print "TEST STEP 2: Get the current lanMode"
print "EXPECTED RESULT 2: Should get the current lanMode as bridge-static"
print "ACTUAL RESULT 2: Current lanMode is %s" %lanMode1;
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
tdkTestObj.setResultStatus("FAILURE");
#Set the result status of execution
print "TEST STEP 2: Get the current lanMode"
print "EXPECTED RESULT 2: Should get the current lanMode as bridge-static"
print "ACTUAL RESULT 2: Current lanMode is %s" %lanMode1;
print "[TEST EXECUTION RESULT] : FAILURE";
#Revert the value of lanMode
tdkTestObj = pamObj.createTestStep('pam_SetParameterValues');
tdkTestObj.addParameter("ParamName","Device.X_CISCO_COM_DeviceControl.LanManagementEntry.1.LanMode")
tdkTestObj.addParameter("ParamValue",lanMode);
tdkTestObj.addParameter("Type","string");
expectedresult="SUCCESS";
#Execute the test case in STB
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1:Revert the value of lanMode";
print "EXPECTED RESULT 1: Should revert the lanMode";
print "ACTUAL RESULT 1: %s" %details;
print "[TEST EXECUTION RESULT] : SUCCESS" ;
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1:Revert the value of lanMode";
print "EXPECTED RESULT 1: Should revert the lanMode";
print "ACTUAL RESULT 1: %s" %details;
print "[TEST EXECUTION RESULT] : FAILURE" ;
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1: Set the lanMode to bridge-static";
print "EXPECTED RESULT 1: Should set the lanMode to bridge-static";
print "ACTUAL RESULT 1: %s" %details;
print "[TEST EXECUTION RESULT] : FAILURE" ;
else:
tdkTestObj.setResultStatus("FAILURE");
#Set the result status of execution
print "TEST STEP 2: Get the current lanMode"
print "EXPECTED RESULT 2: Should get the current lanMode"
print "ACTUAL RESULT 2: Current lanMode is %s" %lanMode;
print "[TEST EXECUTION RESULT] : FAILURE";
obj.unloadModule("sysutil");
pamObj.unloadModule("pam");
else:
print "Failed to load sysutil module";
sysObj.setLoadModuleStatus("FAILURE");
print "Module loading failed";
| 42.980769 | 132 | 0.687248 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,677 | 0.635011 |
52e223443238aa78f530247b57393b36f3ae7fbd | 37,563 | py | Python | fsrobo_r_driver/fsrobo_r_controller/info_catch_client.py | FUJISOFT-Robotics/fsrobo_r | 89f113f81f9119d1667b5afa4e718e2814c8dfd8 | [
"BSD-3-Clause"
] | 4 | 2019-10-15T14:32:07.000Z | 2021-02-05T01:01:02.000Z | fsrobo_r_driver/fsrobo_r_controller/info_catch_client.py | FUJISOFT-Robotics/fsrobo_r | 89f113f81f9119d1667b5afa4e718e2814c8dfd8 | [
"BSD-3-Clause"
] | 1 | 2019-10-24T08:42:21.000Z | 2019-10-31T06:05:00.000Z | fsrobo_r_driver/fsrobo_r_controller/info_catch_client.py | FUJISOFT-Robotics/fsrobo_r | 89f113f81f9119d1667b5afa4e718e2814c8dfd8 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# FSRobo-R Package BSDL
# ---------
# Copyright (C) 2019 FUJISOFT. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ---------
import time
import socket
import json
from struct import pack, unpack
class JSONSocket(object):
    """TCP socket wrapper that sends and receives whole JSON documents.

    Received bytes are kept in a buffer across calls so that recv() can
    return exactly one complete JSON value at a time, even when a document
    is split across packets or several documents arrive in one packet.
    """

    # Chunk size for each socket read.
    BUFFER_SIZE = 4096

    def __init__(self, ip_addr, port):
        """Prepare a TCP socket for the given endpoint (no connection yet)."""
        self._ip_addr = ip_addr
        self._port = port
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # strict=False lets the decoder accept control characters in strings.
        self._decoder = json.JSONDecoder(strict=False)
        self._recv_buffer = ''

    def connect(self):
        """Open the TCP connection to the configured host/port."""
        self._sock.connect((self._ip_addr, self._port))

    def close(self):
        """Close the underlying socket."""
        self._sock.close()

    def send(self, data):
        """Serialize *data* to JSON and send the full payload."""
        self._sock.sendall(json.dumps(data))

    def recv(self):
        """Receive and return the next complete JSON value.

        Reads from the socket only when the current buffer cannot be decoded,
        and leaves any bytes after the decoded document in the buffer for the
        next call. Raises socket.error when the peer closes the connection.
        """
        # Only hit the socket when the buffer is empty; otherwise try to
        # decode what we already have first.
        need_recv = len(self._recv_buffer) == 0
        #print('len: {}'.format(len(self._recv_buffer)))
        while True:
            try:
                if need_recv:
                    recv_data = self._sock.recv(self.BUFFER_SIZE)
                    if (len(recv_data) == 0):
                        # Zero-length read means the peer closed the socket.
                        raise socket.error('recv error')
                    self._recv_buffer += recv_data
                    need_recv = False
                else:
                    # XXX leading null char causes ValueError. Should fix server?
                    self._recv_buffer = self._recv_buffer.strip('\0')
                    data, index = self._decoder.raw_decode(self._recv_buffer)
                    # Keep whatever follows the decoded document for next time.
                    self._recv_buffer = self._recv_buffer[index:]
                    #print('OK!:{}:{}:'.format(self._recv_buffer, self._recv_buffer.encode('hex')))
                    return data
            except ValueError as e:
                # Buffer holds an incomplete JSON document: read more, retry.
                #print(e)
                #print(self._recv_buffer)
                #print(self._recv_buffer.encode('hex'))
                need_recv = True
class InfoCatchClient(object):
    """Client for the controller's info-catch service.

    A short-lived control connection (port 5000) switches data streaming on
    and off; the server's reply carries a dedicated data port which the
    client then connects to for receiving the streamed records.
    """

    # Fixed port of the control channel on the controller.
    ControlPort = 5000

    def __init__(self, ip_addr='192.168.0.23'):
        """Remember the controller address; no connection is made yet."""
        self._ip_addr = ip_addr
        self._data_port = None
        self._data_sock = None

    def send_control(self, msg):
        """Send one control message over a fresh connection, return the reply."""
        control_sock = JSONSocket(self._ip_addr, self.ControlPort)
        control_sock.connect()
        control_sock.send(msg)
        reply = control_sock.recv()
        control_sock.close()
        return reply

    def recv(self):
        """Return the next record from the data stream."""
        return self._data_sock.recv()

    def connect(self, filters, sampling_time=10):
        """Enable streaming for *filters* and connect to the data port.

        Returns False when the server rejects the request; otherwise connects
        to the advertised data port, retrying for a while, and returns None.
        """
        request = {'ST00': 'ON', 'ST01': sampling_time, 'ST02': sampling_time}
        for label in filters:
            request[label] = 'ON'
        reply = self.send_control(request)
        if reply['RT00'] != 'OK':
            return False
        self._data_port = reply['RT01']
        self._data_sock = JSONSocket(self._ip_addr, self._data_port)
        # The data port may need a moment to come up: 11 attempts, 0.5 s apart.
        for attempt in range(11):
            try:
                self._data_sock.connect()
                break
            except socket.error as exc:
                if attempt >= 10:
                    raise socket.error(exc)
                time.sleep(0.5)

    def close(self):
        """Ask the server to stop streaming, then close the data socket."""
        if self._data_port is not None:
            reply = self.send_control({'ST00': 'OFF', 'RT01': self._data_port})
            if reply['RT00'] != 'OK':
                print('warning: send_control returns {}'.format(reply))
            self._data_port = None
        if self._data_sock is not None:
            self._data_sock.close()
class Label(object):
    """Symbolic names for the data labels understood by the controller.

    Each attribute value is the key string sent in a control message
    (``{<label>: 'ON'}``) to subscribe to that field of the
    controller's shared-memory image; the hex offset in each trailing
    comment is that field's address in the image.

    NOTE(review): this table looks machine-generated and contains
    several anomalies (duplicate S020, duplicate R419, M300-M320
    rebound from dio_io to floatval); they are flagged inline below
    but left unchanged because the correct replacement names cannot
    be determined from this file alone.
    """
    I000 = "i000" # 0xFFFF Date/Time
    # Header block
    H003 = "h003" # 0x0008 update_counter
    H004 = "h004" # 0x000C now_updating
    # Memory I/O
    M000 = "m000" # 0x0100 dio_io
    M001 = "m001" # 0x0104 dio_io
    M100 = "m100" # 0x0300 mio_si0
    M102 = "m102" # 0x0308 mio_si2
    M107 = "m107" # 0x031C mio_sl3
    M200 = "m200" # 0x0320 dio_io[0]
    M201 = "m201" # 0x0324 dio_io[1]
    M202 = "m202" # 0x0328 dio_io[2]
    M203 = "m203" # 0x032C dio_io[3]
    M204 = "m204" # 0x0330 dio_io[4]
    M205 = "m205" # 0x0334 dio_io[5]
    M206 = "m206" # 0x0338 dio_io[6]
    M207 = "m207" # 0x033C dio_io[7]
    M208 = "m208" # 0x0340 dio_io[8]
    M209 = "m209" # 0x0344 dio_io[9]
    M210 = "m210" # 0x0348 dio_io[10]
    M211 = "m211" # 0x034C dio_io[11]
    M212 = "m212" # 0x0350 dio_io[12]
    M213 = "m213" # 0x0354 dio_io[13]
    M214 = "m214" # 0x0358 dio_io[14]
    M215 = "m215" # 0x035C dio_io[15]
    M216 = "m216" # 0x0360 dio_io[16]
    M217 = "m217" # 0x0364 dio_io[17]
    M218 = "m218" # 0x0368 dio_io[18]
    M219 = "m219" # 0x036C dio_io[19]
    M220 = "m220" # 0x0370 dio_io[20]
    M221 = "m221" # 0x0374 dio_io[21]
    M222 = "m222" # 0x0378 dio_io[22]
    M223 = "m223" # 0x037C dio_io[23]
    M224 = "m224" # 0x0380 dio_io[24]
    M225 = "m225" # 0x0384 dio_io[25]
    M226 = "m226" # 0x0388 dio_io[26]
    M227 = "m227" # 0x038C dio_io[27]
    M228 = "m228" # 0x0390 dio_io[28]
    M229 = "m229" # 0x0394 dio_io[29]
    M230 = "m230" # 0x0398 dio_io[30]
    M231 = "m231" # 0x039C dio_io[31]
    M232 = "m232" # 0x03A0 dio_io[32]
    M233 = "m233" # 0x03A4 dio_io[33]
    M234 = "m234" # 0x03A8 dio_io[34]
    M235 = "m235" # 0x03AC dio_io[35]
    M236 = "m236" # 0x03B0 dio_io[36]
    M237 = "m237" # 0x03B4 dio_io[37]
    M238 = "m238" # 0x03B8 dio_io[38]
    M239 = "m239" # 0x03BC dio_io[39]
    M240 = "m240" # 0x03C0 dio_io[40]
    M241 = "m241" # 0x03C4 dio_io[41]
    M242 = "m242" # 0x03C8 dio_io[42]
    M243 = "m243" # 0x03CC dio_io[43]
    M244 = "m244" # 0x03D0 dio_io[44]
    M245 = "m245" # 0x03D4 dio_io[45]
    M246 = "m246" # 0x03D8 dio_io[46]
    M247 = "m247" # 0x03DC dio_io[47]
    M248 = "m248" # 0x03E0 dio_io[48]
    M249 = "m249" # 0x03E4 dio_io[49]
    M250 = "m250" # 0x03E8 dio_io[50]
    M251 = "m251" # 0x03EC dio_io[51]
    M252 = "m252" # 0x03F0 dio_io[52]
    M253 = "m253" # 0x03F4 dio_io[53]
    M254 = "m254" # 0x03F8 dio_io[54]
    M255 = "m255" # 0x03FC dio_io[55]
    M256 = "m256" # 0x0400 dio_io[56]
    M257 = "m257" # 0x0404 dio_io[57]
    M258 = "m258" # 0x0408 dio_io[58]
    M259 = "m259" # 0x040C dio_io[59]
    M260 = "m260" # 0x0410 dio_io[60]
    M261 = "m261" # 0x0414 dio_io[61]
    M262 = "m262" # 0x0418 dio_io[62]
    M263 = "m263" # 0x041C dio_io[63]
    M264 = "m264" # 0x0420 dio_io[64]
    M265 = "m265" # 0x0424 dio_io[65]
    M266 = "m266" # 0x0428 dio_io[66]
    M267 = "m267" # 0x042C dio_io[67]
    M268 = "m268" # 0x0430 dio_io[68]
    M269 = "m269" # 0x0434 dio_io[69]
    M270 = "m270" # 0x0438 dio_io[70]
    M271 = "m271" # 0x043C dio_io[71]
    M272 = "m272" # 0x0440 dio_io[72]
    M273 = "m273" # 0x0444 dio_io[73]
    M274 = "m274" # 0x0448 dio_io[74]
    M275 = "m275" # 0x044C dio_io[75]
    M276 = "m276" # 0x0450 dio_io[76]
    M277 = "m277" # 0x0454 dio_io[77]
    M278 = "m278" # 0x0458 dio_io[78]
    M279 = "m279" # 0x045C dio_io[79]
    M280 = "m280" # 0x0460 dio_io[80]
    M281 = "m281" # 0x0464 dio_io[81]
    M282 = "m282" # 0x0468 dio_io[82]
    M283 = "m283" # 0x046C dio_io[83]
    M284 = "m284" # 0x0470 dio_io[84]
    M285 = "m285" # 0x0474 dio_io[85]
    M286 = "m286" # 0x0478 dio_io[86]
    M287 = "m287" # 0x047C dio_io[87]
    M288 = "m288" # 0x0480 dio_io[88]
    M289 = "m289" # 0x0484 dio_io[89]
    M290 = "m290" # 0x0488 dio_io[90]
    M291 = "m291" # 0x048C dio_io[91]
    M292 = "m292" # 0x0490 dio_io[92]
    M293 = "m293" # 0x0494 dio_io[93]
    M294 = "m294" # 0x0498 dio_io[94]
    M295 = "m295" # 0x049C dio_io[95]
    M296 = "m296" # 0x04A0 dio_io[96]
    M297 = "m297" # 0x04A4 dio_io[97]
    M298 = "m298" # 0x04A8 dio_io[98]
    M299 = "m299" # 0x04AC dio_io[99]
    # NOTE(review): M300-M320 here are rebound further down (floatval
    # section); those later definitions win at class-creation time, so
    # these dio_io[100..120] entries are effectively unreachable.
    M300 = "m300" # 0x04B0 dio_io[100]
    M301 = "m301" # 0x04B4 dio_io[101]
    M302 = "m302" # 0x04B8 dio_io[102]
    M303 = "m303" # 0x04BC dio_io[103]
    M304 = "m304" # 0x04C0 dio_io[104]
    M305 = "m305" # 0x04C4 dio_io[105]
    M306 = "m306" # 0x04C8 dio_io[106]
    M307 = "m307" # 0x04CC dio_io[107]
    M308 = "m308" # 0x04D0 dio_io[108]
    M309 = "m309" # 0x04D4 dio_io[109]
    M310 = "m310" # 0x04D8 dio_io[110]
    M311 = "m311" # 0x04DC dio_io[111]
    M312 = "m312" # 0x04E0 dio_io[112]
    M313 = "m313" # 0x04E4 dio_io[113]
    M314 = "m314" # 0x04E8 dio_io[114]
    M315 = "m315" # 0x04EC dio_io[115]
    M316 = "m316" # 0x04F0 dio_io[116]
    M317 = "m317" # 0x04F4 dio_io[117]
    M318 = "m318" # 0x04F8 dio_io[118]
    M319 = "m319" # 0x04FC dio_io[119]
    M320 = "m320" # 0x0500 dio_io[120]
    # Ethercat joiont information
    S000 = "s000" # 0x0500 cia402ctrl[0-5]
    S001 = "s001" # 0x0502 ctrl[0-5]
    S002 = "s002" # 0x0504 cia402targetpls[0-5]
    S003 = "s003" # 0x0508 notification[0-5]
    S004 = "s004" # 0x050C cia402sts[0-5]
    S005 = "s005" # 0x050E sts[0-5]
    S006 = "s006" # 0x0510 rtn[0-5]
    S007 = "s007" # 0x0512 cia402err[0-5]
    S008 = "s008" # 0x0514 alarm[0-5]
    S009 = "s009" # 0x0518 targetplsfb[0-5]
    S010 = "s010" # 0x051C cia402actualpls[0-5]
    S011 = "s011" # 0x0520 cia402followingerr[0-5]
    S012 = "s012" # 0x0524 observer_output_value[0-5]
    S013 = "s013" # 0x0528 torque[0-5]
    S014 = "s014" # 0x052A thermal[0-5]
    S015 = "s015" # 0x052C disturbance[0-5]
    S016 = "s016" # 0x052E gainrate[0-5]
    S017 = "s017" # 0x0530 polerate[0-5]
    S018 = "s018" # 0x0532 filtered_torque[0-5]
    S019 = "s019" # 0x0534 filtered_velocity[0-5]
    S020 = "s020" # 0x0536 filtered_D[0-5]
    # NOTE(review): duplicate name -- this rebinding shadows the
    # filtered_D entry above; presumably meant S021 = "s021". Verify
    # against the controller protocol before changing.
    S020 = "s020" # 0x0538 filtered_Q[0-5]
    # Force torque sensor information
    F000 = "f000" # 0x0700 sts
    F001 = "f001" # 0x0701 gain_sts
    F100 = "f100" # 0x0710 zero_point[0-7]
    F200 = "f200" # 0x0720 raw_value[0-7]
    F300 = "f300" # 0x0730 gain[0-7]
    # System management block information
    Y000 = "y000" # 0x0800 robtask_name[0-31]
    Y001 = "y001" # 0x0820 running_name[0-31]
    Y002 = "y002" # 0x0840 running_pid
    Y003 = "y003" # 0x0844 assign_port[0]
    Y004 = "y004" # 0x0846 assign_port[1]
    Y005 = "y005" # 0x0848 assign_port[2]
    Y006 = "y006" # 0x084A assign_port[3]
    Y007 = "y007" # 0x084C assign_port[4]
    Y008 = "y008" # 0x085E assign_port[5] (NOTE(review): address out of sequence -- 0x084E expected? confirm)
    Y009 = "y009" # 0x0850 assign_port[6]
    Y010 = "y010" # 0x0852 assign_port[7]
    Y011 = "y011" # 0x0854 assign_port[8]
    Y012 = "y012" # 0x0856 assign_port[9]
    Y013 = "y013" # 0x0858 assign_port[10]
    Y014 = "y014" # 0x085A assign_port[11]
    # User block information
    U000 = "u000" # 0x1800 intval[0]
    U001 = "u001" # 0x1804 intval[1]
    U002 = "u002" # 0x1808 intval[2]
    U003 = "u003" # 0x180C intval[3]
    U004 = "u004" # 0x1810 intval[4]
    U005 = "u005" # 0x1814 intval[5]
    U006 = "u006" # 0x1818 intval[6]
    U007 = "u007" # 0x181C intval[7]
    U008 = "u008" # 0x1820 intval[8]
    U009 = "u009" # 0x1824 intval[9]
    U010 = "u010" # 0x1828 intval[10]
    U011 = "u011" # 0x182C intval[11]
    U012 = "u012" # 0x1830 intval[12]
    U013 = "u013" # 0x1834 intval[13]
    U014 = "u014" # 0x1838 intval[14]
    U015 = "u015" # 0x183C intval[15]
    U016 = "u016" # 0x1840 intval[16]
    U017 = "u017" # 0x1844 intval[17]
    U018 = "u018" # 0x1848 intval[18]
    U019 = "u019" # 0x184C intval[19]
    U020 = "u020" # 0x1850 intval[20]
    U021 = "u021" # 0x1854 intval[21]
    U022 = "u022" # 0x1858 intval[22]
    U023 = "u023" # 0x185C intval[23]
    U024 = "u024" # 0x1860 intval[24]
    U025 = "u025" # 0x1864 intval[25]
    U026 = "u026" # 0x1868 intval[26]
    U027 = "u027" # 0x186C intval[27]
    U028 = "u028" # 0x1870 intval[28]
    U029 = "u029" # 0x1874 intval[29]
    U030 = "u030" # 0x1878 intval[30]
    U031 = "u031" # 0x187C intval[31]
    U032 = "u032" # 0x1880 intval[32]
    U033 = "u033" # 0x1884 intval[33]
    U034 = "u034" # 0x1888 intval[34]
    U035 = "u035" # 0x188C intval[35]
    U036 = "u036" # 0x1890 intval[36]
    U037 = "u037" # 0x1894 intval[37]
    U038 = "u038" # 0x1898 intval[38]
    U039 = "u039" # 0x189C intval[39]
    U040 = "u040" # 0x18A0 intval[40]
    U041 = "u041" # 0x18A4 intval[41]
    U042 = "u042" # 0x18A8 intval[42]
    U043 = "u043" # 0x18AC intval[43]
    U044 = "u044" # 0x18B0 intval[44]
    U045 = "u045" # 0x18B4 intval[45]
    U046 = "u046" # 0x18B8 intval[46]
    U047 = "u047" # 0x18BC intval[47]
    U048 = "u048" # 0x18C0 intval[48]
    U049 = "u049" # 0x18C4 intval[49]
    U050 = "u050" # 0x18C8 intval[50]
    U051 = "u051" # 0x18CC intval[51]
    U052 = "u052" # 0x18D0 intval[52]
    U053 = "u053" # 0x18D4 intval[53]
    U054 = "u054" # 0x18D8 intval[54]
    U055 = "u055" # 0x18DC intval[55]
    U056 = "u056" # 0x18E0 intval[56]
    U057 = "u057" # 0x18E4 intval[57]
    U058 = "u058" # 0x18E8 intval[58]
    U059 = "u059" # 0x18EC intval[59]
    U060 = "u060" # 0x18F0 intval[60]
    U061 = "u061" # 0x18F4 intval[61]
    U062 = "u062" # 0x18F8 intval[62]
    U063 = "u063" # 0x18FC intval[63]
    U064 = "u064" # 0x1900 intval[64]
    U065 = "u065" # 0x1904 intval[65]
    U066 = "u066" # 0x1908 intval[66]
    U067 = "u067" # 0x190C intval[67]
    U068 = "u068" # 0x1910 intval[68]
    U069 = "u069" # 0x1914 intval[69]
    U070 = "u070" # 0x1918 intval[70]
    U071 = "u071" # 0x191C intval[71]
    U072 = "u072" # 0x1920 intval[72]
    U073 = "u073" # 0x1924 intval[73]
    U074 = "u074" # 0x1928 intval[74]
    U075 = "u075" # 0x192C intval[75]
    U076 = "u076" # 0x1930 intval[76]
    U077 = "u077" # 0x1934 intval[77]
    U078 = "u078" # 0x1938 intval[78]
    U079 = "u079" # 0x193C intval[79]
    U080 = "u080" # 0x1940 intval[80]
    U081 = "u081" # 0x1944 intval[81]
    U082 = "u082" # 0x1948 intval[82]
    U083 = "u083" # 0x194C intval[83]
    U084 = "u084" # 0x1950 intval[84]
    U085 = "u085" # 0x1954 intval[85]
    U086 = "u086" # 0x1958 intval[86]
    U087 = "u087" # 0x195C intval[87]
    U088 = "u088" # 0x1960 intval[88]
    U089 = "u089" # 0x1964 intval[89]
    U090 = "u090" # 0x1968 intval[90]
    U091 = "u091" # 0x196C intval[91]
    U092 = "u092" # 0x1970 intval[92]
    U093 = "u093" # 0x1974 intval[93]
    U094 = "u094" # 0x1978 intval[94]
    U095 = "u095" # 0x197C intval[95]
    U096 = "u096" # 0x1980 intval[96]
    U097 = "u097" # 0x1984 intval[97]
    U098 = "u098" # 0x1988 intval[98]
    U099 = "u099" # 0x198C intval[99]
    U100 = "u100" # 0x1990 intval[100]
    U101 = "u101" # 0x1994 intval[101]
    U102 = "u102" # 0x1998 intval[102]
    U103 = "u103" # 0x199C intval[103]
    U104 = "u104" # 0x19A0 intval[104]
    U105 = "u105" # 0x19A4 intval[105]
    U106 = "u106" # 0x19A8 intval[106]
    U107 = "u107" # 0x19AC intval[107]
    U108 = "u108" # 0x19B0 intval[108]
    U109 = "u109" # 0x19B4 intval[109]
    U110 = "u110" # 0x19B8 intval[110]
    U111 = "u111" # 0x19BC intval[111]
    U112 = "u112" # 0x19C0 intval[112]
    U113 = "u113" # 0x19C4 intval[113]
    U114 = "u114" # 0x19C8 intval[114]
    U115 = "u115" # 0x19CC intval[115]
    U116 = "u116" # 0x19D0 intval[116]
    U117 = "u117" # 0x19D4 intval[117]
    U118 = "u118" # 0x19D8 intval[118]
    U119 = "u119" # 0x19DC intval[119]
    U120 = "u120" # 0x19E0 intval[120]
    U121 = "u121" # 0x19E4 intval[121]
    U122 = "u122" # 0x19E8 intval[122]
    U123 = "u123" # 0x19EC intval[123]
    U124 = "u124" # 0x19F0 intval[124]
    U125 = "u125" # 0x19F4 intval[125]
    U126 = "u126" # 0x19F8 intval[126]
    U127 = "u127" # 0x19FC intval[127]
    U128 = "u128" # 0x1A00 intval[128]
    U129 = "u129" # 0x1A04 intval[129]
    U130 = "u130" # 0x1A08 intval[130]
    U131 = "u131" # 0x1A0C intval[131]
    U132 = "u132" # 0x1A10 intval[132]
    U133 = "u133" # 0x1A14 intval[133]
    U134 = "u134" # 0x1A18 intval[134]
    U135 = "u135" # 0x1A1C intval[135]
    U136 = "u136" # 0x1A20 intval[136]
    U137 = "u137" # 0x1A24 intval[137]
    U138 = "u138" # 0x1A28 intval[138]
    U139 = "u139" # 0x1A2C intval[139]
    U140 = "u140" # 0x1A30 intval[140]
    U141 = "u141" # 0x1A34 intval[141]
    U142 = "u142" # 0x1A38 intval[142]
    U143 = "u143" # 0x1A3C intval[143]
    U144 = "u144" # 0x1A40 intval[144]
    U145 = "u145" # 0x1A44 intval[145]
    U146 = "u146" # 0x1A48 intval[146]
    U147 = "u147" # 0x1A4C intval[147]
    U148 = "u148" # 0x1A50 intval[148]
    U149 = "u149" # 0x1A54 intval[149]
    U150 = "u150" # 0x1A58 intval[150]
    U151 = "u151" # 0x1A5C intval[151]
    U152 = "u152" # 0x1A60 intval[152]
    U153 = "u153" # 0x1A64 intval[153]
    U154 = "u154" # 0x1A68 intval[154]
    U155 = "u155" # 0x1A6C intval[155]
    U156 = "u156" # 0x1A70 intval[156]
    U157 = "u157" # 0x1A74 intval[157]
    U158 = "u158" # 0x1A78 intval[158]
    U159 = "u159" # 0x1A7C intval[159]
    U160 = "u160" # 0x1A80 intval[160]
    U161 = "u161" # 0x1A84 intval[161]
    U162 = "u162" # 0x1A88 intval[162]
    U163 = "u163" # 0x1A8C intval[163]
    U164 = "u164" # 0x1A90 intval[164]
    U165 = "u165" # 0x1A94 intval[165]
    U166 = "u166" # 0x1A98 intval[166]
    U167 = "u167" # 0x1A9C intval[167]
    U168 = "u168" # 0x1AA0 intval[168]
    U169 = "u169" # 0x1AA4 intval[169]
    U170 = "u170" # 0x1AA8 intval[170]
    U171 = "u171" # 0x1AAC intval[171]
    U172 = "u172" # 0x1AB0 intval[172]
    U173 = "u173" # 0x1AB4 intval[173]
    U174 = "u174" # 0x1AB8 intval[174]
    U175 = "u175" # 0x1ABC intval[175]
    U176 = "u176" # 0x1AC0 intval[176]
    U177 = "u177" # 0x1AC4 intval[177]
    U178 = "u178" # 0x1AC8 intval[178]
    U179 = "u179" # 0x1ACC intval[179]
    U180 = "u180" # 0x1AD0 intval[180]
    U181 = "u181" # 0x1AD4 intval[181]
    U182 = "u182" # 0x1AD8 intval[182]
    U183 = "u183" # 0x1ADC intval[183]
    U184 = "u184" # 0x1AE0 intval[184]
    U185 = "u185" # 0x1AE4 intval[185]
    U186 = "u186" # 0x1AE8 intval[186]
    U187 = "u187" # 0x1AEC intval[187]
    U188 = "u188" # 0x1AF0 intval[188]
    U189 = "u189" # 0x1AF4 intval[189]
    U190 = "u190" # 0x1AF8 intval[190]
    U191 = "u191" # 0x1AFC intval[191]
    U192 = "u192" # 0x1B00 intval[192]
    U193 = "u193" # 0x1B04 intval[193]
    U194 = "u194" # 0x1B08 intval[194]
    U195 = "u195" # 0x1B0C intval[195]
    U196 = "u196" # 0x1B10 intval[196]
    U197 = "u197" # 0x1B14 intval[197]
    U198 = "u198" # 0x1B18 intval[198]
    U199 = "u199" # 0x1B1C intval[199]
    U200 = "u200" # 0x1B20 intval[200]
    U201 = "u201" # 0x1B24 intval[201]
    U202 = "u202" # 0x1B28 intval[202]
    U203 = "u203" # 0x1B2C intval[203]
    U204 = "u204" # 0x1B30 intval[204]
    U205 = "u205" # 0x1B34 intval[205]
    U206 = "u206" # 0x1B38 intval[206]
    U207 = "u207" # 0x1B3C intval[207]
    U208 = "u208" # 0x1B40 intval[208]
    U209 = "u209" # 0x1B44 intval[209]
    U210 = "u210" # 0x1B48 intval[210]
    U211 = "u211" # 0x1B4C intval[211]
    U212 = "u212" # 0x1B50 intval[212]
    U213 = "u213" # 0x1B54 intval[213]
    U214 = "u214" # 0x1B58 intval[214]
    U215 = "u215" # 0x1B5C intval[215]
    U216 = "u216" # 0x1B60 intval[216]
    U217 = "u217" # 0x1B64 intval[217]
    U218 = "u218" # 0x1B68 intval[218]
    U219 = "u219" # 0x1B6C intval[219]
    U220 = "u220" # 0x1B70 intval[220]
    U221 = "u221" # 0x1B74 intval[221]
    U222 = "u222" # 0x1B78 intval[222]
    U223 = "u223" # 0x1B7C intval[223]
    U224 = "u224" # 0x1B80 intval[224]
    U225 = "u225" # 0x1B84 intval[225]
    U226 = "u226" # 0x1B88 intval[226]
    U227 = "u227" # 0x1B8C intval[227]
    U228 = "u228" # 0x1B90 intval[228]
    U229 = "u229" # 0x1B94 intval[229]
    U230 = "u230" # 0x1B98 intval[230]
    U231 = "u231" # 0x1B9C intval[231]
    U232 = "u232" # 0x1BA0 intval[232]
    U233 = "u233" # 0x1BA4 intval[233]
    U234 = "u234" # 0x1BA8 intval[234]
    U235 = "u235" # 0x1BAC intval[235]
    U236 = "u236" # 0x1BB0 intval[236]
    U237 = "u237" # 0x1BB4 intval[237]
    U238 = "u238" # 0x1BB8 intval[238]
    U239 = "u239" # 0x1BBC intval[239]
    U240 = "u240" # 0x1BC0 intval[240]
    U241 = "u241" # 0x1BC4 intval[241]
    U242 = "u242" # 0x1BC8 intval[242]
    U243 = "u243" # 0x1BCC intval[243]
    U244 = "u244" # 0x1BD0 intval[244]
    U245 = "u245" # 0x1BD4 intval[245]
    U246 = "u246" # 0x1BD8 intval[246]
    U247 = "u247" # 0x1BDC intval[247]
    U248 = "u248" # 0x1BE0 intval[248]
    U249 = "u249" # 0x1BE4 intval[249]
    U250 = "u250" # 0x1BE8 intval[250]
    U251 = "u251" # 0x1BEC intval[251]
    U252 = "u252" # 0x1BF0 intval[252]
    U253 = "u253" # 0x1BF4 intval[253]
    U254 = "u254" # 0x1BF8 intval[254]
    U255 = "u255" # 0x1BFC intval[255]
    # NOTE(review): M300-M320 below rebind the dio_io[100..120] names
    # defined earlier in this class; these floatval definitions win.
    M300 = "m300" # 0x1C00 floatval[0]
    M301 = "m301" # 0x1C08 floatval[1]
    M302 = "m302" # 0x1C10 floatval[2]
    M303 = "m303" # 0x1C18 floatval[3]
    M304 = "m304" # 0x1C20 floatval[4]
    M305 = "m305" # 0x1C28 floatval[5]
    M306 = "m306" # 0x1C30 floatval[6]
    M307 = "m307" # 0x1C38 floatval[7]
    M308 = "m308" # 0x1C40 floatval[8]
    M309 = "m309" # 0x1C48 floatval[9]
    M310 = "m310" # 0x1C50 floatval[10]
    M311 = "m311" # 0x1C58 floatval[11]
    M312 = "m312" # 0x1C60 floatval[12]
    M313 = "m313" # 0x1C68 floatval[13]
    M314 = "m314" # 0x1C70 floatval[14]
    M315 = "m315" # 0x1C78 floatval[15]
    M316 = "m316" # 0x1C80 floatval[16]
    M317 = "m317" # 0x1C88 floatval[17]
    M318 = "m318" # 0x1C90 floatval[18]
    M319 = "m319" # 0x1C98 floatval[19]
    M320 = "m320" # 0x1CA0 floatval[20]
    M321 = "m321" # 0x1CA8 floatval[21]
    M322 = "m322" # 0x1CB0 floatval[22]
    M323 = "m323" # 0x1CB8 floatval[23]
    M324 = "m324" # 0x1CC0 floatval[24]
    M325 = "m325" # 0x1CC8 floatval[25]
    M326 = "m326" # 0x1CD0 floatval[26]
    M327 = "m327" # 0x1CD8 floatval[27]
    M328 = "m328" # 0x1CE0 floatval[28]
    M329 = "m329" # 0x1CE8 floatval[29]
    M330 = "m330" # 0x1CF0 floatval[30]
    M331 = "m331" # 0x1CF8 floatval[31]
    M332 = "m332" # 0x1D00 floatval[32]
    M333 = "m333" # 0x1D08 floatval[33]
    M334 = "m334" # 0x1D10 floatval[34]
    M335 = "m335" # 0x1D18 floatval[35]
    M336 = "m336" # 0x1D20 floatval[36]
    M337 = "m337" # 0x1D28 floatval[37]
    M338 = "m338" # 0x1D30 floatval[38]
    M339 = "m339" # 0x1D38 floatval[39]
    M340 = "m340" # 0x1D40 floatval[40]
    M341 = "m341" # 0x1D48 floatval[41]
    M342 = "m342" # 0x1D50 floatval[42]
    M343 = "m343" # 0x1D58 floatval[43]
    M344 = "m344" # 0x1D60 floatval[44]
    M345 = "m345" # 0x1D68 floatval[45]
    M346 = "m346" # 0x1D70 floatval[46]
    M347 = "m347" # 0x1D78 floatval[47]
    M348 = "m348" # 0x1D80 floatval[48]
    M349 = "m349" # 0x1D88 floatval[49]
    M350 = "m350" # 0x1D90 floatval[50]
    M351 = "m351" # 0x1D98 floatval[51]
    M352 = "m352" # 0x1DA0 floatval[52]
    M353 = "m353" # 0x1DA8 floatval[53]
    M354 = "m354" # 0x1DB0 floatval[54]
    M355 = "m355" # 0x1DB8 floatval[55]
    M356 = "m356" # 0x1DC0 floatval[56]
    M357 = "m357" # 0x1DC8 floatval[57]
    M358 = "m358" # 0x1DD0 floatval[58]
    M359 = "m359" # 0x1DD8 floatval[59]
    M360 = "m360" # 0x1DE0 floatval[60]
    M361 = "m361" # 0x1DE8 floatval[61]
    M362 = "m362" # 0x1DF0 floatval[62]
    M363 = "m363" # 0x1DF8 floatval[63]
    M364 = "m364" # 0x1E00 floatval[64]
    M365 = "m365" # 0x1E08 floatval[65]
    M366 = "m366" # 0x1E10 floatval[66]
    M367 = "m367" # 0x1E18 floatval[67]
    M368 = "m368" # 0x1E20 floatval[68]
    M369 = "m369" # 0x1E28 floatval[69]
    M370 = "m370" # 0x1E30 floatval[70]
    M371 = "m371" # 0x1E38 floatval[71]
    M372 = "m372" # 0x1E40 floatval[72]
    M373 = "m373" # 0x1E48 floatval[73]
    M374 = "m374" # 0x1E50 floatval[74]
    M375 = "m375" # 0x1E58 floatval[75]
    M376 = "m376" # 0x1E60 floatval[76]
    M377 = "m377" # 0x1E68 floatval[77]
    M378 = "m378" # 0x1E70 floatval[78]
    M379 = "m379" # 0x1E78 floatval[79]
    M380 = "m380" # 0x1E80 floatval[80]
    M381 = "m381" # 0x1E88 floatval[81]
    M382 = "m382" # 0x1E90 floatval[82]
    M383 = "m383" # 0x1E98 floatval[83]
    M384 = "m384" # 0x1EA0 floatval[84]
    M385 = "m385" # 0x1EA8 floatval[85]
    M386 = "m386" # 0x1EB0 floatval[86]
    M387 = "m387" # 0x1EB8 floatval[87]
    M388 = "m388" # 0x1EC0 floatval[88]
    M389 = "m389" # 0x1EC8 floatval[89]
    M390 = "m390" # 0x1ED0 floatval[90]
    M391 = "m391" # 0x1ED8 floatval[91]
    M392 = "m392" # 0x1EE0 floatval[92]
    M393 = "m393" # 0x1EE8 floatval[93]
    M394 = "m394" # 0x1EF0 floatval[94]
    M395 = "m395" # 0x1EF8 floatval[95]
    M396 = "m396" # 0x1F00 floatval[96]
    M397 = "m397" # 0x1F08 floatval[97]
    M398 = "m398" # 0x1F10 floatval[98]
    M399 = "m399" # 0x1F18 floatval[99]
    M400 = "m400" # 0x1F20 floatval[100]
    M401 = "m401" # 0x1F28 floatval[101]
    M402 = "m402" # 0x1F30 floatval[102]
    M403 = "m403" # 0x1F38 floatval[103]
    M404 = "m404" # 0x1F40 floatval[104]
    M405 = "m405" # 0x1F48 floatval[105]
    M406 = "m406" # 0x1F50 floatval[106]
    M407 = "m407" # 0x1F58 floatval[107]
    M408 = "m408" # 0x1F60 floatval[108]
    M409 = "m409" # 0x1F68 floatval[109]
    M410 = "m410" # 0x1F70 floatval[110]
    M411 = "m411" # 0x1F78 floatval[111]
    M412 = "m412" # 0x1F80 floatval[112]
    M413 = "m413" # 0x1F88 floatval[113]
    M414 = "m414" # 0x1F90 floatval[114]
    M415 = "m415" # 0x1F98 floatval[115]
    M416 = "m416" # 0x1FA0 floatval[116]
    M417 = "m417" # 0x1FA8 floatval[117]
    M418 = "m418" # 0x1FB0 floatval[118]
    M419 = "m419" # 0x1FB8 floatval[119]
    M420 = "m420" # 0x1FC0 floatval[120]
    M421 = "m421" # 0x1FC8 floatval[121]
    M422 = "m422" # 0x1FD0 floatval[122]
    M423 = "m423" # 0x1FD8 floatval[123]
    M424 = "m424" # 0x1FE0 floatval[124]
    M425 = "m425" # 0x1FE8 floatval[125]
    M426 = "m426" # 0x1FF0 floatval[126]
    M427 = "m427" # 0x1FF8 floatval[127]
    M428 = "m428" # 0x2000 floatval[128]
    M429 = "m429" # 0x2008 floatval[129]
    M430 = "m430" # 0x2010 floatval[130]
    M431 = "m431" # 0x2018 floatval[131]
    M432 = "m432" # 0x2020 floatval[132]
    M433 = "m433" # 0x2028 floatval[133]
    M434 = "m434" # 0x2030 floatval[134]
    M435 = "m435" # 0x2038 floatval[135]
    M436 = "m436" # 0x2040 floatval[136]
    M437 = "m437" # 0x2048 floatval[137]
    M438 = "m438" # 0x2050 floatval[138]
    M439 = "m439" # 0x2058 floatval[139]
    M440 = "m440" # 0x2060 floatval[140]
    M441 = "m441" # 0x2068 floatval[141]
    M442 = "m442" # 0x2070 floatval[142]
    M443 = "m443" # 0x2078 floatval[143]
    M444 = "m444" # 0x2080 floatval[144]
    M445 = "m445" # 0x2088 floatval[145]
    M446 = "m446" # 0x2090 floatval[146]
    M447 = "m447" # 0x2098 floatval[147]
    M448 = "m448" # 0x20A0 floatval[148]
    M449 = "m449" # 0x20A8 floatval[149]
    M450 = "m450" # 0x20B0 floatval[150]
    M451 = "m451" # 0x20B8 floatval[151]
    M452 = "m452" # 0x20C0 floatval[152]
    M453 = "m453" # 0x20C8 floatval[153]
    M454 = "m454" # 0x20D0 floatval[154]
    M455 = "m455" # 0x20D8 floatval[155]
    M456 = "m456" # 0x20E0 floatval[156]
    M457 = "m457" # 0x20E8 floatval[157]
    M458 = "m458" # 0x20F0 floatval[158]
    M459 = "m459" # 0x20F8 floatval[159]
    M460 = "m460" # 0x2100 floatval[160]
    M461 = "m461" # 0x2108 floatval[161]
    M462 = "m462" # 0x2110 floatval[162]
    M463 = "m463" # 0x2118 floatval[163]
    M464 = "m464" # 0x2120 floatval[164]
    M465 = "m465" # 0x2128 floatval[165]
    M466 = "m466" # 0x2130 floatval[166]
    M467 = "m467" # 0x2138 floatval[167]
    M468 = "m468" # 0x2140 floatval[168]
    M469 = "m469" # 0x2148 floatval[169]
    M470 = "m470" # 0x2150 floatval[170]
    M471 = "m471" # 0x2158 floatval[171]
    M472 = "m472" # 0x2160 floatval[172]
    M473 = "m473" # 0x2168 floatval[173]
    M474 = "m474" # 0x2170 floatval[174]
    M475 = "m475" # 0x2178 floatval[175]
    M476 = "m476" # 0x2180 floatval[176]
    M477 = "m477" # 0x2188 floatval[177]
    M478 = "m478" # 0x2190 floatval[178]
    M479 = "m479" # 0x2198 floatval[179]
    M480 = "m480" # 0x21A0 floatval[180]
    M481 = "m481" # 0x21A8 floatval[181]
    M482 = "m482" # 0x21B0 floatval[182]
    M483 = "m483" # 0x21B8 floatval[183]
    M484 = "m484" # 0x21C0 floatval[184]
    M485 = "m485" # 0x21C8 floatval[185]
    M486 = "m486" # 0x21D0 floatval[186]
    M487 = "m487" # 0x21D8 floatval[187]
    M488 = "m488" # 0x21E0 floatval[188]
    M489 = "m489" # 0x21E8 floatval[189]
    M490 = "m490" # 0x21F0 floatval[190]
    M491 = "m491" # 0x21F8 floatval[191]
    M492 = "m492" # 0x2200 floatval[192]
    M493 = "m493" # 0x2208 floatval[193]
    M494 = "m494" # 0x2210 floatval[194]
    M495 = "m495" # 0x2218 floatval[195]
    M496 = "m496" # 0x2220 floatval[196]
    M497 = "m497" # 0x2228 floatval[197]
    M498 = "m498" # 0x2230 floatval[198]
    M499 = "m499" # 0x2238 floatval[199]
    M500 = "m500" # 0x2240 floatval[200]
    M501 = "m501" # 0x2248 floatval[201]
    M502 = "m502" # 0x2250 floatval[202]
    M503 = "m503" # 0x2258 floatval[203]
    M504 = "m504" # 0x2260 floatval[204]
    M505 = "m505" # 0x2268 floatval[205]
    M506 = "m506" # 0x2270 floatval[206]
    M507 = "m507" # 0x2278 floatval[207]
    M508 = "m508" # 0x2280 floatval[208]
    M509 = "m509" # 0x2288 floatval[209]
    M510 = "m510" # 0x2290 floatval[210]
    M511 = "m511" # 0x2298 floatval[211]
    M512 = "m512" # 0x22A0 floatval[212]
    M513 = "m513" # 0x22A8 floatval[213]
    M514 = "m514" # 0x22B0 floatval[214]
    M515 = "m515" # 0x22B8 floatval[215]
    M516 = "m516" # 0x22C0 floatval[216]
    M517 = "m517" # 0x22C8 floatval[217]
    M518 = "m518" # 0x22D0 floatval[218]
    M519 = "m519" # 0x22D8 floatval[219]
    M520 = "m520" # 0x22E0 floatval[220]
    M521 = "m521" # 0x22E8 floatval[221]
    M522 = "m522" # 0x22F0 floatval[222]
    M523 = "m523" # 0x22F8 floatval[223]
    M524 = "m524" # 0x2300 floatval[224]
    M525 = "m525" # 0x2308 floatval[225]
    M526 = "m526" # 0x2310 floatval[226]
    M527 = "m527" # 0x2318 floatval[227]
    M528 = "m528" # 0x2320 floatval[228]
    M529 = "m529" # 0x2328 floatval[229]
    M530 = "m530" # 0x2330 floatval[230]
    M531 = "m531" # 0x2338 floatval[231]
    M532 = "m532" # 0x2340 floatval[232]
    M533 = "m533" # 0x2348 floatval[233]
    M534 = "m534" # 0x2350 floatval[234]
    M535 = "m535" # 0x2358 floatval[235]
    M536 = "m536" # 0x2360 floatval[236]
    M537 = "m537" # 0x2368 floatval[237]
    M538 = "m538" # 0x2370 floatval[238]
    M539 = "m539" # 0x2378 floatval[239]
    M540 = "m540" # 0x2380 floatval[240]
    M541 = "m541" # 0x2388 floatval[241]
    M542 = "m542" # 0x2390 floatval[242]
    M543 = "m543" # 0x2398 floatval[243]
    M544 = "m544" # 0x23A0 floatval[244]
    M545 = "m545" # 0x23A8 floatval[245]
    M546 = "m546" # 0x23B0 floatval[246]
    M547 = "m547" # 0x23B8 floatval[247]
    M548 = "m548" # 0x23C0 floatval[248]
    M549 = "m549" # 0x23C8 floatval[249]
    M550 = "m550" # 0x23D0 floatval[250]
    M551 = "m551" # 0x23D8 floatval[251]
    M552 = "m552" # 0x23E0 floatval[252]
    M553 = "m553" # 0x23E8 floatval[253]
    M554 = "m554" # 0x23F0 floatval[254]
    M555 = "m555" # 0x23F8 floatval[255]
    # Controller state information
    C000 = "c000" # 0x2800 errcode
    C001 = "c001" # 0x2802 bTeachMode
    C002 = "c002" # 0x2804 bSPILargeFrame
    # Robot configuration information
    G000 = "g000" # 0x2C00 manip_type[0-35]
    G001 = "g001" # 0x2C24 manip_serial[0-35]
    G002 = "g002" # 0x2C48 format_version[0-2]
    G003 = "g003" # 0x2C54 parameter_version[0-2]
    # Robot status information
    R000 = "r000" # 0x3000 cmdx,cmdy,cmdz,cmdrz,cmdry,cmdrx
    R100 = "r100" # 0x3040 posture
    R101 = "r101" # 0x3044 coordinate
    R102 = "r102" # 0x3048 singular
    R103 = "r103" # 0x304C multiturn
    R200 = "r200" # 0x3050 joint[0-5]
    R300 = "r300" # 0x3090 velocity
    R301 = "r301" # 0x3098 vel_error_axes
    R302 = "r302" # 0x309C softlimit
    R303 = "r303" # 0x30A0 joint_svon_to_svoff[0-5]
    R304 = "r304" # 0x30E0 b_saved
    R305 = "r305" # 0x30E4 toolno
    R306 = "r306" # 0x30E8 hdorgx,hdorgy,hdorgz,hdorgrz,hdorgry,hdorgrx
    R400 = "r400" # 0x3128 carte_svon_to_svoff[0-5]
    R401 = "r401" # 0x3168 svon_to_svoff_posture
    R402 = "r402" # 0x316C svon_to_svoff_coordinate
    R403 = "r403" # 0x3170 svon_to_svoff_singular
    R404 = "r404" # 0x3174 svon_to_svoff_multiturn
    R405 = "r405" # 0x3178 svon_to_svoff_toolno
    R406 = "r406" # 0x317C bRequestHold
    R407 = "r407" # 0x317E bRequestSuspend
    R408 = "r408" # 0x3180 bSuspended
    R409 = "r409" # 0x3184 permitted_worker_id
    R410 = "r410" # 0x3188 tool_org_params[0-5]
    R411 = "r411" # 0x31B8 tool_fwdmatrix[0-11]
    R412 = "r412" # 0x3218 last_hold_factor
    R413 = "r413" # 0x3219 vdesc0_sts
    R414 = "r414" # 0x321A vdesc1_sts
    R415 = "r415" # 0x321B n_queued
    R416 = "r416" # 0x321C logical_cmd_pulse[0-5]
    R417 = "r417" # 0x323C logical_fb_pulse[0-5]
    R418 = "r418" # 0x325C holdinfo
    R419 = "r419" # 0x3260 svsts
    # NOTE(review): duplicate name -- this rebinding shadows the svsts
    # entry above; the intended label for manip_pwr cannot be determined
    # from this file (r420-r422 are already taken below).  Verify
    # against the controller protocol before changing.
    R419 = "r419" # 0x3264 manip_pwr
    R420 = "r420" # 0x3266 ems
    R421 = "r421" # 0x3268 vdesc0_mvid
    R422 = "r422" # 0x326C vdesc1_mvid
# Smoke test: subscribe to a handful of labels, print ten data frames,
# then shut the stream down.
if __name__ == '__main__':
    c = InfoCatchClient()

    # NOTE(review): defined but never used -- recv() is polled directly
    # below instead of registering this callback.
    def callback(data):
        print(data)

    c.connect([InfoCatchClient.Label.I000,
               InfoCatchClient.Label.R200,
               InfoCatchClient.Label.M000,
               InfoCatchClient.Label.M001,
               InfoCatchClient.Label.M100,
               InfoCatchClient.Label.M102,
               InfoCatchClient.Label.F000,
               InfoCatchClient.Label.F200,
               InfoCatchClient.Label.F300,
               ])
    for x in range(10):
        data = c.recv()
        print(data)
    c.close()
| 41.876254 | 99 | 0.554109 | 35,275 | 0.939089 | 0 | 0 | 0 | 0 | 0 | 0 | 21,610 | 0.5753 |
52e372e0f9b035b427af0f50f47141dc81bfbd38 | 2,734 | py | Python | src/models/spikeconvnet.py | k-timy/snn_pytorch | db13b68dd9dd92d624a60ad6af9776e06ebfbed3 | [
"MIT"
] | null | null | null | src/models/spikeconvnet.py | k-timy/snn_pytorch | db13b68dd9dd92d624a60ad6af9776e06ebfbed3 | [
"MIT"
] | null | null | null | src/models/spikeconvnet.py | k-timy/snn_pytorch | db13b68dd9dd92d624a60ad6af9776e06ebfbed3 | [
"MIT"
] | null | null | null | """
Author: Kourosh T. Baghaei
April 2021
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .convnet import ConvNet
from .spikingconv2D import SpikingConv2D
from .spikingpool2D import SpikingAveragePool2D
from .spikinglinear import SpikingLinear
class SpikeConv(nn.Module):
    """Spiking-network wrapper around a trained ``ConvNet``.

    Each layer of the ANN (two convolutions, two average pools, two
    linear layers) is wrapped in its spiking counterpart and the whole
    stack is simulated over discrete time steps (the last axis of the
    input).  The prediction is the argmax over the output layer's
    accumulated spike counts.

    NOTE(review): ``dt``, ``ref_period`` and ``threshold`` are accepted
    but never used here -- presumably consumed by the Spiking* layers
    in another revision; confirm before removing.
    """
    def __init__(self, conv_net : ConvNet = None, device = None, dt=0.001, ref_period=0.0, threshold=1, all_sim_steps=False):
        super(SpikeConv, self).__init__()
        # Wrap each trained ANN layer in its stateful spiking counterpart.
        self.conv1 = SpikingConv2D(conv2d_nn=conv_net.conv1,device=device)
        self.conv2 = SpikingConv2D(conv2d_nn=conv_net.conv2,device=device)
        self.device = device
        self.avg_pool1 = SpikingAveragePool2D(device=device)
        self.avg_pool2 = SpikingAveragePool2D(device=device)
        self.lin1 = SpikingLinear(conv_net.fc1, device=device)
        self.lin2 = SpikingLinear(conv_net.fc2, device=device)
        # if set true, the output shape would be: (simulation_steps, batch_size)
        # otherwise: (batch_size,)
        self.all_sim_steps = all_sim_steps

    def reset_layers(self):
        """Clear the membrane/spike state of every layer so the next
        batch starts its simulation from zero."""
        self.conv1.reset_layer()
        self.conv2.reset_layer()
        self.avg_pool1.reset_layer()
        self.avg_pool2.reset_layer()
        self.lin1.reset_layer()
        self.lin2.reset_layer()

    def forward(self, x): # FCN
        """
        Shape of X : (batch_size, d1,d2,...,dn, simulation_epochs)
        Shape of Output: (batch_size,last_layer_out,simulation_epochs)
        """
        output_all_time = None
        if self.all_sim_steps:
            # One predicted class index per (sample, time step).
            output_all_time = torch.zeros((x.shape[0],x.shape[-1]),requires_grad=False).to(self.device)
        sim_steps = x.shape[-1]
        for si in range(sim_steps):
            # Feed the spike frame for time step `si` through the stack;
            # the spiking layers accumulate state across iterations.
            tmp = x[...,si]
            xi = self.conv1(tmp)
            xi = self.avg_pool1(xi)
            xi = self.conv2(xi)
            xi = self.avg_pool2(xi)
            xi = torch.flatten(xi, start_dim=1)
            xi = self.lin1(xi)
            xi = self.lin2(xi)
            if self.all_sim_steps:
                # Running prediction: argmax over spike counts so far.
                sqz = torch.squeeze(self.lin2.sum_spikes)
                output_all_time[...,si] = torch.argmax(sqz, dim=1)
        output = None
        if self.all_sim_steps:
            # Change the shape to: (sim_step , batch)
            # so that it could be compared against targets with shape: (batch,)
            output = output_all_time.transpose(0,1)
        else:
            # Final prediction from the total spike counts of the output layer.
            sqz = torch.squeeze(self.lin2.sum_spikes)
            output = torch.argmax(sqz,dim=1)
        # reset layers to start simulation from zero state on the next batch
        self.reset_layers()
        return output
52e3bcaa661af90f19ebadd86534e44f67f824f9 | 1,483 | py | Python | test/test_linked_list.py | kessler-oliveira/linkedlist | 17034b1ce9f72149db4e9b465df67de0a08f5d32 | [
"Apache-2.0"
] | null | null | null | test/test_linked_list.py | kessler-oliveira/linkedlist | 17034b1ce9f72149db4e9b465df67de0a08f5d32 | [
"Apache-2.0"
] | null | null | null | test/test_linked_list.py | kessler-oliveira/linkedlist | 17034b1ce9f72149db4e9b465df67de0a08f5d32 | [
"Apache-2.0"
] | null | null | null | import unittest
from random import randint
from model.node import Node
from model.linked_list import LinkedList
SIZE = 5
class TestLinkedList(unittest.TestCase):
    """Exercises LinkedList.copy() on a list whose nodes also carry
    `random` pointers (the classic copy-list-with-random-pointer
    structure)."""

    def test_copy(self):
        # Build a chain of SIZE nodes: node[i].next -> node[i+1].
        nodes = []
        for i in range(SIZE):
            nodes.append(Node(i))
            if i:
                nodes[i - 1].next = nodes[i]
        # Wire each node's `random` pointer to an arbitrary node.
        # NOTE(review): randint(0, SIZE) is inclusive, so when it draws
        # SIZE the node's random pointer is deliberately left unset --
        # but it also makes this test non-deterministic across runs.
        for i in range(SIZE):
            number = randint(0, SIZE)
            if number < SIZE:
                nodes[i].random = nodes[number]
        # Always true for SIZE > 0; kept as a guard against SIZE == 0.
        if nodes:
            linked_list = LinkedList(nodes[0])
            linked_list_copy = linked_list.copy()
            # Walk both lists in lockstep: data, next links and random
            # links of the copy must mirror the original.
            loop = linked_list.head
            loop_copy = linked_list_copy.head
            while loop:
                self.assertEqual(loop.data, loop_copy.data)
                if loop.next:
                    self.assertEqual(loop.next.data, loop_copy.next.data)
                if loop.random:
                    self.assertEqual(loop.random.data, loop_copy.random.data)
                loop = loop.next
                loop_copy = loop_copy.next
            # Diagnostic dump of both lists (kept from the original).
            print('----------------------------------------')
            print('Original')
            print('----------------------------------------')
            print(linked_list)
            print('----------------------------------------')
            print('Copia')
            print('----------------------------------------')
            print(linked_list_copy)
# Allow running this test module directly: python test_linked_list.py
if __name__ == '__main__':
    unittest.main()
| 27.462963 | 77 | 0.466622 | 1,310 | 0.883345 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.13149 |
52e437d3e14d1999a9a643451767e7bea8ba79b9 | 3,141 | py | Python | res/pygments/ptx.py | wilwxk/GPUCompiler.jl | 03006840df21fb452f9eebc64c4d0a71123ed3b2 | [
"MIT"
] | 68 | 2020-04-06T14:02:31.000Z | 2022-03-11T02:00:57.000Z | res/pygments/ptx.py | wilwxk/GPUCompiler.jl | 03006840df21fb452f9eebc64c4d0a71123ed3b2 | [
"MIT"
] | 243 | 2020-04-09T19:31:49.000Z | 2022-03-16T15:48:21.000Z | res/pygments/ptx.py | wilwxk/GPUCompiler.jl | 03006840df21fb452f9eebc64c4d0a71123ed3b2 | [
"MIT"
] | 31 | 2020-04-09T14:02:05.000Z | 2022-03-11T02:01:03.000Z | from pygments.lexer import RegexLexer, include, words
from pygments.token import *
# https://docs.nvidia.com/cuda/parallel-thread-execution/index.html
class CustomLexer(RegexLexer):
    """Pygments lexer for NVIDIA PTX assembly.

    Pure data-driven RegexLexer: the ``tokens`` state machine below maps PTX
    directives, instructions and types onto Pygments token classes.
    """

    string = r'"[^"]*?"'
    followsym = r'[a-zA-Z0-9_$]*'
    # NOTE(review): the space before [_$%] looks accidental — it forces a
    # literal leading blank before identifiers starting with _/$/%; confirm
    # against the PTX grammar before changing.
    identifier = r'(?:[a-zA-Z]' + followsym + r'| [_$%]' + followsym + r')'

    tokens = {
        'root': [
            include('whitespace'),
            # %-prefixed names are registers / special variables.
            (r'%' + identifier, Name.Variable),
            include('definition'),
            include('statement'),
            include('type'),
            (identifier, Name.Variable),
            # Numeric literals: float (exponent form), float, hex, octal, decimal.
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'0[0-7]+[LlUu]*', Number.Oct),
            (r'\b\d+[LlUu]*\b', Number.Integer),
            (r'[&|^+*/%=~-]', Operator),
            (r'[()\[\]\{\},.;<>@]', Punctuation),
        ],
        'whitespace': [
            (r'(\n|\s)+', Text),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'//.*?\n', Comment.Single),
        ],
        'definition': [
            (words(('func', 'reg'), prefix=r'\.', suffix=r'\b'), Keyword.Reserved),
            # label definitions, e.g. "LBB0_1:"
            (r'^' + identifier + r':', Name.Label),
        ],
        'statement': [
            # directive
            (words((
                'address_size', 'file', 'minnctapersm', 'target', 'align', 'func', 'param',
                'tex', 'branchtarget', 'global', 'pragma', 'version', 'callprototype',
                'loc', 'reg', 'visible', 'calltargets', 'local', 'reqntid', 'weak', 'const',
                'maxnctapersm', 'section', 'entry', 'maxnreg', 'shared', 'extern',
                'maxntid', 'sreg', ), prefix=r'\.', suffix=r'\b'), Keyword),
            # instruction
            (words((
                'abs', 'div', 'or', 'sin', 'add', 'ex2', 'pmevent', 'slct', 'vmad', 'addc',
                'exit', 'popc', 'sqrt', 'vmax', 'and', 'fma', 'prefetch', 'st', 'atom',
                'isspacep', 'prefetchu', 'sub', 'vmin', 'bar', 'ld', 'prmt', 'subc', 'bfe',
                'ldu', 'rcp', 'suld', 'vote', 'bfi', 'lg2', 'red', 'suq', 'vset', 'bfind',
                'mad', 'rem', 'sured', 'bret', 'sust', 'vshl', 'brev', 'madc', 'rsqrt',
                'testp', 'vshr', 'brkpt', 'max', 'sad', 'tex', 'vsub', 'call', 'membar',
                'selp', 'tld4', 'clz', 'min', 'set', 'trap', 'xor', 'cnot', 'mov', 'setp',
                'txq', 'copysign', 'mul', 'shf', 'vabsdiff', 'cos', 'shfl', 'cvta', 'not',
                'shr', 'cvt', 'neg', 'shl', 'vadd'), prefix=r'\b', suffix=r'[\.\w]+\b'), Keyword),
            # SIMD variants carrying a trailing 2/4 lane count.
            (words((
                'vavrg', 'vmax', 'vmin', 'vset', 'mad', 'vsub', 'mul', 'vabsdiff',
                'vadd'), prefix=r'\b', suffix=r'[24]\b'), Keyword),
        ],
        'type': [
            (words((
                's8', 's16', 's32', 's64',
                'u8', 'u16', 'u32', 'u64',
                'f16', 'f16x2', 'f32', 'f64',
                'b8', 'b16', 'b32', 'b64',
                'pred'), prefix=r'\.', suffix=r'\b'), Keyword.Type),
        ],
    }
| 43.027397 | 98 | 0.418338 | 2,987 | 0.950971 | 0 | 0 | 0 | 0 | 0 | 0 | 1,384 | 0.440624 |
52e56046055c96a92245aac6c622f673ad51945f | 1,962 | py | Python | tests/test_dork_erd.py | msudenvercs/Dork | 601cbb21398ac8edf5e688db2089ed8805bbdf00 | [
"MIT"
] | 1 | 2021-04-04T20:40:18.000Z | 2021-04-04T20:40:18.000Z | tests/test_dork_erd.py | msudenvercs/Dork | 601cbb21398ac8edf5e688db2089ed8805bbdf00 | [
"MIT"
] | 4 | 2019-06-04T00:59:46.000Z | 2019-06-08T17:22:19.000Z | tests/test_dork_erd.py | zenostrash/dork | 601cbb21398ac8edf5e688db2089ed8805bbdf00 | [
"MIT"
] | 4 | 2019-05-29T04:56:28.000Z | 2019-05-30T18:17:55.000Z | # -*- coding: utf-8 -*-
"""Basic tests for state and entity relationships in dork
"""
import dork.types
from tests.utils import has_many, is_a
def test_items_exist():
    """The dork module should define an Item class."""
    namespace = vars(dork.types)
    assert "Item" in namespace
    is_a(dork.types.Item, type)
def test_holders_exist():
    """The dork module should define a Holder class."""
    namespace = vars(dork.types)
    assert "Holder" in namespace
    is_a(dork.types.Holder, type)
def test_players_exist():
    """The dork module should define a Player class."""
    namespace = vars(dork.types)
    assert "Player" in namespace
    is_a(dork.types.Player, type)
def test_rooms_exist():
    """The dork module should define a Room class."""
    namespace = vars(dork.types)
    assert "Room" in namespace
    is_a(dork.types.Room, type)
def test_path_exists():
    """The dork module should define a Path class."""
    namespace = vars(dork.types)
    assert "Path" in namespace
    is_a(dork.types.Path, type)
def test_map_exists():
    """The dork module should define a Map class."""
    namespace = vars(dork.types)
    assert "Map" in namespace
    is_a(dork.types.Map, type)
def test_holder_has_many_items():
    """A Holder should own many Items."""
    owner, owned = dork.types.Holder, dork.types.Item
    has_many(owner, "holder", owned, "items")
def test_player_is_a_holder(player):
    """Every Player should also be a Holder."""
    holder_cls = dork.types.Holder
    is_a(player, holder_cls)
def test_room_is_a_holder(room):
    """Every Room should also be a Holder."""
    holder_cls = dork.types.Holder
    is_a(room, holder_cls)
def test_room_has_many_players():
    """A Room should contain many Players."""
    container, member = dork.types.Room, dork.types.Player
    has_many(container, "room", member, "players")
def test_room_has_many_paths():
    """A Room should have many Paths via its exits and entrances."""
    room_cls, path_cls = dork.types.Room, dork.types.Path
    has_many(room_cls, "entrance", path_cls, "entrances")
    has_many(room_cls, "exit", path_cls, "exits")
def test_map_has_many_rooms():
    """A Map should be composed of many Rooms."""
    map_cls, room_cls = dork.types.Map, dork.types.Room
    has_many(map_cls, "map", room_cls, "rooms")
| 23.082353 | 71 | 0.664628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 756 | 0.385321 |
52e589ec26ec934cedbbb77cf5a8fda7e7d9cd96 | 1,115 | py | Python | woffle/data/parse.py | Finnkauski/woffle | 746ceb22ef03232f7963db6f7fb2f95fe0164a07 | [
"MIT"
] | null | null | null | woffle/data/parse.py | Finnkauski/woffle | 746ceb22ef03232f7963db6f7fb2f95fe0164a07 | [
"MIT"
] | null | null | null | woffle/data/parse.py | Finnkauski/woffle | 746ceb22ef03232f7963db6f7fb2f95fe0164a07 | [
"MIT"
] | null | null | null | """
text cleaning
"""
#-- Imports ---------------------------------------------------------------------
# base
import functools
import re
# third party
import toml
# project
from woffle.functions.compose import compose
#-- Definitions -----------------------------------------------------------------
#-- cleaning
#NOTE: all functions are endomorphic String -> String so their composition does
# not need to be tested and they can be composed in any order
# read the config files for the operations
with open('etc/regex') as f:
replace = toml.load(f)
with open('etc/encoding') as f:
encode = toml.load(f)
def regexes(r: dict, x: str) -> str:
    """Apply every pattern -> replacement pair of *r* to *x*, via re.sub."""
    substitutions = [functools.partial(re.sub, pattern, repl)
                     for pattern, repl in r.items()]
    return compose(*substitutions)(x)
replacements = functools.partial(regexes, replace)
encoding = functools.partial(regexes, encode)
def unlines(x: str) -> str:
    """Remove every newline character from *x*."""
    return "".join(x.split("\n"))
# Composition -----------------------------------------------------------------
parse = compose( encoding
, replacements
, unlines
, str.strip
)
| 24.23913 | 81 | 0.528251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 520 | 0.466368 |
52e5f2e5b814ac4ae07480be88792003d295e02d | 452 | py | Python | tpRigToolkit/libs/ziva/core/utils.py | tpRigToolkit/tpRigToolkit-libs-ziva | dd317b7ec62ee4ef01f2d64e9dbca54061c24072 | [
"MIT"
] | null | null | null | tpRigToolkit/libs/ziva/core/utils.py | tpRigToolkit/tpRigToolkit-libs-ziva | dd317b7ec62ee4ef01f2d64e9dbca54061c24072 | [
"MIT"
] | null | null | null | tpRigToolkit/libs/ziva/core/utils.py | tpRigToolkit/tpRigToolkit-libs-ziva | dd317b7ec62ee4ef01f2d64e9dbca54061c24072 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains utils functions related with Ziva plugin
"""
from __future__ import print_function, division, absolute_import
from tpDcc import dcc
PLUGIN_NAME = 'ziva'
def load_ziva_plugin():
    """Load the Ziva plugin into the DCC if needed; return whether it is loaded."""
    already_loaded = is_ziva_plugin_loaded()
    if not already_loaded:
        dcc.load_plugin(PLUGIN_NAME, quiet=True)
    # Re-query so the result reflects whether the load actually succeeded.
    return is_ziva_plugin_loaded()
def is_ziva_plugin_loaded():
    """Return whether the Ziva plugin is loaded in the current DCC session."""
    return dcc.is_plugin_loaded(PLUGIN_NAME)
| 18.833333 | 64 | 0.738938 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.265487 |
52e701ca2bd2e9c2118744e9eca7070d4919aed3 | 2,737 | py | Python | algolearn/coderunner.py | gda2048/algo_deep | 8076028b14276d653896b045f60d7c41a404a030 | [
"MIT"
] | 8 | 2019-06-08T14:50:21.000Z | 2019-12-19T19:04:01.000Z | algolearn/coderunner.py | gda2048/algo_deep | 8076028b14276d653896b045f60d7c41a404a030 | [
"MIT"
] | 7 | 2020-02-12T00:28:15.000Z | 2022-02-10T09:29:22.000Z | algolearn/coderunner.py | gda2048/algo_deep | 8076028b14276d653896b045f60d7c41a404a030 | [
"MIT"
] | null | null | null | import json
import sys
import io
import time
from wrapt_timeout_decorator import *
from contextlib import redirect_stdout
class Checker:
    """Grade a set of user answers against an answer key.

    All three constructor arguments are parsed JSON documents; each holds a
    parallel list under its ``questions`` / ``answers`` key, and index ``el``
    addresses the same task in every list.  Supported task types are
    ``checkbox``, ``radio``, ``text`` and ``code``.
    """

    def __init__(self, questions, answers, user_answers):
        self.questions = questions['questions']
        self.answers = answers["answers"]
        self.user_answers = user_answers["answers"]

    @property
    def res(self):
        """Total score per question.

        Returns a single int when there is exactly one question, otherwise a
        list of per-question totals (a question may yield several partial
        scores, e.g. one per code test case, which are summed here).
        """
        rs = []
        for el in range(len(self.questions)):
            rs.append(sum(self.check(el)))
        if len(rs) == 1:
            return rs[0]
        return rs

    def print(self):
        """Print a human-readable grading report for every question."""
        for el in range(len(self.questions)):
            print("type: ", self.questions[el]["type"])
            print("task: ", self.questions[el]["task"])
            print("possible answers: ", self.questions[el]["answer"])
            print("user answer:\n", self.user_answers[el]['answer'], sep="")
            print("right answer:", self.answers[el]['answer'])
            print("This answer is counted as", self.check(el))
            print()

    def check(self, el):
        """Dispatch question ``el`` to its type-specific checker.

        Returns a list of partial scores; None for an unknown question type.
        """
        if self.questions[el]["type"] == "checkbox":
            return self.checkbox(el)
        elif self.questions[el]["type"] == "radio":
            return self.radio(el)
        elif self.questions[el]["type"] == "text":
            return self.text(el)
        elif self.questions[el]["type"] == "code":
            return self.code(el)

    def checkbox(self, el):
        """Full score iff the selected options match exactly (order-insensitive)."""
        return [int(self.answers[el]["score"]) if sorted(self.answers[el]["answer"]) == sorted(self.user_answers[el]["answer"]) else 0]

    def radio(self, el):
        """Full score iff the single selected option matches."""
        return [int(self.answers[el]["score"]) if self.answers[el]["answer"] == self.user_answers[el]["answer"] else 0]

    def text(self, el):
        """Full score iff the lower-cased, space-stripped text is an accepted answer."""
        return [int(self.answers[el]["score"]) if self.user_answers[el]["answer"][0].lower().strip(' ') in self.answers[el]["answer"] else 0]

    @timeout(2)
    def code(self, el):
        """Run the user's code against every test case of question ``el``.

        For each case, stdin is fed from the case's ``input`` and stdout is
        captured and compared to the case's ``output``.  Any exception (or
        the 2-second timeout) marks the whole question as failed with [-1].

        NOTE: the submission is executed with ``exec`` — untrusted input;
        only safe inside a sandboxed grading environment.
        """
        test_list = []
        try:
            for item in range(len(self.answers[el]["answer"])):
                sys.stdin = io.StringIO(self.answers[el]["answer"][item]["input"])
                f = io.StringIO()
                with redirect_stdout(f):
                    exec(self.user_answers[el]["answer"][0])
                test_list.append(int(self.answers[el]["score"]) if f.getvalue().strip() == self.answers[el]["answer"][item]["output"] else 0)
        except Exception:
            test_list = [-1]  # "TL" / runtime error
        finally:
            # Bug fix: restore stdin unconditionally.  Previously the restore
            # happened inside the loop body only, so a raising test case left
            # sys.stdin pointing at the per-case StringIO.
            sys.stdin = sys.__stdin__
        return test_list

    @staticmethod
    def get_questions(file):
        """Parse the questions JSON document from an open file object."""
        return json.load(file)

    @staticmethod
    def get_answers(file):
        """Parse the answer-key JSON document from an open file object."""
        return json.load(file)

    @staticmethod
    def get_user_answers(file):
        """Parse the user-answers JSON document from an open file object."""
        return json.load(file)
| 33.378049 | 141 | 0.571794 | 2,611 | 0.953964 | 0 | 0 | 1,044 | 0.38144 | 0 | 0 | 346 | 0.126416 |
52e897895ea9d9c314ef07443d9e4d0cc03615ec | 1,225 | py | Python | scaffold/iam/cf_builder.py | mccormickmichael/laurel | 6222d8d2dea6fa18cfe3b031434154003e8a125b | [
"Unlicense"
] | 1 | 2018-08-20T13:49:46.000Z | 2018-08-20T13:49:46.000Z | scaffold/iam/cf_builder.py | mccormickmichael/laurel | 6222d8d2dea6fa18cfe3b031434154003e8a125b | [
"Unlicense"
] | null | null | null | scaffold/iam/cf_builder.py | mccormickmichael/laurel | 6222d8d2dea6fa18cfe3b031434154003e8a125b | [
"Unlicense"
] | null | null | null | from datetime import datetime
from .cf_template import IAMTemplate
from scaffold.cf.stack.builder import StackBuilder
class IAMBuilder(StackBuilder):
    """Builds or updates the IAM CloudFormation stack."""

    def __init__(self, args, session, is_update):
        super(IAMBuilder, self).__init__(args.stack_name, session, is_update)
        self.args = args

    def get_s3_bucket(self):
        """Bucket that receives the rendered template."""
        return self.args.deploy_s3_bucket

    def create_s3_key_prefix(self):
        """Key prefix stamped with the current UTC time, unique per deploy."""
        stamp = datetime.utcnow().strftime('%Y%m%d-%H%M%S')
        return '{}/iam-{}'.format(self.args.deploy_s3_key_prefix, stamp)

    def get_build_parameter_names(self):
        """Names of the template's build parameters."""
        return list(IAMTemplate.BUILD_PARM_NAMES)

    def get_capabilities(self):
        # The iam stack contains inline policy resources. Explicitly acknowledge it here.
        return ['CAPABILITY_IAM']

    def create_template(self, dependencies, build_parameters):
        """Render the IAM template; CLI args override stored build parameters."""
        args = self.args
        description = build_parameters.description if args.desc is None else args.desc
        bucket = build_parameters.bucket_name if args.bucket is None else args.bucket
        logging = build_parameters.logging_enabled if args.enable is None else args.enable
        return IAMTemplate(
            self.stack_name,
            description=description,
            s3_bucket_name=bucket,
            logging_enabled=logging,
        )
| 38.28125 | 111 | 0.72 | 1,103 | 0.900408 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.100408 |
52e9d1812195fe5ed4ea6b6573203c34b1a66a6e | 175 | py | Python | editor/scheduler/__init__.py | iamsayem/smart-editor | 012ad2775cd33247642c629a2a92ec89e4462412 | [
"MIT"
] | null | null | null | editor/scheduler/__init__.py | iamsayem/smart-editor | 012ad2775cd33247642c629a2a92ec89e4462412 | [
"MIT"
] | null | null | null | editor/scheduler/__init__.py | iamsayem/smart-editor | 012ad2775cd33247642c629a2a92ec89e4462412 | [
"MIT"
] | null | null | null | """
Created by Sayem on 14 March, 2021
All rights reserved. Copyright © 2020.
"""
from .celery import app as celery_app
__author__ = "Sayem"
__all__ = ["celery_app"]
| 19.444444 | 42 | 0.685714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.619318 |
52ebf319bf4a7a4da28db62e1cc1e5ef45bb9c2a | 194,383 | py | Python | sippy/thrift_stub_dir/thrift/ttypes.py | devatsrs/neon.service | b8630f64a2c9967b680eb1db69f06a92dfca3005 | [
"MIT"
] | null | null | null | sippy/thrift_stub_dir/thrift/ttypes.py | devatsrs/neon.service | b8630f64a2c9967b680eb1db69f06a92dfca3005 | [
"MIT"
] | null | null | null | sippy/thrift_stub_dir/thrift/ttypes.py | devatsrs/neon.service | b8630f64a2c9967b680eb1db69f06a92dfca3005 | [
"MIT"
] | null | null | null | #
# Autogenerated by Thrift Compiler (1.0.0-dev)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:new_style1
#
#from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import sys
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class CallError(object):
    """Thrift-generated enumeration of call error codes.

    Plain integer class attributes (not ``enum.Enum``), as emitted by the
    Thrift compiler; ``_VALUES_TO_NAMES`` / ``_NAMES_TO_VALUES`` give the two
    lookup directions.  Do not edit by hand — regenerate from the IDL.
    """
    NO_ERROR = 0
    EXTERNAL_TRANSLATOR_REJECT = 1
    BODY_LESS_INVITE = 2
    ACCOUNT_EXPIRED = 3
    CONNECTION_CAPACITY_EXCEEDED = 4
    MALFORMED_SDP = 5
    UNSUPPORTED_CONTENT_TYPE = 6
    UNACCEPTABLE_CODEC = 7
    INVALID_AUTH_CLD_TRANS_RULE = 8
    INVALID_AUTH_CLI_TRANS_RULE = 9
    INVALID_ACNT_CLD_TRANS_RULE = 10
    INVALID_ACNT_CLI_TRANS_RULE = 11
    CANNOT_BIND_SESSION = 12
    INVALID_DID_CLI_TRANS_RULE = 13
    NO_RATE_FOUND = 14
    CALL_LOOP_DETECTED = 15
    TOO_MANY_SESSIONS = 16
    ACCOUNT_IN_USE = 17
    HIGH_CALL_RATE_PER_ACCOUNT = 18
    HIGH_CALL_RATE = 19
    INSUFFICIENT_BALANCE = 20
    FORBIDDEN_DESTINATION = 21
    NO_CUSTOMER_RATES = 22
    LOSS_PROTECTION = 23
    ADDRESS_INCOMPLETE = 24
    NO_ROUTES = 25
    HIGH_CALL_RATE_PER_CONNECTION = 26
    INVALID_ASSRT_ID_CLI_TRANS_RULE = 27
    DNCL_BLOCKED = 28

    _VALUES_TO_NAMES = {
        0: "NO_ERROR",
        1: "EXTERNAL_TRANSLATOR_REJECT",
        2: "BODY_LESS_INVITE",
        3: "ACCOUNT_EXPIRED",
        4: "CONNECTION_CAPACITY_EXCEEDED",
        5: "MALFORMED_SDP",
        6: "UNSUPPORTED_CONTENT_TYPE",
        7: "UNACCEPTABLE_CODEC",
        8: "INVALID_AUTH_CLD_TRANS_RULE",
        9: "INVALID_AUTH_CLI_TRANS_RULE",
        10: "INVALID_ACNT_CLD_TRANS_RULE",
        11: "INVALID_ACNT_CLI_TRANS_RULE",
        12: "CANNOT_BIND_SESSION",
        13: "INVALID_DID_CLI_TRANS_RULE",
        14: "NO_RATE_FOUND",
        15: "CALL_LOOP_DETECTED",
        16: "TOO_MANY_SESSIONS",
        17: "ACCOUNT_IN_USE",
        18: "HIGH_CALL_RATE_PER_ACCOUNT",
        19: "HIGH_CALL_RATE",
        20: "INSUFFICIENT_BALANCE",
        21: "FORBIDDEN_DESTINATION",
        22: "NO_CUSTOMER_RATES",
        23: "LOSS_PROTECTION",
        24: "ADDRESS_INCOMPLETE",
        25: "NO_ROUTES",
        26: "HIGH_CALL_RATE_PER_CONNECTION",
        27: "INVALID_ASSRT_ID_CLI_TRANS_RULE",
        28: "DNCL_BLOCKED",
    }

    _NAMES_TO_VALUES = {
        "NO_ERROR": 0,
        "EXTERNAL_TRANSLATOR_REJECT": 1,
        "BODY_LESS_INVITE": 2,
        "ACCOUNT_EXPIRED": 3,
        "CONNECTION_CAPACITY_EXCEEDED": 4,
        "MALFORMED_SDP": 5,
        "UNSUPPORTED_CONTENT_TYPE": 6,
        "UNACCEPTABLE_CODEC": 7,
        "INVALID_AUTH_CLD_TRANS_RULE": 8,
        "INVALID_AUTH_CLI_TRANS_RULE": 9,
        "INVALID_ACNT_CLD_TRANS_RULE": 10,
        "INVALID_ACNT_CLI_TRANS_RULE": 11,
        "CANNOT_BIND_SESSION": 12,
        "INVALID_DID_CLI_TRANS_RULE": 13,
        "NO_RATE_FOUND": 14,
        "CALL_LOOP_DETECTED": 15,
        "TOO_MANY_SESSIONS": 16,
        "ACCOUNT_IN_USE": 17,
        "HIGH_CALL_RATE_PER_ACCOUNT": 18,
        "HIGH_CALL_RATE": 19,
        "INSUFFICIENT_BALANCE": 20,
        "FORBIDDEN_DESTINATION": 21,
        "NO_CUSTOMER_RATES": 22,
        "LOSS_PROTECTION": 23,
        "ADDRESS_INCOMPLETE": 24,
        "NO_ROUTES": 25,
        "HIGH_CALL_RATE_PER_CONNECTION": 26,
        "INVALID_ASSRT_ID_CLI_TRANS_RULE": 27,
        "DNCL_BLOCKED": 28,
    }
class TransactionRecordType(object):
    """Thrift-generated enumeration of transaction record types.

    Plain integer class attributes; ``_VALUES_TO_NAMES`` / ``_NAMES_TO_VALUES``
    give the two lookup directions.  Do not edit by hand — regenerate from
    the IDL.
    """
    CALLS = 1
    CDRS = 2
    CDRS_CONNECTIONS = 3
    CDRS_CUSTOMERS = 4
    CDRS_DIDS = 5
    CDRS_CONNECTIONS_DIDS = 6
    SURCHARGES = 7
    COMMISSIONS = 8
    UPDATE_ACCOUNT_BALANCE = 9
    UPDATE_CUSTOMER_BALANCE = 10
    UPDATE_VENDOR_BALANCE = 11
    UPDATE_PLAN_MINUTES = 12
    QUALITY_STATS = 13
    CALLS_SDP = 14
    CDRS_CUSTOMERS_DIDS = 15

    _VALUES_TO_NAMES = {
        1: "CALLS",
        2: "CDRS",
        3: "CDRS_CONNECTIONS",
        4: "CDRS_CUSTOMERS",
        5: "CDRS_DIDS",
        6: "CDRS_CONNECTIONS_DIDS",
        7: "SURCHARGES",
        8: "COMMISSIONS",
        9: "UPDATE_ACCOUNT_BALANCE",
        10: "UPDATE_CUSTOMER_BALANCE",
        11: "UPDATE_VENDOR_BALANCE",
        12: "UPDATE_PLAN_MINUTES",
        13: "QUALITY_STATS",
        14: "CALLS_SDP",
        15: "CDRS_CUSTOMERS_DIDS",
    }

    _NAMES_TO_VALUES = {
        "CALLS": 1,
        "CDRS": 2,
        "CDRS_CONNECTIONS": 3,
        "CDRS_CUSTOMERS": 4,
        "CDRS_DIDS": 5,
        "CDRS_CONNECTIONS_DIDS": 6,
        "SURCHARGES": 7,
        "COMMISSIONS": 8,
        "UPDATE_ACCOUNT_BALANCE": 9,
        "UPDATE_CUSTOMER_BALANCE": 10,
        "UPDATE_VENDOR_BALANCE": 11,
        "UPDATE_PLAN_MINUTES": 12,
        "QUALITY_STATS": 13,
        "CALLS_SDP": 14,
        "CDRS_CUSTOMERS_DIDS": 15,
    }
class NullInt64(object):
    """Thrift-generated struct wrapping a single optional i64 ``v``.

    A None value is simply omitted on the wire (see ``write``).  Generated
    code — do not edit by hand.

    Attributes:
     - v
    """

    thrift_spec = (
        None,  # 0
        (1, TType.I64, 'v', None, None, ),  # 1
    )

    def __init__(self, v=None,):
        self.v = v

    def read(self, iprot):
        # Fast path: C-accelerated decoding when the binary protocol and
        # fastbinary extension are available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I64:
                    self.v = iprot.readI64()
                else:
                    # Unknown/mistyped field: skip for forward compatibility.
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path mirrors read(); None fields are not serialized.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('NullInt64')
        if self.v is not None:
            oprot.writeFieldBegin('v', TType.I64, 1)
            oprot.writeI64(self.v)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class NullString(object):
    """Thrift-generated struct wrapping a single optional UTF-8 string ``s``.

    A None value is simply omitted on the wire.  Generated code — do not edit
    by hand.

    Attributes:
     - s
    """

    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 's', 'UTF8', None, ),  # 1
    )

    def __init__(self, s=None,):
        self.s = s

    def read(self, iprot):
        # Fast path: C-accelerated decoding when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    # Python 2 needs an explicit decode to unicode.
                    self.s = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('NullString')
        if self.s is not None:
            oprot.writeFieldBegin('s', TType.STRING, 1)
            oprot.writeString(self.s.encode('utf-8') if sys.version_info[0] == 2 else self.s)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class UnixTime(object):
    """Thrift-generated struct: a timestamp split into i64 ``seconds`` and
    i64 ``nanoseconds`` fields.  Generated code — do not edit by hand.

    Attributes:
     - seconds
     - nanoseconds
    """

    thrift_spec = (
        None,  # 0
        (1, TType.I64, 'seconds', None, None, ),  # 1
        (2, TType.I64, 'nanoseconds', None, None, ),  # 2
    )

    def __init__(self, seconds=None, nanoseconds=None,):
        self.seconds = seconds
        self.nanoseconds = nanoseconds

    def read(self, iprot):
        # Fast path: C-accelerated decoding when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I64:
                    self.seconds = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I64:
                    self.nanoseconds = iprot.readI64()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('UnixTime')
        if self.seconds is not None:
            oprot.writeFieldBegin('seconds', TType.I64, 1)
            oprot.writeI64(self.seconds)
            oprot.writeFieldEnd()
        if self.nanoseconds is not None:
            oprot.writeFieldBegin('nanoseconds', TType.I64, 2)
            oprot.writeI64(self.nanoseconds)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class MonoTime(object):
    """Thrift-generated struct pairing two UnixTime values.

    Field names suggest a monotonic-clock reading (``monot``) alongside a
    real/wall-clock reading (``realt``) — TODO confirm against the IDL.
    Generated code — do not edit by hand.

    Attributes:
     - monot
     - realt
    """

    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'monot', (UnixTime, UnixTime.thrift_spec), None, ),  # 1
        (2, TType.STRUCT, 'realt', (UnixTime, UnixTime.thrift_spec), None, ),  # 2
    )

    def __init__(self, monot=None, realt=None,):
        self.monot = monot
        self.realt = realt

    def read(self, iprot):
        # Fast path: C-accelerated decoding when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.monot = UnixTime()
                    self.monot.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.realt = UnixTime()
                    self.realt.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('MonoTime')
        if self.monot is not None:
            oprot.writeFieldBegin('monot', TType.STRUCT, 1)
            self.monot.write(oprot)
            oprot.writeFieldEnd()
        if self.realt is not None:
            oprot.writeFieldBegin('realt', TType.STRUCT, 2)
            self.realt.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TransactionRecord(object):
    """Thrift-generated struct: one record of a Transaction.

    ``type`` is an i32 (values per TransactionRecordType) and ``data`` is the
    record payload as a UTF-8 string.  Generated code — do not edit by hand.

    Attributes:
     - type
     - data
    """

    thrift_spec = (
        None,  # 0
        (1, TType.I32, 'type', None, None, ),  # 1
        (2, TType.STRING, 'data', 'UTF8', None, ),  # 2
    )

    def __init__(self, type=None, data=None,):
        self.type = type
        self.data = data

    def read(self, iprot):
        # Fast path: C-accelerated decoding when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.type = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.data = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('TransactionRecord')
        if self.type is not None:
            oprot.writeFieldBegin('type', TType.I32, 1)
            oprot.writeI32(self.type)
            oprot.writeFieldEnd()
        if self.data is not None:
            oprot.writeFieldBegin('data', TType.STRING, 2)
            oprot.writeString(self.data.encode('utf-8') if sys.version_info[0] == 2 else self.data)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class Transaction(object):
    """Thrift-generated struct: a list of TransactionRecord entries.

    Generated code — do not edit by hand.

    Attributes:
     - records
    """

    thrift_spec = (
        None,  # 0
        (1, TType.LIST, 'records', (TType.STRUCT, (TransactionRecord, TransactionRecord.thrift_spec), False), None, ),  # 1
    )

    def __init__(self, records=None,):
        self.records = records

    def read(self, iprot):
        # Fast path: C-accelerated decoding when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.LIST:
                    self.records = []
                    (_etype3, _size0) = iprot.readListBegin()
                    for _i4 in range(_size0):
                        _elem5 = TransactionRecord()
                        _elem5.read(iprot)
                        self.records.append(_elem5)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('Transaction')
        if self.records is not None:
            oprot.writeFieldBegin('records', TType.LIST, 1)
            oprot.writeListBegin(TType.STRUCT, len(self.records))
            for iter6 in self.records:
                iter6.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class Calls(object):
    """Thrift-generated struct describing one call record.

    Field names follow telephony conventions (``cld``/``cli`` presumably
    called/calling numbers — confirm against the IDL).  ``parent_i_call`` and
    ``i_call_type`` are nullable via the NullInt64 wrapper.  Generated code —
    do not edit by hand.

    Attributes:
     - i_call
     - call_id
     - cld
     - cli
     - setup_time
     - parent_i_call
     - i_call_type
    """

    thrift_spec = (
        None,  # 0
        (1, TType.I64, 'i_call', None, None, ),  # 1
        (2, TType.STRING, 'call_id', 'UTF8', None, ),  # 2
        (3, TType.STRING, 'cld', 'UTF8', None, ),  # 3
        (4, TType.STRING, 'cli', 'UTF8', None, ),  # 4
        (5, TType.I64, 'setup_time', None, None, ),  # 5
        (6, TType.STRUCT, 'parent_i_call', (NullInt64, NullInt64.thrift_spec), None, ),  # 6
        (7, TType.STRUCT, 'i_call_type', (NullInt64, NullInt64.thrift_spec), None, ),  # 7
    )

    def __init__(self, i_call=None, call_id=None, cld=None, cli=None, setup_time=None, parent_i_call=None, i_call_type=None,):
        self.i_call = i_call
        self.call_id = call_id
        self.cld = cld
        self.cli = cli
        self.setup_time = setup_time
        self.parent_i_call = parent_i_call
        self.i_call_type = i_call_type

    def read(self, iprot):
        # Fast path: C-accelerated decoding when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I64:
                    self.i_call = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.call_id = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.cld = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.STRING:
                    self.cli = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.I64:
                    self.setup_time = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.STRUCT:
                    self.parent_i_call = NullInt64()
                    self.parent_i_call.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 7:
                if ftype == TType.STRUCT:
                    self.i_call_type = NullInt64()
                    self.i_call_type.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('Calls')
        if self.i_call is not None:
            oprot.writeFieldBegin('i_call', TType.I64, 1)
            oprot.writeI64(self.i_call)
            oprot.writeFieldEnd()
        if self.call_id is not None:
            oprot.writeFieldBegin('call_id', TType.STRING, 2)
            oprot.writeString(self.call_id.encode('utf-8') if sys.version_info[0] == 2 else self.call_id)
            oprot.writeFieldEnd()
        if self.cld is not None:
            oprot.writeFieldBegin('cld', TType.STRING, 3)
            oprot.writeString(self.cld.encode('utf-8') if sys.version_info[0] == 2 else self.cld)
            oprot.writeFieldEnd()
        if self.cli is not None:
            oprot.writeFieldBegin('cli', TType.STRING, 4)
            oprot.writeString(self.cli.encode('utf-8') if sys.version_info[0] == 2 else self.cli)
            oprot.writeFieldEnd()
        if self.setup_time is not None:
            oprot.writeFieldBegin('setup_time', TType.I64, 5)
            oprot.writeI64(self.setup_time)
            oprot.writeFieldEnd()
        if self.parent_i_call is not None:
            oprot.writeFieldBegin('parent_i_call', TType.STRUCT, 6)
            self.parent_i_call.write(oprot)
            oprot.writeFieldEnd()
        if self.i_call_type is not None:
            oprot.writeFieldBegin('i_call_type', TType.STRUCT, 7)
            self.i_call_type.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class Cdrs(object):
    """
    A single call detail record (CDR) row.

    All attributes default to None ("unset") and are omitted from the
    serialized form when unset.

    Attributes:
     - i_cdr
     - i_call
     - i_account
     - result
     - cost
     - delay
     - duration
     - billed_duration
     - connect_time
     - disconnect_time
     - cld_in
     - cli_in
     - prefix
     - price_1
     - price_n
     - interval_1
     - interval_n
     - post_call_surcharge
     - connect_fee
     - free_seconds
     - remote_ip
     - grace_period
     - user_agent
     - pdd1xx
     - i_protocol
     - release_source
     - plan_duration
     - accessibility_cost
     - lrn_cld
     - lrn_cld_in
     - area_name
     - p_asserted_id
     - remote_party_id
    """
    thrift_spec = (
        None,  # 0
        (1, TType.I64, 'i_cdr', None, None, ),  # 1
        (2, TType.I64, 'i_call', None, None, ),  # 2
        (3, TType.I64, 'i_account', None, None, ),  # 3
        (4, TType.I64, 'result', None, None, ),  # 4
        (5, TType.DOUBLE, 'cost', None, None, ),  # 5
        (6, TType.DOUBLE, 'delay', None, None, ),  # 6
        (7, TType.DOUBLE, 'duration', None, None, ),  # 7
        (8, TType.DOUBLE, 'billed_duration', None, None, ),  # 8
        (9, TType.I64, 'connect_time', None, None, ),  # 9
        (10, TType.I64, 'disconnect_time', None, None, ),  # 10
        (11, TType.STRING, 'cld_in', 'UTF8', None, ),  # 11
        (12, TType.STRING, 'cli_in', 'UTF8', None, ),  # 12
        (13, TType.STRING, 'prefix', 'UTF8', None, ),  # 13
        (14, TType.DOUBLE, 'price_1', None, None, ),  # 14
        (15, TType.DOUBLE, 'price_n', None, None, ),  # 15
        (16, TType.I32, 'interval_1', None, None, ),  # 16
        (17, TType.I32, 'interval_n', None, None, ),  # 17
        (18, TType.DOUBLE, 'post_call_surcharge', None, None, ),  # 18
        (19, TType.DOUBLE, 'connect_fee', None, None, ),  # 19
        (20, TType.I64, 'free_seconds', None, None, ),  # 20
        (21, TType.STRING, 'remote_ip', 'UTF8', None, ),  # 21
        (22, TType.I32, 'grace_period', None, None, ),  # 22
        (23, TType.STRING, 'user_agent', 'UTF8', None, ),  # 23
        (24, TType.DOUBLE, 'pdd1xx', None, None, ),  # 24
        (25, TType.I16, 'i_protocol', None, None, ),  # 25
        (26, TType.STRING, 'release_source', 'UTF8', None, ),  # 26
        (27, TType.DOUBLE, 'plan_duration', None, None, ),  # 27
        (28, TType.DOUBLE, 'accessibility_cost', None, None, ),  # 28
        (29, TType.STRUCT, 'lrn_cld', (NullString, NullString.thrift_spec), None, ),  # 29
        (30, TType.STRUCT, 'lrn_cld_in', (NullString, NullString.thrift_spec), None, ),  # 30
        (31, TType.STRUCT, 'area_name', (NullString, NullString.thrift_spec), None, ),  # 31
        (32, TType.STRUCT, 'p_asserted_id', (NullString, NullString.thrift_spec), None, ),  # 32
        (33, TType.STRUCT, 'remote_party_id', (NullString, NullString.thrift_spec), None, ),  # 33
    )
    # Protocol reader/writer method names for scalar field types; the
    # spec-driven read()/write() below dispatch through this table.
    _SCALAR_IO = {
        TType.I16: ('readI16', 'writeI16'),
        TType.I32: ('readI32', 'writeI32'),
        TType.I64: ('readI64', 'writeI64'),
        TType.DOUBLE: ('readDouble', 'writeDouble'),
    }
    def __init__(self, i_cdr=None, i_call=None, i_account=None, result=None, cost=None, delay=None, duration=None, billed_duration=None, connect_time=None, disconnect_time=None, cld_in=None, cli_in=None, prefix=None, price_1=None, price_n=None, interval_1=None, interval_n=None, post_call_surcharge=None, connect_fee=None, free_seconds=None, remote_ip=None, grace_period=None, user_agent=None, pdd1xx=None, i_protocol=None, release_source=None, plan_duration=None, accessibility_cost=None, lrn_cld=None, lrn_cld_in=None, area_name=None, p_asserted_id=None, remote_party_id=None,):
        # Every constructor argument is stored as the same-named attribute.
        # Parameter names match thrift_spec field names exactly, so the
        # spec drives the assignment (in field-id order).
        supplied = locals()
        for spec in self.thrift_spec:
            if spec is not None:
                setattr(self, spec[2], supplied[spec[2]])
    def read(self, iprot):
        """Populate this struct from *iprot*, driven by thrift_spec."""
        # Fast path: decode in one native call when the accelerated
        # protocol and the C fastbinary extension are available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            spec = self.thrift_spec[fid] if 0 < fid < len(self.thrift_spec) else None
            if spec is None or spec[1] != ftype:
                # Unknown field id or unexpected wire type: discard value.
                iprot.skip(ftype)
            elif ftype == TType.STRUCT:
                # Nullable wrapper struct; spec[3][0] is its class.
                nested = spec[3][0]()
                nested.read(iprot)
                setattr(self, spec[2], nested)
            elif ftype == TType.STRING:
                raw = iprot.readString()
                # Python 2 delivers bytes; decode to unicode text.
                if sys.version_info[0] == 2:
                    raw = raw.decode('utf-8')
                setattr(self, spec[2], raw)
            else:
                reader = getattr(iprot, self._SCALAR_IO[ftype][0])
                setattr(self, spec[2], reader())
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        """Serialize every non-None field, in field-id order, to *oprot*."""
        # Fast path: single native encode call when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('Cdrs')
        for spec in self.thrift_spec:
            if spec is None:
                continue
            fid, ftype, fname = spec[0], spec[1], spec[2]
            value = getattr(self, fname)
            if value is None:
                continue
            oprot.writeFieldBegin(fname, ftype, fid)
            if ftype == TType.STRUCT:
                value.write(oprot)
            elif ftype == TType.STRING:
                # Python 2 strings are encoded to UTF-8 bytes first.
                oprot.writeString(value.encode('utf-8') if sys.version_info[0] == 2 else value)
            else:
                getattr(oprot, self._SCALAR_IO[ftype][1])(value)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        """No required fields, so every instance is considered valid."""
        pass
    def __repr__(self):
        """Render as Cdrs(field=value, ...) for debugging."""
        fields = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, fields)
    def __eq__(self, other):
        """Equal when *other* shares this class and all attribute values."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Logical inverse of ``==``."""
        equal = (self == other)
        return not equal
class CdrsConnections(object):
    """
    A per-connection (vendor leg) call detail record.

    All attributes default to None ("unset") and are omitted from the
    serialized form when unset.

    Attributes:
     - i_cdrs_connection
     - i_call
     - i_connection
     - result
     - cost
     - delay
     - duration
     - billed_duration
     - setup_time
     - connect_time
     - disconnect_time
     - cld_out
     - cli_out
     - prefix
     - price_1
     - price_n
     - interval_1
     - interval_n
     - post_call_surcharge
     - connect_fee
     - free_seconds
     - grace_period
     - user_agent
     - pdd100
     - pdd1xx
     - i_account_debug
     - i_protocol
     - release_source
     - call_setup_time
     - lrn_cld
     - area_name
     - i_media_relay
     - remote_ip
     - vendor_name
    """
    thrift_spec = (
        None,  # 0
        (1, TType.I64, 'i_cdrs_connection', None, None, ),  # 1
        (2, TType.I64, 'i_call', None, None, ),  # 2
        (3, TType.I64, 'i_connection', None, None, ),  # 3
        (4, TType.I32, 'result', None, None, ),  # 4
        (5, TType.DOUBLE, 'cost', None, None, ),  # 5
        (6, TType.DOUBLE, 'delay', None, None, ),  # 6
        (7, TType.DOUBLE, 'duration', None, None, ),  # 7
        (8, TType.DOUBLE, 'billed_duration', None, None, ),  # 8
        (9, TType.I64, 'setup_time', None, None, ),  # 9
        (10, TType.I64, 'connect_time', None, None, ),  # 10
        (11, TType.I64, 'disconnect_time', None, None, ),  # 11
        (12, TType.STRING, 'cld_out', 'UTF8', None, ),  # 12
        (13, TType.STRING, 'cli_out', 'UTF8', None, ),  # 13
        (14, TType.STRING, 'prefix', 'UTF8', None, ),  # 14
        (15, TType.DOUBLE, 'price_1', None, None, ),  # 15
        (16, TType.DOUBLE, 'price_n', None, None, ),  # 16
        (17, TType.I32, 'interval_1', None, None, ),  # 17
        (18, TType.I32, 'interval_n', None, None, ),  # 18
        (19, TType.DOUBLE, 'post_call_surcharge', None, None, ),  # 19
        (20, TType.DOUBLE, 'connect_fee', None, None, ),  # 20
        (21, TType.I32, 'free_seconds', None, None, ),  # 21
        (22, TType.I32, 'grace_period', None, None, ),  # 22
        (23, TType.STRING, 'user_agent', 'UTF8', None, ),  # 23
        (24, TType.DOUBLE, 'pdd100', None, None, ),  # 24
        (25, TType.DOUBLE, 'pdd1xx', None, None, ),  # 25
        (26, TType.I64, 'i_account_debug', None, None, ),  # 26
        (27, TType.I32, 'i_protocol', None, None, ),  # 27
        (28, TType.STRING, 'release_source', 'UTF8', None, ),  # 28
        (29, TType.I64, 'call_setup_time', None, None, ),  # 29
        (30, TType.STRUCT, 'lrn_cld', (NullString, NullString.thrift_spec), None, ),  # 30
        (31, TType.STRUCT, 'area_name', (NullString, NullString.thrift_spec), None, ),  # 31
        (32, TType.STRUCT, 'i_media_relay', (NullInt64, NullInt64.thrift_spec), None, ),  # 32
        (33, TType.STRUCT, 'remote_ip', (NullString, NullString.thrift_spec), None, ),  # 33
        (34, TType.STRUCT, 'vendor_name', (NullString, NullString.thrift_spec), None, ),  # 34
    )
    # Protocol reader/writer method names for scalar field types; the
    # spec-driven read()/write() below dispatch through this table.
    _SCALAR_IO = {
        TType.I16: ('readI16', 'writeI16'),
        TType.I32: ('readI32', 'writeI32'),
        TType.I64: ('readI64', 'writeI64'),
        TType.DOUBLE: ('readDouble', 'writeDouble'),
    }
    def __init__(self, i_cdrs_connection=None, i_call=None, i_connection=None, result=None, cost=None, delay=None, duration=None, billed_duration=None, setup_time=None, connect_time=None, disconnect_time=None, cld_out=None, cli_out=None, prefix=None, price_1=None, price_n=None, interval_1=None, interval_n=None, post_call_surcharge=None, connect_fee=None, free_seconds=None, grace_period=None, user_agent=None, pdd100=None, pdd1xx=None, i_account_debug=None, i_protocol=None, release_source=None, call_setup_time=None, lrn_cld=None, area_name=None, i_media_relay=None, remote_ip=None, vendor_name=None,):
        # Every constructor argument is stored as the same-named attribute.
        # Parameter names match thrift_spec field names exactly, so the
        # spec drives the assignment (in field-id order).
        supplied = locals()
        for spec in self.thrift_spec:
            if spec is not None:
                setattr(self, spec[2], supplied[spec[2]])
    def read(self, iprot):
        """Populate this struct from *iprot*, driven by thrift_spec."""
        # Fast path: decode in one native call when the accelerated
        # protocol and the C fastbinary extension are available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            spec = self.thrift_spec[fid] if 0 < fid < len(self.thrift_spec) else None
            if spec is None or spec[1] != ftype:
                # Unknown field id or unexpected wire type: discard value.
                iprot.skip(ftype)
            elif ftype == TType.STRUCT:
                # Nullable wrapper struct; spec[3][0] is its class.
                nested = spec[3][0]()
                nested.read(iprot)
                setattr(self, spec[2], nested)
            elif ftype == TType.STRING:
                raw = iprot.readString()
                # Python 2 delivers bytes; decode to unicode text.
                if sys.version_info[0] == 2:
                    raw = raw.decode('utf-8')
                setattr(self, spec[2], raw)
            else:
                reader = getattr(iprot, self._SCALAR_IO[ftype][0])
                setattr(self, spec[2], reader())
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        """Serialize every non-None field, in field-id order, to *oprot*."""
        # Fast path: single native encode call when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('CdrsConnections')
        for spec in self.thrift_spec:
            if spec is None:
                continue
            fid, ftype, fname = spec[0], spec[1], spec[2]
            value = getattr(self, fname)
            if value is None:
                continue
            oprot.writeFieldBegin(fname, ftype, fid)
            if ftype == TType.STRUCT:
                value.write(oprot)
            elif ftype == TType.STRING:
                # Python 2 strings are encoded to UTF-8 bytes first.
                oprot.writeString(value.encode('utf-8') if sys.version_info[0] == 2 else value)
            else:
                getattr(oprot, self._SCALAR_IO[ftype][1])(value)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        """No required fields, so every instance is considered valid."""
        pass
    def __repr__(self):
        """Render as CdrsConnections(field=value, ...) for debugging."""
        fields = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, fields)
    def __eq__(self, other):
        """Equal when *other* shares this class and all attribute values."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Logical inverse of ``==``."""
        equal = (self == other)
        return not equal
class CdrsCustomers(object):
"""
Attributes:
- i_cdrs_customer
- i_cdr
- i_customer
- cost
- billed_duration
- prefix
- price_1
- price_n
- interval_1
- interval_n
- post_call_surcharge
- connect_fee
- free_seconds
- grace_period
- i_call
- i_wholesaler
- setup_time
- duration
- area_name
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'i_cdrs_customer', None, None, ), # 1
(2, TType.I64, 'i_cdr', None, None, ), # 2
(3, TType.I64, 'i_customer', None, None, ), # 3
(4, TType.DOUBLE, 'cost', None, None, ), # 4
(5, TType.DOUBLE, 'billed_duration', None, None, ), # 5
(6, TType.STRING, 'prefix', 'UTF8', None, ), # 6
(7, TType.DOUBLE, 'price_1', None, None, ), # 7
(8, TType.DOUBLE, 'price_n', None, None, ), # 8
(9, TType.I32, 'interval_1', None, None, ), # 9
(10, TType.I32, 'interval_n', None, None, ), # 10
(11, TType.DOUBLE, 'post_call_surcharge', None, None, ), # 11
(12, TType.DOUBLE, 'connect_fee', None, None, ), # 12
(13, TType.I32, 'free_seconds', None, None, ), # 13
(14, TType.I32, 'grace_period', None, None, ), # 14
(15, TType.I64, 'i_call', None, None, ), # 15
(16, TType.I64, 'i_wholesaler', None, None, ), # 16
(17, TType.I64, 'setup_time', None, None, ), # 17
(18, TType.DOUBLE, 'duration', None, None, ), # 18
(19, TType.STRUCT, 'area_name', (NullString, NullString.thrift_spec), None, ), # 19
)
def __init__(self, i_cdrs_customer=None, i_cdr=None, i_customer=None, cost=None, billed_duration=None, prefix=None, price_1=None, price_n=None, interval_1=None, interval_n=None, post_call_surcharge=None, connect_fee=None, free_seconds=None, grace_period=None, i_call=None, i_wholesaler=None, setup_time=None, duration=None, area_name=None,):
self.i_cdrs_customer = i_cdrs_customer
self.i_cdr = i_cdr
self.i_customer = i_customer
self.cost = cost
self.billed_duration = billed_duration
self.prefix = prefix
self.price_1 = price_1
self.price_n = price_n
self.interval_1 = interval_1
self.interval_n = interval_n
self.post_call_surcharge = post_call_surcharge
self.connect_fee = connect_fee
self.free_seconds = free_seconds
self.grace_period = grace_period
self.i_call = i_call
self.i_wholesaler = i_wholesaler
self.setup_time = setup_time
self.duration = duration
self.area_name = area_name
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.i_cdrs_customer = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.i_cdr = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.i_customer = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.DOUBLE:
self.cost = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.DOUBLE:
self.billed_duration = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.prefix = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.DOUBLE:
self.price_1 = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.DOUBLE:
self.price_n = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.I32:
self.interval_1 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.I32:
self.interval_n = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.DOUBLE:
self.post_call_surcharge = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 12:
if ftype == TType.DOUBLE:
self.connect_fee = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 13:
if ftype == TType.I32:
self.free_seconds = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 14:
if ftype == TType.I32:
self.grace_period = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 15:
if ftype == TType.I64:
self.i_call = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 16:
if ftype == TType.I64:
self.i_wholesaler = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 17:
if ftype == TType.I64:
self.setup_time = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 18:
if ftype == TType.DOUBLE:
self.duration = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 19:
if ftype == TType.STRUCT:
self.area_name = NullString()
self.area_name.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
  def write(self, oprot):
    """Serialize every non-None field of this CdrsCustomers struct to *oprot*.

    Uses the accelerated C encoder (fastbinary) when the protocol is
    TBinaryProtocolAccelerated; otherwise falls back to field-by-field
    writes in thrift_spec field-id order. None-valued fields are omitted
    from the wire, per standard Thrift optional-field behavior.
    """
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('CdrsCustomers')
    if self.i_cdrs_customer is not None:
      oprot.writeFieldBegin('i_cdrs_customer', TType.I64, 1)
      oprot.writeI64(self.i_cdrs_customer)
      oprot.writeFieldEnd()
    if self.i_cdr is not None:
      oprot.writeFieldBegin('i_cdr', TType.I64, 2)
      oprot.writeI64(self.i_cdr)
      oprot.writeFieldEnd()
    if self.i_customer is not None:
      oprot.writeFieldBegin('i_customer', TType.I64, 3)
      oprot.writeI64(self.i_customer)
      oprot.writeFieldEnd()
    if self.cost is not None:
      oprot.writeFieldBegin('cost', TType.DOUBLE, 4)
      oprot.writeDouble(self.cost)
      oprot.writeFieldEnd()
    if self.billed_duration is not None:
      oprot.writeFieldBegin('billed_duration', TType.DOUBLE, 5)
      oprot.writeDouble(self.billed_duration)
      oprot.writeFieldEnd()
    if self.prefix is not None:
      oprot.writeFieldBegin('prefix', TType.STRING, 6)
      # Python 2 stores text as unicode and must encode before writing bytes.
      oprot.writeString(self.prefix.encode('utf-8') if sys.version_info[0] == 2 else self.prefix)
      oprot.writeFieldEnd()
    if self.price_1 is not None:
      oprot.writeFieldBegin('price_1', TType.DOUBLE, 7)
      oprot.writeDouble(self.price_1)
      oprot.writeFieldEnd()
    if self.price_n is not None:
      oprot.writeFieldBegin('price_n', TType.DOUBLE, 8)
      oprot.writeDouble(self.price_n)
      oprot.writeFieldEnd()
    if self.interval_1 is not None:
      oprot.writeFieldBegin('interval_1', TType.I32, 9)
      oprot.writeI32(self.interval_1)
      oprot.writeFieldEnd()
    if self.interval_n is not None:
      oprot.writeFieldBegin('interval_n', TType.I32, 10)
      oprot.writeI32(self.interval_n)
      oprot.writeFieldEnd()
    if self.post_call_surcharge is not None:
      oprot.writeFieldBegin('post_call_surcharge', TType.DOUBLE, 11)
      oprot.writeDouble(self.post_call_surcharge)
      oprot.writeFieldEnd()
    if self.connect_fee is not None:
      oprot.writeFieldBegin('connect_fee', TType.DOUBLE, 12)
      oprot.writeDouble(self.connect_fee)
      oprot.writeFieldEnd()
    if self.free_seconds is not None:
      oprot.writeFieldBegin('free_seconds', TType.I32, 13)
      oprot.writeI32(self.free_seconds)
      oprot.writeFieldEnd()
    if self.grace_period is not None:
      oprot.writeFieldBegin('grace_period', TType.I32, 14)
      oprot.writeI32(self.grace_period)
      oprot.writeFieldEnd()
    if self.i_call is not None:
      oprot.writeFieldBegin('i_call', TType.I64, 15)
      oprot.writeI64(self.i_call)
      oprot.writeFieldEnd()
    if self.i_wholesaler is not None:
      oprot.writeFieldBegin('i_wholesaler', TType.I64, 16)
      oprot.writeI64(self.i_wholesaler)
      oprot.writeFieldEnd()
    if self.setup_time is not None:
      oprot.writeFieldBegin('setup_time', TType.I64, 17)
      oprot.writeI64(self.setup_time)
      oprot.writeFieldEnd()
    if self.duration is not None:
      oprot.writeFieldBegin('duration', TType.DOUBLE, 18)
      oprot.writeDouble(self.duration)
      oprot.writeFieldEnd()
    if self.area_name is not None:
      # Nested struct field (NullString wrapper) delegates its own encoding.
      oprot.writeFieldBegin('area_name', TType.STRUCT, 19)
      self.area_name.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class CdrsDids(object):
  """
  Thrift-generated struct; presumably one DID-leg call detail record
  (times look like Unix epoch seconds, cost in account currency) --
  confirm field semantics against the source .thrift IDL.

  NOTE(review): auto-generated serialization code. Field ids, wire types
  and thrift_spec slots define the wire format; do not hand-edit them.

  Attributes:
   - i_cdrs_did
   - i_call
   - i_did
   - did
   - result
   - cost
   - duration
   - billed_duration
   - setup_time
   - connect_time
   - disconnect_time
   - price_1
   - price_n
   - interval_1
   - interval_n
   - post_call_surcharge
   - connect_fee
   - free_seconds
   - grace_period
  """
  # Each slot is (field id, wire type, name, type-extra, default); tuple
  # index equals the Thrift field id (slot 0 unused).
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'i_cdrs_did', None, None, ), # 1
    (2, TType.I64, 'i_call', None, None, ), # 2
    (3, TType.I64, 'i_did', None, None, ), # 3
    (4, TType.STRING, 'did', 'UTF8', None, ), # 4
    (5, TType.I32, 'result', None, None, ), # 5
    (6, TType.DOUBLE, 'cost', None, None, ), # 6
    (7, TType.DOUBLE, 'duration', None, None, ), # 7
    (8, TType.DOUBLE, 'billed_duration', None, None, ), # 8
    (9, TType.I64, 'setup_time', None, None, ), # 9
    (10, TType.I64, 'connect_time', None, None, ), # 10
    (11, TType.I64, 'disconnect_time', None, None, ), # 11
    (12, TType.DOUBLE, 'price_1', None, None, ), # 12
    (13, TType.DOUBLE, 'price_n', None, None, ), # 13
    (14, TType.I32, 'interval_1', None, None, ), # 14
    (15, TType.I32, 'interval_n', None, None, ), # 15
    (16, TType.DOUBLE, 'post_call_surcharge', None, None, ), # 16
    (17, TType.DOUBLE, 'connect_fee', None, None, ), # 17
    (18, TType.I32, 'free_seconds', None, None, ), # 18
    (19, TType.I32, 'grace_period', None, None, ), # 19
  )
  def __init__(self, i_cdrs_did=None, i_call=None, i_did=None, did=None, result=None, cost=None, duration=None, billed_duration=None, setup_time=None, connect_time=None, disconnect_time=None, price_1=None, price_n=None, interval_1=None, interval_n=None, post_call_surcharge=None, connect_fee=None, free_seconds=None, grace_period=None,):
    self.i_cdrs_did = i_cdrs_did
    self.i_call = i_call
    self.i_did = i_did
    self.did = did
    self.result = result
    self.cost = cost
    self.duration = duration
    self.billed_duration = billed_duration
    self.setup_time = setup_time
    self.connect_time = connect_time
    self.disconnect_time = disconnect_time
    self.price_1 = price_1
    self.price_n = price_n
    self.interval_1 = interval_1
    self.interval_n = interval_n
    self.post_call_surcharge = post_call_surcharge
    self.connect_fee = connect_fee
    self.free_seconds = free_seconds
    self.grace_period = grace_period
  def read(self, iprot):
    """Deserialize fields from *iprot* into this struct.

    Prefers the accelerated fastbinary decoder when available; otherwise
    loops field-by-field, skipping unknown field ids and fields whose
    wire type does not match the spec (forward compatibility).
    """
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.i_cdrs_did = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I64:
          self.i_call = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I64:
          self.i_did = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.did = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.I32:
          self.result = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.DOUBLE:
          self.cost = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 7:
        if ftype == TType.DOUBLE:
          self.duration = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 8:
        if ftype == TType.DOUBLE:
          self.billed_duration = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 9:
        if ftype == TType.I64:
          self.setup_time = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 10:
        if ftype == TType.I64:
          self.connect_time = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 11:
        if ftype == TType.I64:
          self.disconnect_time = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 12:
        if ftype == TType.DOUBLE:
          self.price_1 = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 13:
        if ftype == TType.DOUBLE:
          self.price_n = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 14:
        if ftype == TType.I32:
          self.interval_1 = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 15:
        if ftype == TType.I32:
          self.interval_n = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 16:
        if ftype == TType.DOUBLE:
          self.post_call_surcharge = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 17:
        if ftype == TType.DOUBLE:
          self.connect_fee = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 18:
        if ftype == TType.I32:
          self.free_seconds = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 19:
        if ftype == TType.I32:
          self.grace_period = iprot.readI32()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize all non-None fields to *oprot* in field-id order."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('CdrsDids')
    if self.i_cdrs_did is not None:
      oprot.writeFieldBegin('i_cdrs_did', TType.I64, 1)
      oprot.writeI64(self.i_cdrs_did)
      oprot.writeFieldEnd()
    if self.i_call is not None:
      oprot.writeFieldBegin('i_call', TType.I64, 2)
      oprot.writeI64(self.i_call)
      oprot.writeFieldEnd()
    if self.i_did is not None:
      oprot.writeFieldBegin('i_did', TType.I64, 3)
      oprot.writeI64(self.i_did)
      oprot.writeFieldEnd()
    if self.did is not None:
      oprot.writeFieldBegin('did', TType.STRING, 4)
      oprot.writeString(self.did.encode('utf-8') if sys.version_info[0] == 2 else self.did)
      oprot.writeFieldEnd()
    if self.result is not None:
      oprot.writeFieldBegin('result', TType.I32, 5)
      oprot.writeI32(self.result)
      oprot.writeFieldEnd()
    if self.cost is not None:
      oprot.writeFieldBegin('cost', TType.DOUBLE, 6)
      oprot.writeDouble(self.cost)
      oprot.writeFieldEnd()
    if self.duration is not None:
      oprot.writeFieldBegin('duration', TType.DOUBLE, 7)
      oprot.writeDouble(self.duration)
      oprot.writeFieldEnd()
    if self.billed_duration is not None:
      oprot.writeFieldBegin('billed_duration', TType.DOUBLE, 8)
      oprot.writeDouble(self.billed_duration)
      oprot.writeFieldEnd()
    if self.setup_time is not None:
      oprot.writeFieldBegin('setup_time', TType.I64, 9)
      oprot.writeI64(self.setup_time)
      oprot.writeFieldEnd()
    if self.connect_time is not None:
      oprot.writeFieldBegin('connect_time', TType.I64, 10)
      oprot.writeI64(self.connect_time)
      oprot.writeFieldEnd()
    if self.disconnect_time is not None:
      oprot.writeFieldBegin('disconnect_time', TType.I64, 11)
      oprot.writeI64(self.disconnect_time)
      oprot.writeFieldEnd()
    if self.price_1 is not None:
      oprot.writeFieldBegin('price_1', TType.DOUBLE, 12)
      oprot.writeDouble(self.price_1)
      oprot.writeFieldEnd()
    if self.price_n is not None:
      oprot.writeFieldBegin('price_n', TType.DOUBLE, 13)
      oprot.writeDouble(self.price_n)
      oprot.writeFieldEnd()
    if self.interval_1 is not None:
      oprot.writeFieldBegin('interval_1', TType.I32, 14)
      oprot.writeI32(self.interval_1)
      oprot.writeFieldEnd()
    if self.interval_n is not None:
      oprot.writeFieldBegin('interval_n', TType.I32, 15)
      oprot.writeI32(self.interval_n)
      oprot.writeFieldEnd()
    if self.post_call_surcharge is not None:
      oprot.writeFieldBegin('post_call_surcharge', TType.DOUBLE, 16)
      oprot.writeDouble(self.post_call_surcharge)
      oprot.writeFieldEnd()
    if self.connect_fee is not None:
      oprot.writeFieldBegin('connect_fee', TType.DOUBLE, 17)
      oprot.writeDouble(self.connect_fee)
      oprot.writeFieldEnd()
    if self.free_seconds is not None:
      oprot.writeFieldBegin('free_seconds', TType.I32, 18)
      oprot.writeI32(self.free_seconds)
      oprot.writeFieldEnd()
    if self.grace_period is not None:
      oprot.writeFieldBegin('grace_period', TType.I32, 19)
      oprot.writeI32(self.grace_period)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """No required-field checks for this struct."""
    return
  def __repr__(self):
    """Render as ClassName(attr=value, ...)."""
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class, same attribute dict.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class CdrsConnectionsDids(object):
  """
  Thrift-generated struct; presumably the vendor/connection leg of a DID
  call detail record -- confirm field semantics against the .thrift IDL.

  NOTE(review): auto-generated serialization code. Field ids, wire types
  and thrift_spec slots define the wire format; do not hand-edit them.

  Attributes:
   - i_cdrs_connections_did
   - i_call
   - i_did_authorization
   - did
   - incoming_did
   - i_connection
   - result
   - cost
   - duration
   - billed_duration
   - setup_time
   - connect_time
   - disconnect_time
   - price_1
   - price_n
   - interval_1
   - interval_n
   - post_call_surcharge
   - connect_fee
   - free_seconds
   - grace_period
  """
  # Each slot is (field id, wire type, name, type-extra, default); tuple
  # index equals the Thrift field id (slot 0 unused).
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'i_cdrs_connections_did', None, None, ), # 1
    (2, TType.I64, 'i_call', None, None, ), # 2
    (3, TType.I64, 'i_did_authorization', None, None, ), # 3
    (4, TType.STRING, 'did', 'UTF8', None, ), # 4
    (5, TType.STRING, 'incoming_did', 'UTF8', None, ), # 5
    (6, TType.I64, 'i_connection', None, None, ), # 6
    (7, TType.I32, 'result', None, None, ), # 7
    (8, TType.DOUBLE, 'cost', None, None, ), # 8
    (9, TType.DOUBLE, 'duration', None, None, ), # 9
    (10, TType.DOUBLE, 'billed_duration', None, None, ), # 10
    (11, TType.I64, 'setup_time', None, None, ), # 11
    (12, TType.I64, 'connect_time', None, None, ), # 12
    (13, TType.I64, 'disconnect_time', None, None, ), # 13
    (14, TType.DOUBLE, 'price_1', None, None, ), # 14
    (15, TType.DOUBLE, 'price_n', None, None, ), # 15
    (16, TType.I32, 'interval_1', None, None, ), # 16
    (17, TType.I32, 'interval_n', None, None, ), # 17
    (18, TType.DOUBLE, 'post_call_surcharge', None, None, ), # 18
    (19, TType.DOUBLE, 'connect_fee', None, None, ), # 19
    (20, TType.I32, 'free_seconds', None, None, ), # 20
    (21, TType.I32, 'grace_period', None, None, ), # 21
  )
  def __init__(self, i_cdrs_connections_did=None, i_call=None, i_did_authorization=None, did=None, incoming_did=None, i_connection=None, result=None, cost=None, duration=None, billed_duration=None, setup_time=None, connect_time=None, disconnect_time=None, price_1=None, price_n=None, interval_1=None, interval_n=None, post_call_surcharge=None, connect_fee=None, free_seconds=None, grace_period=None,):
    self.i_cdrs_connections_did = i_cdrs_connections_did
    self.i_call = i_call
    self.i_did_authorization = i_did_authorization
    self.did = did
    self.incoming_did = incoming_did
    self.i_connection = i_connection
    self.result = result
    self.cost = cost
    self.duration = duration
    self.billed_duration = billed_duration
    self.setup_time = setup_time
    self.connect_time = connect_time
    self.disconnect_time = disconnect_time
    self.price_1 = price_1
    self.price_n = price_n
    self.interval_1 = interval_1
    self.interval_n = interval_n
    self.post_call_surcharge = post_call_surcharge
    self.connect_fee = connect_fee
    self.free_seconds = free_seconds
    self.grace_period = grace_period
  def read(self, iprot):
    """Deserialize fields from *iprot* into this struct.

    Prefers the accelerated fastbinary decoder when available; otherwise
    loops field-by-field, skipping unknown ids and mismatched wire types.
    """
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.i_cdrs_connections_did = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I64:
          self.i_call = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I64:
          self.i_did_authorization = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.did = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.STRING:
          self.incoming_did = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.I64:
          self.i_connection = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 7:
        if ftype == TType.I32:
          self.result = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 8:
        if ftype == TType.DOUBLE:
          self.cost = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 9:
        if ftype == TType.DOUBLE:
          self.duration = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 10:
        if ftype == TType.DOUBLE:
          self.billed_duration = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 11:
        if ftype == TType.I64:
          self.setup_time = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 12:
        if ftype == TType.I64:
          self.connect_time = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 13:
        if ftype == TType.I64:
          self.disconnect_time = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 14:
        if ftype == TType.DOUBLE:
          self.price_1 = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 15:
        if ftype == TType.DOUBLE:
          self.price_n = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 16:
        if ftype == TType.I32:
          self.interval_1 = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 17:
        if ftype == TType.I32:
          self.interval_n = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 18:
        if ftype == TType.DOUBLE:
          self.post_call_surcharge = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 19:
        if ftype == TType.DOUBLE:
          self.connect_fee = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 20:
        if ftype == TType.I32:
          self.free_seconds = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 21:
        if ftype == TType.I32:
          self.grace_period = iprot.readI32()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize all non-None fields to *oprot* in field-id order."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('CdrsConnectionsDids')
    if self.i_cdrs_connections_did is not None:
      oprot.writeFieldBegin('i_cdrs_connections_did', TType.I64, 1)
      oprot.writeI64(self.i_cdrs_connections_did)
      oprot.writeFieldEnd()
    if self.i_call is not None:
      oprot.writeFieldBegin('i_call', TType.I64, 2)
      oprot.writeI64(self.i_call)
      oprot.writeFieldEnd()
    if self.i_did_authorization is not None:
      oprot.writeFieldBegin('i_did_authorization', TType.I64, 3)
      oprot.writeI64(self.i_did_authorization)
      oprot.writeFieldEnd()
    if self.did is not None:
      oprot.writeFieldBegin('did', TType.STRING, 4)
      oprot.writeString(self.did.encode('utf-8') if sys.version_info[0] == 2 else self.did)
      oprot.writeFieldEnd()
    if self.incoming_did is not None:
      oprot.writeFieldBegin('incoming_did', TType.STRING, 5)
      oprot.writeString(self.incoming_did.encode('utf-8') if sys.version_info[0] == 2 else self.incoming_did)
      oprot.writeFieldEnd()
    if self.i_connection is not None:
      oprot.writeFieldBegin('i_connection', TType.I64, 6)
      oprot.writeI64(self.i_connection)
      oprot.writeFieldEnd()
    if self.result is not None:
      oprot.writeFieldBegin('result', TType.I32, 7)
      oprot.writeI32(self.result)
      oprot.writeFieldEnd()
    if self.cost is not None:
      oprot.writeFieldBegin('cost', TType.DOUBLE, 8)
      oprot.writeDouble(self.cost)
      oprot.writeFieldEnd()
    if self.duration is not None:
      oprot.writeFieldBegin('duration', TType.DOUBLE, 9)
      oprot.writeDouble(self.duration)
      oprot.writeFieldEnd()
    if self.billed_duration is not None:
      oprot.writeFieldBegin('billed_duration', TType.DOUBLE, 10)
      oprot.writeDouble(self.billed_duration)
      oprot.writeFieldEnd()
    if self.setup_time is not None:
      oprot.writeFieldBegin('setup_time', TType.I64, 11)
      oprot.writeI64(self.setup_time)
      oprot.writeFieldEnd()
    if self.connect_time is not None:
      oprot.writeFieldBegin('connect_time', TType.I64, 12)
      oprot.writeI64(self.connect_time)
      oprot.writeFieldEnd()
    if self.disconnect_time is not None:
      oprot.writeFieldBegin('disconnect_time', TType.I64, 13)
      oprot.writeI64(self.disconnect_time)
      oprot.writeFieldEnd()
    if self.price_1 is not None:
      oprot.writeFieldBegin('price_1', TType.DOUBLE, 14)
      oprot.writeDouble(self.price_1)
      oprot.writeFieldEnd()
    if self.price_n is not None:
      oprot.writeFieldBegin('price_n', TType.DOUBLE, 15)
      oprot.writeDouble(self.price_n)
      oprot.writeFieldEnd()
    if self.interval_1 is not None:
      oprot.writeFieldBegin('interval_1', TType.I32, 16)
      oprot.writeI32(self.interval_1)
      oprot.writeFieldEnd()
    if self.interval_n is not None:
      oprot.writeFieldBegin('interval_n', TType.I32, 17)
      oprot.writeI32(self.interval_n)
      oprot.writeFieldEnd()
    if self.post_call_surcharge is not None:
      oprot.writeFieldBegin('post_call_surcharge', TType.DOUBLE, 18)
      oprot.writeDouble(self.post_call_surcharge)
      oprot.writeFieldEnd()
    if self.connect_fee is not None:
      oprot.writeFieldBegin('connect_fee', TType.DOUBLE, 19)
      oprot.writeDouble(self.connect_fee)
      oprot.writeFieldEnd()
    if self.free_seconds is not None:
      oprot.writeFieldBegin('free_seconds', TType.I32, 20)
      oprot.writeI32(self.free_seconds)
      oprot.writeFieldEnd()
    if self.grace_period is not None:
      oprot.writeFieldBegin('grace_period', TType.I32, 21)
      oprot.writeI32(self.grace_period)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """No required-field checks for this struct."""
    return
  def __repr__(self):
    """Render as ClassName(attr=value, ...)."""
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class, same attribute dict.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class Surcharges(object):
  """
  Thrift-generated struct; presumably a per-call surcharge line item
  (cost plus a surcharge-type id) -- confirm against the .thrift IDL.

  NOTE(review): auto-generated serialization code; do not hand-edit
  field ids or wire types.

  Attributes:
   - i_surcharge
   - i_call
   - cost
   - i_surcharge_type
  """
  # Each slot is (field id, wire type, name, type-extra, default); tuple
  # index equals the Thrift field id (slot 0 unused).
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'i_surcharge', None, None, ), # 1
    (2, TType.I64, 'i_call', None, None, ), # 2
    (3, TType.DOUBLE, 'cost', None, None, ), # 3
    (4, TType.I64, 'i_surcharge_type', None, None, ), # 4
  )
  def __init__(self, i_surcharge=None, i_call=None, cost=None, i_surcharge_type=None,):
    self.i_surcharge = i_surcharge
    self.i_call = i_call
    self.cost = cost
    self.i_surcharge_type = i_surcharge_type
  def read(self, iprot):
    """Deserialize fields from *iprot*, preferring the fastbinary decoder."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.i_surcharge = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I64:
          self.i_call = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.DOUBLE:
          self.cost = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.I64:
          self.i_surcharge_type = iprot.readI64()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize all non-None fields to *oprot* in field-id order."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('Surcharges')
    if self.i_surcharge is not None:
      oprot.writeFieldBegin('i_surcharge', TType.I64, 1)
      oprot.writeI64(self.i_surcharge)
      oprot.writeFieldEnd()
    if self.i_call is not None:
      oprot.writeFieldBegin('i_call', TType.I64, 2)
      oprot.writeI64(self.i_call)
      oprot.writeFieldEnd()
    if self.cost is not None:
      oprot.writeFieldBegin('cost', TType.DOUBLE, 3)
      oprot.writeDouble(self.cost)
      oprot.writeFieldEnd()
    if self.i_surcharge_type is not None:
      oprot.writeFieldBegin('i_surcharge_type', TType.I64, 4)
      oprot.writeI64(self.i_surcharge_type)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """No required-field checks for this struct."""
    return
  def __repr__(self):
    """Render as ClassName(attr=value, ...)."""
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class, same attribute dict.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class Commissions(object):
  """
  Thrift-generated struct; presumably a commission entry earned on a
  customer CDR -- confirm against the .thrift IDL. i_account and
  i_customer are nullable-int wrapper structs (NullInt64), not plain I64s.

  NOTE(review): auto-generated serialization code; do not hand-edit
  field ids or wire types.

  Attributes:
   - i_commission
   - i_account
   - i_customer
   - i_cdrs_customer
   - commission_size
   - setup_time
   - i_call
  """
  # Each slot is (field id, wire type, name, type-extra, default); tuple
  # index equals the Thrift field id (slot 0 unused).
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'i_commission', None, None, ), # 1
    (2, TType.STRUCT, 'i_account', (NullInt64, NullInt64.thrift_spec), None, ), # 2
    (3, TType.STRUCT, 'i_customer', (NullInt64, NullInt64.thrift_spec), None, ), # 3
    (4, TType.I64, 'i_cdrs_customer', None, None, ), # 4
    (5, TType.DOUBLE, 'commission_size', None, None, ), # 5
    (6, TType.I64, 'setup_time', None, None, ), # 6
    (7, TType.I64, 'i_call', None, None, ), # 7
  )
  def __init__(self, i_commission=None, i_account=None, i_customer=None, i_cdrs_customer=None, commission_size=None, setup_time=None, i_call=None,):
    self.i_commission = i_commission
    self.i_account = i_account
    self.i_customer = i_customer
    self.i_cdrs_customer = i_cdrs_customer
    self.commission_size = commission_size
    self.setup_time = setup_time
    self.i_call = i_call
  def read(self, iprot):
    """Deserialize fields from *iprot*, preferring the fastbinary decoder.

    Struct-typed fields (i_account, i_customer) are decoded by
    instantiating a fresh NullInt64 and delegating to its read().
    """
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.i_commission = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.i_account = NullInt64()
          self.i_account.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.i_customer = NullInt64()
          self.i_customer.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.I64:
          self.i_cdrs_customer = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.DOUBLE:
          self.commission_size = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.I64:
          self.setup_time = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 7:
        if ftype == TType.I64:
          self.i_call = iprot.readI64()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize all non-None fields to *oprot* in field-id order."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('Commissions')
    if self.i_commission is not None:
      oprot.writeFieldBegin('i_commission', TType.I64, 1)
      oprot.writeI64(self.i_commission)
      oprot.writeFieldEnd()
    if self.i_account is not None:
      oprot.writeFieldBegin('i_account', TType.STRUCT, 2)
      self.i_account.write(oprot)
      oprot.writeFieldEnd()
    if self.i_customer is not None:
      oprot.writeFieldBegin('i_customer', TType.STRUCT, 3)
      self.i_customer.write(oprot)
      oprot.writeFieldEnd()
    if self.i_cdrs_customer is not None:
      oprot.writeFieldBegin('i_cdrs_customer', TType.I64, 4)
      oprot.writeI64(self.i_cdrs_customer)
      oprot.writeFieldEnd()
    if self.commission_size is not None:
      oprot.writeFieldBegin('commission_size', TType.DOUBLE, 5)
      oprot.writeDouble(self.commission_size)
      oprot.writeFieldEnd()
    if self.setup_time is not None:
      oprot.writeFieldBegin('setup_time', TType.I64, 6)
      oprot.writeI64(self.setup_time)
      oprot.writeFieldEnd()
    if self.i_call is not None:
      oprot.writeFieldBegin('i_call', TType.I64, 7)
      oprot.writeI64(self.i_call)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """No required-field checks for this struct."""
    return
  def __repr__(self):
    """Render as ClassName(attr=value, ...)."""
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class, same attribute dict.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class CallsSdp(object):
  """
  Thrift-generated struct; presumably one captured SDP payload for a
  call (sdp looks like the raw SDP text, sip_msg_type the SIP message it
  came from) -- confirm against the .thrift IDL. i_cdrs_connection is a
  nullable-int wrapper (NullInt64); time_stamp is a UnixTime struct.

  NOTE(review): auto-generated serialization code; do not hand-edit
  field ids or wire types.

  Attributes:
   - i_calls_sdp
   - i_call
   - i_cdrs_connection
   - time_stamp
   - sdp
   - sip_msg_type
  """
  # Each slot is (field id, wire type, name, type-extra, default); tuple
  # index equals the Thrift field id (slot 0 unused).
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'i_calls_sdp', None, None, ), # 1
    (2, TType.I64, 'i_call', None, None, ), # 2
    (3, TType.STRUCT, 'i_cdrs_connection', (NullInt64, NullInt64.thrift_spec), None, ), # 3
    (4, TType.STRUCT, 'time_stamp', (UnixTime, UnixTime.thrift_spec), None, ), # 4
    (5, TType.STRING, 'sdp', 'UTF8', None, ), # 5
    (6, TType.STRING, 'sip_msg_type', 'UTF8', None, ), # 6
  )
  def __init__(self, i_calls_sdp=None, i_call=None, i_cdrs_connection=None, time_stamp=None, sdp=None, sip_msg_type=None,):
    self.i_calls_sdp = i_calls_sdp
    self.i_call = i_call
    self.i_cdrs_connection = i_cdrs_connection
    self.time_stamp = time_stamp
    self.sdp = sdp
    self.sip_msg_type = sip_msg_type
  def read(self, iprot):
    """Deserialize fields from *iprot*, preferring the fastbinary decoder.

    Struct-typed fields are decoded by instantiating their wrapper type
    (NullInt64 / UnixTime) and delegating to its read().
    """
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.i_calls_sdp = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I64:
          self.i_call = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.i_cdrs_connection = NullInt64()
          self.i_cdrs_connection.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRUCT:
          self.time_stamp = UnixTime()
          self.time_stamp.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.STRING:
          self.sdp = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.STRING:
          self.sip_msg_type = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize all non-None fields to *oprot* in field-id order."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('CallsSdp')
    if self.i_calls_sdp is not None:
      oprot.writeFieldBegin('i_calls_sdp', TType.I64, 1)
      oprot.writeI64(self.i_calls_sdp)
      oprot.writeFieldEnd()
    if self.i_call is not None:
      oprot.writeFieldBegin('i_call', TType.I64, 2)
      oprot.writeI64(self.i_call)
      oprot.writeFieldEnd()
    if self.i_cdrs_connection is not None:
      oprot.writeFieldBegin('i_cdrs_connection', TType.STRUCT, 3)
      self.i_cdrs_connection.write(oprot)
      oprot.writeFieldEnd()
    if self.time_stamp is not None:
      oprot.writeFieldBegin('time_stamp', TType.STRUCT, 4)
      self.time_stamp.write(oprot)
      oprot.writeFieldEnd()
    if self.sdp is not None:
      oprot.writeFieldBegin('sdp', TType.STRING, 5)
      oprot.writeString(self.sdp.encode('utf-8') if sys.version_info[0] == 2 else self.sdp)
      oprot.writeFieldEnd()
    if self.sip_msg_type is not None:
      oprot.writeFieldBegin('sip_msg_type', TType.STRING, 6)
      oprot.writeString(self.sip_msg_type.encode('utf-8') if sys.version_info[0] == 2 else self.sip_msg_type)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """No required-field checks for this struct."""
    return
  def __repr__(self):
    """Render as ClassName(attr=value, ...)."""
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class, same attribute dict.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class CdrsCustomersDids(object):
    """Thrift struct: one CDR row for a call billed against a customer DID.

    Attributes:
     - i_cdrs_customers_did
     - i_call
     - i_customer
     - i_did
     - did
     - result
     - cost
     - duration
     - billed_duration
     - setup_time
     - connect_time
     - disconnect_time
     - price_1
     - price_n
     - interval_1
     - interval_n
     - post_call_surcharge
     - connect_fee
     - free_seconds
     - grace_period
    """

    thrift_spec = (
        None,  # 0
        (1, TType.I64, 'i_cdrs_customers_did', None, None, ),  # 1
        (2, TType.I64, 'i_call', None, None, ),  # 2
        (3, TType.I64, 'i_customer', None, None, ),  # 3
        (4, TType.I64, 'i_did', None, None, ),  # 4
        (5, TType.STRING, 'did', 'UTF8', None, ),  # 5
        (6, TType.I32, 'result', None, None, ),  # 6
        (7, TType.DOUBLE, 'cost', None, None, ),  # 7
        (8, TType.DOUBLE, 'duration', None, None, ),  # 8
        (9, TType.DOUBLE, 'billed_duration', None, None, ),  # 9
        (10, TType.I64, 'setup_time', None, None, ),  # 10
        (11, TType.I64, 'connect_time', None, None, ),  # 11
        (12, TType.I64, 'disconnect_time', None, None, ),  # 12
        (13, TType.DOUBLE, 'price_1', None, None, ),  # 13
        (14, TType.DOUBLE, 'price_n', None, None, ),  # 14
        (15, TType.I32, 'interval_1', None, None, ),  # 15
        (16, TType.I32, 'interval_n', None, None, ),  # 16
        (17, TType.DOUBLE, 'post_call_surcharge', None, None, ),  # 17
        (18, TType.DOUBLE, 'connect_fee', None, None, ),  # 18
        (19, TType.I32, 'free_seconds', None, None, ),  # 19
        (20, TType.I32, 'grace_period', None, None, ),  # 20
    )

    def __init__(self, i_cdrs_customers_did=None, i_call=None, i_customer=None, i_did=None, did=None, result=None, cost=None, duration=None, billed_duration=None, setup_time=None, connect_time=None, disconnect_time=None, price_1=None, price_n=None, interval_1=None, interval_n=None, post_call_surcharge=None, connect_fee=None, free_seconds=None, grace_period=None,):
        self.i_cdrs_customers_did = i_cdrs_customers_did
        self.i_call = i_call
        self.i_customer = i_customer
        self.i_did = i_did
        self.did = did
        self.result = result
        self.cost = cost
        self.duration = duration
        self.billed_duration = billed_duration
        self.setup_time = setup_time
        self.connect_time = connect_time
        self.disconnect_time = disconnect_time
        self.price_1 = price_1
        self.price_n = price_n
        self.interval_1 = interval_1
        self.interval_n = interval_n
        self.post_call_surcharge = post_call_surcharge
        self.connect_fee = connect_fee
        self.free_seconds = free_seconds
        self.grace_period = grace_period

    def read(self, iprot):
        """Populate this struct from *iprot*; fields with unexpected wire types are skipped."""
        # Fast path: the accelerated C decoder handles the whole struct at once.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        # Slow path: walk the field stream until the STOP marker.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.I64:
                self.i_cdrs_customers_did = iprot.readI64()
            elif fid == 2 and ftype == TType.I64:
                self.i_call = iprot.readI64()
            elif fid == 3 and ftype == TType.I64:
                self.i_customer = iprot.readI64()
            elif fid == 4 and ftype == TType.I64:
                self.i_did = iprot.readI64()
            elif fid == 5 and ftype == TType.STRING:
                raw = iprot.readString()
                # Python 2 returns bytes; decode so `did` is always text.
                self.did = raw.decode('utf-8') if sys.version_info[0] == 2 else raw
            elif fid == 6 and ftype == TType.I32:
                self.result = iprot.readI32()
            elif fid == 7 and ftype == TType.DOUBLE:
                self.cost = iprot.readDouble()
            elif fid == 8 and ftype == TType.DOUBLE:
                self.duration = iprot.readDouble()
            elif fid == 9 and ftype == TType.DOUBLE:
                self.billed_duration = iprot.readDouble()
            elif fid == 10 and ftype == TType.I64:
                self.setup_time = iprot.readI64()
            elif fid == 11 and ftype == TType.I64:
                self.connect_time = iprot.readI64()
            elif fid == 12 and ftype == TType.I64:
                self.disconnect_time = iprot.readI64()
            elif fid == 13 and ftype == TType.DOUBLE:
                self.price_1 = iprot.readDouble()
            elif fid == 14 and ftype == TType.DOUBLE:
                self.price_n = iprot.readDouble()
            elif fid == 15 and ftype == TType.I32:
                self.interval_1 = iprot.readI32()
            elif fid == 16 and ftype == TType.I32:
                self.interval_n = iprot.readI32()
            elif fid == 17 and ftype == TType.DOUBLE:
                self.post_call_surcharge = iprot.readDouble()
            elif fid == 18 and ftype == TType.DOUBLE:
                self.connect_fee = iprot.readDouble()
            elif fid == 19 and ftype == TType.I32:
                self.free_seconds = iprot.readI32()
            elif fid == 20 and ftype == TType.I32:
                self.grace_period = iprot.readI32()
            else:
                # Unknown field id, or known id with the wrong wire type.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; ``None`` fields are omitted."""
        # Fast path: encode in one shot via the C extension.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return

        def _emit_string(value):
            # Python 2 needs an explicit UTF-8 encode before the transport write.
            oprot.writeString(value.encode('utf-8') if sys.version_info[0] == 2 else value)

        oprot.writeStructBegin('CdrsCustomersDids')
        # Emit fields in ascending field-id order, skipping unset ones.
        for fid, ttype, fname, emit, value in (
            (1, TType.I64, 'i_cdrs_customers_did', oprot.writeI64, self.i_cdrs_customers_did),
            (2, TType.I64, 'i_call', oprot.writeI64, self.i_call),
            (3, TType.I64, 'i_customer', oprot.writeI64, self.i_customer),
            (4, TType.I64, 'i_did', oprot.writeI64, self.i_did),
            (5, TType.STRING, 'did', _emit_string, self.did),
            (6, TType.I32, 'result', oprot.writeI32, self.result),
            (7, TType.DOUBLE, 'cost', oprot.writeDouble, self.cost),
            (8, TType.DOUBLE, 'duration', oprot.writeDouble, self.duration),
            (9, TType.DOUBLE, 'billed_duration', oprot.writeDouble, self.billed_duration),
            (10, TType.I64, 'setup_time', oprot.writeI64, self.setup_time),
            (11, TType.I64, 'connect_time', oprot.writeI64, self.connect_time),
            (12, TType.I64, 'disconnect_time', oprot.writeI64, self.disconnect_time),
            (13, TType.DOUBLE, 'price_1', oprot.writeDouble, self.price_1),
            (14, TType.DOUBLE, 'price_n', oprot.writeDouble, self.price_n),
            (15, TType.I32, 'interval_1', oprot.writeI32, self.interval_1),
            (16, TType.I32, 'interval_n', oprot.writeI32, self.interval_n),
            (17, TType.DOUBLE, 'post_call_surcharge', oprot.writeDouble, self.post_call_surcharge),
            (18, TType.DOUBLE, 'connect_fee', oprot.writeDouble, self.connect_fee),
            (19, TType.I32, 'free_seconds', oprot.writeI32, self.free_seconds),
            (20, TType.I32, 'grace_period', oprot.writeI32, self.grace_period),
        ):
            if value is not None:
                oprot.writeFieldBegin(fname, ttype, fid)
                emit(value)
                oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated structs with no required fields have nothing to validate.
        return

    def __repr__(self):
        pairs = ('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class UpdateAccountBalanceMessage(object):
    """Thrift struct: a balance delta to apply to a single account.

    Attributes:
     - i_account
     - delta
    """

    thrift_spec = (
        None,  # 0
        (1, TType.I64, 'i_account', None, None, ),  # 1
        (2, TType.DOUBLE, 'delta', None, None, ),  # 2
    )

    def __init__(self, i_account=None, delta=None,):
        self.i_account = i_account
        self.delta = delta

    def read(self, iprot):
        """Populate this struct from *iprot*; unknown fields are skipped."""
        # Fast path via the accelerated C decoder when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.I64:
                self.i_account = iprot.readI64()
            elif fid == 2 and ftype == TType.DOUBLE:
                self.delta = iprot.readDouble()
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; ``None`` fields are omitted."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('UpdateAccountBalanceMessage')
        for fid, ttype, fname, emit, value in (
            (1, TType.I64, 'i_account', oprot.writeI64, self.i_account),
            (2, TType.DOUBLE, 'delta', oprot.writeDouble, self.delta),
        ):
            if value is not None:
                oprot.writeFieldBegin(fname, ttype, fid)
                emit(value)
                oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to check.
        return

    def __repr__(self):
        pairs = ('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class UpdateCustomerBalanceMessage(object):
    """Thrift struct: a balance delta to apply to a customer.

    Attributes:
     - i_customer
     - delta
    """

    thrift_spec = (
        None,  # 0
        (1, TType.I64, 'i_customer', None, None, ),  # 1
        (2, TType.DOUBLE, 'delta', None, None, ),  # 2
    )

    def __init__(self, i_customer=None, delta=None,):
        self.i_customer = i_customer
        self.delta = delta

    def read(self, iprot):
        """Populate this struct from *iprot*; unknown fields are skipped."""
        # Fast path via the accelerated C decoder when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.I64:
                self.i_customer = iprot.readI64()
            elif fid == 2 and ftype == TType.DOUBLE:
                self.delta = iprot.readDouble()
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; ``None`` fields are omitted."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('UpdateCustomerBalanceMessage')
        for fid, ttype, fname, emit, value in (
            (1, TType.I64, 'i_customer', oprot.writeI64, self.i_customer),
            (2, TType.DOUBLE, 'delta', oprot.writeDouble, self.delta),
        ):
            if value is not None:
                oprot.writeFieldBegin(fname, ttype, fid)
                emit(value)
                oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to check.
        return

    def __repr__(self):
        pairs = ('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class UpdateVendorBalanceMessage(object):
    """Thrift struct: a balance delta to apply to a vendor, tied to a connection.

    Attributes:
     - i_vendor
     - delta
     - i_connection
    """

    thrift_spec = (
        None,  # 0
        (1, TType.I64, 'i_vendor', None, None, ),  # 1
        (2, TType.DOUBLE, 'delta', None, None, ),  # 2
        (3, TType.I64, 'i_connection', None, None, ),  # 3
    )

    def __init__(self, i_vendor=None, delta=None, i_connection=None,):
        self.i_vendor = i_vendor
        self.delta = delta
        self.i_connection = i_connection

    def read(self, iprot):
        """Populate this struct from *iprot*; unknown fields are skipped."""
        # Fast path via the accelerated C decoder when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.I64:
                self.i_vendor = iprot.readI64()
            elif fid == 2 and ftype == TType.DOUBLE:
                self.delta = iprot.readDouble()
            elif fid == 3 and ftype == TType.I64:
                self.i_connection = iprot.readI64()
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; ``None`` fields are omitted."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('UpdateVendorBalanceMessage')
        for fid, ttype, fname, emit, value in (
            (1, TType.I64, 'i_vendor', oprot.writeI64, self.i_vendor),
            (2, TType.DOUBLE, 'delta', oprot.writeDouble, self.delta),
            (3, TType.I64, 'i_connection', oprot.writeI64, self.i_connection),
        ):
            if value is not None:
                oprot.writeFieldBegin(fname, ttype, fid)
                emit(value)
                oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to check.
        return

    def __repr__(self):
        pairs = ('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class UpdatePlanMinutesMessage(object):
    """Thrift struct: a service-plan minutes adjustment for an account.

    Attributes:
     - i_account
     - i_service_plan
     - delta
     - chargeable_seconds
    """

    thrift_spec = (
        None,  # 0
        (1, TType.I64, 'i_account', None, None, ),  # 1
        (2, TType.I64, 'i_service_plan', None, None, ),  # 2
        (3, TType.DOUBLE, 'delta', None, None, ),  # 3
        (4, TType.DOUBLE, 'chargeable_seconds', None, None, ),  # 4
    )

    def __init__(self, i_account=None, i_service_plan=None, delta=None, chargeable_seconds=None,):
        self.i_account = i_account
        self.i_service_plan = i_service_plan
        self.delta = delta
        self.chargeable_seconds = chargeable_seconds

    def read(self, iprot):
        """Populate this struct from *iprot*; unknown fields are skipped."""
        # Fast path via the accelerated C decoder when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.I64:
                self.i_account = iprot.readI64()
            elif fid == 2 and ftype == TType.I64:
                self.i_service_plan = iprot.readI64()
            elif fid == 3 and ftype == TType.DOUBLE:
                self.delta = iprot.readDouble()
            elif fid == 4 and ftype == TType.DOUBLE:
                self.chargeable_seconds = iprot.readDouble()
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; ``None`` fields are omitted."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('UpdatePlanMinutesMessage')
        for fid, ttype, fname, emit, value in (
            (1, TType.I64, 'i_account', oprot.writeI64, self.i_account),
            (2, TType.I64, 'i_service_plan', oprot.writeI64, self.i_service_plan),
            (3, TType.DOUBLE, 'delta', oprot.writeDouble, self.delta),
            (4, TType.DOUBLE, 'chargeable_seconds', oprot.writeDouble, self.chargeable_seconds),
        ):
            if value is not None:
                oprot.writeFieldBegin(fname, ttype, fid)
                emit(value)
                oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to check.
        return

    def __repr__(self):
        pairs = ('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class ConnectionQualityStats(object):
    """Thrift struct: a quality-stats snapshot (ASR/ACD) for a connection.

    Attributes:
     - i_connection_quality_stats
     - i_connection
     - tstamp
     - asr
     - acd
     - action
    """

    thrift_spec = (
        None,  # 0
        (1, TType.I64, 'i_connection_quality_stats', None, None, ),  # 1
        (2, TType.I64, 'i_connection', None, None, ),  # 2
        (3, TType.I64, 'tstamp', None, None, ),  # 3
        (4, TType.DOUBLE, 'asr', None, None, ),  # 4
        (5, TType.I32, 'acd', None, None, ),  # 5
        (6, TType.STRING, 'action', 'UTF8', None, ),  # 6
    )

    def __init__(self, i_connection_quality_stats=None, i_connection=None, tstamp=None, asr=None, acd=None, action=None,):
        self.i_connection_quality_stats = i_connection_quality_stats
        self.i_connection = i_connection
        self.tstamp = tstamp
        self.asr = asr
        self.acd = acd
        self.action = action

    def read(self, iprot):
        """Populate this struct from *iprot*; unknown fields are skipped."""
        # Fast path via the accelerated C decoder when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.I64:
                self.i_connection_quality_stats = iprot.readI64()
            elif fid == 2 and ftype == TType.I64:
                self.i_connection = iprot.readI64()
            elif fid == 3 and ftype == TType.I64:
                self.tstamp = iprot.readI64()
            elif fid == 4 and ftype == TType.DOUBLE:
                self.asr = iprot.readDouble()
            elif fid == 5 and ftype == TType.I32:
                self.acd = iprot.readI32()
            elif fid == 6 and ftype == TType.STRING:
                raw = iprot.readString()
                # Python 2 returns bytes; decode so `action` is always text.
                self.action = raw.decode('utf-8') if sys.version_info[0] == 2 else raw
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; ``None`` fields are omitted."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return

        def _emit_string(value):
            # Python 2 needs an explicit UTF-8 encode before the transport write.
            oprot.writeString(value.encode('utf-8') if sys.version_info[0] == 2 else value)

        oprot.writeStructBegin('ConnectionQualityStats')
        for fid, ttype, fname, emit, value in (
            (1, TType.I64, 'i_connection_quality_stats', oprot.writeI64, self.i_connection_quality_stats),
            (2, TType.I64, 'i_connection', oprot.writeI64, self.i_connection),
            (3, TType.I64, 'tstamp', oprot.writeI64, self.tstamp),
            (4, TType.DOUBLE, 'asr', oprot.writeDouble, self.asr),
            (5, TType.I32, 'acd', oprot.writeI32, self.acd),
            (6, TType.STRING, 'action', _emit_string, self.action),
        ):
            if value is not None:
                oprot.writeFieldBegin(fname, ttype, fid)
                emit(value)
                oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to check.
        return

    def __repr__(self):
        pairs = ('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class RegisterError(TException):
    """Thrift exception carrying a numeric cause code and the affected call id.

    Attributes:
     - cause
     - i_call
    """

    thrift_spec = (
        None,  # 0
        (1, TType.I32, 'cause', None, None, ),  # 1
        (2, TType.I64, 'i_call', None, None, ),  # 2
    )

    def __init__(self, cause=None, i_call=None,):
        self.cause = cause
        self.i_call = i_call

    def read(self, iprot):
        """Populate this exception from *iprot*; unknown fields are skipped."""
        # Fast path via the accelerated C decoder when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.I32:
                self.cause = iprot.readI32()
            elif fid == 2 and ftype == TType.I64:
                self.i_call = iprot.readI64()
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this exception to *oprot*; ``None`` fields are omitted."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('RegisterError')
        for fid, ttype, fname, emit, value in (
            (1, TType.I32, 'cause', oprot.writeI32, self.cause),
            (2, TType.I64, 'i_call', oprot.writeI64, self.i_call),
        ):
            if value is not None:
                oprot.writeFieldBegin(fname, ttype, fid)
                emit(value)
                oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to check.
        return

    def __str__(self):
        return repr(self)

    def __repr__(self):
        pairs = ('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TryBackupError(TException):
    """Thrift exception with no payload fields.

    NOTE(review): the name suggests the caller should retry against a
    backup node/route — confirm against the service definition.
    """

    thrift_spec = (
    )

    def read(self, iprot):
        """Consume this (field-less) struct from *iprot*."""
        # Fast path via the accelerated C decoder when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            # No declared fields: everything on the wire is skipped.
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this (field-less) struct to *oprot*."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('TryBackupError')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Nothing to validate: no fields.
        return

    def __str__(self):
        return repr(self)

    def __repr__(self):
        pairs = ('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class EagainError(TException):
    """Thrift exception with no payload fields.

    NOTE(review): the name suggests an EAGAIN-style "busy, retry later"
    condition — confirm against the service definition.
    """

    thrift_spec = (
    )

    def read(self, iprot):
        """Consume this (field-less) struct from *iprot*."""
        # Fast path via the accelerated C decoder when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            # No declared fields: everything on the wire is skipped.
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this (field-less) struct to *oprot*."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('EagainError')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Nothing to validate: no fields.
        return

    def __str__(self):
        return repr(self)

    def __repr__(self):
        pairs = ('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class Billables(object):
    """Thrift struct: rating parameters used to bill a call.

    Attributes:
     - free_seconds
     - connect_fee
     - price_1
     - price_n
     - interval_1
     - interval_n
     - post_call_surcharge
     - grace_period
     - prefix
     - decimal_precision
     - cost_round_up
    """

    thrift_spec = (
        None,  # 0
        (1, TType.I64, 'free_seconds', None, None, ),  # 1
        (2, TType.DOUBLE, 'connect_fee', None, None, ),  # 2
        (3, TType.DOUBLE, 'price_1', None, None, ),  # 3
        (4, TType.DOUBLE, 'price_n', None, None, ),  # 4
        (5, TType.I32, 'interval_1', None, None, ),  # 5
        (6, TType.I32, 'interval_n', None, None, ),  # 6
        (7, TType.DOUBLE, 'post_call_surcharge', None, None, ),  # 7
        (8, TType.I32, 'grace_period', None, None, ),  # 8
        (9, TType.STRING, 'prefix', 'UTF8', None, ),  # 9
        (10, TType.I32, 'decimal_precision', None, None, ),  # 10
        (11, TType.BOOL, 'cost_round_up', None, None, ),  # 11
    )

    def __init__(self, free_seconds=None, connect_fee=None, price_1=None, price_n=None, interval_1=None, interval_n=None, post_call_surcharge=None, grace_period=None, prefix=None, decimal_precision=None, cost_round_up=None,):
        self.free_seconds = free_seconds
        self.connect_fee = connect_fee
        self.price_1 = price_1
        self.price_n = price_n
        self.interval_1 = interval_1
        self.interval_n = interval_n
        self.post_call_surcharge = post_call_surcharge
        self.grace_period = grace_period
        self.prefix = prefix
        self.decimal_precision = decimal_precision
        self.cost_round_up = cost_round_up

    def read(self, iprot):
        """Populate this struct from *iprot*; unknown fields are skipped."""
        # Fast path via the accelerated C decoder when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.I64:
                self.free_seconds = iprot.readI64()
            elif fid == 2 and ftype == TType.DOUBLE:
                self.connect_fee = iprot.readDouble()
            elif fid == 3 and ftype == TType.DOUBLE:
                self.price_1 = iprot.readDouble()
            elif fid == 4 and ftype == TType.DOUBLE:
                self.price_n = iprot.readDouble()
            elif fid == 5 and ftype == TType.I32:
                self.interval_1 = iprot.readI32()
            elif fid == 6 and ftype == TType.I32:
                self.interval_n = iprot.readI32()
            elif fid == 7 and ftype == TType.DOUBLE:
                self.post_call_surcharge = iprot.readDouble()
            elif fid == 8 and ftype == TType.I32:
                self.grace_period = iprot.readI32()
            elif fid == 9 and ftype == TType.STRING:
                raw = iprot.readString()
                # Python 2 returns bytes; decode so `prefix` is always text.
                self.prefix = raw.decode('utf-8') if sys.version_info[0] == 2 else raw
            elif fid == 10 and ftype == TType.I32:
                self.decimal_precision = iprot.readI32()
            elif fid == 11 and ftype == TType.BOOL:
                self.cost_round_up = iprot.readBool()
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; ``None`` fields are omitted."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return

        def _emit_string(value):
            # Python 2 needs an explicit UTF-8 encode before the transport write.
            oprot.writeString(value.encode('utf-8') if sys.version_info[0] == 2 else value)

        oprot.writeStructBegin('Billables')
        for fid, ttype, fname, emit, value in (
            (1, TType.I64, 'free_seconds', oprot.writeI64, self.free_seconds),
            (2, TType.DOUBLE, 'connect_fee', oprot.writeDouble, self.connect_fee),
            (3, TType.DOUBLE, 'price_1', oprot.writeDouble, self.price_1),
            (4, TType.DOUBLE, 'price_n', oprot.writeDouble, self.price_n),
            (5, TType.I32, 'interval_1', oprot.writeI32, self.interval_1),
            (6, TType.I32, 'interval_n', oprot.writeI32, self.interval_n),
            (7, TType.DOUBLE, 'post_call_surcharge', oprot.writeDouble, self.post_call_surcharge),
            (8, TType.I32, 'grace_period', oprot.writeI32, self.grace_period),
            (9, TType.STRING, 'prefix', _emit_string, self.prefix),
            (10, TType.I32, 'decimal_precision', oprot.writeI32, self.decimal_precision),
            (11, TType.BOOL, 'cost_round_up', oprot.writeBool, self.cost_round_up),
        ):
            if value is not None:
                oprot.writeFieldBegin(fname, ttype, fid)
                emit(value)
                oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to check.
        return

    def __repr__(self):
        pairs = ('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class AccountBillables(object):
"""
Attributes:
- bparams
- area_name
- i_commission_agent
- commission_size
- i_wholesaler
- fresh_balance
- plan_only
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'bparams', (Billables, Billables.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'area_name', (NullString, NullString.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'i_commission_agent', (NullInt64, NullInt64.thrift_spec), None, ), # 3
(4, TType.DOUBLE, 'commission_size', None, None, ), # 4
(5, TType.I64, 'i_wholesaler', None, None, ), # 5
(6, TType.DOUBLE, 'fresh_balance', None, None, ), # 6
(7, TType.BOOL, 'plan_only', None, None, ), # 7
)
def __init__(self, bparams=None, area_name=None, i_commission_agent=None, commission_size=None, i_wholesaler=None, fresh_balance=None, plan_only=None,):
self.bparams = bparams
self.area_name = area_name
self.i_commission_agent = i_commission_agent
self.commission_size = commission_size
self.i_wholesaler = i_wholesaler
self.fresh_balance = fresh_balance
self.plan_only = plan_only
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.bparams = Billables()
self.bparams.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.area_name = NullString()
self.area_name.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.i_commission_agent = NullInt64()
self.i_commission_agent.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.DOUBLE:
self.commission_size = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I64:
self.i_wholesaler = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.DOUBLE:
self.fresh_balance = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.BOOL:
self.plan_only = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields that are None are omitted."""
    # Fast path: encode via the C fastbinary extension when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: write each set field with its fixed id and wire type.
    oprot.writeStructBegin('AccountBillables')
    if self.bparams is not None:
      oprot.writeFieldBegin('bparams', TType.STRUCT, 1)
      self.bparams.write(oprot)
      oprot.writeFieldEnd()
    if self.area_name is not None:
      oprot.writeFieldBegin('area_name', TType.STRUCT, 2)
      self.area_name.write(oprot)
      oprot.writeFieldEnd()
    if self.i_commission_agent is not None:
      oprot.writeFieldBegin('i_commission_agent', TType.STRUCT, 3)
      self.i_commission_agent.write(oprot)
      oprot.writeFieldEnd()
    if self.commission_size is not None:
      oprot.writeFieldBegin('commission_size', TType.DOUBLE, 4)
      oprot.writeDouble(self.commission_size)
      oprot.writeFieldEnd()
    if self.i_wholesaler is not None:
      oprot.writeFieldBegin('i_wholesaler', TType.I64, 5)
      oprot.writeI64(self.i_wholesaler)
      oprot.writeFieldEnd()
    if self.fresh_balance is not None:
      oprot.writeFieldBegin('fresh_balance', TType.DOUBLE, 6)
      oprot.writeDouble(self.fresh_balance)
      oprot.writeFieldEnd()
    if self.plan_only is not None:
      oprot.writeFieldBegin('plan_only', TType.BOOL, 7)
      oprot.writeBool(self.plan_only)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """No required fields, so there is nothing to check."""
    return
  def __repr__(self):
    # Debug representation: ClassName(field=value, ...) over all attributes.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    # Defined as the negation of __eq__.
    return not (self == other)
class CustomerBillables(object):
  """
  Auto-generated Thrift struct; regenerate from the IDL rather than
  editing by hand. All fields are optional and default to None.

  Attributes:
   - bparams
   - area_name
   - i_commission_agent
   - commission_size
   - i_customer
   - i_wholesaler
  """
  # Per field id: (id, wire type, name, nested type spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'bparams', (Billables, Billables.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'area_name', (NullString, NullString.thrift_spec), None, ), # 2
    (3, TType.STRUCT, 'i_commission_agent', (NullInt64, NullInt64.thrift_spec), None, ), # 3
    (4, TType.DOUBLE, 'commission_size', None, None, ), # 4
    (5, TType.I64, 'i_customer', None, None, ), # 5
    (6, TType.I64, 'i_wholesaler', None, None, ), # 6
  )
  def __init__(self, bparams=None, area_name=None, i_commission_agent=None, commission_size=None, i_customer=None, i_wholesaler=None,):
    self.bparams = bparams
    self.area_name = area_name
    self.i_commission_agent = i_commission_agent
    self.commission_size = commission_size
    self.i_customer = i_customer
    self.i_wholesaler = i_wholesaler
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
      return
    # Slow path: field-by-field decode; unexpected ids/types are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.bparams = Billables()
          self.bparams.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.area_name = NullString()
          self.area_name.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.i_commission_agent = NullInt64()
          self.i_commission_agent.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.DOUBLE:
          self.commission_size = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.I64:
          self.i_customer = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.I64:
          self.i_wholesaler = iprot.readI64()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields that are None are omitted."""
    # Fast path: C encoder when the fastbinary extension is available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('CustomerBillables')
    if self.bparams is not None:
      oprot.writeFieldBegin('bparams', TType.STRUCT, 1)
      self.bparams.write(oprot)
      oprot.writeFieldEnd()
    if self.area_name is not None:
      oprot.writeFieldBegin('area_name', TType.STRUCT, 2)
      self.area_name.write(oprot)
      oprot.writeFieldEnd()
    if self.i_commission_agent is not None:
      oprot.writeFieldBegin('i_commission_agent', TType.STRUCT, 3)
      self.i_commission_agent.write(oprot)
      oprot.writeFieldEnd()
    if self.commission_size is not None:
      oprot.writeFieldBegin('commission_size', TType.DOUBLE, 4)
      oprot.writeDouble(self.commission_size)
      oprot.writeFieldEnd()
    if self.i_customer is not None:
      oprot.writeFieldBegin('i_customer', TType.I64, 5)
      oprot.writeI64(self.i_customer)
      oprot.writeFieldEnd()
    if self.i_wholesaler is not None:
      oprot.writeFieldBegin('i_wholesaler', TType.I64, 6)
      oprot.writeI64(self.i_wholesaler)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """No required fields, so there is nothing to check."""
    return
  def __repr__(self):
    # Debug representation: ClassName(field=value, ...) over all attributes.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class DidBillables(object):
  """
  Auto-generated Thrift struct; regenerate from the IDL rather than
  editing by hand. All fields are optional and default to None.

  Attributes:
   - bparams
   - i_did
   - did
  """
  # Per field id: (id, wire type, name, nested type spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'bparams', (Billables, Billables.thrift_spec), None, ), # 1
    (2, TType.I64, 'i_did', None, None, ), # 2
    (3, TType.STRING, 'did', 'UTF8', None, ), # 3
  )
  def __init__(self, bparams=None, i_did=None, did=None,):
    self.bparams = bparams
    self.i_did = i_did
    self.did = did
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
      return
    # Slow path: field-by-field decode; unexpected ids/types are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.bparams = Billables()
          self.bparams.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I64:
          self.i_did = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          # Strings are decoded to unicode on Python 2; already str on Python 3.
          self.did = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields that are None are omitted."""
    # Fast path: C encoder when the fastbinary extension is available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('DidBillables')
    if self.bparams is not None:
      oprot.writeFieldBegin('bparams', TType.STRUCT, 1)
      self.bparams.write(oprot)
      oprot.writeFieldEnd()
    if self.i_did is not None:
      oprot.writeFieldBegin('i_did', TType.I64, 2)
      oprot.writeI64(self.i_did)
      oprot.writeFieldEnd()
    if self.did is not None:
      oprot.writeFieldBegin('did', TType.STRING, 3)
      oprot.writeString(self.did.encode('utf-8') if sys.version_info[0] == 2 else self.did)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """No required fields, so there is nothing to check."""
    return
  def __repr__(self):
    # Debug representation: ClassName(field=value, ...) over all attributes.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class BuyingDidBillables(object):
  """
  Auto-generated Thrift struct; regenerate from the IDL rather than
  editing by hand. All fields are optional and default to None.

  Attributes:
   - bparams
   - did
   - i_connection
   - i_did_authorization
  """
  # Per field id: (id, wire type, name, nested type spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'bparams', (Billables, Billables.thrift_spec), None, ), # 1
    (2, TType.STRING, 'did', 'UTF8', None, ), # 2
    (3, TType.I64, 'i_connection', None, None, ), # 3
    (4, TType.I64, 'i_did_authorization', None, None, ), # 4
  )
  def __init__(self, bparams=None, did=None, i_connection=None, i_did_authorization=None,):
    self.bparams = bparams
    self.did = did
    self.i_connection = i_connection
    self.i_did_authorization = i_did_authorization
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
      return
    # Slow path: field-by-field decode; unexpected ids/types are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.bparams = Billables()
          self.bparams.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          # Strings are decoded to unicode on Python 2; already str on Python 3.
          self.did = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I64:
          self.i_connection = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.I64:
          self.i_did_authorization = iprot.readI64()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields that are None are omitted."""
    # Fast path: C encoder when the fastbinary extension is available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('BuyingDidBillables')
    if self.bparams is not None:
      oprot.writeFieldBegin('bparams', TType.STRUCT, 1)
      self.bparams.write(oprot)
      oprot.writeFieldEnd()
    if self.did is not None:
      oprot.writeFieldBegin('did', TType.STRING, 2)
      oprot.writeString(self.did.encode('utf-8') if sys.version_info[0] == 2 else self.did)
      oprot.writeFieldEnd()
    if self.i_connection is not None:
      oprot.writeFieldBegin('i_connection', TType.I64, 3)
      oprot.writeI64(self.i_connection)
      oprot.writeFieldEnd()
    if self.i_did_authorization is not None:
      oprot.writeFieldBegin('i_did_authorization', TType.I64, 4)
      oprot.writeI64(self.i_did_authorization)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """No required fields, so there is nothing to check."""
    return
  def __repr__(self):
    # Debug representation: ClassName(field=value, ...) over all attributes.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class CustomerDidBillables(object):
  """
  Auto-generated Thrift struct; regenerate from the IDL rather than
  editing by hand. All fields are optional and default to None.

  Attributes:
   - bparams
   - did
   - i_customer
   - i_did
  """
  # Per field id: (id, wire type, name, nested type spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'bparams', (Billables, Billables.thrift_spec), None, ), # 1
    (2, TType.STRING, 'did', 'UTF8', None, ), # 2
    (3, TType.I64, 'i_customer', None, None, ), # 3
    (4, TType.I64, 'i_did', None, None, ), # 4
  )
  def __init__(self, bparams=None, did=None, i_customer=None, i_did=None,):
    self.bparams = bparams
    self.did = did
    self.i_customer = i_customer
    self.i_did = i_did
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
      return
    # Slow path: field-by-field decode; unexpected ids/types are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.bparams = Billables()
          self.bparams.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          # Strings are decoded to unicode on Python 2; already str on Python 3.
          self.did = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I64:
          self.i_customer = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.I64:
          self.i_did = iprot.readI64()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields that are None are omitted."""
    # Fast path: C encoder when the fastbinary extension is available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('CustomerDidBillables')
    if self.bparams is not None:
      oprot.writeFieldBegin('bparams', TType.STRUCT, 1)
      self.bparams.write(oprot)
      oprot.writeFieldEnd()
    if self.did is not None:
      oprot.writeFieldBegin('did', TType.STRING, 2)
      oprot.writeString(self.did.encode('utf-8') if sys.version_info[0] == 2 else self.did)
      oprot.writeFieldEnd()
    if self.i_customer is not None:
      oprot.writeFieldBegin('i_customer', TType.I64, 3)
      oprot.writeI64(self.i_customer)
      oprot.writeFieldEnd()
    if self.i_did is not None:
      oprot.writeFieldBegin('i_did', TType.I64, 4)
      oprot.writeI64(self.i_did)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """No required fields, so there is nothing to check."""
    return
  def __repr__(self):
    # Debug representation: ClassName(field=value, ...) over all attributes.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class CreditTimes(object):
  """
  Auto-generated Thrift struct; regenerate from the IDL rather than
  editing by hand. All fields are optional and default to None.

  Attributes:
   - crtime_acct
   - crtime_ext
   - rtime
  """
  # Per field id: (id, wire type, name, nested type spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'crtime_acct', (MonoTime, MonoTime.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'crtime_ext', (MonoTime, MonoTime.thrift_spec), None, ), # 2
    (3, TType.STRUCT, 'rtime', (MonoTime, MonoTime.thrift_spec), None, ), # 3
  )
  def __init__(self, crtime_acct=None, crtime_ext=None, rtime=None,):
    self.crtime_acct = crtime_acct
    self.crtime_ext = crtime_ext
    self.rtime = rtime
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
      return
    # Slow path: field-by-field decode; unexpected ids/types are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.crtime_acct = MonoTime()
          self.crtime_acct.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.crtime_ext = MonoTime()
          self.crtime_ext.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.rtime = MonoTime()
          self.rtime.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields that are None are omitted."""
    # Fast path: C encoder when the fastbinary extension is available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('CreditTimes')
    if self.crtime_acct is not None:
      oprot.writeFieldBegin('crtime_acct', TType.STRUCT, 1)
      self.crtime_acct.write(oprot)
      oprot.writeFieldEnd()
    if self.crtime_ext is not None:
      oprot.writeFieldBegin('crtime_ext', TType.STRUCT, 2)
      self.crtime_ext.write(oprot)
      oprot.writeFieldEnd()
    if self.rtime is not None:
      oprot.writeFieldBegin('rtime', TType.STRUCT, 3)
      self.rtime.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """No required fields, so there is nothing to check."""
    return
  def __repr__(self):
    # Debug representation: ClassName(field=value, ...) over all attributes.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class Duration(object):
  """
  Auto-generated Thrift struct; regenerate from the IDL rather than
  editing by hand. Wraps a single optional i64 field.

  Attributes:
   - nanoseconds
  """
  # Per field id: (id, wire type, name, nested type spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'nanoseconds', None, None, ), # 1
  )
  def __init__(self, nanoseconds=None,):
    self.nanoseconds = nanoseconds
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
      return
    # Slow path: field-by-field decode; unexpected ids/types are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.nanoseconds = iprot.readI64()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields that are None are omitted."""
    # Fast path: C encoder when the fastbinary extension is available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('Duration')
    if self.nanoseconds is not None:
      oprot.writeFieldBegin('nanoseconds', TType.I64, 1)
      oprot.writeI64(self.nanoseconds)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """No required fields, so there is nothing to check."""
    return
  def __repr__(self):
    # Debug representation: ClassName(field=value, ...) over all attributes.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class Tariff(object):
  """
  Auto-generated Thrift struct; regenerate from the IDL rather than
  editing by hand. All fields are optional and default to None.

  Attributes:
   - post_call_surcharge
   - connect_fee
   - name
   - i_tariff
   - free_seconds
   - i_owner
   - iso_4217
   - grace_period
   - max_loss
   - average_duration
   - loss_protection
   - local_calling
   - local_calling_cli_validation_rule
   - last_change_count
   - local_id
   - remote_id
   - is_remote
   - is_exportable
   - decimal_precision
   - cost_round_up
  """
  # Per field id: (id, wire type, name, nested type spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.DOUBLE, 'post_call_surcharge', None, None, ), # 1
    (2, TType.DOUBLE, 'connect_fee', None, None, ), # 2
    (3, TType.STRUCT, 'name', (NullString, NullString.thrift_spec), None, ), # 3
    (4, TType.I64, 'i_tariff', None, None, ), # 4
    (5, TType.I32, 'free_seconds', None, None, ), # 5
    (6, TType.I64, 'i_owner', None, None, ), # 6
    (7, TType.STRING, 'iso_4217', 'UTF8', None, ), # 7
    (8, TType.I32, 'grace_period', None, None, ), # 8
    (9, TType.DOUBLE, 'max_loss', None, None, ), # 9
    (10, TType.I32, 'average_duration', None, None, ), # 10
    (11, TType.BOOL, 'loss_protection', None, None, ), # 11
    (12, TType.BOOL, 'local_calling', None, None, ), # 12
    (13, TType.STRING, 'local_calling_cli_validation_rule', 'UTF8', None, ), # 13
    (14, TType.I64, 'last_change_count', None, None, ), # 14
    (15, TType.STRUCT, 'local_id', (NullString, NullString.thrift_spec), None, ), # 15
    (16, TType.STRUCT, 'remote_id', (NullString, NullString.thrift_spec), None, ), # 16
    (17, TType.BOOL, 'is_remote', None, None, ), # 17
    (18, TType.BOOL, 'is_exportable', None, None, ), # 18
    (19, TType.I32, 'decimal_precision', None, None, ), # 19
    (20, TType.BOOL, 'cost_round_up', None, None, ), # 20
  )
  def __init__(self, post_call_surcharge=None, connect_fee=None, name=None, i_tariff=None, free_seconds=None, i_owner=None, iso_4217=None, grace_period=None, max_loss=None, average_duration=None, loss_protection=None, local_calling=None, local_calling_cli_validation_rule=None, last_change_count=None, local_id=None, remote_id=None, is_remote=None, is_exportable=None, decimal_precision=None, cost_round_up=None,):
    self.post_call_surcharge = post_call_surcharge
    self.connect_fee = connect_fee
    self.name = name
    self.i_tariff = i_tariff
    self.free_seconds = free_seconds
    self.i_owner = i_owner
    self.iso_4217 = iso_4217
    self.grace_period = grace_period
    self.max_loss = max_loss
    self.average_duration = average_duration
    self.loss_protection = loss_protection
    self.local_calling = local_calling
    self.local_calling_cli_validation_rule = local_calling_cli_validation_rule
    self.last_change_count = last_change_count
    self.local_id = local_id
    self.remote_id = remote_id
    self.is_remote = is_remote
    self.is_exportable = is_exportable
    self.decimal_precision = decimal_precision
    self.cost_round_up = cost_round_up
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
      return
    # Slow path: field-by-field decode; unexpected ids/types are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.DOUBLE:
          self.post_call_surcharge = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.DOUBLE:
          self.connect_fee = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.name = NullString()
          self.name.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.I64:
          self.i_tariff = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.I32:
          self.free_seconds = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.I64:
          self.i_owner = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 7:
        if ftype == TType.STRING:
          # Strings are decoded to unicode on Python 2; already str on Python 3.
          self.iso_4217 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 8:
        if ftype == TType.I32:
          self.grace_period = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 9:
        if ftype == TType.DOUBLE:
          self.max_loss = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 10:
        if ftype == TType.I32:
          self.average_duration = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 11:
        if ftype == TType.BOOL:
          self.loss_protection = iprot.readBool()
        else:
          iprot.skip(ftype)
      elif fid == 12:
        if ftype == TType.BOOL:
          self.local_calling = iprot.readBool()
        else:
          iprot.skip(ftype)
      elif fid == 13:
        if ftype == TType.STRING:
          self.local_calling_cli_validation_rule = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 14:
        if ftype == TType.I64:
          self.last_change_count = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 15:
        if ftype == TType.STRUCT:
          self.local_id = NullString()
          self.local_id.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 16:
        if ftype == TType.STRUCT:
          self.remote_id = NullString()
          self.remote_id.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 17:
        if ftype == TType.BOOL:
          self.is_remote = iprot.readBool()
        else:
          iprot.skip(ftype)
      elif fid == 18:
        if ftype == TType.BOOL:
          self.is_exportable = iprot.readBool()
        else:
          iprot.skip(ftype)
      elif fid == 19:
        if ftype == TType.I32:
          self.decimal_precision = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 20:
        if ftype == TType.BOOL:
          self.cost_round_up = iprot.readBool()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields that are None are omitted."""
    # Fast path: C encoder when the fastbinary extension is available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('Tariff')
    if self.post_call_surcharge is not None:
      oprot.writeFieldBegin('post_call_surcharge', TType.DOUBLE, 1)
      oprot.writeDouble(self.post_call_surcharge)
      oprot.writeFieldEnd()
    if self.connect_fee is not None:
      oprot.writeFieldBegin('connect_fee', TType.DOUBLE, 2)
      oprot.writeDouble(self.connect_fee)
      oprot.writeFieldEnd()
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRUCT, 3)
      self.name.write(oprot)
      oprot.writeFieldEnd()
    if self.i_tariff is not None:
      oprot.writeFieldBegin('i_tariff', TType.I64, 4)
      oprot.writeI64(self.i_tariff)
      oprot.writeFieldEnd()
    if self.free_seconds is not None:
      oprot.writeFieldBegin('free_seconds', TType.I32, 5)
      oprot.writeI32(self.free_seconds)
      oprot.writeFieldEnd()
    if self.i_owner is not None:
      oprot.writeFieldBegin('i_owner', TType.I64, 6)
      oprot.writeI64(self.i_owner)
      oprot.writeFieldEnd()
    if self.iso_4217 is not None:
      oprot.writeFieldBegin('iso_4217', TType.STRING, 7)
      oprot.writeString(self.iso_4217.encode('utf-8') if sys.version_info[0] == 2 else self.iso_4217)
      oprot.writeFieldEnd()
    if self.grace_period is not None:
      oprot.writeFieldBegin('grace_period', TType.I32, 8)
      oprot.writeI32(self.grace_period)
      oprot.writeFieldEnd()
    if self.max_loss is not None:
      oprot.writeFieldBegin('max_loss', TType.DOUBLE, 9)
      oprot.writeDouble(self.max_loss)
      oprot.writeFieldEnd()
    if self.average_duration is not None:
      oprot.writeFieldBegin('average_duration', TType.I32, 10)
      oprot.writeI32(self.average_duration)
      oprot.writeFieldEnd()
    if self.loss_protection is not None:
      oprot.writeFieldBegin('loss_protection', TType.BOOL, 11)
      oprot.writeBool(self.loss_protection)
      oprot.writeFieldEnd()
    if self.local_calling is not None:
      oprot.writeFieldBegin('local_calling', TType.BOOL, 12)
      oprot.writeBool(self.local_calling)
      oprot.writeFieldEnd()
    if self.local_calling_cli_validation_rule is not None:
      oprot.writeFieldBegin('local_calling_cli_validation_rule', TType.STRING, 13)
      oprot.writeString(self.local_calling_cli_validation_rule.encode('utf-8') if sys.version_info[0] == 2 else self.local_calling_cli_validation_rule)
      oprot.writeFieldEnd()
    if self.last_change_count is not None:
      oprot.writeFieldBegin('last_change_count', TType.I64, 14)
      oprot.writeI64(self.last_change_count)
      oprot.writeFieldEnd()
    if self.local_id is not None:
      oprot.writeFieldBegin('local_id', TType.STRUCT, 15)
      self.local_id.write(oprot)
      oprot.writeFieldEnd()
    if self.remote_id is not None:
      oprot.writeFieldBegin('remote_id', TType.STRUCT, 16)
      self.remote_id.write(oprot)
      oprot.writeFieldEnd()
    if self.is_remote is not None:
      oprot.writeFieldBegin('is_remote', TType.BOOL, 17)
      oprot.writeBool(self.is_remote)
      oprot.writeFieldEnd()
    if self.is_exportable is not None:
      oprot.writeFieldBegin('is_exportable', TType.BOOL, 18)
      oprot.writeBool(self.is_exportable)
      oprot.writeFieldEnd()
    if self.decimal_precision is not None:
      oprot.writeFieldBegin('decimal_precision', TType.I32, 19)
      oprot.writeI32(self.decimal_precision)
      oprot.writeFieldEnd()
    if self.cost_round_up is not None:
      oprot.writeFieldBegin('cost_round_up', TType.BOOL, 20)
      oprot.writeBool(self.cost_round_up)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """No required fields, so there is nothing to check."""
    return
  def __repr__(self):
    # Debug representation: ClassName(field=value, ...) over all attributes.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TariffRate(object):
"""
Attributes:
- i_rate
- prefix
- i_tariff
- price_1
- price_n
- interval_1
- interval_n
- forbidden
- grace_period_enable
- local_price_1
- local_price_n
- local_interval_1
- local_interval_n
- area_name
- activation_date
- expiration_date
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'i_rate', None, None, ), # 1
(2, TType.STRING, 'prefix', 'UTF8', None, ), # 2
(3, TType.I64, 'i_tariff', None, None, ), # 3
(4, TType.DOUBLE, 'price_1', None, None, ), # 4
(5, TType.DOUBLE, 'price_n', None, None, ), # 5
(6, TType.I32, 'interval_1', None, None, ), # 6
(7, TType.I32, 'interval_n', None, None, ), # 7
(8, TType.BOOL, 'forbidden', None, None, ), # 8
(9, TType.BOOL, 'grace_period_enable', None, None, ), # 9
(10, TType.DOUBLE, 'local_price_1', None, None, ), # 10
(11, TType.DOUBLE, 'local_price_n', None, None, ), # 11
(12, TType.I32, 'local_interval_1', None, None, ), # 12
(13, TType.I32, 'local_interval_n', None, None, ), # 13
(14, TType.STRUCT, 'area_name', (NullString, NullString.thrift_spec), None, ), # 14
(15, TType.STRUCT, 'activation_date', (UnixTime, UnixTime.thrift_spec), None, ), # 15
(16, TType.STRUCT, 'expiration_date', (UnixTime, UnixTime.thrift_spec), None, ), # 16
)
  def __init__(self, i_rate=None, prefix=None, i_tariff=None, price_1=None, price_n=None, interval_1=None, interval_n=None, forbidden=None, grace_period_enable=None, local_price_1=None, local_price_n=None, local_interval_1=None, local_interval_n=None, area_name=None, activation_date=None, expiration_date=None,):
    # All fields are optional; anything left as None is omitted on serialization.
    self.i_rate = i_rate
    self.prefix = prefix
    self.i_tariff = i_tariff
    self.price_1 = price_1
    self.price_n = price_n
    self.interval_1 = interval_1
    self.interval_n = interval_n
    self.forbidden = forbidden
    self.grace_period_enable = grace_period_enable
    self.local_price_1 = local_price_1
    self.local_price_n = local_price_n
    self.local_interval_1 = local_interval_1
    self.local_interval_n = local_interval_n
    self.area_name = area_name
    self.activation_date = activation_date
    self.expiration_date = expiration_date
  def read(self, iprot):
    """Populate this struct's fields from the input protocol *iprot*."""
    # Fast path: C decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
      return
    # Slow path: field-by-field decode; unexpected ids/types are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.i_rate = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          # Strings are decoded to unicode on Python 2; already str on Python 3.
          self.prefix = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I64:
          self.i_tariff = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.DOUBLE:
          self.price_1 = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.DOUBLE:
          self.price_n = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.I32:
          self.interval_1 = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 7:
        if ftype == TType.I32:
          self.interval_n = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 8:
        if ftype == TType.BOOL:
          self.forbidden = iprot.readBool()
        else:
          iprot.skip(ftype)
      elif fid == 9:
        if ftype == TType.BOOL:
          self.grace_period_enable = iprot.readBool()
        else:
          iprot.skip(ftype)
      elif fid == 10:
        if ftype == TType.DOUBLE:
          self.local_price_1 = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 11:
        if ftype == TType.DOUBLE:
          self.local_price_n = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 12:
        if ftype == TType.I32:
          self.local_interval_1 = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 13:
        if ftype == TType.I32:
          self.local_interval_n = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 14:
        if ftype == TType.STRUCT:
          self.area_name = NullString()
          self.area_name.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 15:
        if ftype == TType.STRUCT:
          self.activation_date = UnixTime()
          self.activation_date.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 16:
        if ftype == TType.STRUCT:
          self.expiration_date = UnixTime()
          self.expiration_date.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
def write(self, oprot):
  """Serialize this TariffRate struct onto *oprot*.

  Fast path: when the protocol is the C-accelerated binary protocol and
  both the generated thrift_spec and the fastbinary extension are
  available, the whole struct is encoded in one native call.  Otherwise
  each field is written out by hand in field-id order; fields that are
  still None are skipped, which is the standard Thrift convention for
  unset/optional fields.
  """
  if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
    oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
    return
  oprot.writeStructBegin('TariffRate')
  if self.i_rate is not None:
    oprot.writeFieldBegin('i_rate', TType.I64, 1)
    oprot.writeI64(self.i_rate)
    oprot.writeFieldEnd()
  if self.prefix is not None:
    oprot.writeFieldBegin('prefix', TType.STRING, 2)
    # On Python 2 text is held as unicode and must be encoded to bytes here.
    oprot.writeString(self.prefix.encode('utf-8') if sys.version_info[0] == 2 else self.prefix)
    oprot.writeFieldEnd()
  if self.i_tariff is not None:
    oprot.writeFieldBegin('i_tariff', TType.I64, 3)
    oprot.writeI64(self.i_tariff)
    oprot.writeFieldEnd()
  if self.price_1 is not None:
    oprot.writeFieldBegin('price_1', TType.DOUBLE, 4)
    oprot.writeDouble(self.price_1)
    oprot.writeFieldEnd()
  if self.price_n is not None:
    oprot.writeFieldBegin('price_n', TType.DOUBLE, 5)
    oprot.writeDouble(self.price_n)
    oprot.writeFieldEnd()
  if self.interval_1 is not None:
    oprot.writeFieldBegin('interval_1', TType.I32, 6)
    oprot.writeI32(self.interval_1)
    oprot.writeFieldEnd()
  if self.interval_n is not None:
    oprot.writeFieldBegin('interval_n', TType.I32, 7)
    oprot.writeI32(self.interval_n)
    oprot.writeFieldEnd()
  if self.forbidden is not None:
    oprot.writeFieldBegin('forbidden', TType.BOOL, 8)
    oprot.writeBool(self.forbidden)
    oprot.writeFieldEnd()
  if self.grace_period_enable is not None:
    oprot.writeFieldBegin('grace_period_enable', TType.BOOL, 9)
    oprot.writeBool(self.grace_period_enable)
    oprot.writeFieldEnd()
  if self.local_price_1 is not None:
    oprot.writeFieldBegin('local_price_1', TType.DOUBLE, 10)
    oprot.writeDouble(self.local_price_1)
    oprot.writeFieldEnd()
  if self.local_price_n is not None:
    oprot.writeFieldBegin('local_price_n', TType.DOUBLE, 11)
    oprot.writeDouble(self.local_price_n)
    oprot.writeFieldEnd()
  if self.local_interval_1 is not None:
    oprot.writeFieldBegin('local_interval_1', TType.I32, 12)
    oprot.writeI32(self.local_interval_1)
    oprot.writeFieldEnd()
  if self.local_interval_n is not None:
    oprot.writeFieldBegin('local_interval_n', TType.I32, 13)
    oprot.writeI32(self.local_interval_n)
    oprot.writeFieldEnd()
  # Struct-valued fields delegate serialization to the nested struct.
  if self.area_name is not None:
    oprot.writeFieldBegin('area_name', TType.STRUCT, 14)
    self.area_name.write(oprot)
    oprot.writeFieldEnd()
  if self.activation_date is not None:
    oprot.writeFieldBegin('activation_date', TType.STRUCT, 15)
    self.activation_date.write(oprot)
    oprot.writeFieldEnd()
  if self.expiration_date is not None:
    oprot.writeFieldBegin('expiration_date', TType.STRUCT, 16)
    self.expiration_date.write(oprot)
    oprot.writeFieldEnd()
  oprot.writeFieldStop()
  oprot.writeStructEnd()
def validate(self):
  """Generated validator: this struct declares no required fields, so
  there is nothing to check and the method is a deliberate no-op."""
  return None
def __repr__(self):
  """Human-readable dump of every attribute as ``name=value`` pairs."""
  parts = ('%s=%r' % (name, val) for name, val in self.__dict__.items())
  return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))
def __eq__(self, other):
  """Structural equality: same concrete class and equal attribute dicts."""
  if not isinstance(other, self.__class__):
    return False
  return self.__dict__ == other.__dict__
def __ne__(self, other):
  """Logical negation of the ``==`` comparison."""
  equal = (self == other)
  return not equal
class TariffRateList(object):
  """Generated Thrift struct wrapping a homogeneous list of TariffRate.

  Attributes:
   - arr: list of TariffRate entries (Thrift field id 1); None when unset
  """

  # Per-field wire metadata consumed by the fastbinary codec; the tuple is
  # indexed by Thrift field id, so slot 0 is an unused placeholder.
  thrift_spec = (
    None,  # field ids start at 1
    (1, TType.LIST, 'arr', (TType.STRUCT, (TariffRate, TariffRate.thrift_spec), False), None, ),  # 1: arr
  )
  def __init__(self, arr=None,):
    self.arr = arr
  def read(self, iprot):
    """Populate this struct from *iprot*, using the C fastbinary decoder
    when the accelerated binary protocol is in use."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          self.arr = []
          (_etype10, _size7) = iprot.readListBegin()
          for _i11 in range(_size7):
            _elem12 = TariffRate()
            _elem12.read(iprot)
            self.arr.append(_elem12)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)  # wire type mismatch: discard the value
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct onto *oprot*; a None `arr` field is skipped."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TariffRateList')
    if self.arr is not None:
      oprot.writeFieldBegin('arr', TType.LIST, 1)
      oprot.writeListBegin(TType.STRUCT, len(self.arr))
      for iter13 in self.arr:
        iter13.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Generated no-op: no required fields to enforce."""
    return
  def __repr__(self):
    # Debug representation listing every attribute as key=value.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class LocalTariffRate(object):
  """Generated Thrift struct: validity window of a local tariff rate.

  Attributes:
   - activation_date: UnixTime when the rate becomes active (field id 1)
   - expiration_date: UnixTime when the rate stops applying (field id 2)
  """

  # Wire metadata for the fastbinary codec; indexed by field id, slot 0 unused.
  thrift_spec = (
    None,  # field ids start at 1
    (1, TType.STRUCT, 'activation_date', (UnixTime, UnixTime.thrift_spec), None, ),  # 1
    (2, TType.STRUCT, 'expiration_date', (UnixTime, UnixTime.thrift_spec), None, ),  # 2
  )
  def __init__(self, activation_date=None, expiration_date=None,):
    self.activation_date = activation_date
    self.expiration_date = expiration_date
  def read(self, iprot):
    """Populate this struct from *iprot* (fastbinary fast path when available)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.activation_date = UnixTime()
          self.activation_date.read(iprot)
        else:
          iprot.skip(ftype)  # wire type mismatch: discard
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.expiration_date = UnixTime()
          self.expiration_date.read(iprot)
        else:
          iprot.skip(ftype)  # wire type mismatch: discard
      else:
        iprot.skip(ftype)  # unknown field id: forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize onto *oprot*; fields left as None are not written."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('LocalTariffRate')
    if self.activation_date is not None:
      oprot.writeFieldBegin('activation_date', TType.STRUCT, 1)
      self.activation_date.write(oprot)
      oprot.writeFieldEnd()
    if self.expiration_date is not None:
      oprot.writeFieldBegin('expiration_date', TType.STRUCT, 2)
      self.expiration_date.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Generated no-op: no required fields to enforce."""
    return
  def __repr__(self):
    # Debug representation listing every attribute as key=value.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class LocalTariffRateList(object):
  """Generated Thrift struct wrapping a homogeneous list of LocalTariffRate.

  Attributes:
   - arr: list of LocalTariffRate entries (Thrift field id 1); None when unset
  """

  # Wire metadata for the fastbinary codec; indexed by field id, slot 0 unused.
  thrift_spec = (
    None,  # field ids start at 1
    (1, TType.LIST, 'arr', (TType.STRUCT, (LocalTariffRate, LocalTariffRate.thrift_spec), False), None, ),  # 1: arr
  )
  def __init__(self, arr=None,):
    self.arr = arr
  def read(self, iprot):
    """Populate this struct from *iprot* (fastbinary fast path when available)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          self.arr = []
          (_etype17, _size14) = iprot.readListBegin()
          for _i18 in range(_size14):
            _elem19 = LocalTariffRate()
            _elem19.read(iprot)
            self.arr.append(_elem19)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)  # wire type mismatch: discard the value
      else:
        iprot.skip(ftype)  # unknown field id: forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct onto *oprot*; a None `arr` field is skipped."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('LocalTariffRateList')
    if self.arr is not None:
      oprot.writeFieldBegin('arr', TType.LIST, 1)
      oprot.writeListBegin(TType.STRUCT, len(self.arr))
      for iter20 in self.arr:
        iter20.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Generated no-op: no required fields to enforce."""
    return
  def __repr__(self):
    # Debug representation listing every attribute as key=value.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class LookupbparamResultEntry(object):
  """Generated Thrift struct: one rating entry of a B-number lookup result.

  Attributes (Thrift field ids in parentheses):
   - free_seconds (1): seconds billed at zero cost
   - connect_fee (2): one-off fee charged at connect time
   - price_1 (3): price of the first billing interval
   - price_n (4): price of each subsequent billing interval
   - interval_1 (5): length of the first billing interval
   - interval_n (6): length of subsequent billing intervals
   - post_call_surcharge (7): surcharge applied after the call
   - grace_period (8): grace period length
   - forbidden (9): True when calls to this destination are blocked
   - average_duration (10)
   - loss_protection (11)
   - max_loss (12)
   - prefix (13): matched dialed prefix, UTF-8 text
   - plan_only (14)
   - area_name (15): nullable area description (NullString wrapper)

  NOTE(review): the interval/duration fields are presumably expressed in
  seconds -- confirm against the service's IDL before relying on it.
  """

  # Wire metadata for the fastbinary codec; indexed by field id, slot 0 unused.
  thrift_spec = (
    None,  # field ids start at 1
    (1, TType.I32, 'free_seconds', None, None, ),  # 1
    (2, TType.DOUBLE, 'connect_fee', None, None, ),  # 2
    (3, TType.DOUBLE, 'price_1', None, None, ),  # 3
    (4, TType.DOUBLE, 'price_n', None, None, ),  # 4
    (5, TType.I32, 'interval_1', None, None, ),  # 5
    (6, TType.I32, 'interval_n', None, None, ),  # 6
    (7, TType.DOUBLE, 'post_call_surcharge', None, None, ),  # 7
    (8, TType.I32, 'grace_period', None, None, ),  # 8
    (9, TType.BOOL, 'forbidden', None, None, ),  # 9
    (10, TType.I32, 'average_duration', None, None, ),  # 10
    (11, TType.BOOL, 'loss_protection', None, None, ),  # 11
    (12, TType.DOUBLE, 'max_loss', None, None, ),  # 12
    (13, TType.STRING, 'prefix', 'UTF8', None, ),  # 13
    (14, TType.BOOL, 'plan_only', None, None, ),  # 14
    (15, TType.STRUCT, 'area_name', (NullString, NullString.thrift_spec), None, ),  # 15
  )
  def __init__(self, free_seconds=None, connect_fee=None, price_1=None, price_n=None, interval_1=None, interval_n=None, post_call_surcharge=None, grace_period=None, forbidden=None, average_duration=None, loss_protection=None, max_loss=None, prefix=None, plan_only=None, area_name=None,):
    self.free_seconds = free_seconds
    self.connect_fee = connect_fee
    self.price_1 = price_1
    self.price_n = price_n
    self.interval_1 = interval_1
    self.interval_n = interval_n
    self.post_call_surcharge = post_call_surcharge
    self.grace_period = grace_period
    self.forbidden = forbidden
    self.average_duration = average_duration
    self.loss_protection = loss_protection
    self.max_loss = max_loss
    self.prefix = prefix
    self.plan_only = plan_only
    self.area_name = area_name
  def read(self, iprot):
    """Populate this struct from *iprot*; unknown or mistyped fields are
    skipped so newer servers stay wire-compatible with older clients."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.free_seconds = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.DOUBLE:
          self.connect_fee = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.DOUBLE:
          self.price_1 = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.DOUBLE:
          self.price_n = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.I32:
          self.interval_1 = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.I32:
          self.interval_n = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 7:
        if ftype == TType.DOUBLE:
          self.post_call_surcharge = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 8:
        if ftype == TType.I32:
          self.grace_period = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 9:
        if ftype == TType.BOOL:
          self.forbidden = iprot.readBool()
        else:
          iprot.skip(ftype)
      elif fid == 10:
        if ftype == TType.I32:
          self.average_duration = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 11:
        if ftype == TType.BOOL:
          self.loss_protection = iprot.readBool()
        else:
          iprot.skip(ftype)
      elif fid == 12:
        if ftype == TType.DOUBLE:
          self.max_loss = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 13:
        if ftype == TType.STRING:
          # Python 2 delivers raw bytes that must be decoded to unicode.
          self.prefix = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 14:
        if ftype == TType.BOOL:
          self.plan_only = iprot.readBool()
        else:
          iprot.skip(ftype)
      elif fid == 15:
        if ftype == TType.STRUCT:
          self.area_name = NullString()
          self.area_name.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize onto *oprot* in field-id order; None fields are skipped."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('LookupbparamResultEntry')
    if self.free_seconds is not None:
      oprot.writeFieldBegin('free_seconds', TType.I32, 1)
      oprot.writeI32(self.free_seconds)
      oprot.writeFieldEnd()
    if self.connect_fee is not None:
      oprot.writeFieldBegin('connect_fee', TType.DOUBLE, 2)
      oprot.writeDouble(self.connect_fee)
      oprot.writeFieldEnd()
    if self.price_1 is not None:
      oprot.writeFieldBegin('price_1', TType.DOUBLE, 3)
      oprot.writeDouble(self.price_1)
      oprot.writeFieldEnd()
    if self.price_n is not None:
      oprot.writeFieldBegin('price_n', TType.DOUBLE, 4)
      oprot.writeDouble(self.price_n)
      oprot.writeFieldEnd()
    if self.interval_1 is not None:
      oprot.writeFieldBegin('interval_1', TType.I32, 5)
      oprot.writeI32(self.interval_1)
      oprot.writeFieldEnd()
    if self.interval_n is not None:
      oprot.writeFieldBegin('interval_n', TType.I32, 6)
      oprot.writeI32(self.interval_n)
      oprot.writeFieldEnd()
    if self.post_call_surcharge is not None:
      oprot.writeFieldBegin('post_call_surcharge', TType.DOUBLE, 7)
      oprot.writeDouble(self.post_call_surcharge)
      oprot.writeFieldEnd()
    if self.grace_period is not None:
      oprot.writeFieldBegin('grace_period', TType.I32, 8)
      oprot.writeI32(self.grace_period)
      oprot.writeFieldEnd()
    if self.forbidden is not None:
      oprot.writeFieldBegin('forbidden', TType.BOOL, 9)
      oprot.writeBool(self.forbidden)
      oprot.writeFieldEnd()
    if self.average_duration is not None:
      oprot.writeFieldBegin('average_duration', TType.I32, 10)
      oprot.writeI32(self.average_duration)
      oprot.writeFieldEnd()
    if self.loss_protection is not None:
      oprot.writeFieldBegin('loss_protection', TType.BOOL, 11)
      oprot.writeBool(self.loss_protection)
      oprot.writeFieldEnd()
    if self.max_loss is not None:
      oprot.writeFieldBegin('max_loss', TType.DOUBLE, 12)
      oprot.writeDouble(self.max_loss)
      oprot.writeFieldEnd()
    if self.prefix is not None:
      oprot.writeFieldBegin('prefix', TType.STRING, 13)
      # On Python 2 text is held as unicode and must be encoded to bytes.
      oprot.writeString(self.prefix.encode('utf-8') if sys.version_info[0] == 2 else self.prefix)
      oprot.writeFieldEnd()
    if self.plan_only is not None:
      oprot.writeFieldBegin('plan_only', TType.BOOL, 14)
      oprot.writeBool(self.plan_only)
      oprot.writeFieldEnd()
    if self.area_name is not None:
      oprot.writeFieldBegin('area_name', TType.STRUCT, 15)
      self.area_name.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Generated no-op: no required fields to enforce."""
    return
  def __repr__(self):
    # Debug representation listing every attribute as key=value.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class LookupbparamResult(object):
  """Generated Thrift struct: complete result of a B-number lookup.

  Attributes:
   - bparams: list of LookupbparamResultEntry rating entries (field id 1)
   - decimal_precision: precision used for monetary values (field id 2)
   - cost_round_up: whether computed costs are rounded up (field id 3)
  """

  # Wire metadata for the fastbinary codec; indexed by field id, slot 0 unused.
  thrift_spec = (
    None,  # field ids start at 1
    (1, TType.LIST, 'bparams', (TType.STRUCT, (LookupbparamResultEntry, LookupbparamResultEntry.thrift_spec), False), None, ),  # 1
    (2, TType.I32, 'decimal_precision', None, None, ),  # 2
    (3, TType.BOOL, 'cost_round_up', None, None, ),  # 3
  )
  def __init__(self, bparams=None, decimal_precision=None, cost_round_up=None,):
    self.bparams = bparams
    self.decimal_precision = decimal_precision
    self.cost_round_up = cost_round_up
  def read(self, iprot):
    """Populate this struct from *iprot* (fastbinary fast path when available)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          self.bparams = []
          (_etype24, _size21) = iprot.readListBegin()
          for _i25 in range(_size21):
            _elem26 = LookupbparamResultEntry()
            _elem26.read(iprot)
            self.bparams.append(_elem26)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I32:
          self.decimal_precision = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.BOOL:
          self.cost_round_up = iprot.readBool()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize onto *oprot* in field-id order; None fields are skipped."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('LookupbparamResult')
    if self.bparams is not None:
      oprot.writeFieldBegin('bparams', TType.LIST, 1)
      oprot.writeListBegin(TType.STRUCT, len(self.bparams))
      for iter27 in self.bparams:
        iter27.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.decimal_precision is not None:
      oprot.writeFieldBegin('decimal_precision', TType.I32, 2)
      oprot.writeI32(self.decimal_precision)
      oprot.writeFieldEnd()
    if self.cost_round_up is not None:
      oprot.writeFieldBegin('cost_round_up', TType.BOOL, 3)
      oprot.writeBool(self.cost_round_up)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Generated no-op: no required fields to enforce."""
    return
  def __repr__(self):
    # Debug representation listing every attribute as key=value.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
| 34.373652 | 603 | 0.640874 | 193,746 | 0.996723 | 0 | 0 | 0 | 0 | 0 | 0 | 17,838 | 0.091767 |
52edcb7aea820f4031994c44df4c877102b60d05 | 385 | py | Python | Chapter 4 - Lists & Tuples/01_list.py | alex-dsouza777/Python-Basics | 8f1c406f2319cd65b5d54dfea990d09fa69d9adf | [
"MIT"
] | null | null | null | Chapter 4 - Lists & Tuples/01_list.py | alex-dsouza777/Python-Basics | 8f1c406f2319cd65b5d54dfea990d09fa69d9adf | [
"MIT"
] | null | null | null | Chapter 4 - Lists & Tuples/01_list.py | alex-dsouza777/Python-Basics | 8f1c406f2319cd65b5d54dfea990d09fa69d9adf | [
"MIT"
] | 1 | 2021-04-21T10:23:08.000Z | 2021-04-21T10:23:08.000Z | #Create a list using []
# Lists are ordered, mutable sequences written with square brackets.
a = [1, 2, 3, 7, 66]
print(a)  # print the whole list

# Indexing is zero-based: a[2] is the third element.
print(a[2])

# Lists are mutable, so we can assign through an index.
a[0] = 777
print(a)

# A list may hold items of different types at once.
b = [77, "Root", False, 6.9]
print(b)

# Slicing returns a new list; omitted bounds default to the ends.
friends = ["Root", "Groot", "Sam", "Alex", 99]
print(friends[:3])   # first three elements
print(friends[-4:])  # last four elements
| 16.73913 | 50 | 0.649351 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 227 | 0.58961 |
52eed26c5719e163902c297e3bbd2c62a20d2246 | 3,596 | py | Python | lab_1/orthograph/errors1.py | etzinis/speech_nlp_labs | 128ea5a79a0502aa80fb2f4eee4af051049a9e0e | [
"MIT"
] | null | null | null | lab_1/orthograph/errors1.py | etzinis/speech_nlp_labs | 128ea5a79a0502aa80fb2f4eee4af051049a9e0e | [
"MIT"
] | null | null | null | lab_1/orthograph/errors1.py | etzinis/speech_nlp_labs | 128ea5a79a0502aa80fb2f4eee4af051049a9e0e | [
"MIT"
] | null | null | null | import math
# Estimate single-edit error costs (substitution, adjacent swap, insertion,
# deletion) from a parallel corpus of correct/misspelled words, then emit an
# error transducer "orth_E.txt" in OpenFST text format built from those costs.
#
# Only word pairs that differ by EXACTLY one edit operation are counted.
# NOTE(review): the cost formulas assume every operation occurs at least once
# and that this runs under Python 3 (true division); under Python 2, or with a
# zero count, math.log10 would fail -- confirm the training corpus guarantees
# all four operations appear.
fco = open('train_co.txt', 'r')
fwr = open('train_wr.txt', 'r')
fcoal = open('train_co_al.txt', 'w')
fwral = open('train_wr_al.txt', 'w')
# First we align the two files: one token per line.
colines = fco.readlines()
wrlines = fwr.readlines()
fco.close()
fwr.close()
for i in colines:
    cols = i.split()
    for j in cols:
        fcoal.write(j+'\n')
for i in wrlines:
    cols = i.split()
    for j in cols:
        fwral.write(j+'\n')
# BUGFIX: close (and thereby flush) the aligned files before reading them
# back; reopening while the 'w' handles were still open could yield partial,
# unflushed data.
fcoal.close()
fwral.close()
# Here we check only for 1 error per word.
fcoal = open('train_co_al.txt', 'r')
fwral = open('train_wr_al.txt', 'r')
coword = fcoal.readlines()
wrword = fwral.readlines()
fcoal.close()
fwral.close()
sub = 0
swap = 0
deletion = 0
insertion = 0
total = 0
for i in range(len(coword)):
    testword = wrword[i]
    rightword = coword[i]
    # CHECK FOR ONLY ONE SUBSTITUTION OR ONLY ONE SWAP BETWEEN LETTERS
    if len(coword[i]) == len(wrword[i]):
        replaced = 0
        # Count as substitution only when exactly one position differs.
        for j in range(len(coword[i])):
            if (rightword[j] != testword[j]) and (replaced == 0):  # first mismatch
                replaced = 1
            elif (rightword[j] != testword[j]) and (replaced == 1):  # more than one
                replaced = 2
                break
            else:
                continue
        if replaced == 1:
            total += 1
            sub += 1
        # Swap check: exactly one adjacent transposition, everything else equal.
        allagh = 0
        prevco = rightword[0]
        prevte = testword[0]
        for j in range(1, len(coword[i])):
            if (rightword[j] == prevte) and (testword[j] == prevco) and (testword[j] != rightword[j]) and (allagh == 0):
                allagh = 1
                prevco = rightword[j]
                prevte = testword[j]
            elif (rightword[j] == testword[j]):
                prevco = rightword[j]
                prevte = testword[j]
                continue
            else:
                allagh = 2
                break
        if allagh == 1:
            total += 1
            swap += 1
    # WE CHECK FOR ONLY ONE INSERTION (the correct word is one char longer).
    elif len(coword[i]) == len(wrword[i])+1:
        ins = 0
        jco = 0
        jte = 0
        while jco < len(coword[i]) and jte < len(testword):
            if (rightword[jco] == testword[jte]):
                jco += 1
                jte += 1
                continue
            elif (rightword[jco] != testword[jte]) and ins == 0:
                jco += 1  # advance only the correct word past the missing char
                ins = 1
                continue
            else:
                ins = 2
                break
        if ins == 1:
            total += 1
            insertion += 1
    # CHECK FOR ONLY ONE DELETION (the misspelled word is one char longer).
    elif len(coword[i])+1 == len(wrword[i]):
        de = 0
        jco = 0
        jte = 0
        while jco < len(coword[i]) and jte < len(testword):
            if (rightword[jco] == testword[jte]):
                jco += 1
                jte += 1
                continue
            elif (rightword[jco] != testword[jte]) and de == 0:
                jte += 1  # advance only the misspelled word past the extra char
                de = 1
                continue
            else:
                de = 2
                break
        if de == 1:
            total += 1
            deletion += 1
# Negative log-probabilities of each operation become the arc weights.
costsub = -math.log10(sub/total)
costswap = -math.log10(swap/total)
costinsertion = -math.log10(insertion/total)
costdeletion = -math.log10(deletion/total)
print('\nCosts Computed from the training \n')
print('substitution',str(costsub))
print('swap',str(costswap))
print('insertion',str(costinsertion))
print('deletion',str(costdeletion))
print('\n')
# HERE WE CREATE THE TRANSDUCER E (OpenFST text format: src dst in out weight).
fe = open('orth_E.txt','w')
alphabet = 'ΕΡΤΥΘΙΟΠΑΣΔΦΓΗΞΚΛΖΧΨΩΒΝΜ'
cnt = 2  # first free state id; 0 is start, 1 is final
fe.write('0 1 eps eps 0'+'\n')  # we can have no error at all
for i in range(len(alphabet)):
    fe.write('0 1 eps '+alphabet[i]+' '+str(costinsertion)+'\n')
    fe.write('0 1 '+alphabet[i]+' eps '+str(costdeletion)+'\n')
    for j in range(len(alphabet)):
        if alphabet[i] != alphabet[j]:
            fe.write('0 1 '+alphabet[i]+' '+alphabet[j]+' '+str(costsub)+'\n')
    # Each swap needs an intermediate state: pay the cost on the first arc,
    # the second (reversed) arc is free.
    for j in range(len(alphabet)):
        if alphabet[i] != alphabet[j]:
            fe.write('0 '+str(cnt)+' '+alphabet[i]+' '+alphabet[j]+' '+str(costswap)+'\n')
            fe.write(str(cnt)+' 1 '+alphabet[j]+' '+alphabet[i]+' '+str(0)+'\n')
            cnt += 1
fe.write(str(1))
# BUGFIX: close the transducer file so the final writes are flushed to disk.
fe.close()
| 23.350649 | 103 | 0.611235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 945 | 0.260906 |
52efbce325d7ba01d3a81e4caaaade2bb5824322 | 25 | py | Python | examples/basic/src/events/__init__.py | boosterl/rabbitmq-pika-flask | 45b98624c49c800c4e2bdaa9e1c04f1aa5863369 | [
"MIT"
] | 17 | 2021-04-04T17:24:06.000Z | 2022-03-11T13:22:01.000Z | examples/basic/src/events/__init__.py | boosterl/rabbitmq-pika-flask | 45b98624c49c800c4e2bdaa9e1c04f1aa5863369 | [
"MIT"
] | 10 | 2021-02-11T18:13:11.000Z | 2022-03-10T17:53:08.000Z | examples/basic/src/events/__init__.py | boosterl/rabbitmq-pika-flask | 45b98624c49c800c4e2bdaa9e1c04f1aa5863369 | [
"MIT"
] | 5 | 2021-04-04T17:23:46.000Z | 2021-12-11T07:49:10.000Z | __all__ = ['PingEvents']
| 12.5 | 24 | 0.68 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.48 |
52f61b19ff8770f512160af7fe3ad09b84f41c3c | 217 | py | Python | snowy_version_python2.py | cclauss/flake8_for_ow | f665197bf6a36e1a9b0f274b01032c9006338067 | [
"Apache-2.0"
] | null | null | null | snowy_version_python2.py | cclauss/flake8_for_ow | f665197bf6a36e1a9b0f274b01032c9006338067 | [
"Apache-2.0"
] | null | null | null | snowy_version_python2.py | cclauss/flake8_for_ow | f665197bf6a36e1a9b0f274b01032c9006338067 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
# coding: utf-8
import sys
def main():
    """Build a snowman banner around the interpreter version, print the
    banner string's type, and return the banner in a dict."""
    version = sys.version.split()[0]
    s = u' '.join([u'❄ ☃ ❄', version, u'❄ ☃ ❄'])
    print(type(s))
    return {'snowy_version': s}
if __name__ == '__main__':
    # Run only when executed directly; importing this module stays side-effect free.
    main()
| 16.692308 | 62 | 0.539171 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 0.406114 |
52f7087deb618b8761200bf76550f8eceae6e954 | 80 | py | Python | scripts/ai.py | MaxReimann/Troen | aeabab35da15fe69f89b163f3762dccb9947e93c | [
"MIT"
] | null | null | null | scripts/ai.py | MaxReimann/Troen | aeabab35da15fe69f89b163f3762dccb9947e93c | [
"MIT"
] | 6 | 2016-08-15T01:56:11.000Z | 2016-08-15T01:56:40.000Z | scripts/ai.py | MaxReimann/Troen | aeabab35da15fe69f89b163f3762dccb9947e93c | [
"MIT"
] | null | null | null | #AI is imported by default
# This is only a test for now.
# BUGFIX: the original used the Python-2-only `print` statement, which is a
# SyntaxError under Python 3; the parenthesized call is valid on both.
print("hello from aipy")
| 20 | 28 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.8875 |
52f8caa37db41e0bb16922cb2ce77c7b5e60682e | 542 | py | Python | answers/old/python/fizzBuzz.py | mschultz4/practice | 3c692953bffbfc6a9d362d880087ecfcec1b6bea | [
"MIT"
] | null | null | null | answers/old/python/fizzBuzz.py | mschultz4/practice | 3c692953bffbfc6a9d362d880087ecfcec1b6bea | [
"MIT"
] | null | null | null | answers/old/python/fizzBuzz.py | mschultz4/practice | 3c692953bffbfc6a9d362d880087ecfcec1b6bea | [
"MIT"
] | null | null | null | # Fizzbuzz.py
import sys


def fizz_buzz_line(f_div, b_div, count):
    """Return the FizzBuzz sequence for 1..count as one space-separated string.

    Multiples of f_div yield "F", multiples of b_div yield "B", multiples of
    both yield "FB", and every other number appears as itself.  count == 0
    yields the empty string.
    """
    tokens = []
    for n in range(1, count + 1):
        if n % f_div == 0 and n % b_div == 0:
            tokens.append("FB")
        elif n % f_div == 0:
            tokens.append("F")
        elif n % b_div == 0:
            tokens.append("B")
        else:
            tokens.append(str(n))
    return " ".join(tokens)


def main(path):
    """Print one FizzBuzz line per input line of ``path``; each line holds
    three whitespace-separated integers: f_div, b_div, count."""
    with open(path, 'r') as file:
        for line in file:
            nums = line.split()
            print(fizz_buzz_line(int(nums[0]), int(nums[1]), int(nums[2])))


if __name__ == '__main__':
    # BUGFIX: the original read sys.argv[1] at import time; guarding keeps
    # importing this module free of file I/O while script behavior is unchanged.
    main(sys.argv[1])
52fa77fc729c94ac81a3d83288fe663868091162 | 5,421 | py | Python | 2parser/word_lists.py | formalabstracts/CNL-CIC | c857ee0d52b4ba91dd06a51c8f9f3ec2749ca0eb | [
"MIT"
] | 14 | 2019-06-27T16:34:39.000Z | 2021-01-07T18:13:04.000Z | 2parser/word_lists.py | formalabstracts/CNL-CIC | c857ee0d52b4ba91dd06a51c8f9f3ec2749ca0eb | [
"MIT"
] | 8 | 2019-10-17T06:09:51.000Z | 2020-03-25T15:51:32.000Z | 2parser/word_lists.py | formalabstracts/CNL-CIC | c857ee0d52b4ba91dd06a51c8f9f3ec2749ca0eb | [
"MIT"
] | 17 | 2019-06-27T16:34:53.000Z | 2020-08-15T01:30:32.000Z | singular = [
    # Words whose surface form already ends in -s / -is and therefore must
    # not be treated as plurals.  NOTE(review): 'aparatus' looks like a typo
    # of 'apparatus' -- kept as-is because these strings are matched data.
    'this','as','is','thesis','hypothesis','less','obvious','us','yes','cos',
    'always','perhaps','alias','plus','apropos',
    'was','its','bus','his','is','us',
    'this','thus','axis','bias','minus','basis',
    'praxis','status','modulus','analysis',
    'aparatus'
    ]
# Reserved vocabulary of the controlled natural language; these words are
# frozen and can never be redefined or given a user synonym.
invariable = [ #frozen_list - cannot be given a synonym
    'a','an','all','and','any','are','as','assume','be','by',
    'case','classifier',
    'coercion','conjecture','contradiction','contrary','corollary','declare',
    'def',
    'define','defined','definition','denote','division','do','document',
    'does','dump','each','else','end','enddivision','endsection',
    'endsubdivision','endsubsection','endsubsubsection','equal',
    'equation','error','enter','every','exhaustive','exist','exit',
    'false','fix','fixed','for','forall','formula','fun','function','has','have',
    'having','hence','holding','hypothesis','if','iff','in','inferring',
    'indeed','induction','inductive','introduce','is','it','left','lemma',
    'let','library','make','map','match','moreover','mutual','namespace',
    'no','not','notational','notation',
    'notationless','obvious','of','off','on','only','ontored','or','over',
    'pairwise','parameter','precedence','predicate','printgoal',
    'proof','prop','property','prove','proposition',
    'propped','qed','quotient','read','record','register','recursion','right',
    'said','say','section','show','some','stand','structure','subdivision',
    'subsection','subsubsection','such','suppose','synonym','take','that',
    'the','then','theorem','there','therefore','thesis','this','timelimit',
    'to','total','trivial','true','type','unique','us',
    'warning','we','well','welldefined','well_defined','well_propped',
    'where','with','write','wrong','yes',
    #(* plural handled by sing 'classifiers', 'exists','implement',
    # 'parameters','properties','propositions','synonyms','types',
    ]
# Multi-word discourse connectives that may open a sentence or proof step.
# NOTE(review): 'neverthess' looks like a typo of 'nevertheless' -- kept
# unchanged because these strings are matched literally against input text.
transition = [ #phrase_list_transition_words
    'a basic fact is','accordingly','additionally','again','also','and yet','as a result',
    'as usual','as we have seen','as we see','at the same time','besides','but',
    'by definition','certainly','clearly','computations show','consequently',
    'conversely','equally important','explicitly','finally','first','for example',
    'for instance','for simplicity','for that reason','for this purpose','further',
    'furthermore','generally','hence','here','however','importantly','in addition',
    'in any event','in brief','in consequence','in contrast','in contrast to this',
    'in each case','in fact','in general','in other words','in particular','in short',
    'in sum','in summary','in the present case','in the same way','in this computation',
    'in this sense','indeed','it follows','it is clear','it is enough to show',
    'it is known','it is routine','it is trivial to see','it is understood',
    'it turns out','last','likewise','more precisely','moreover','most importantly',
    'neverthess','next','nonetheless','note',
    'notice','now','observe','obviously','of course','on the contrary','on the other hand',
    'on the whole','otherwise','second','similarly','so','specifically','still',
    'that is','the point is','then','therefore','third','this gives','this implies',
    'this means','this yields','thus','thus far','to begin with','to this end',
    'trivially','we claim','we emphasize','we first show','we get','we have seen',
    'we have','we know','we check','we may check','we obtain','we remark','we say','we see',
    'we show','we understand','we write','recall','we recall',
    'without loss of generality','yet'
    ]
# Single- and multi-word prepositions recognized by the grammar.
preposition_list = [
    'aboard','about','above','according to', 'across', 'against', 'ahead of',
    'along','alongside','amid','amidst','among','around','at','atop','away from',
    'before',
    'behind','below','beneath','beside','between','beyond','by','concerning','despite',
    'except','except at','excluding','following',
    'from','in','in addition to','in place of','in regard to',
    'inside','instead of','into','near','next to','of',
    'off','on','on behalf of','on top of','onto','opposite','out','out of',
    'outside','outside of',
    'over','owing to','per','prior to','regarding','save','through',
    'throughout','till','to','towards','under','until',
    'up','up to','upon','with','with respect to','wrt','within','without'
    # 'for', 'as', 'like', 'after', 'round', 'plus', 'since', 'than', 'past',
    # 'during',
    # synonyms with\~respect\~to/wrt
    ]
# Names of the parser's primitive nonterminal categories.
prim_list = [
    'prim_classifier',
    'prim_term_op_controlseq',
    'prim_binary_relation_controlseq',
    'prim_propositional_op_controlseq',
    'prim_type_op_controlseq',
    'prim_term_controlseq',
    'prim_type_controlseq',
    'prim_lambda_binder',
    'prim_pi_binder',
    'prim_binder_prop',
    'prim_typed_name',
    'prim_adjective',
    'prim_adjective_multisubject',
    'prim_simple_adjective',
    'prim_simple_adjective_multisubject',
    'prim_field_term_accessor',
    'prim_field_type_accessor',
    'prim_field_prop_accessor',
    'prim_definite_noun',
    'prim_identifier_term',
    'prim_identifier_type',
    'prim_possessed_noun',
    'prim_verb',
    'prim_verb_multisubject',
    'prim_structure',
    'prim_type_op',
    'prim_type_word',
    'prim_term_op',
    'prim_binary_relation_op',
    'prim_propositional_op',
    'prim_relation'
    ]
| 47.13913 | 92 | 0.635307 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,445 | 0.819959 |
52faf9ee9dd8c96e16c79f2d90387099a923bd12 | 3,707 | py | Python | tests/test_cue.py | philipdexter/pycue | 07daeae58602a5d6d00c4bc7abb252da1644f529 | [
"Apache-2.0"
] | 6 | 2020-05-07T03:39:50.000Z | 2022-02-22T17:36:30.000Z | tests/test_cue.py | philipdexter/pycue | 07daeae58602a5d6d00c4bc7abb252da1644f529 | [
"Apache-2.0"
] | null | null | null | tests/test_cue.py | philipdexter/pycue | 07daeae58602a5d6d00c4bc7abb252da1644f529 | [
"Apache-2.0"
] | null | null | null |
import pytest
import cue
def test_basic():
    """Exercise the core `cue` API: compilation, unification, type
    predicates and conversion of CUE values to Python values."""
    # compilation: empty input, scalars, lists, structs; unresolved
    # identifiers raise CueError
    cue.compile('')
    assert '1' == str(cue.compile('1'))
    assert ['1', '2', '3', '{\n\ta: 1\n}'] == [str(v) for v in cue.compile('[1,2,3,{a:1}]')]
    assert [('a', '1'), ('b', '2')] == [(str(k), str(v)) for k, v in cue.compile('{a: 1, b: 2}')]
    with pytest.raises(cue.CueError):
        cue.compile('a')

    # unification: concrete values only unify with compatible constraints
    v1 = cue.compile('{a: 1}')
    v2 = cue.compile('{a: 2}')
    v3 = cue.compile('{a: <3}')
    assert False == v1.unifies_with(v2)
    # (a duplicated `v1.unifies_with(v3)` assertion was removed here)
    assert True == v1.unifies_with(v3)
    assert True == v2.unifies_with(v3)

    # scalars are not iterable
    with pytest.raises(ValueError):
        iter(cue.compile('1'))

    # type predicates
    assert True == cue.compile('null').is_null()
    assert True == cue.compile('true').is_bool()
    assert True == cue.compile('1').is_int()
    assert True == cue.compile('1.0').is_float()
    assert True == cue.compile(r"'\x03abc'").is_bytes()
    assert True == cue.compile('"hi"').is_string()
    assert True == cue.compile('{a:1}').is_struct()
    assert True == cue.compile('[1,2]').is_list()

    # int conversion, including 64-bit boundary behavior
    assert 1 == cue.compile('1').to_int()
    assert 2 == int(cue.compile('2'))
    with pytest.raises(ValueError):
        assert 1 == cue.compile('"hi"').to_int()
    assert 9223372036854775807 == int(cue.compile("9223372036854775807"))
    with pytest.raises(cue.CueError):
        assert 9223372036854775808 == int(cue.compile("9223372036854775808"))
    assert -9223372036854775807 == int(cue.compile('-9223372036854775807 '))
    with pytest.raises(cue.CueError):
        assert -9223372036854775808 == int(cue.compile('-9223372036854775808 '))

    # float conversion
    assert 1.0 == cue.compile('1.0').to_float()
    assert 2.0 == float(cue.compile('2.0'))
    with pytest.raises(ValueError):
        # NOTE(review): `.to_int()` here looks like a copy-paste from the
        # int section — `.to_float()` was probably intended; confirm.
        assert 1.0 == cue.compile('"hi"').to_int()
    assert 4.9 == float(cue.compile('1 + 3.9'))

    # bool conversion
    assert True == cue.compile('true').to_bool()
    assert False == bool(cue.compile('false && true'))
    with pytest.raises(ValueError):
        # NOTE(review): same suspected copy-paste — `.to_bool()` was
        # probably intended here; confirm.
        assert True == cue.compile('"hi"').to_int()

    # string conversion
    assert "ok" == cue.compile('"ok"').to_string()
    assert '"okk"' == str(cue.compile('"ok" + "k"'))
    with pytest.raises(ValueError):
        assert "ok" == cue.compile('1').to_string()

    # struct / list conversion
    assert {'a': 1, 'b': [{'c': 1}]} == cue.compile('{a: 1, b: [{c: 1}]}').to_dict()
    assert {} == cue.compile('').to_dict()
    with pytest.raises(ValueError):
        assert {} == cue.compile('1').to_dict()
    assert [1,2,{'a':2,'b':{'c':2}}] == cue.compile('[1,2,{a:2,b:{c:2}}]').to_list()
    with pytest.raises(ValueError):
        assert [] == cue.compile('1').to_list()

    # generic conversion; non-concrete values cannot be converted
    assert True == cue.compile('true').to_python()
    assert 1 == cue.compile('1').to_python()
    assert 1.0 == cue.compile('1.0').to_python()
    assert "hi" == cue.compile('"hi"').to_python()
    with pytest.raises(ValueError):
        cue.compile('a: int').to_python()
    with pytest.raises(ValueError):
        cue.compile('a: <3').to_python()
def test_dumps():
    """cue.dumps serialises Python values to CUE source text."""
    cases = [
        (1.0, '1.0'),
        (1, '1'),
        (True, 'true'),
        ("true", '"true"'),
        ([1, 2, 3], '[1,2,3]'),
        ([{'a': 1}, {'b': 2}, {'c': [2, 'hi']}], '[{a:1},{b:2},{c:[2,"hi"]}]'),
    ]
    for value, expected in cases:
        assert expected == cue.dumps(value)
def test_loads():
    """cue.loads parses CUE source text into Python values."""
    cases = [
        ('1.0', 1.0),
        ('1', 1),
        ('true', True),
        ('"true"', "true"),
        ('[1,2,3]', [1, 2, 3]),
        ('[{a:1},{b:2},{c:[2,"hi"]}]', [{'a': 1}, {'b': 2}, {'c': [2, 'hi']}]),
    ]
    for text, expected in cases:
        assert expected == cue.loads(text)
def test_dumps_loads():
    """dumps/loads round-trip is the identity in both directions."""
    pairs = (
        (1.0, '1.0'),
        (1, '1'),
        (True, 'true'),
        ("true", '"true"'),
        ([1, 2, 3], '[1,2,3]'),
        ([{'a': 1}, {'b': 2}, {'c': [2, 'hi']}], '[{a:1},{b:2},{c:[2,"hi"]}]'),
    )
    for value, text in pairs:
        assert value == cue.loads(cue.dumps(value))
        assert text == cue.dumps(cue.loads(text))
| 34.971698 | 95 | 0.582951 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 684 | 0.184516 |
52fb85c29047f791c52471a045a458308ca562b8 | 11,924 | py | Python | control/tests/xferfcn_input_test.py | samlaf/python-control | c49b55b1f8dfe8e74c562d6a83bf5359343cdccb | [
"BSD-3-Clause"
] | 1 | 2019-04-13T02:54:56.000Z | 2019-04-13T02:54:56.000Z | control/tests/xferfcn_input_test.py | samlaf/python-control | c49b55b1f8dfe8e74c562d6a83bf5359343cdccb | [
"BSD-3-Clause"
] | 1 | 2018-01-06T18:35:50.000Z | 2018-01-06T19:02:25.000Z | control/tests/xferfcn_input_test.py | samlaf/python-control | c49b55b1f8dfe8e74c562d6a83bf5359343cdccb | [
"BSD-3-Clause"
] | 1 | 2019-05-27T16:11:18.000Z | 2019-05-27T16:11:18.000Z | #!/usr/bin/env python
#
# xferfcn_input_test.py - test inputs to TransferFunction class
# jed-frey, 18 Feb 2017 (based on xferfcn_test.py)
import unittest
import numpy as np
from numpy import int, int8, int16, int32, int64
from numpy import float, float16, float32, float64, longdouble
from numpy import all, ndarray, array
from control.xferfcn import _clean_part
class TestXferFcnInput(unittest.TestCase):
    """Tests for cleaning and validating transfer function input.

    ``_clean_part`` must normalise every supported input shape (scalars,
    lists, tuples, numpy arrays and nestings thereof) into a list of
    lists of float numpy arrays.  Each test feeds one input shape in.
    """

    # every scalar/array dtype that _clean_part is expected to accept
    ALL_DTYPES = (int, int8, int16, int32, int64,
                  float, float16, float32, float64, longdouble)

    def _check_row(self, num, *expected):
        """Run _clean_part on `num`, verify the list-of-lists structure,
        check the arrays of row 0 against `expected`, return the result."""
        num_ = _clean_part(num)
        assert isinstance(num_, list)
        assert np.all([isinstance(part, list) for part in num_])
        for col, exp in enumerate(expected):
            np.testing.assert_array_equal(num_[0][col], array(exp, dtype=float))
        return num_

    # --- invalid inputs -------------------------------------------------
    def test_clean_part_bad_input_type(self):
        """Give the part cleaner invalid input type."""
        self.assertRaises(TypeError, _clean_part, [[0., 1.], [2., 3.]])

    def test_clean_part_bad_input_type2(self):
        """Give the part cleaner another invalid input type."""
        self.assertRaises(TypeError, _clean_part, [1, "a"])

    # --- scalars, lists and tuples of builtins --------------------------
    def test_clean_part_scalar(self):
        """Test single scalar value."""
        self._check_row(1, [1.0])

    def test_clean_part_list_scalar(self):
        """Test single scalar value in list."""
        self._check_row([1], [1.0])

    def test_clean_part_tuple_scalar(self):
        """Test single scalar value in tuple."""
        # note: `(1)` is just an int; `(1,)` is the one-element tuple
        # (the original used `(1)` and therefore re-tested the scalar path)
        self._check_row((1,), [1.0])

    def test_clean_part_list(self):
        """Test multiple values in a list."""
        self._check_row([1, 2], [1.0, 2.0])

    def test_clean_part_tuple(self):
        """Test multiple values in tuple."""
        self._check_row((1, 2), [1.0, 2.0])

    def test_clean_part_all_scalar_types(self):
        """Test single scalar value for all valid data types."""
        for dtype in self.ALL_DTYPES:
            self._check_row(dtype(1), [1.0])

    # --- numpy arrays ---------------------------------------------------
    def test_clean_part_np_array(self):
        """Test multiple values in numpy array."""
        self._check_row(np.array([1, 2]), [1.0, 2.0])

    def test_clean_part_all_np_array_types(self):
        """Test scalar value in numpy array of ndim=0 for all data types."""
        for dtype in self.ALL_DTYPES:
            self._check_row(np.array(1, dtype=dtype), [1.0])

    def test_clean_part_all_np_array_types2(self):
        """Test numpy array for all types."""
        for dtype in self.ALL_DTYPES:
            self._check_row(np.array([1, 2], dtype=dtype), [1.0, 2.0])

    def test_clean_part_list_all_types(self):
        """Test list of a single value for all data types."""
        for dtype in self.ALL_DTYPES:
            self._check_row([dtype(1)], [1.0])

    def test_clean_part_list_all_types2(self):
        """Test a flat list of two numbers for all data types."""
        for dtype in self.ALL_DTYPES:
            self._check_row([dtype(1), dtype(2)], [1.0, 2.0])

    def test_clean_part_tuple_all_types(self):
        """Test tuple of a single value for all data types."""
        for dtype in self.ALL_DTYPES:
            self._check_row((dtype(1),), [1.0])

    def test_clean_part_tuple_all_types2(self):
        """Test tuple of two values for all data types."""
        for dtype in self.ALL_DTYPES:
            self._check_row((dtype(1), dtype(2)), [1.0, 2.0])

    # --- nested lists ---------------------------------------------------
    def test_clean_part_list_list_list_int(self):
        """Test an int in a list of a list of a list."""
        self._check_row([[[1]]], [1.0])

    def test_clean_part_list_list_list_float(self):
        """Test a float in a list of a list of a list."""
        self._check_row([[[1.0]]], [1.0])

    def test_clean_part_list_list_list_ints(self):
        """Test 2 lists of ints in a list in a list."""
        self._check_row([[[1, 1], [2, 2]]], [1.0, 1.0], [2.0, 2.0])

    def test_clean_part_list_list_list_floats(self):
        """Test 2 lists of floats in a list in a list."""
        self._check_row([[[1.0, 1.0], [2.0, 2.0]]], [1.0, 1.0], [2.0, 2.0])

    # --- nested containers of numpy arrays ------------------------------
    def test_clean_part_list_list_array(self):
        """List of list of numpy arrays for all valid types."""
        for dtype in self.ALL_DTYPES:
            num = [[array([1, 1], dtype=dtype), array([2, 2], dtype=dtype)]]
            self._check_row(num, [1.0, 1.0], [2.0, 2.0])

    def test_clean_part_tuple_list_array(self):
        """Tuple of list of numpy arrays for all valid types."""
        for dtype in self.ALL_DTYPES:
            num = ([array([1, 1], dtype=dtype), array([2, 2], dtype=dtype)],)
            self._check_row(num, [1.0, 1.0], [2.0, 2.0])

    def test_clean_part_list_tuple_array(self):
        """List of tuple of numpy array for all valid types."""
        for dtype in self.ALL_DTYPES:
            num = [(array([1, 1], dtype=dtype), array([2, 2], dtype=dtype))]
            self._check_row(num, [1.0, 1.0], [2.0, 2.0])

    def test_clean_part_tuple_tuples_arrays(self):
        """Tuple of tuples of numpy arrays for all valid types."""
        for dtype in self.ALL_DTYPES:
            num = ((array([1, 1], dtype=dtype), array([2, 2], dtype=dtype)),
                   (array([3, 4], dtype=dtype), array([4, 4], dtype=dtype)))
            self._check_row(num, [1.0, 1.0], [2.0, 2.0])

    def test_clean_part_list_tuples_arrays(self):
        """List of tuples of numpy arrays for all valid types."""
        for dtype in self.ALL_DTYPES:
            num = [(array([1, 1], dtype=dtype), array([2, 2], dtype=dtype)),
                   (array([3, 4], dtype=dtype), array([4, 4], dtype=dtype))]
            self._check_row(num, [1.0, 1.0], [2.0, 2.0])

    def test_clean_part_list_list_arrays(self):
        """List of list of numpy arrays for all valid types (checks both rows)."""
        for dtype in self.ALL_DTYPES:
            num = [[array([1, 1], dtype=dtype), array([2, 2], dtype=dtype)],
                   [array([3, 3], dtype=dtype), array([4, 4], dtype=dtype)]]
            num_ = self._check_row(num, [1.0, 1.0], [2.0, 2.0])
            assert len(num_) == 2
            assert np.all([len(part) == 2 for part in num_])
            np.testing.assert_array_equal(num_[1][0], array([3.0, 3.0], dtype=float))
            np.testing.assert_array_equal(num_[1][1], array([4.0, 4.0], dtype=float))
# run the whole test suite when this file is executed directly
if __name__ == "__main__":
    unittest.main()
| 45.861538 | 100 | 0.615062 | 11,505 | 0.964861 | 0 | 0 | 0 | 0 | 0 | 0 | 1,485 | 0.124539 |
52fc538b86844a30d2ac300d10bcb5488b65cb8c | 218 | py | Python | annodize/__init__.py | NowanIlfideme/annodize | d31f68b62f473990a43334ef50a0a176847b1f7a | [
"MIT"
] | null | null | null | annodize/__init__.py | NowanIlfideme/annodize | d31f68b62f473990a43334ef50a0a176847b1f7a | [
"MIT"
] | null | null | null | annodize/__init__.py | NowanIlfideme/annodize | d31f68b62f473990a43334ef50a0a176847b1f7a | [
"MIT"
] | null | null | null | """Python Annotations that are shockingly useful."""
__all__ = ["__version__", "Field", "FunctionFields", "NamespaceFields"]
from .field import Field, FunctionFields, NamespaceFields
from .version import __version__
| 31.142857 | 71 | 0.775229 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.481651 |
52fc73cfa1b8ed4c7016c7ea3dfc2e30bd7fa214 | 548 | py | Python | answers/leetcode/Summary Ranges/Summary Ranges.py | FeiZhan/Algo-Collection | 708c4a38112e0b381864809788b9e44ac5ae4d05 | [
"MIT"
] | 3 | 2015-09-04T21:32:31.000Z | 2020-12-06T00:37:32.000Z | answers/leetcode/Summary Ranges/Summary Ranges.py | FeiZhan/Algo-Collection | 708c4a38112e0b381864809788b9e44ac5ae4d05 | [
"MIT"
] | null | null | null | answers/leetcode/Summary Ranges/Summary Ranges.py | FeiZhan/Algo-Collection | 708c4a38112e0b381864809788b9e44ac5ae4d05 | [
"MIT"
] | null | null | null | class Solution(object):
def summaryRanges(self, nums):
"""
:type nums: List[int]
:rtype: List[str]
"""
range_list = []
for i in range(len(nums)):
if i > 0 and nums[i - 1] + 1 == nums[i]:
range_list[-1][1] = nums[i]
else:
range_list.append([nums[i], nums[i]])
str_list = [str(range_list[i][0]) + ("->" + str(range_list[i][1]) if range_list[i][0] != range_list[i][1] else "") for i in range(len(range_list))]
return str_list
| 34.25 | 155 | 0.492701 | 547 | 0.998175 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.140511 |
52fc7df4b4b16f1ad555d16b6d091a22b3e1319c | 751 | py | Python | tests/test_Policy.py | christophevg/py-mqfactory | 6681fea96efe6985f6dc8631cb96eb48c43146dd | [
"MIT"
] | null | null | null | tests/test_Policy.py | christophevg/py-mqfactory | 6681fea96efe6985f6dc8631cb96eb48c43146dd | [
"MIT"
] | 4 | 2020-03-24T16:51:35.000Z | 2021-06-01T23:37:00.000Z | tests/test_Policy.py | christophevg/py-mqfactory | 6681fea96efe6985f6dc8631cb96eb48c43146dd | [
"MIT"
] | null | null | null | from mqfactory.tools import Policy, Rule, CATCH_ALL
def test_empty_policy():
    """A Policy without rules answers every message with CATCH_ALL."""
    empty = Policy()
    for message in ({"something": "something"}, {}):
        assert empty.match(message) == CATCH_ALL
def test_policy():
    """Messages match the most specific applicable rule; no match -> None."""
    policy = Policy([
        Rule({"a": 1, "b": 1, "c": 1}, "a=1,b=1,c=1"),
        Rule({"a": 1, "b": 1},         "a=1,b=1"),
        Rule({"a": 1},                 "a=1"),
        Rule({"b": 1},                 "b=1"),
    ])
    expectations = [
        ({"a": 1},                 "a=1"),
        ({"b": 1},                 "b=1"),
        ({"a": 2, "b": 1, "c": 1}, "b=1"),
        ({"a": 1, "b": 1, "c": 1}, "a=1,b=1,c=1"),
        ({"a": 2, "b": 2, "c": 2}, None),
        ({"d": 1},                 None),
    ]
    for message, expected in expectations:
        assert policy.match(message).value == expected
52fd271d2646685318f80cb843416d0abdde4775 | 1,837 | py | Python | test/test_request.py | Ryuno-Ki/webmention-tools | 54409786327c2b43516c2b5b96e5d0a18a96c0f3 | [
"MIT"
] | 26 | 2019-05-05T01:42:37.000Z | 2021-12-17T02:47:44.000Z | test/test_request.py | Ryuno-Ki/webmention-tools | 54409786327c2b43516c2b5b96e5d0a18a96c0f3 | [
"MIT"
] | 44 | 2019-04-16T12:35:13.000Z | 2021-09-19T05:21:22.000Z | test/test_request.py | vrypan/webmention-tools | 54409786327c2b43516c2b5b96e5d0a18a96c0f3 | [
"MIT"
] | 3 | 2016-12-23T12:11:36.000Z | 2019-01-13T20:14:46.000Z | import unittest
import pytest
import requests
import webmentiontools
from webmentiontools.request import (
is_successful_response,
request_get_url,
request_head_url,
request_post_url,
USER_AGENT
)
from .endpoints import WEBMENTION_ROCKS_TESTS
class RequestTestCase(unittest.TestCase):
    """Tests for the low-level HTTP helpers in webmentiontools.request.

    Tests marked ``@pytest.mark.integration`` perform live network
    requests against the webmention.rocks test endpoints.
    """

    def test_user_agent(self):
        """User-Agent advertises tool name/version and requests version."""
        assert "Webmention Tools" in USER_AGENT
        assert webmentiontools.__version__ in USER_AGENT
        assert "requests" in USER_AGENT
        assert requests.__version__ in USER_AGENT

    @pytest.mark.integration
    def test_is_successful_response(self):
        """HEAD responses from all test endpoints count as successful."""
        for test in WEBMENTION_ROCKS_TESTS:
            response = request_head_url(test["url"])
            is_successful = is_successful_response(response)
            assert is_successful is True

    @pytest.mark.integration
    def test_request_head_url(self):
        """HEAD requests for header-discovery endpoints return a Response."""
        for test in WEBMENTION_ROCKS_TESTS:
            if test["source"] == "header":
                response = request_head_url(test["url"])
                assert isinstance(response, requests.models.Response)

    @pytest.mark.integration
    def test_request_get_url(self):
        """GET requests for HTML-discovery endpoints return a Response."""
        for test in WEBMENTION_ROCKS_TESTS:
            if test["source"] == "html":
                response = request_get_url(test["url"])
                assert isinstance(response, requests.models.Response)

    @pytest.mark.integration
    def test_request_post_url(self):
        """POSTing a webmention (source -> target) returns a Response."""
        source_url = "http://example.com"
        endpoint = WEBMENTION_ROCKS_TESTS[0]["url"]
        TARGETS = [
            "https://webmention.rocks/update/1",
            "https://webmention.rocks/update/1/part/2"
        ]
        for target_url in TARGETS:
            response = request_post_url(endpoint, source_url, target_url)
            assert isinstance(response, requests.models.Response)
| 31.672414 | 73 | 0.672292 | 1,568 | 0.853566 | 0 | 0 | 1,277 | 0.695155 | 0 | 0 | 175 | 0.095264 |
52fd9f23e266b4e14ba35f92f1f82313c189267a | 26,257 | py | Python | sleep_utils.py | skjerns/NT1-HRV | cb6de312f6b2710c4d059bb2a4638b053617c2f7 | [
"MIT"
] | 1 | 2022-03-06T03:32:15.000Z | 2022-03-06T03:32:15.000Z | sleep_utils.py | skjerns/NT1-HRV | cb6de312f6b2710c4d059bb2a4638b053617c2f7 | [
"MIT"
] | null | null | null | sleep_utils.py | skjerns/NT1-HRV | cb6de312f6b2710c4d059bb2a4638b053617c2f7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 21 20:19:26 2019
@author: skjerns
"""
from pyedflib.highlevel import *
import os
import gc
import warnings
import ospath #pip install https://github.com/skjerns/skjerns-utils
import numpy as np
import pyedflib #pip install https://github.com/skjerns/pyedflib/archive/custom_version.zip
import time
from tqdm import tqdm
from datetime import datetime
import dateparser
import logging
from joblib import Parallel, delayed
import matplotlib.pyplot as plt
from lspopt import spectrogram_lspopt
import matplotlib
def read_hypnogram(hypno_file, epochlen = 30, epochlen_infile=None, mode='auto', exp_seconds=None):
    """
    reads a hypnogram file as created by VisBrain or as CSV type

    Supported formats (chosen via `mode` or auto-detected): 'visbrain',
    'csv', 'dreams', 'alice', 'dat' (Nihon Koden), 'somnoscreen', 'luna'.
    Output encoding: 0=Wake, 1=N1, 2=N2, 3=N3, 4=REM, 5=Artefact/Unscored.

    :param hypno_file: a path to the hypnogram
    :param epochlen: how many seconds per label in output
    :param epochlen_infile: how many seconds per label in original file
    :param mode: 'auto', 'time' or 'csv', see SleepDev/docs/hypnogram.md
    :param exp_seconds: How many seconds does the matching recording have?
    :returns: np.array with one stage label per `epochlen` seconds
    """
    assert str(type(epochlen)()) == '0'
    assert epochlen_infile is None or str(type(epochlen_infile)()) == '0'

    with open(hypno_file, 'r') as file:
        content = file.read()
        content = content.replace('\r', '') # remove windows style \r\n

    # conversion dictionary: string labels and numeric labels to 0-5
    conv_dict = {'WAKE':0, 'WACH':0, 'WK':0, 'NWAKE': 0,
                 'N1': 1, 'NREM1': 1,
                 'N2': 2, 'NREM2': 2,
                 'N3': 3, 'NREM3': 3,
                 'N4':3, 'NREM4': 3,
                 'REM': 4,
                 0:0, 1:1, 2:2, 3:3, 4:4, -1:5, 5:5,
                 'ART': 5, 'A':5, 'ARTEFAKT':5, '8': 5,
                 'MT':5, 'BEWEGUNG':5, '9':5, '?': 5, ' ': 5, 'NAN': 5,
                 'UNSCORED': 5}

    lines = content.split('\n')
    if mode=='auto':
        if lines[0].startswith('*'): # if there is a star, we assume it's the visbrain type
            mode = 'visbrain'
        elif lines[0].replace('-', '').isnumeric():
            mode = 'csv'
        elif lines[0].startswith('[HypnogramAASM]'):
            mode = 'dreams'
        elif lines[0].startswith(' Epoch Number ,Start Time ,Sleep Stage'):
            mode = 'alice'
        elif 'abstime' in lines[0]:
            mode = 'dat'
        elif lines[0].startswith('Signal ID:'):
            mode = 'somnoscreen'
        elif any(['luna-' in x for x in lines[:5]]):
            mode = 'luna'
        elif hypno_file.endswith('.eannot'):
            mode = 'csv'
        else:
            # fix: original wrote `mode==None` (a no-op comparison); assign
            # None so we fall through to the ValueError below on purpose
            mode = None

    # reading file in format as used by Nihon Koden
    # files with a datestamp per stage annotation
    if mode=='dat':

        if epochlen_infile is not None:
            warnings.warn('epochlen_infile has been supplied, but hypnogram is'
                          'time based, will be ignored')
        elif exp_seconds and not epochlen_infile:
            epochlen_infile=exp_seconds//len(lines)
            print('[INFO] Assuming csv annotations with one entry per {} seconds'.format(epochlen_infile))
        stages = []
        for line1, line2 in zip(lines[1:-1], lines[2:]):
            if len(line1.strip())==0: continue # skip empty lines
            if len(line2.strip())==0: continue # skip empty lines

            curr_t, _, stage, *_ = line1.split('\t')
            next_t,*_ = line2.split('\t')
            curr_t = datetime.strptime(curr_t, '%Y-%m-%d %H:%M:%S')
            next_t = datetime.strptime(next_t, '%Y-%m-%d %H:%M:%S')
            assert next_t > curr_t, 'timestamp 2 is smaller than 1? {} < {}'.format(next_t, curr_t)

            # stage length is the wall-clock gap to the next annotation
            sec_diff = (next_t - curr_t).seconds
            if exp_seconds and epochlen_infile!=sec_diff:
                warnings.warn('Epochlen in file is {} but {} would be selected'.format(sec_diff, epochlen_infile))

            stage = conv_dict[stage.upper()]
            stages.extend([stage]*sec_diff)

    elif mode=='somnoscreen':
        if epochlen_infile is not None:
            warnings.warn('epochlen_infile has been supplied, but information is in file, will be ignored')

        # the epoch rate is stored in line 6 as e.g. "Rate: 30s"
        epochlen_infile = int(lines[5].replace('Rate: ', '').replace('s',''))
        stages = []
        for line in lines[6:]:
            if len(line.strip())==0: continue # skip empty lines
            _,stage = line.split('; ')
            stage = conv_dict[stage.upper()]
            stages.extend([stage]*epochlen_infile)

    # read hypnogram as written by visbrain (time based)
    elif mode=='visbrain':
        if epochlen_infile is not None:
            warnings.warn('epochlen_infile has been supplied, but hypnogram is time based,'
                          'will be ignored')
        stages = []
        prev_t = 0
        for line in lines:
            if len(line.strip())==0:   continue
            if line[0] in '*#%/\\"\'': continue # this line seems to be a comment
            s, t = line.split('\t')
            t = float(t)
            s = conv_dict[s.upper()]
            l = int(np.round((t-prev_t))) # length of this stage
            stages.extend([s]*l)
            prev_t = t

    # read hypnogram as simple CSV file, number based or string based
    elif mode=='csv':
        if exp_seconds and not epochlen_infile:
            epochlen_infile=exp_seconds//len(lines)
            print('[INFO] Assuming csv annotations with one entry per {} seconds'.format(epochlen_infile))

        elif epochlen_infile is None:
            if len(lines) < 2500: # we assume no recording is longer than 21 hours
                epochlen_infile = 30
            else:
                epochlen_infile = 1
                print('[INFO] Assuming csv annotations are per second')
        # Annotations may be stage names ('WAKE', 'N1', ...) or numbers
        # ('0', '1', '-1', ...).  Known string labels map directly; any
        # other entry is interpreted as a number and mapped via the int
        # keys.  (The original indexed conv_dict with the raw string,
        # which raised KeyError for numeric annotations such as '0'.)
        converted = []
        for l in lines:
            if len(l)==0: continue # skip empty lines
            converted.append(conv_dict[l.upper()] if l.upper() in conv_dict
                             else conv_dict[int(l)])
        lines = [[stage]*epochlen_infile for stage in converted]
        stages = np.array(lines).flatten()

    # for the Dreams Database
    # http://www.tcts.fpms.ac.be/~devuyst/Databases/DatabaseSubjects/
    elif mode=='dreams':
        epochlen_infile = 5
        conv_dict = {-2:5,-1:5, 0:5, 1:3, 2:2, 3:1, 4:4, 5:0}
        lines = [[int(line)] for line in lines[1:] if len(line)>0]
        lines = [[line]*epochlen_infile for line in lines]
        # entries are ints, so index conv_dict directly (the original
        # called l.upper() on ints, which raised AttributeError)
        stages = np.array([conv_dict[l] for l in np.array(lines).flatten()])

    # for hypnogram created with Alice 5 software
    elif mode=='alice':
        epochlen_infile = 30
        lines = [line.split(',')[-1] for line in lines[1:] if len(line)>0]
        lines = [[line]*epochlen_infile for line in lines]
        try: stages = np.array([conv_dict[l] for l in np.array(lines).flatten()])
        except KeyError as e:
            print('Unknown sleep stage in file')
            raise e

    elif mode=='luna':
        # hypnograms created by Luna software from sleepdata.org
        if epochlen_infile is not None:
            warnings.warn('epochlen_infile has been supplied, but information is in file, will be ignored')
        import xml.etree.ElementTree as ET
        root = ET.fromstringlist(lines)
        # we don't actually properly parse it as it is intended, just
        # assume that it always contains the same labels
        instances = root[-1]
        stages = []
        for instance in instances:
            stage_str = instance.attrib['class']
            try: stage_nr = conv_dict[stage_str.upper()]
            except KeyError as e:
                print(f'Unknown sleep stage in file {hypno_file} : {stage_str}')
                raise e
            duration = int(instance.find('Duration').text)
            if duration!=30:
                raise ValueError(f'Duration!=30, not expected: {duration}')
            stages.extend([stage_nr]*duration)
        stages = np.array(stages)

    else:
        raise ValueError('This is not a recognized hypnogram: {}'.format(hypno_file))

    # stages is per-second at this point; down-sample to one per epochlen
    stages = stages[::epochlen]
    if len(stages)==0:
        print('[WARNING] hypnogram loading failed, len == 0')
    return np.array(stages)
def infer_eeg_channels(ch_names):
    """
    From a list of channel names, pick at most one frontal, one central
    and one occipital EEG channel (first match per region, in that order).
    """
    regions = [['EEG Fz', 'EEG F4', 'EEG Fpz', 'EEG Fp1', 'EEG Fp2'],  # frontal
               ['EEG C4', 'EEG C3'],                                   # central
               ['EEG Oz', 'EEG O2', 'EEG O1']]                         # occipital
    found = []
    for candidates in regions:
        match = next((ch for ch in ch_names
                      if any(name in ch for name in candidates)), None)
        if match is not None:
            found.append(match)
    return found
def infer_eog_channels(ch_names):
    """
    From a list of channel names, return all channels matching one of
    the known EOG channel names (ROC/LOC), in input order.
    """
    eog_names = ['EOG ROC', 'EOG LOC']
    return [ch for ch in ch_names if any(name in ch for name in eog_names)]
def infer_emg_channels(ch_names):
    """
    From a list of channel names, return all channels matching the known
    EMG channel name (chin EMG), in input order.
    """
    return [ch for ch in ch_names if 'EMG Chin' in ch]
def hypno2time(hypno, seconds_per_epoch=1):
    """
    Converts a hypnogram based in epochs into the format as defined
    by VisBrain: http://visbrain.org/sleep.html#save-hypnogram

    One line per stage change, annotating the second at which the stage
    ends; prefixed with a '*Duration_sec N' header line.
    """
    hypno = np.repeat(hypno, seconds_per_epoch)
    stage_names = dict(enumerate(['Wake', 'N1', 'N2', 'N3', 'REM', 'Art']))
    labels = [stage_names[h] for h in hypno]
    out = ['*Duration_sec {}\n'.format(len(labels))]
    current = labels[0]
    for second, label in enumerate(labels):
        if label != current:
            out.append('{}\t{}\n'.format(current, second))
            current = label
    # close the final (still open) stage at the total length
    out.append('{}\t{}\n'.format(label, second + 1))
    return ''.join(out)
def write_hypnogram(hypno, filename, seconds_per_annotation=30,
                    comment=None, overwrite=False):
    """
    Save a hypnogram in VisBrain style (the exact onset of each sleep
    stage is annotated in time space), which avoids ambiguity.

    :param hypno: The hypnogram either as list or np.array
    :param filename: where to save the data
    :param seconds_per_annotation: How many seconds each annotation contains
    :param comment: Add a comment to the beginning of the file
    :param overwrite: overwrite file?
    """
    assert not ospath.exists(filename) or overwrite, \
        'File already exists, no overwrite'
    expanded = np.repeat(hypno, seconds_per_annotation)
    content = hypno2time(expanded)
    if comment is not None:
        # comment lines are prefixed with '*' like the header line
        content = '*' + comment.replace('\n', '\n*') + '\n' + content
        content = content.replace('\n\n', '\n')
    with open(filename, 'w') as file:
        file.write(content)
    return True
def minmax2lsb(dmin, dmax, pmin, pmax):
    """
    converts the edf min/max values to lsb and offset (x*m+b),
    i.e. physical = (digital + offset) * lsb
    """
    scale = (pmax - pmin) / (dmax - dmin)
    shift = pmax / scale - dmax
    return scale, shift
def make_header(technician='', recording_additional='', patientname='',
                patient_additional='', patientcode= '', equipment= '',
                admincode= '', gender= '', startdate=None, birthdate= ''):
    """
    A convenience function to create an EDF header (a dictionary) that
    can be used by pyedflib to update the main header of the EDF

    `startdate` may be a datetime or a parseable string (converted via
    dateparser with a warning); it defaults to the current time.
    `birthdate` may be a datetime, a string or empty; datetimes are
    formatted as e.g. '02 Aug 1951'.  All other fields are stringified.
    """
    # coerce a non-datetime startdate (e.g. a string) via dateparser
    if not( startdate is None or isinstance(startdate, datetime)):
        warnings.warn('must be datetime or None, is {}: {},attempting convert'\
                      .format(type(startdate), startdate))
        startdate = dateparser.parse(startdate)
    # coerce an unexpected birthdate type via dateparser
    if not (birthdate == '' or isinstance(birthdate, (datetime,str))):
        warnings.warn('must be datetime or empty, is {}, {}'\
                      .format(type(birthdate), birthdate))
        birthdate = dateparser.parse(birthdate)
    if startdate is None:
        now = datetime.now()
        startdate = datetime(now.year, now.month, now.day,
                             now.hour, now.minute, now.second)
        # `now` must not survive: the locals() snapshot below would
        # otherwise turn it into a spurious header field
        del now
    if isinstance(birthdate, datetime):
        birthdate = birthdate.strftime('%d %b %Y')
    # every local (i.e. every parameter, possibly coerced above) becomes
    # a header entry; datetimes are kept as objects, the rest stringified
    local = locals()
    header = {}
    for var in local:
        if isinstance(local[var], datetime):
            header[var] = local[var]
        else:
            header[var] = str(local[var])
    return header
def make_signal_header(label, dimension='uV', sample_rate=256,
                       physical_min=-200, physical_max=200, digital_min=-32768,
                       digital_max=32767, transducer='', prefiler=''):
    """
    A convenience function that creates a signal header (a dict) for one
    channel, usable by pyedflib.  Building one header per channel allows
    different sampling frequencies per channel.

    :param label: the name of the channel
    """
    # note: the 'prefiler' parameter name (sic) is kept for backward
    # compatibility with existing keyword callers
    return {'label': label,
            'dimension': dimension,
            'sample_rate': sample_rate,
            'physical_min': physical_min,
            'physical_max': physical_max,
            'digital_min': digital_min,
            'digital_max': digital_max,
            'transducer': transducer,
            'prefilter': prefiler}
def make_signal_headers(list_of_labels, dimension='uV', sample_rate=256,
                        physical_min=-200, physical_max=200, digital_min=-32768,
                        digital_max=32767, transducer='', prefiler=''):
    """
    Create one signal header per label, all sharing the same settings.
    Use make_signal_header directly if channels differ in e.g. sampling rate.

    :param list_of_labels: A list with labels for each channel.
    :returns: a list of signal header dicts, one per label
    """
    return [make_signal_header(label, dimension=dimension,
                               sample_rate=sample_rate,
                               physical_min=physical_min,
                               physical_max=physical_max,
                               digital_min=digital_min,
                               digital_max=digital_max,
                               transducer=transducer, prefiler=prefiler)
            for label in list_of_labels]
def write_edf(edf_file, signals, signal_headers, header, digital=False,
              correct=False):
    """
    Write signals to an edf_file. Header can be generated on the fly.

    :param edf_file: path of the EDF file that will be written
    :param signals: The signals as a list of arrays or a ndarray
    :param signal_headers: a list with one signal header(dict) for each signal.
                           See pyedflib.EdfWriter.setSignalHeader
    :param header: a main header (dict) for the EDF file, see
                   pyedflib.EdfWriter.setHeader for details
    :param digital: whether signals are presented digitally
                    or in physical values
    :param correct: when writing digital signals, repair inconsistent
                    digital/physical min/max entries in the signal headers
    :returns: True if successful, False if failed
    """
    # NOTE(review): header=None passes this assert but crashes below at
    # header.get()/setHeader() - callers effectively must pass a dict.
    assert header is None or isinstance(header, dict), \
        'header must be dictioniary'
    assert isinstance(signal_headers, list), \
        'signal headers must be list'
    assert len(signal_headers)==len(signals), \
        'signals and signal_headers must be same length'
    n_channels = len(signals)

    # check min and max values and repair the headers where requested
    if digital and correct:
        for sig, sigh in zip(signals, signal_headers):
            dmin, dmax = sigh['digital_min'], sigh['digital_max']
            pmin, pmax = sigh['physical_min'], sigh['physical_max']
            ch_name = sigh['label']
            # swapped digital bounds: exchange them and invert the signal so
            # the digital->physical mapping stays the same
            if dmin > dmax:
                logging.warning('{}: dmin>dmax, {}>{}, will correct'.format(
                    ch_name, dmin, dmax))
                dmin, dmax = dmax, dmin
                sig *= -1
            # swapped physical bounds: same treatment
            if pmin > pmax:
                logging.warning('{}: pmin>pmax, {}>{}, will correct'.format(
                    ch_name, pmin, pmax))
                pmin, pmax = pmax, pmin
                sig *= -1

            # actual digital extrema of this signal and their physical values
            dsmin, dsmax = round(sig.min()), round(sig.max())
            psmin = dig2phys(dsmin, dmin, dmax, pmin, pmax)
            psmax = dig2phys(dsmax, dmin, dmax, pmin, pmax)
            # physical size of one digital step, used as tolerance below
            min_dist = np.abs(dig2phys(1, dmin, dmax, pmin, pmax))

            if dsmin < dmin:
                # bugfix: message previously printed the range bound as the
                # signal minimum (swapped format args)
                logging.warning('{}:Digital signal minimum is {}'
                                ', but value range is {}, will correct'.format(
                                    ch_name, dsmin, dmin))
                # bugfix: clamp to the int16 lower bound; previously this used
                # min(dsmin, 32767), which never enforced -32768
                dsmin = max(dsmin, -32768)
                sigh['digital_min'] = dsmin
            if dsmax > dmax:
                logging.warning('{}:Digital signal maximum is {}'
                                ', but value range is {}, will correct'.format(
                                    ch_name, dsmax, dmax))
                dsmax = min(dsmax, 32767)
                sigh['digital_max'] = dsmax
            if psmax - min_dist > pmax:
                logging.warning('{}:Physical signal maximum is {}'
                                ', but value range is {}, will correct'.format(
                                    ch_name, psmax, pmax))
                sigh['physical_max'] = psmax
            if psmin + min_dist < pmin:
                logging.warning('{}:Physical signal minimum is {}'
                                ', but value range is {}, will correct'.format(
                                    ch_name, psmin, pmin))
                sigh['physical_min'] = psmin

    # also add annotations; an absent key yields an empty iterable
    annotations = header.get('annotations', '')
    with pyedflib.EdfWriter(edf_file, n_channels=n_channels) as f:
        f.setSignalHeaders(signal_headers)
        f.setHeader(header)
        f.writeSamples(signals, digital=digital)
        for annotation in annotations:
            f.writeAnnotation(*annotation)
    del f
    return os.path.isfile(edf_file)
def change_polarity(edf_file, channels, new_file=None):
    """
    Invert the polarity of the given channels of an EDF file and write the
    result to a new file.

    :param edf_file: path of the source EDF file
    :param channels: a channel label or list of labels (matched case-insensitively)
    :param new_file: output path; defaults to '<edf_file>_inv.edf'
    """
    if new_file is None:
        new_file = os.path.splitext(edf_file)[0] + '_inv.edf'
    # accept a single label as well as a list of labels
    if isinstance(channels, str): channels=[channels]
    channels = [c.lower() for c in channels]
    # read raw digital values so the samples themselves stay untouched
    signals, signal_headers, header = read_edf(edf_file, digital=True)
    for i,sig in enumerate(signals):
        shead = signal_headers[i]
        label = signal_headers[i]['label'].lower()
        if label in channels:
            print('inverting {}'.format(label))
            # flip the digital->physical mapping by negating both physical bounds
            # NOTE(review): this leaves physical_min > physical_max in the header;
            # write_edf only repairs that when called with correct=True, which is
            # not passed here - confirm pyedflib tolerates inverted bounds
            shead['physical_min']*=-1
            shead['physical_max']*=-1
    write_edf(new_file, signals, signal_headers, header, digital=True)
    # sanity check: compare the new file against the original
    compare_edf(edf_file, new_file)
def specgram_multitaper(data, sfreq, sperseg=30, perc_overlap=1/3,
                        lfreq=0, ufreq=40, show_plot=True, title='', ax=None):
    """
    Display an EEG spectrogram using a multitaper estimate (lspopt).

    :param data: the data to visualize, should be of rank 1
    :param sfreq: the sampling frequency of the data
    :param sperseg: number of seconds to use per FFT
    :param perc_overlap: percentage of overlap between segments
    :param lfreq: Lower frequency to display
    :param ufreq: Upper frequency to display
    :param show_plot: If false, only the mesh is returned, but not Figure opened
    :param title: title of the plot
    :param ax: An axis where to plot. Else will create a new Figure
    :returns: the resulting mesh (dB-scaled, min-max normalized to [0, 1])
              as it would be plotted
    """
    if ax is None:
        plt.figure()
        ax=plt.subplot(1,1,1)
    assert isinstance(show_plot, bool), 'show_plot must be boolean'
    # convert seconds-per-segment and fractional overlap into sample counts
    nperseg = int(round(sperseg * sfreq))
    overlap = int(round(perc_overlap * nperseg))
    f_range = [lfreq, ufreq]

    freq, xy, mesh = spectrogram_lspopt(data, sfreq, nperseg=nperseg,
                                        noverlap=overlap, c_parameter=20.)
    if mesh.ndim==3: mesh = mesh.squeeze().T
    # to dB scale; the small epsilon avoids log10(0)
    mesh = 20 * np.log10(mesh+0.0000001)
    # replace non-finite cells with the smallest finite value
    # NOTE(review): np.min over an empty array raises if the mesh were all
    # non-finite - assumed not to happen in practice
    idx_notfinite = np.isfinite(mesh)==False
    mesh[idx_notfinite] = np.min(mesh[~idx_notfinite])

    # convert the upper display frequency into an index into freq
    f_range[1] = np.abs(freq - ufreq).argmin()
    sls = slice(f_range[0], f_range[1] + 1)
    freq = freq[sls]
    mesh = mesh[sls, :]
    # min-max normalize to [0, 1]
    mesh = mesh - mesh.min()
    mesh = mesh / mesh.max()
    if show_plot:
        ax.imshow(np.flipud(mesh), aspect='auto')
        # x axis shows wall-clock-style HH:MM computed from segment spacing
        formatter = matplotlib.ticker.FuncFormatter(lambda s, x: time.strftime('%H:%M', time.gmtime(int(s*(sperseg-overlap/sfreq)))))
        ax.xaxis.set_major_formatter(formatter)
        if xy[-1]<3600*7: # 7 hours is half hourly
            tick_distance = max(np.argmax(xy>sperseg*60),5) #plot per half hour
        else: # more than 7 hours hourly ticks
            tick_distance = np.argmax(xy>sperseg*60)*2 #plot per half hour
        # one y tick every 2 Hz
        two_hz_pos = np.argmax(freq>1.99999999)
        ytick_pos = np.arange(0, len(freq), two_hz_pos)
        ax.set_xticks(np.arange(0, mesh.shape[1], tick_distance))
        ax.set_yticks(ytick_pos)
        ax.set_yticklabels(np.arange(ufreq, lfreq-1, -2))
        ax.set_xlabel('Time after onset')
        ax.set_ylabel('Frequency')
        ax.set_title(title)
        warnings.filterwarnings("ignore", message='This figure includes Axes that are not compatible')
        plt.tight_layout()
    return mesh
def plot_hypnogram(stages, labeldict=None, title=None, epochlen=30, ax=None,
                   verbose=True, xlabel=True, ylabel=True, **kwargs,):
    """
    Plot a hypnogram, the flexible way.

    A labeldict should give a mapping which integer belongs to which class
    E.g labeldict = {0: 'Wake', 4:'REM', 1:'S1', 2:'S2', 3:'SWS'}
    or {0:'Wake', 1:'Sleep', 2:'Sleep', 3:'Sleep', 4:'Sleep', 5:'Artefact'}

    The order of the labels on the plot will be determined by the order of the dictionary.
    E.g. {0:'Wake', 1:'REM', 2:'NREM'} will plot Wake on top, then REM, then NREM
    while {0:'Wake', 2:'NREM', 1:'NREM'} will plot Wake on top, then NREM, then REM

    This dictionary can be infered automatically from the numbers that are present
    in the hypnogram but this functionality does not cover all cases.

    :param stages: An array with different stages annotated as integers
    :param labeldict: An enumeration of labels that correspond to the integers of stages
    :param title: Title of the window
    :param epochlen: How many seconds is one epoch in this annotation
    :param ax: the axis in which we plot
    :param verbose: Print stuff or not.
    :param xlabel: Display xlabel ('Time after record start')
    :param ylabel: Display ylabel ('Sleep Stage')
    :param kwargs: additional arguments passed to plt.plot(), e.g. c='red'
    """
    # infer a labeldict from the stage values present when none was given
    if labeldict is None:
        labeldict = {}
        _defaultdict = {-1: 'A', 0:'Wake', 4:'REM', 1:'S1', 2:'S2', 3:'SWS', 5:'Artefact'}
        if set(stages) == set([0, 1]):
            labeldict = {0:'Wake', 1:'Sleep'}
        elif set(stages) == set([0, 1, 2]):
            labeldict = {0:'Wake', 2:'REM', 1:'NREM'}
        else:
            for stage in _defaultdict:
                if stage in stages:
                    labeldict[stage] = _defaultdict[stage]
        if verbose: print('Assuming {}'.format(labeldict))

    # check if all stages that are in the hypnogram have a corresponding label in the dict
    for stage in np.unique(stages):
        if not stage in labeldict:
            print('WARNING: {} is in stages, but not in labeldict, stage will be ??'.format(stage))

    # create the label order (deduplicated, keeping first-seen order)
    labels = [labeldict[l] for l in labeldict]
    labels = sorted(set(labels), key=labels.index)

    # we iterate through the stages and fetch the label for this stage
    # then we append the position on the plot of this stage via the labels-dict
    x = []
    y = []
    rem_start = []
    rem_end = []
    for i in np.arange(len(stages)):
        s = stages[i]
        label = labeldict.get(s)
        if label is None:
            # unknown stage: placeholder position, remapped below the plot later
            p = 99
            if '??' not in labels: labels.append('??')
        else :
            # y position is the negative index so the first label sits on top
            p = -labels.index(label)

        # make some red line markers for REM, mark beginning and end of REM
        # NOTE(review): the i-2 / i-1 offsets look like an intentional visual
        # lead-in/out for the REM overlay - confirm before changing
        if 'REM' in labels:
            if label=='REM' and len(rem_start)==len(rem_end):
                rem_start.append(i-2)
            elif label!='REM' and len(rem_start)>len(rem_end):
                rem_end.append(i-1)
            if label=='REM' and i==len(stages)-1:
                rem_end.append(i+1)

        # duplicate each point at i-1 and i so the line draws horizontal steps
        if i!=0:
            y.append(p)
            x.append(i-1)
        y.append(p)
        x.append(i)
    assert len(rem_start)==len(rem_end), 'Something went wrong in REM length calculation'

    # convert epoch indices to seconds
    x = np.array(x)*epochlen
    y = np.array(y)
    y[y==99] = y.min()-1 # make sure Unknown stage is plotted below all else

    if ax is None:
        plt.figure()
        ax = plt.gca()
    # x axis rendered as HH:MM since recording start
    formatter = matplotlib.ticker.FuncFormatter(lambda s, x: time.strftime('%H:%M', time.gmtime(s)))

    ax.plot(x,y, **kwargs)
    ax.set_xlim(0, x[-1])
    ax.xaxis.set_major_formatter(formatter)

    ax.set_yticks(np.arange(len(np.unique(labels)))*-1)
    ax.set_yticklabels(labels)
    ax.set_xticks(np.arange(0,x[-1],3600))
    if xlabel: plt.xlabel('Time after recording start')
    if ylabel: plt.ylabel('Sleep Stage')
    if title is not None:
        plt.title(title)

    try:
        warnings.filterwarnings("ignore", message='This figure includes Axes that are not compatible')
        plt.tight_layout()
    except Exception: pass

    # plot REM in RED here
    for start, end in zip(rem_start, rem_end):
        height = -labels.index('REM')
        ax.hlines(height, start*epochlen, end*epochlen, color='r',
                  linewidth=4, zorder=99)
| 39.248132 | 133 | 0.589214 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,224 | 0.351297 |
52fddc869556dcf56427929cc447dd8453bf656c | 789 | py | Python | amplpy/tests/TestBase.py | dish59742/amplpy | 9309a947b74dcc524a07809a68bf32d93e9f0a48 | [
"BSD-3-Clause"
] | null | null | null | amplpy/tests/TestBase.py | dish59742/amplpy | 9309a947b74dcc524a07809a68bf32d93e9f0a48 | [
"BSD-3-Clause"
] | 4 | 2021-06-08T22:16:26.000Z | 2022-03-12T00:48:56.000Z | amplpy/tests/TestBase.py | dish59742/amplpy | 9309a947b74dcc524a07809a68bf32d93e9f0a48 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
from builtins import map, range, object, zip, sorted
from .context import amplpy
import unittest
import tempfile
import shutil
import os
class TestBase(unittest.TestCase):
    """Shared scaffolding for amplpy tests.

    Each test gets a fresh AMPL session and a throw-away temporary
    directory, both cleaned up in tearDown.
    """

    def setUp(self):
        """Create the AMPL session and a private temporary directory."""
        self.ampl = amplpy.AMPL()
        self.dirpath = tempfile.mkdtemp()

    def tearDown(self):
        """Close the AMPL session and remove the temporary directory."""
        self.ampl.close()
        shutil.rmtree(self.dirpath)

    def tmpfile(self, filename):
        """Return the full path of *filename* inside the temp directory."""
        return os.path.join(self.dirpath, filename)

    def str2file(self, filename, content):
        """Write *content* (plus a trailing newline) to *filename* in the
        temp directory and return the file's full path."""
        fullpath = self.tmpfile(filename)
        with open(fullpath, 'w') as handle:
            handle.write('{}\n'.format(content))
        return fullpath
# Allow running this test module directly: `python TestBase.py`.
if __name__ == '__main__':
    unittest.main()
| 23.205882 | 64 | 0.665399 | 488 | 0.618504 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.072243 |
52fe203ea6a382182458a46a4edbd741b0f6208f | 919 | py | Python | lib/MetaboliteAtlasImpl.py | kbaseIncubator/metabolite_atlas | 229edbab6ce05a7d3ebacbc66570d82c9ad28578 | [
"MIT"
] | null | null | null | lib/MetaboliteAtlasImpl.py | kbaseIncubator/metabolite_atlas | 229edbab6ce05a7d3ebacbc66570d82c9ad28578 | [
"MIT"
] | null | null | null | lib/MetaboliteAtlasImpl.py | kbaseIncubator/metabolite_atlas | 229edbab6ce05a7d3ebacbc66570d82c9ad28578 | [
"MIT"
] | null | null | null | #BEGIN_HEADER
#END_HEADER
'''
Module Name:
MetaboliteAtlas
Module Description:
A web-based atlas to liquid chromatography–mass spectrometry (LCMS) data
'''
class MetaboliteAtlas:
    """Service implementation class for the MetaboliteAtlas KBase module.

    The ``#BEGIN_*`` / ``#END_*`` comment pairs are KBase code-generation
    anchors: hand-written code must live between matching markers so it
    survives regeneration. Do not remove them.
    """
    #BEGIN_CLASS_HEADER
    #END_CLASS_HEADER
    def __init__(self, config): #config contains contents of config file in hash or
        #None if it couldn't be found
        #BEGIN_CONSTRUCTOR
        #END_CONSTRUCTOR
        pass
    def loadDictionary(self, params):
        """Return the metabolite dictionary (currently an empty list).

        :param params: request parameters (unused by this stub)
        :returns: a single-element list wrapping the output list, the shape
                  the generated server wrapper expects
        """
        # self.ctx is set by the wsgi application class
        # return variables are: output
        #BEGIN loadDictionary
        output=[]
        #END loadDictionary
        #At some point might do deeper type checking...
        if not isinstance(output, list):
            raise ValueError('Method loadDictionary return value output is not type list as required.')
        # return the results
        return [ output ]
| 24.837838 | 103 | 0.645267 | 749 | 0.813246 | 0 | 0 | 0 | 0 | 0 | 0 | 566 | 0.614549 |
52fe443d7fc6fa4d7f3a127a6ec1ae62d3297059 | 302 | py | Python | instance/config.py | Alexotieno1717/News-API | 81ff1224862fdc74cbca6bf4a381916be382df83 | [
"MIT"
] | null | null | null | instance/config.py | Alexotieno1717/News-API | 81ff1224862fdc74cbca6bf4a381916be382df83 | [
"MIT"
] | null | null | null | instance/config.py | Alexotieno1717/News-API | 81ff1224862fdc74cbca6bf4a381916be382df83 | [
"MIT"
] | null | null | null | class Config:
"""
General configuration parent class
"""
pass
api_key = 'a493e30f11b147d0ba67b15ca60c5e4c'
SECRET_KEY = '1234567890'
class ProdConfig(Config):
"""
Production
"""
pass
class DevConfig(Config):
"""
development
"""
DEBUG = True
| 13.130435 | 48 | 0.596026 | 295 | 0.976821 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.493377 |
52fea36dd1d5595eb45032e212c7c224730f7fd2 | 2,179 | py | Python | week2/decrypt.py | vtnil/Cryptography-I-Homework | 1f2bcce5ecfdf8472ddaaaa45eb25266f0279224 | [
"MIT"
] | null | null | null | week2/decrypt.py | vtnil/Cryptography-I-Homework | 1f2bcce5ecfdf8472ddaaaa45eb25266f0279224 | [
"MIT"
] | 1 | 2020-07-31T05:21:07.000Z | 2020-10-23T17:28:12.000Z | week2/decrypt.py | vtnil/Cryptography-I-Homework | 1f2bcce5ecfdf8472ddaaaa45eb25266f0279224 | [
"MIT"
] | null | null | null | # vtnil write for Cryptography1 week2 homework
# ppt https://crypto.stanford.edu/~dabo/cs255/lectures/PRP-PRF.pdf
from Crypto.Cipher import AES
from binascii import a2b_hex
from math import ceil
questions = [
{"key": "140b41b22a29beb4061bda66b6747e14",
"ct": "4ca00ff4c898d61e1edbf1800618fb2828a226d160dad07883d04e008a7897ee2e4b7465d5290d0c0e6c6822236e1daafb94ffe0c5da05d9476be028ad7c1d81"},
{"key": "140b41b22a29beb4061bda66b6747e14",
"ct": "5b68629feb8606f9a6667670b75b38a5b4832d0f26e1ab7da33249de7d4afc48e713ac646ace36e872ad5fb8a512428a6e21364b0c374df45503473c5242a253"},
{"key": "36f18357be4dbd77f050515c73fcf9f2",
"ct": "69dda8455c7dd4254bf353b773304eec0ec7702330098ce7f7520d1cbbb20fc388d1b0adb5054dbd7370849dbf0b88d393f252e764f1f5f7ad97ef79d59ce29f5f51eeca32eabedd9afa9329"},
{"key": "36f18357be4dbd77f050515c73fcf9f2",
"ct": "770b80259ec33beb2561358a9f2dc617e46218c0a53cbeca695ae45faa8952aa0e311bde9d4e01726d3184c34451"},
]
BLOCK_SIZE = 16
MODEL_CBC = 'cbc'
MODEL_CTR = 'ctr'
AES.block_size = BLOCK_SIZE
def decrypt(question, mode):
    """
    Decrypt an AES ciphertext given as hex strings in ``question``.

    :param question: dict with hex-encoded 'key' and 'ct'; the first 16 bytes
                     of the decoded ciphertext are the IV, the rest is data
    :param mode: MODEL_CBC or MODEL_CTR - the block modes are implemented
                 manually on top of the raw AES block cipher
    :returns: the recovered plaintext as a str
    """
    key = a2b_hex(question['key'])
    ctb = a2b_hex(question['ct'])
    # the IV is transmitted as the first cipher block
    iv = ctb[:BLOCK_SIZE]
    ct = ctb[BLOCK_SIZE:]
    plain = []
    # raw block cipher; chaining is done by hand below
    cipher = AES.new(key)
    if mode == MODEL_CBC:
        _iv = iv
        for i in range(0, int(len(ct) / BLOCK_SIZE)):
            _b = ct[BLOCK_SIZE * i: BLOCK_SIZE * (i + 1)]
            _k = cipher.decrypt(_b)
            # CBC: plaintext block = D(key, c_i) XOR c_{i-1} (IV for block 0)
            plain += [a ^ b for (a, b) in zip(_iv, _k)]
            _iv = _b
        # remove padding
        # PKCS#7-style: the last byte gives the pad length; strip only if valid
        _len = plain[-1]
        if [_len] * _len == plain[-_len:]:
            plain = plain[:-_len]
    else:
        # CTR mode; the last block may be partial, hence ceil
        for i in range(0, ceil(len(ct) / BLOCK_SIZE)):
            # Be careful!!! Here is ENCRYPT!!
            # CTR decrypts by ENCRYPTING the counter block (IV + i) and
            # XOR-ing the resulting keystream with the ciphertext
            _k = cipher.encrypt((int.from_bytes(iv, 'big') + i).to_bytes(BLOCK_SIZE, 'big'))
            _b = ct[BLOCK_SIZE * i: BLOCK_SIZE * (i + 1)]
            # NOTE(review): the comprehension's i shadows the outer loop index;
            # harmless in Py3 because comprehensions have their own scope
            plain += [_k[i] ^ _b[i] for i in range(0, len(_b))]
    return ''.join([chr(a) for a in plain])
# print the four decrypted homework plaintexts (two CBC, two CTR)
print(decrypt(questions[0], MODEL_CBC))
print(decrypt(questions[1], MODEL_CBC))
print(decrypt(questions[2], MODEL_CTR))
print(decrypt(questions[3], MODEL_CTR))
| 33.523077 | 167 | 0.679211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 872 | 0.400184 |
5e0051e97518630cd66c32fa7fedd4d72f028d14 | 2,716 | py | Python | fastreid/modeling/meta_arch/AFF.py | SZLSP/reid2020NAIC | d0eaee768e0be606417a27ce5ea2b3071b5a9bc2 | [
"Apache-2.0"
] | 2 | 2021-05-12T13:36:46.000Z | 2021-08-15T10:35:08.000Z | fastreid/modeling/meta_arch/AFF.py | SZLSP/reid2020NAIC | d0eaee768e0be606417a27ce5ea2b3071b5a9bc2 | [
"Apache-2.0"
] | 1 | 2021-12-28T12:49:49.000Z | 2021-12-28T12:49:49.000Z | fastreid/modeling/meta_arch/AFF.py | SZLSP/reid2020NAIC | d0eaee768e0be606417a27ce5ea2b3071b5a9bc2 | [
"Apache-2.0"
] | null | null | null | import torch
from torch import nn
class MS_CAM(nn.Module):
    """Multi-Scale Channel Attention Module (MS-CAM), building block of AFF.

    Produces a per-element attention map in (0, 1) by summing a global branch
    (spatial average pool + bottleneck 1x1 convs) and a local branch
    (the same bottleneck applied per spatial position), passing the sum
    through a sigmoid, and rescaling the input with it.
    """

    def __init__(self, C, H, W, r):
        """
        Args:
            C: number of input channels
            H: input feature-map height (used to size the average pool)
            W: input feature-map width (used to size the average pool)
            r: channel reduction ratio; bottleneck width is max(C // r, 1)
        """
        super(MS_CAM, self).__init__()
        # removed two empty, unused nn.Sequential() attributes
        # (get_global_feature / get_local_feature): they held no layers and
        # no parameters, so dropping them does not change behavior or weights
        interdim = max(C // r, 1)
        # global branch: squeeze spatial dims, then bottleneck point-wise convs
        self.globalavgpool = nn.AvgPool2d((H, W))
        self.PWConv11 = nn.Conv2d(C, interdim, 1, 1, 0, bias=False)
        self.bn11 = nn.BatchNorm2d(interdim)
        self.PWConv12 = nn.Conv2d(interdim, C, 1, 1, 0, bias=False)
        self.bn12 = nn.BatchNorm2d(C)
        # local branch: same bottleneck, applied at every spatial position
        self.PWConv21 = nn.Conv2d(C, interdim, 1, 1, 0, bias=False)
        self.bn21 = nn.BatchNorm2d(interdim)
        self.relu = nn.ReLU(inplace=True)
        self.PWConv22 = nn.Conv2d(interdim, C, 1, 1, 0, bias=False)
        self.bn22 = nn.BatchNorm2d(C)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return x rescaled by the attention map; same (N, C, H, W) shape as x."""
        # global branch operates on the 1x1 pooled feature map
        global_feature = self.globalavgpool(x)
        global_feature = self.PWConv11(global_feature)
        global_feature = self.bn11(global_feature)
        global_feature = self.relu(global_feature)
        global_feature = self.PWConv12(global_feature)
        global_feature = self.bn12(global_feature)

        # local branch keeps full spatial resolution
        local_feature = self.PWConv21(x)
        local_feature = self.bn21(local_feature)
        local_feature = self.relu(local_feature)
        local_feature = self.PWConv22(local_feature)
        local_feature = self.bn22(local_feature)

        # global feature broadcasts over H x W when added to the local one
        x2 = self.sigmoid(global_feature + local_feature)
        return x * x2
class AFF(nn.Module):
    """Attentional Feature Fusion (AFF).

    Fuses two equally shaped feature maps X and Y using an MS_CAM attention
    mask computed over their sum: out = mask * X + (1 - mask) * Y.
    """

    def __init__(self, C, H, W, r):
        super(AFF, self).__init__()
        self.MS_CAM = MS_CAM(C, H, W, r)

    def forward(self, X, Y):
        assert X.shape == Y.shape, "Input of AFF(X and Y) should be the same shape"
        mask = self.MS_CAM(X + Y)
        return mask * X + (1 - mask) * Y
class iAFF(nn.Module):
    """Iterative Attentional Feature Fusion (iAFF).

    First fuses X and Y with a regular AFF block, then refines the result
    with a second MS_CAM pass to obtain the final attention mask:
    out = mask * X + (1 - mask) * Y.
    """

    def __init__(self, C, H, W, r):
        super(iAFF, self).__init__()
        self.AFF = AFF(C, H, W, r)
        self.MS_CAM2 = MS_CAM(C, H, W, r)

    def forward(self, X, Y):
        assert X.shape == Y.shape, "Input of AFF(X and Y) should be the same shape"
        mask = self.MS_CAM2(self.AFF(X, Y))
        return mask * X + (1 - mask) * Y
# Quick smoke test: run the three modules on random NCHW tensors and check
# they execute without shape errors (outputs are not inspected).
if __name__ == '__main__':
    i = torch.randn((50, 10, 5, 3))  # batch=50, C=10, H=5, W=3
    m = MS_CAM(10, 5, 3, 2)
    out0 = m(i)
    a = AFF(10, 5, 3, 2)
    b = iAFF(10, 5, 3, 2)
    j = torch.randn((50, 10, 5, 3))
    out1 = a(i, i)
    out2 = b(i, i)
    pass
| 28.893617 | 89 | 0.566642 | 2,431 | 0.895066 | 0 | 0 | 0 | 0 | 0 | 0 | 326 | 0.120029 |
5e012b3443d887f6bd91f7a94690e51a832da05e | 343 | py | Python | 17_process_thread/30_3_multithread_rlock.py | hemuke/python | bc99f2b5aee997083ae31f59a2b33db48c8255f3 | [
"Apache-2.0"
] | null | null | null | 17_process_thread/30_3_multithread_rlock.py | hemuke/python | bc99f2b5aee997083ae31f59a2b33db48c8255f3 | [
"Apache-2.0"
] | null | null | null | 17_process_thread/30_3_multithread_rlock.py | hemuke/python | bc99f2b5aee997083ae31f59a2b33db48c8255f3 | [
"Apache-2.0"
] | null | null | null | from threading import Thread
num = 0
def do_sth():
global num
for i in range(1000000):
num += 1
adda()
addb()
def adda():
global num
num += 1
def addb():
global num
num += 1
t1 = Thread(target=do_sth)
t2 = Thread(target=do_sth)
t1.start()
t2.start()
t1.join()
t2.join()
print(num)
# 这个计算数值错误
| 10.088235 | 28 | 0.580175 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.072423 |
5e02596f2098cd63767a99d6eb7f6f25245bc38b | 3,318 | py | Python | fn_aws_iam/fn_aws_iam/components/fn_aws_iam_deactivate_mfa_devices.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 65 | 2017-12-04T13:58:32.000Z | 2022-03-24T18:33:17.000Z | fn_aws_iam/fn_aws_iam/components/fn_aws_iam_deactivate_mfa_devices.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 48 | 2018-03-02T19:17:14.000Z | 2022-03-09T22:00:38.000Z | fn_aws_iam/fn_aws_iam/components/fn_aws_iam_deactivate_mfa_devices.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 95 | 2018-01-11T16:23:39.000Z | 2022-03-21T11:34:29.000Z | # -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.
# pragma pylint: disable=unused-argument, no-self-use
"""Function implementation"""
import logging
import re
from fn_aws_iam.lib.aws_iam_client import AwsIamClient
from fn_aws_iam.lib.helpers import CONFIG_DATA_SECTION, transform_kwargs, validate_opts
from resilient_circuits import ResilientComponent, function, handler, FunctionResult, FunctionError
from resilient_lib import ResultPayload, validate_fields
LOG = logging.getLogger(__name__)
class FunctionComponent(ResilientComponent):
"""Component that implements Resilient function 'fn_aws_iam_deactivate_mfa_devices'"""
def __init__(self, opts):
"""constructor provides access to the configuration options"""
super(FunctionComponent, self).__init__(opts)
self.options = opts.get("fn_aws_iam", {})
validate_opts(self)
@handler("reload")
def _reload(self, event, opts):
"""Configuration options have changed, save new values"""
self.options = opts.get("fn_aws_iam", {})
validate_opts(self)
@function("fn_aws_iam_deactivate_mfa_devices")
def _fn_aws_iam_deactivate_mfa_devices_function(self, event, *args, **kwargs):
"""Function: Deactivate an MFA device and remove it from association with the user name for which it
was originally enabled.
param aws_iam_user_name: An IAM user name.
param aws_iam_mfa_serial_numbers: A comma separated list of IAM MFA serial numbers or arns.
"""
try:
params = transform_kwargs(kwargs) if kwargs else {}
# Instantiate result payload object
rp = ResultPayload(CONFIG_DATA_SECTION, **kwargs)
aws_iam_user_name = kwargs.get("aws_iam_user_name") # text
aws_iam_mfa_serial_nums = kwargs.get("aws_iam_mfa_serial_nums") # text
LOG.info("aws_iam_user_name: %s", aws_iam_user_name)
LOG.info("aws_iam_mfa_serial_nums: %s", aws_iam_mfa_serial_nums)
validate_fields(["aws_iam_user_name", "aws_iam_mfa_serial_nums"], kwargs)
iam_cli = AwsIamClient(self.options)
# Delete 'MfaSerialNums' parameter from params.
if "MfaSerialNums" in params:
del params["MfaSerialNums"]
rtn = []
# Iterate over mfa serial numbers in the comma separated list in parameter
# 'param aws_iam_mfa_serial_numbers'. Add each in turn to the 'params' dict then attempt to deactivate each
# mfa for the user in parameter 'aws_iam_user_name'. Include the status of each attempt in the returned
# result.
for mfa_ser_num in re.split(r"\s*,\s*", aws_iam_mfa_serial_nums):
params.update({"SerialNumber": mfa_ser_num})
rtn.append({
"SerialNumber": mfa_ser_num,
"Status": iam_cli.post("deactivate_mfa_device", **params)}
)
results = rp.done(True, rtn)
# Produce a FunctionResult with the results
yield FunctionResult(results)
except Exception as aws_err:
LOG.exception("ERROR with Exception '%s' in Resilient Function for AWS IAM.", aws_err.__repr__())
yield FunctionError()
| 43.090909 | 119 | 0.670283 | 2,787 | 0.839964 | 2,157 | 0.65009 | 2,406 | 0.725136 | 0 | 0 | 1,462 | 0.440627 |
5e03010aadf0f1430a137f0fe8614a9be339de88 | 7,258 | py | Python | bluebottle/clients/management/commands/new_tenant.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 10 | 2015-05-28T18:26:40.000Z | 2021-09-06T10:07:03.000Z | bluebottle/clients/management/commands/new_tenant.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 762 | 2015-01-15T10:00:59.000Z | 2022-03-31T15:35:14.000Z | bluebottle/clients/management/commands/new_tenant.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 9 | 2015-02-20T13:19:30.000Z | 2022-03-08T14:09:17.000Z | # coding=utf-8
from builtins import input
from optparse import make_option
from django.core import exceptions
from django.utils.encoding import force_str
from django.conf import settings
from django.db.utils import IntegrityError
from django.core.management import call_command
from tenant_schemas.utils import get_tenant_model
from bluebottle.members.models import Member
from bluebottle.common.management.commands.base import Command as BaseCommand
from bluebottle.utils.models import Language
class Command(BaseCommand):
help = 'Create a tenant'
option_list = BaseCommand.options + (
make_option('--full-name',
help='Specifies the full name for the tenant (e.g. "Our New Tenant").'),
make_option('--schema-name',
help='Specifies the schema name for the tenant (e.g. "new_tenant").'),
make_option('--domain-url',
help='Specifies the domain_url for the tenant (e.g. "new-tenant.localhost").'),
make_option('--client-name',
help='Specifies the client name for the tenant (e.g. "new-tenant").'),
make_option('--languages',
default='en',
help='Specifies the client languages (e.g. "en,nl").'),
make_option('--post-command',
help='Calls another management command after the tenant is created.')
)
def handle(self, *args, **options):
name = options.get('full_name', None)
client_name = options.get('client_name', None)
schema_name = options.get('schema_name', None)
domain_url = options.get('domain_url', None)
languages = options.get('languages', 'en')
post_command = options.get('post_command', None)
# If full-name is specified then don't prompt for any values.
if name:
if not client_name:
client_name = ''.join(ch if ch.isalnum() else '-' for ch in name).lower()
if not schema_name:
schema_name = client_name.replace('-', '_')
if not domain_url:
base_domain = getattr(settings, 'TENANT_BASE_DOMAIN', 'localhost')
domain_url = '{0}.{1}'.format(client_name, base_domain)
client_name.replace('_', '-')
client = self.store_client(
name=name,
client_name=client_name,
domain_url=domain_url,
schema_name=schema_name
)
if client is False:
return
if not client:
name = None
while name is None:
if not name:
input_msg = 'Tenant name'
name = eval(input(force_str('%s: ' % input_msg)))
default_client_name = ''.join(ch if ch.isalnum() else '-' for ch in name).lower()
default_schema_name = default_client_name.replace('-', '_')
base_domain = getattr(settings, 'TENANT_BASE_DOMAIN', 'localhost')
default_domain_url = '{0}.{1}'.format(default_client_name, base_domain)
while client_name is None:
if not client_name:
input_msg = 'Client name'
input_msg = "%s (leave blank to use '%s')" % (input_msg, default_client_name)
client_name = eval(input(force_str('%s: ' % input_msg))) or default_client_name
while schema_name is None:
if not schema_name:
input_msg = 'Database schema name'
input_msg = "%s (leave blank to use '%s')" % (input_msg, default_schema_name)
schema_name = eval(input(force_str('%s: ' % input_msg))) or default_schema_name
while domain_url is None:
if not domain_url:
input_msg = 'Domain url'
input_msg = "%s (leave blank to use '%s')" % (input_msg, default_domain_url)
domain_url = eval(input(force_str('%s: ' % input_msg))) or default_domain_url
client_name.replace('_', '-')
client = self.store_client(
name=name,
client_name=client_name,
domain_url=domain_url,
schema_name=schema_name
)
if client is False:
break
if not client:
name = None
continue
if client and client_name:
from django.db import connection
connection.set_tenant(client)
self.create_languages(languages)
self.create_client_superuser()
call_command('loaddata', 'geo_data')
call_command('loaddata', 'geo_data')
call_command('loaddata', 'skills')
call_command('search_index', '--rebuild', '-f')
call_command('loadlinks', '-f', 'links.json')
call_command('loadpages', '-f', 'pages.json')
if client and post_command:
call_command(post_command, *args, **options)
return
def create_languages(self, languages):
for lang in languages.split(","):
if lang == 'nl':
Language.objects.get_or_create(
code='nl',
defaults={
'language_name': 'Dutch',
'native_name': 'Nederlands'
}
)
if lang == 'en':
Language.objects.get_or_create(
code='en',
defaults={
'language_name': 'English',
'native_name': 'English'
}
)
if lang == 'fr':
Language.objects.get_or_create(
code='fr',
defaults={
'language_name': 'French',
'native_name': 'Français'
}
)
def create_client_superuser(self):
password = 'pbkdf2_sha256$12000$MKnW1lFPvfhP$IFidWIsLSjfaWErZa4NFK2N40kbdYhn4PiebBGIgMLg='
su = Member.objects.create(first_name='admin',
last_name='example',
email='admin@example.com',
password=password,
is_active=True,
is_staff=True,
is_superuser=True)
su.save()
def store_client(self, name, client_name, domain_url, schema_name):
try:
client = get_tenant_model().objects.create(
name=name,
client_name=client_name,
domain_url=domain_url.split(":", 1)[0], # strip optional port
schema_name=schema_name
)
client.save()
return client
except exceptions.ValidationError as e:
self.stderr.write("Error: %s" % '; '.join(e.messages))
name = None
return False
except IntegrityError:
self.stderr.write("Error: We've already got a tenant with that name or property.")
return False
| 38.812834 | 99 | 0.533067 | 6,757 | 0.930844 | 0 | 0 | 0 | 0 | 0 | 0 | 1,446 | 0.199201 |
5e0306f967c71be1d0eb14d40dbc5b84ec045a83 | 3,565 | py | Python | tests/util_image.py | parmarsuraj99/objax | 111cd78960f5812885505b5ec02552b98a789973 | [
"Apache-2.0"
] | 2 | 2021-02-23T18:23:40.000Z | 2022-03-09T09:38:37.000Z | tests/util_image.py | parmarsuraj99/objax | 111cd78960f5812885505b5ec02552b98a789973 | [
"Apache-2.0"
] | null | null | null | tests/util_image.py | parmarsuraj99/objax | 111cd78960f5812885505b5ec02552b98a789973 | [
"Apache-2.0"
] | 1 | 2020-09-20T23:56:29.000Z | 2020-09-20T23:56:29.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittests for objax.util.image."""
import io
import unittest
from typing import Tuple
import jax.numpy as jn
import numpy as np
from PIL import Image
import objax
class TestUtilImage(unittest.TestCase):
def ndimarange(self, dims: Tuple[int, ...]):
return np.arange(np.prod(dims), dtype=float).reshape(dims)
def test_nchw(self):
"""Test nchw."""
x = self.ndimarange((2, 3, 4, 5))
self.assertEqual(objax.util.image.nchw(x).tolist(), x.transpose((0, 3, 1, 2)).tolist())
self.assertEqual(objax.util.image.nchw(jn.array(x)).tolist(), x.transpose((0, 3, 1, 2)).tolist())
x = self.ndimarange((2, 3, 4, 5, 6))
self.assertEqual(objax.util.image.nchw(x).tolist(), x.transpose((0, 1, 4, 2, 3)).tolist())
self.assertEqual(objax.util.image.nchw(jn.array(x)).tolist(), x.transpose((0, 1, 4, 2, 3)).tolist())
def test_nhwc(self):
"""Test nhwc."""
x = self.ndimarange((2, 3, 4, 5))
self.assertEqual(objax.util.image.nhwc(x).tolist(), x.transpose((0, 2, 3, 1)).tolist())
self.assertEqual(objax.util.image.nhwc(jn.array(x)).tolist(), x.transpose((0, 2, 3, 1)).tolist())
x = self.ndimarange((2, 3, 4, 5, 6))
self.assertEqual(objax.util.image.nhwc(x).tolist(), x.transpose((0, 1, 3, 4, 2)).tolist())
self.assertEqual(objax.util.image.nhwc(jn.array(x)).tolist(), x.transpose((0, 1, 3, 4, 2)).tolist())
def test_normalize(self):
"""Test normalize methods."""
x = np.arange(256)
y = objax.util.image.normalize_to_unit_float(x)
self.assertEqual((x / 128 - (1 - 1 / 256)).tolist(), y.tolist())
self.assertEqual(y.tolist(), y.clip(-1, 1).tolist())
z = objax.util.image.normalize_to_uint8(y)
self.assertEqual(x.tolist(), z.tolist())
z = objax.util.image.normalize_to_uint8(y + 1 / 128)
self.assertEqual((x + 1).clip(0, 255).tolist(), z.tolist())
z = objax.util.image.normalize_to_uint8(y - 1 / 128)
self.assertEqual((x - 1).clip(0, 255).tolist(), z.tolist())
def test_to_png(self):
"""Test to_png."""
x = np.zeros((3, 32, 32), np.float) + 1 / 255
x[:, :12, :12] = 1
x[:, -12:, -12:] = -1
y = objax.util.image.to_png(x)
self.assertEqual(y, b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00 \x00\x00\x00 \x08\x02\x00\x00\x00\xfc'
b'\x18\xed\xa3\x00\x00\x00FIDATx\x9cc\xfc\xff\xff?\x03!\xd0\xd8\xd8HP\r.\xc0D\xb6\xceQ'
b'\x0bF-\x18\xb5`\x04Y\xc0BI9C\x0c\x18\xfaA4j\xc1\x08\xb0\x80\x85\x12\xcd\r\r\r\x04\xd5'
b'\x0c\xfd \x1a\xb5`\xd4\x82Q\x0b\xe8`\x01\x00\xe3\xf1\x07\xc7\x82\x83p\xa5\x00\x00\x00\x00'
b'IEND\xaeB`\x82')
z = np.array(Image.open(io.BytesIO(y)))
z = (z.transpose((2, 0, 1)) - 127.5) / 127.5
self.assertEqual(x.tolist(), z.tolist())
if __name__ == '__main__':
unittest.main()
| 44.012346 | 120 | 0.609818 | 2,768 | 0.776438 | 0 | 0 | 0 | 0 | 0 | 0 | 1,060 | 0.297335 |
5e043abcf5a29bdcffd9f45c532c8fa1ae641968 | 311 | py | Python | alpyro_msgs/actionlib/testrequestresult.py | rho2/alpyro_msgs | b5a680976c40c83df70d61bb2db1de32a1cde8d3 | [
"MIT"
] | 1 | 2020-12-13T13:07:10.000Z | 2020-12-13T13:07:10.000Z | alpyro_msgs/actionlib/testrequestresult.py | rho2/alpyro_msgs | b5a680976c40c83df70d61bb2db1de32a1cde8d3 | [
"MIT"
] | null | null | null | alpyro_msgs/actionlib/testrequestresult.py | rho2/alpyro_msgs | b5a680976c40c83df70d61bb2db1de32a1cde8d3 | [
"MIT"
] | null | null | null | from alpyro_msgs import RosMessage, boolean, int32
class TestRequestResult(RosMessage):
    """ROS message binding for ``actionlib/TestRequestResult``.

    Generated-style data class: the dunder attributes carry the ROS type
    name, the base64-encoded message definition, and its MD5 checksum;
    the annotated attributes mirror the message fields.
    """
    __msg_typ__ = "actionlib/TestRequestResult"
    __msg_def__ = "aW50MzIgdGhlX3Jlc3VsdApib29sIGlzX3NpbXBsZV9zZXJ2ZXIKCg=="
    __md5_sum__ = "61c2364524499c7c5017e2f3fce7ba06"

    # Message fields (names and ROS types per the message definition above).
    the_result: int32
    is_simple_server: boolean
5e05ca6490bebc35ad6718a788941b84ebc6f86b | 1,313 | py | Python | tests/test_pycolor.py | edonyzpc/toolkitem | 3a09ebf45eee8ecd9ff0e441392d5fc746b996e5 | [
"MIT"
] | 3 | 2015-04-20T08:17:09.000Z | 2020-07-07T15:22:06.000Z | tests/test_pycolor.py | edonyzpc/toolkitem | 3a09ebf45eee8ecd9ff0e441392d5fc746b996e5 | [
"MIT"
] | 24 | 2015-11-14T14:54:59.000Z | 2017-10-23T15:14:45.000Z | tests/test_pycolor.py | edonyzpc/toolkitem | 3a09ebf45eee8ecd9ff0e441392d5fc746b996e5 | [
"MIT"
] | 1 | 2017-02-28T06:35:44.000Z | 2017-02-28T06:35:44.000Z | from __future__ import print_function
import sys
sys.path.append('./')
from colorprinter.pycolor import PyColor
from colorprinter.pycolor import cprint
@PyColor('ured')
def printer(string):
    """Print (1 + 2)**4 followed by *string*, colorized by the decorator."""
    base = 1 + 2
    print(str(base ** 4) + string)
class TestClass(object):
    """Smoke tests for PyColor decoration, cprint, and format handling."""

    def test_pycolor(self):
        printer('edony')

    def test_cprint(self):
        cprint('ugreen', 'hello edony')

    def test_setformat(self):
        py_color = PyColor('green')
        py_color.format = 'ucyan'
        cprint(py_color.format, 'this is test')

    def test_disableformat(self):
        py_color = PyColor('ured')
        cprint(py_color.format, 'this is test')
        py_color.disable()
        cprint(py_color.format, 'this is disable')
        # Bug fix: ``is ''`` compares object identity and only works because
        # CPython interns the empty string (and it is a SyntaxWarning on
        # modern Python); compare by value instead.
        assert py_color.format == ''

    def test_colorstr(self):
        str1 = 'this is a test'
        py_color = PyColor('green')
        str2 = py_color.colorstr(str1)
        str3 = py_color.colorstr(str1, 'red')
        assert((str2 == '\033[0;32;40mthis is a test\033[0m') and
               (str3 == '\033[0;31;40mthis is a test\033[0m'))
#if __name__ == "__main__":
# cprint('ugreen', 'hello edony')
# printer('edony')
# py_color = PyColor('green')
# py_color.format = 'ucyan'
# print(py_color.format)
# cprint(py_color.format, 'this is test')
| 27.93617 | 65 | 0.619193 | 839 | 0.638995 | 0 | 0 | 93 | 0.07083 | 0 | 0 | 422 | 0.321401 |
5e05f2cccefdec04a8fd2cca1ee7503f900daacf | 292 | py | Python | app/main/views.py | chushijituan/job_analysis | a99d8f12b9dafa93de448a27d2f76ee6ddbde469 | [
"MIT"
] | 45 | 2016-07-07T08:53:04.000Z | 2022-01-10T11:00:40.000Z | app/main/views.py | chushijituan/job_analysis | a99d8f12b9dafa93de448a27d2f76ee6ddbde469 | [
"MIT"
] | 1 | 2016-07-09T03:40:13.000Z | 2017-02-02T06:58:27.000Z | app/main/views.py | chushijituan/job_analysis | a99d8f12b9dafa93de448a27d2f76ee6ddbde469 | [
"MIT"
] | 20 | 2016-07-08T02:18:49.000Z | 2019-06-09T14:21:26.000Z | # coding: utf-8
from . import main
from flask import render_template, jsonify, flash, request, current_app, url_for, Response, g, abort
@main.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@main.route('/about')
def about_page():
    """Render the about page."""
    return render_template('about.html')
| 20.857143 | 100 | 0.715753 | 0 | 0 | 0 | 0 | 150 | 0.513699 | 0 | 0 | 50 | 0.171233 |
5e0820cd30478afe8761f3ecbdb9b91773ce5cd6 | 9,918 | py | Python | da4py/main/conformanceChecking/antiAlignmentBetweenNets.py | BoltMaud/da4py | 535372c9cbce2f6adfff181d3b2e1b33422fed8a | [
"MIT"
] | 2 | 2020-01-22T15:46:20.000Z | 2020-12-26T19:15:18.000Z | da4py/main/conformanceChecking/antiAlignmentBetweenNets.py | BoltMaud/da4py | 535372c9cbce2f6adfff181d3b2e1b33422fed8a | [
"MIT"
] | 1 | 2019-10-07T07:08:03.000Z | 2019-10-07T07:08:03.000Z | da4py/main/conformanceChecking/antiAlignmentBetweenNets.py | BoltMaud/da4py | 535372c9cbce2f6adfff181d3b2e1b33422fed8a | [
"MIT"
] | 1 | 2019-10-04T13:14:12.000Z | 2019-10-04T13:14:12.000Z | import itertools
from pm4py.objects.petri.petrinet import PetriNet
from da4py.main.objects.pnToFormulas import petri_net_to_SAT
from da4py.main.utils import variablesGenerator as vg, formulas
from da4py.main.utils.formulas import Or, And
from da4py.main.utils.unSat2qbfReader import writeQDimacs, cadetOutputQDimacs, runCadet
BOOLEAN_VAR_MARKING_PN_1="m1_ip"
BOOLEAN_VAR_MARKING_PN_2="m2_ip"
BOOLEAN_VAR_FIRING_TRANSITION_PN_1="tau1_ia"
BOOLEAN_VAR_FIRING_TRANSITION_PN_2="tau2_ia"
BOOLEAN_VAR_DIFF1="diff_1i"
BOOLEAN_VAR_DIFF2="diff_2i"
def apply(net1, m01, mf1, net2, m02, mf2, size_of_run, d, silent_label=None):
    """Build and solve a QBF anti-alignment encoding between two Petri nets.

    Encodes net1 (universally quantified) and net2 (existentially quantified)
    as SAT formulas over ``size_of_run**2 + size_of_run`` instants, constrains
    the difference variables via :func:`distanceNets` / :func:`maxDistance`,
    writes the QDIMACS file, runs the CADET solver, and prints the net1
    ``tau1_ia`` firing variables found in the solver model.

    :param net1, m01, mf1: first Petri net with its initial/final markings
    :param net2, m02, mf2: second Petri net with its initial/final markings
    :param size_of_run: base run length (the encoding inflates it quadratically)
    :param d: maximum allowed distance parameter passed to maxDistance
    :param silent_label: label identifying silent (tau) transitions, if any
    """
    vars = vg.VariablesGenerator()
    # Pad net1 with a "wait" transition so runs can be compared position-wise.
    w1 = add_wait_net(net1, "wf")
    adapted_size_of_run = size_of_run * size_of_run + size_of_run
    pn1_formula, pn1_places, pn1_transitions, pn1_silent_transitions = petri_net_to_SAT(net1, m01, mf1, vars,
                                                                                       adapted_size_of_run,
                                                                                       reach_final=True,
                                                                                       label_m=BOOLEAN_VAR_MARKING_PN_1,
                                                                                       label_t=BOOLEAN_VAR_FIRING_TRANSITION_PN_1,
                                                                                       silent_transition=silent_label,
                                                                                       space_between_fired=1 + size_of_run)
    print("etape1")
    # Force w1 at every instant that is not a multiple of size_of_run+1.
    pn1_force_wait_transitions = force_wait_transition(vars, w1, pn1_transitions, adapted_size_of_run, size_of_run + 1)
    print("etape2")
    w2 = add_wait_net(net2, "wf")
    pn2_formula, pn2_places, pn2_transitions, pn2_silent_transitions = petri_net_to_SAT(net2, m02, mf2, vars,
                                                                                       adapted_size_of_run,
                                                                                       reach_final=True,
                                                                                       label_m=BOOLEAN_VAR_MARKING_PN_2,
                                                                                       label_t=BOOLEAN_VAR_FIRING_TRANSITION_PN_2,
                                                                                       silent_transition=silent_label)
    # Per-instant difference constraints between the two firing sequences.
    dist_formulas = distanceNets(vars, adapted_size_of_run, vars.getFunction(BOOLEAN_VAR_FIRING_TRANSITION_PN_1),
                                 vars.getFunction(BOOLEAN_VAR_FIRING_TRANSITION_PN_2), pn1_transitions, pn2_transitions, w1, w2)
    print("etape3")
    maxDist_formulas = maxDistance(vars, vars.getFunction(BOOLEAN_VAR_DIFF1), vars.getFunction(BOOLEAN_VAR_DIFF2), d, adapted_size_of_run)
    print("etape4")
    # NOTE(review): this import is only needed for the (removed) debug
    # visualization; it is unused otherwise.
    from pm4py.visualization.petrinet import factory as vizu
    # Quantifier prefix: net1's variables are universal, net2's existential.
    listOfForAll = vars.getAll(BOOLEAN_VAR_MARKING_PN_1) + vars.getAll(BOOLEAN_VAR_FIRING_TRANSITION_PN_1)
    listOfExist = vars.getAll(BOOLEAN_VAR_MARKING_PN_2) + vars.getAll(BOOLEAN_VAR_FIRING_TRANSITION_PN_2) + vars.getAll(BOOLEAN_VAR_DIFF1) + vars.getAll(BOOLEAN_VAR_DIFF2)
    # (run of net1 valid) => (distance constraints hold for some run of net2)
    full_formula = Or([], [], [And([], [], [pn1_formula, pn1_force_wait_transitions]).negation(), And([], [], [dist_formulas, maxDist_formulas, pn2_formula])])
    print("etape5")
    cnf = full_formula.operatorToCnf(vars.iterator)
    print("etape6")
    # Tseitin auxiliaries introduced by the CNF conversion are existential.
    listOfExist += list(range(vars.iterator, full_formula.nbVars))
    writeQDimacs(full_formula.nbVars, listOfForAll, listOfExist, cnf)
    print("mais voila")
    runCadet()
    positives, negatives = cadetOutputQDimacs()
    # Print net1 firing variables (tau1_ia) from the solver model, resolving
    # each variable's transition index back to the transition object.
    for var in positives:
        if vars.getVarName(var) != None and vars.getVarName(var).startswith("tau1_ia"):
            print(vars.getVarName(var), pn1_transitions[int(vars.getVarName(var).split(", ")[1].split("]")[0])])
    print("....")
    for var in negatives:
        if vars.getVarName(var) != None and vars.getVarName(var).startswith("tau1_ia"):
            print(vars.getVarName(var), pn1_transitions[int(vars.getVarName(var).split(", ")[1].split("]")[0])])
def force_wait_transition(vars, w1, pn1_transitions, adapted_size_of_run, space_between_fired):
    """Force the wait transition *w1* (and only it) to fire at every instant
    that is not a multiple of *space_between_fired*.

    Returns an And formula with positive literals for w1 and negative
    literals for every other transition at those instants.
    """
    positives = []
    negatives = []
    for instant in range(adapted_size_of_run + 1):
        if instant % space_between_fired == 0:
            continue
        for transition in pn1_transitions:
            literal = vars.get(BOOLEAN_VAR_FIRING_TRANSITION_PN_1,
                               [instant, pn1_transitions.index(transition)])
            if transition == w1:
                positives.append(literal)
            else:
                negatives.append(literal)
    return And(positives, negatives, [])
def add_wait_net(net, wait_label):
    """Create a 'wait' transition (name == label) and register it on *net*.

    Runs of different lengths are padded with this transition so they can be
    compared position by position.
    """
    padding = PetriNet.Transition(wait_label, wait_label)
    net.transitions.add(padding)
    return padding
def add_wait_net_end(pn, wait_label):
    """Add a self-looping 'wait' transition on every sink place of *pn*.

    For each place with no outgoing arcs, the transition consumes and
    immediately reproduces a token there, so firing it leaves the marking
    unchanged.  Returns the new transition.
    """
    waiter = PetriNet.Transition(wait_label, wait_label)
    sink_places = [place for place in pn.places if not place.out_arcs]
    for place in sink_places:
        incoming = PetriNet.Arc(place, waiter)
        outgoing = PetriNet.Arc(waiter, place)
        pn.arcs.add(incoming)
        pn.arcs.add(outgoing)
        waiter.in_arcs.add(incoming)
        waiter.out_arcs.add(outgoing)
        place.out_arcs.add(incoming)
        place.in_arcs.add(outgoing)
    pn.transitions.add(waiter)
    return waiter
def numberOfWaitInRun(vars, size_of_run, tau1, pn1_transitions, w1, we):
    """Require at least ``size_of_run // 2`` wait-transition firings.

    Works over a doubled run: instants in the first half use *w1*, instants
    in the second half use *we*.  Returns the disjunction over every
    combination of instants at which waits could occur.
    (*vars* is kept for signature compatibility and is unused.)
    """
    half = int(size_of_run)
    all_instants = list(range(1, size_of_run * 2 + 1))
    min_waits = int(size_of_run / 2)
    choices = []
    for instants in itertools.combinations(all_instants, min_waits):
        wait_literals = []
        for instant in instants:
            if instant <= half:
                wait_literals.append(tau1([instant, pn1_transitions.index(w1)]))
            else:
                wait_literals.append(tau1([instant - half, pn1_transitions.index(we)]))
        choices.append(And(wait_literals, [], []))
    return Or([], [], choices)
def distanceNets(vars, size_of_run, tau1, tau2, pn1_transitions, pn2_transitions, w1, w2):
    """Encode per-instant difference constraints between the two nets' runs.

    Declares the diff_1i / diff_2i variables and, for every instant i and
    every net1 transition t1:
      * if t1 is a real transition: either net2 fires a same-labelled
        transition (or its wait w2) at i, or both diff variables are set;
        additionally, net1 firing t1 while net2 waits sets diff1.
      * if t1 is the wait w1: net2 not waiting at i sets diff2.
    Returns the conjunction of all constraints.
    """
    formula = []
    vars.add(BOOLEAN_VAR_DIFF1, [(1, size_of_run + 1)])
    vars.add(BOOLEAN_VAR_DIFF2, [(1, size_of_run + 1)])
    for i in range(1, size_of_run + 1):
        for t1 in pn1_transitions:
            # (An earlier variant of the inner constraint, kept as a bare
            # string in the original, was dead code and has been removed.)
            if t1 != w1:
                # Net2 transitions carrying the same label as t1, plus w2.
                listOfSameLabels = [tau2([i, pn2_transitions.index(t2)]) for t2 in pn2_transitions if t2.label == t1.label]
                listOfSameLabels.append(tau2([i, pn2_transitions.index(w2)]))
                # t1 fired => same label (or wait) in net2, else diff1 & diff2.
                formula.append(Or(listOfSameLabels, [tau1([i, pn1_transitions.index(t1)])], [And([vars.getFunction(BOOLEAN_VAR_DIFF1)([i]),
                                                                                                  vars.getFunction(BOOLEAN_VAR_DIFF2)([i])], [], [])]))
                # t1 fired while net2 waits => diff1.
                formula.append(Or([vars.getFunction(BOOLEAN_VAR_DIFF1)([i])], [tau2([i, pn2_transitions.index(w2)]),
                                                                              tau1([i, pn1_transitions.index(t1)])], []))
            else:
                # Net1 waits: net2 must also wait, otherwise diff2 is set.
                formula.append(Or([vars.getFunction(BOOLEAN_VAR_DIFF2)([i]), tau2([i, pn2_transitions.index(w2)])],
                                  [tau1([i, pn1_transitions.index(t1)])], []))
    return And([], [], formula)
def maxDistance(vars, diff1, diff2, max_d, size_of_run):
    """Constrain the number of True diff variables to at most *max_d*.

    Over a doubled run (diff1 covers the first half, diff2 the second), at
    least ``2*size_of_run - max_d`` diff variables must be False; this is
    encoded as a disjunction over every combination of instants forced False.
    (*vars* is kept for signature compatibility and is unused.)
    """
    all_instants = list(range(1, size_of_run * 2 + 1))
    number_false = size_of_run * 2 - max_d
    # Fix: removed a stray debug print ("wala") left in the original.
    combinations_of_instants = itertools.combinations(all_instants, number_false)
    choices = []
    for instants in combinations_of_instants:
        forced_false = []
        for i in instants:
            if i <= size_of_run:
                forced_false.append(diff1([i]))
            else:
                forced_false.append(diff2([i - size_of_run]))
        choices.append(And([], forced_false, []))
    return Or([], [], choices)
def maxDistance2(vars, diff1, diff2, max_d, size_of_run):
    """Alternative distance encoding: for each choice of exactly *max_d*
    instants, pass the chosen diff literals in the negative slot of an Or
    whose sub-formula forces all remaining diff literals False.
    (*vars* is kept for signature compatibility and is unused.)
    """
    total = size_of_run * 2

    def diff_literal(instant):
        # First half of the doubled run maps to diff1, second half to diff2.
        if instant <= size_of_run:
            return diff1([instant])
        return diff2([instant - size_of_run])

    choices = []
    for chosen in itertools.combinations(range(1, total + 1), max_d):
        chosen_set = set(chosen)
        chosen_literals = [diff_literal(i) for i in chosen]
        other_literals = [diff_literal(i) for i in range(1, total + 1)
                          if i not in chosen_set]
        choices.append(Or([], chosen_literals, [And([], other_literals, [])]))
    return Or([], [], choices)
| 49.59 | 163 | 0.619984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,153 | 0.116253 |
5e08de868f4d13f8b2e2b7a064674d9d0ec35a63 | 6,215 | py | Python | projects-import.py | kprussing/resume | b7df1db6ec1709152b2e4727c90b12eac81f258b | [
"BSD-2-Clause"
] | null | null | null | projects-import.py | kprussing/resume | b7df1db6ec1709152b2e4727c90b12eac81f258b | [
"BSD-2-Clause"
] | null | null | null | projects-import.py | kprussing/resume | b7df1db6ec1709152b2e4727c90b12eac81f258b | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
__doc__ = """Process a dump from the 'Charge Activity Report by Employee
- Project Detail Information' report from Webwise. We only need the
table view because we simply want to extract the fields. For this to
work, we _must_ have the table headers. Those are used as the keys in
the YAML formatting.
"""
import argparse
import datetime
import os
import re
import yaml
_reference_format = """ - title: {Project Title}
project: {Project}
contract: {Contract No}
sponsor: {Sponsor}
PD:
project: {PD of Project}
subtask: {PD of Subtask}
role: '[Program manager, P.D./P.I., Co-P.I./P.D. Task leader]'
budget: '[Did Candidate have budgetary authority?]'
subtask: {Subtask Title}
amount-funded:
task: {Budget of Subtask}
project: {Funded Amount includes Fee}
number-supervised: '[15 (3 PRE, 1 SRE, 1 REII, 1 RE1, 9 students)]'
performance:
project:
- year: {Contract Start Date.year}
month: {Contract Start Date.month}
day: {Contract Start Date.day}
- year: {Contract End Date.year}
month: {Contract End Date.month}
day: {Contract End Date.day}
candidate:
- year: {Employee First Month Worked on Project.year}
month: {Employee First Month Worked on Project.month}
day: {Employee First Month Worked on Project.day}
- year: {Employee Last Month Worked on Project.year}
month: {Employee Last Month Worked on Project.month}
day: {Employee Last Month Worked on Project.day}
hours-worked: {Total Hours Worked}
contributions: '[Briefly describe you contributions in 2--3 sentences.]'
"""
# These were part of an attempt to update a reference YAML with new
# information from the table, but I think that's going to take too much
# effort. Maybe we'll do that, but not now.
# _empty_row = {
# "title" : "",
# "project" : "",
# "contract" : "",
# "sponsor" : "",
# "PD-project" : "",
# "PD-subtask" : "",
# "role" : "'[Program manager, P.D./P.I., Co-P.I./P.D. Task leader]'",
# "budget" : "'[Did Candidate have budgetary authority?]'",
# "subtask" : "",
# "amount-funded-task" : "",
# "amount-funded-project" : "",
# "number-supervised" : "'[15 (3 PRE, 1 SRE, 1 REII, 1 RE1, 9 students)]'",
# "contract-start" : None,
# "contract-end" : None,
# "candidate-start" : None,
# "candidate-end" : None,
# "hour-worked" : "",
# "contributions" : "'[Briefly describe you contributions in 2--3 sentences.]'",
# }
# _from_to_keys = (
# ("Project Title", "title"),
# ("Project", "project"),
# ("Contract No", "contract"),
# ("Sponsor", "sponsor"),
# ("PD of Project", "pd-project"),
# ("PD of Subtask", "pd-subtask"),
# ("Subtask Title", "subtask"),
# ("Budget of Subtask", "amount-funded-task"),
# ("Funded Amount includes Fee", "amount-funded-project"),
# ("Contract Start Date", "contract-start"),
# ("Contract End Date", "contract-end"),
# ("Employee First Month Worked on Project", "candidate-start"),
# ("Employee Last Month Worked on Project", "candidate-end"),
# ("Total Hours Worked", "hour-worked"),
# )
# This is the worked out regular expression for copying the vita
# view over. All of the information is in the table and it's easier
# to parse that. But I don't want to loose the careful work I did
# to figure this out.
# pattern = re.compile(r"\s*\d+\s*" \
# + r"Project\s*Title\s*(?P<title>[-&\w\s]+)" \
# + r"Contract\s*No(?:[.]|umber)\s*(?P<contract>[\w-]*)\s*" \
# + r"Sponsor\s*(?P<sponsor>[-&\w/\s]+)\s*" \
# + r"P[.]\s*I[.]\s*(?P<pi>[\w,\s]+)" \
# + r"Candidate['’]s\s+Role\s*(?P<role>[\w\s-]*)" \
# + r"Budgetary\s*Authority[?]\s*(?P<budget>\w*)\s*" \
# + r"Subtask\s*Title[?]?\s*(?P<subtask>[-&\w\s]*)" \
# + r"Amount\s*Funded\s*for\s*Task:?\s*(?P<task_amount>\$[\d,.]+)?\s*" \
# + r"Amount\s*Funded\s*for\s*Project:?\s*(?P<project_amount>\$[\d,.]+)?\s*" \
# + r"Number\s*and\s*Rank\s*of\s*Persons\s*Supervised:?\s*(?P<supervised>[\w\s]*)" \
# + r"Period\s*of\s*Performance\s*\(Project\):?\s*(?P<project_performance>[-/\d\s]*)" \
# + r"Period\s*of\s*Performance\s*\(Candidate\):?\s*(?P<candidate_performance>[-/\d\s]*)" \
# + r"Contributions:?\s*(?P<contributions>\w|\s)*"
# )
# We define two entries as the same if they have the same entries
# same_entry = lambda l, r: all(l[k] == r[k] for k in ("title",
# "subtask",
# "contract"))
if __name__ == "__main__":
    # Derive a readable program name from the last path components.
    prog, _ = os.path.splitext(".".join(__file__.split(os.sep)[-3:]))
    parser = argparse.ArgumentParser(prog=prog, description=__doc__)
    parser.add_argument("-o", "--output", default="-",
                        type=argparse.FileType("w"),
                        help="Output file")
    parser.add_argument("table", type=argparse.FileType("r"),
                        help="Input table view")
    args = parser.parse_args()

    # First line of the table holds the column headers used as YAML keys.
    keys = [column.strip() for column in args.table.readline().split("\t")]

    # Sanitize rows that start with a tab by gluing them back onto the
    # previous record (an artifact of poor formatting or bad copy/paste).
    lines = []
    for raw in args.table.readlines():
        if raw.startswith("\t") and lines:
            lines[-1] = lines[-1][:-len(os.linesep)] + raw
        else:
            lines.append(raw)

    def convert(key, cell):
        """Parse the last four columns as M/D/Y dates; pass others through."""
        if key in keys[-4:]:
            return datetime.datetime.strptime(cell, "%m/%d/%Y")
        return cell

    args.output.write("projects:\n")
    for raw in lines:
        row = {key: convert(key, cell.strip())
               for key, cell in zip(keys, raw.split("\t"))}
        args.output.write(_reference_format.format(**row))
| 43.159722 | 99 | 0.545455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,608 | 0.741194 |
5e094e096519c145228a9361bab704ba9763c4d7 | 4,240 | py | Python | jenkins/docker_diff.py | smarterclayton/test-infra | 13bff73612f370ad8096e8a8d731faa5e3697adb | [
"Apache-2.0"
] | null | null | null | jenkins/docker_diff.py | smarterclayton/test-infra | 13bff73612f370ad8096e8a8d731faa5e3697adb | [
"Apache-2.0"
] | 1 | 2021-03-20T05:41:39.000Z | 2021-03-20T05:41:39.000Z | jenkins/docker_diff.py | smarterclayton/test-infra | 13bff73612f370ad8096e8a8d731faa5e3697adb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Output the differences between two Docker images.
Usage:
python docker_diff.py [--deep=path] <image_1> <image_2>
"""
import argparse
import json
import logging
import os
import shutil
import subprocess
import tarfile
import tempfile
def call(cmd, **kwargs):
    """Log *cmd* and run it; return the subprocess exit status."""
    joined = ' '.join(cmd)
    logging.info('exec %s', joined)
    return subprocess.call(cmd, **kwargs)
def check_call(cmd):
    """Log *cmd* and run it, raising CalledProcessError on failure."""
    joined = ' '.join(cmd)
    logging.info('exec %s', joined)
    return subprocess.check_call(cmd)
def dockerfile_layers(tf):
    '''Given a `docker save` tarball, return the layer metadata in order.

    Each layer's metadata lives at ``<id>/json`` inside the tarball; layers
    form a chain through their ``parent`` field, starting from the base image
    (which has no parent).
    '''
    child_of = {}
    for member in tf.getmembers():
        if member.name.endswith('/json'):
            meta = json.load(tf.extractfile(member))
            child_of[meta.get('parent')] = meta
    # Walk the parent chain starting from the base image (parent is None).
    ordered = []
    parent = None
    while parent in child_of:
        meta = child_of[parent]
        ordered.append(meta)
        parent = meta['id']
    return ordered
def is_whiteout(fname):
    """Return True if any path component of *fname* is an AUFS whiteout
    marker (a ``.wh.``-prefixed name marking a deleted path)."""
    return any(part.startswith('.wh.') for part in fname.split('/'))
def extract_layers(tf, layers, outdir):
    '''Extract docker layers to a specific directory (fake a union mount).

    Layers are applied in order.  Within each layer, regular entries are
    extracted first, then whiteout markers (.wh.*) are replayed as deletions
    of the corresponding paths, per the Docker image spec:
    https://github.com/docker/docker/blob/master/image/spec/v1.md
    '''
    for l in layers:
        obj = tf.extractfile('%s/layer.tar' % l['id'])
        with tarfile.open(fileobj=obj) as f:
            # Whiteout entries mark files deleted in this layer, so they
            # must not be extracted as regular files.
            members = f.getmembers()
            members_good = [m for m in members if not is_whiteout(m.name)]
            f.extractall(outdir, members_good)
            for m in members:
                name = m.name
                if is_whiteout(name):
                    # Strip the marker to recover the path being deleted.
                    path = os.path.join(outdir, name.replace('.wh.', ''))
                    if os.path.isdir(path):
                        shutil.rmtree(path)
                    elif os.path.exists(path):
                        os.unlink(path)
def docker_diff(image_a, image_b, tmpdir, deep):
    """Dump both images with `docker save`, extract their layers into
    *tmpdir*, and print a recursive diff of the resulting trees.

    :param image_a: name/tag of the first image
    :param image_b: name/tag of the second image
    :param tmpdir: scratch directory for tarballs and extracted trees
    :param deep: optional subdirectory to diff with full content output
    """
    # dump images for inspection
    tf_a_path = '%s/a.tar' % tmpdir
    tf_b_path = '%s/b.tar' % tmpdir
    check_call(['docker', 'save', '-o', tf_a_path, image_a])
    check_call(['docker', 'save', '-o', tf_b_path, image_b])

    tf_a = tarfile.open(tf_a_path)
    tf_b = tarfile.open(tf_b_path)

    # find layers in order
    layers_a = dockerfile_layers(tf_a)
    layers_b = dockerfile_layers(tf_b)

    # minor optimization: skip layers shared by both images
    common = len(os.path.commonprefix([layers_a, layers_b]))

    tf_a_out = '%s/a' % tmpdir
    tf_b_out = '%s/b' % tmpdir

    extract_layers(tf_a, layers_a[common:], tf_a_out)
    extract_layers(tf_b, layers_b[common:], tf_b_out)

    # actually compare the resulting directories
    # just show whether something changed (OS upgrades change a lot)
    call(['diff', '-qr', 'a', 'b'], cwd=tmpdir)

    if deep:
        # if requested, do a more in-depth content diff as well.
        call([
            'diff', '-rU5',
            os.path.join('a', deep),
            os.path.join('b', deep)],
             cwd=tmpdir)
def main():
    """Parse CLI arguments and diff the two images in a temp directory."""
    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument('--deep', help='Show full differences for specific directory')
    parser.add_argument('image_a')
    parser.add_argument('image_b')
    options = parser.parse_args()

    workdir = tempfile.mkdtemp(prefix='docker_diff_')
    try:
        docker_diff(options.image_a, options.image_b, workdir, options.deep)
    finally:
        # Always clean up the scratch directory, even on failure.
        shutil.rmtree(workdir)
# Script entry point.
if __name__ == '__main__':
    main()
5e099ec7e7075858d79dcc6c43ed33bc1961d2a0 | 25,459 | py | Python | crawler/libs/extractors.py | riszkymf/pricefinder_full | dc70332757d4487826204fa2a249deb22148bb7b | [
"MIT"
] | 1 | 2019-07-26T12:40:25.000Z | 2019-07-26T12:40:25.000Z | crawler/libs/extractors.py | riszkymf/pricefinder_full | dc70332757d4487826204fa2a249deb22148bb7b | [
"MIT"
] | 1 | 2020-03-02T08:27:51.000Z | 2020-03-02T08:27:51.000Z | crawler/libs/extractors.py | riszkymf/pricefinder_full | dc70332757d4487826204fa2a249deb22148bb7b | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions as EC
from crawler.libs.util import get_path, flatten_dictionaries,kurs
from crawler.libs.regex import RegexHandler
from crawler import logging
import json
from time import sleep
import re
import inspect
import operator
# Paths to the browser driver binaries, resolved via the project's get_path.
DRIVER_PATH = {"chrome": get_path('chromedriver'),
               "firefox": get_path('geckodriver')}

# Map YAML locator-type names onto selenium's By.* locator strategies.
elementFilterTool = {"id": By.ID,
                     "xpath": By.XPATH,
                     "link_text": By.LINK_TEXT,
                     "partial_link_text": By.PARTIAL_LINK_TEXT,
                     "name": By.NAME,
                     "tag_name": By.TAG_NAME,
                     "class_name": By.CLASS_NAME,
                     "css_selector": By.CSS_SELECTOR}
class Extractors(object):
    """Wrap a SeleniumElementsExtractor with optional pre-actions and
    post-processing, producing a ``{value_name: values}`` mapping.
    """

    # Name under which the extracted values are returned.
    value_name = None
    # When True, each extracted value is run through self.postprocess steps.
    is_postprocessed = False
    # Raw pre-action configuration (list of {'chain', 'chain_name'} dicts).
    _pre_actions = None
    is_preaction = False
    _preactions_chains = None
    static = False
    attribute = None

    def __init__(self, **kwargs):
        """Forward **kwargs straight to SeleniumElementsExtractor."""
        self.postprocess = list()
        self.extractor = SeleniumElementsExtractor(**kwargs)
        self.postprocessed_value = list()

    def _configure_preactions(self, action, driver):
        """Build ActionsHandler chains from the _pre_actions configuration."""
        action_chains = list()
        chain = self._pre_actions
        for i in chain:
            tmp = ActionsHandler(action, driver,
                                 i['chain'], i['chain_name'])
            action_chains.append(tmp)
        self._preactions_chains = action_chains

    def dump_value(self):
        """Run pre-actions (if any), extract, post-process, and return
        ``{self.value_name: values}``.

        For a static extractor the single extracted value is wrapped as
        ``[{"static": value}]``.
        """
        key = self.value_name
        if self.is_preaction:
            for act in self._preactions_chains:
                act.run()
            values = self.extractor.run()
        else:
            if self.extractor.static:
                values = [{"static": self.extractor.run()[0]}]
            else:
                values = self.extractor.run()
        if values and self.is_postprocessed:
            postprocessed_value = list()
            for i in values:
                val = self.generate_postprocess(i)
                postprocessed_value.append(val)
            values = postprocessed_value
        return {key: values}

    def generate_postprocess(self, value):
        """Thread *value* through each configured post-process step in order."""
        postprocess_ = self.postprocess
        postprocess = list()  # NOTE(review): unused local, kept as-is
        for i in postprocess_:
            kwargs = self.post_process_kwargs(i)
            kwargs['value'] = value
            process = PostProcess(**kwargs)
            process_ = process.extractor(**process.kkwargs)
            value = process_.result
        return value

    def post_process_kwargs(self, data):
        """Turn a one-entry YAML dict into PostProcess kwargs, with the
        entry's key exposed as 'type_'."""
        d = {}
        if isinstance(data, dict):
            for key, val in data.items():
                d['type_'] = key
            d = {**d, **data}
        else:
            raise TypeError("YAML Configuration must be list of dictionary")
        return d
class SeleniumElementExtractor(object):
    """Extract the text of a single element located by a selenium filter.

    :param type_: locator-type key into ``elementFilterTool`` (e.g. 'xpath')
    :param value: locator value passed to ``find_element``
    :param driver: selenium webdriver instance used for the lookup
    """
    filter_ = None

    def __init__(self, type_, value, driver):
        self.driver = driver
        type_ = type_.lower()
        self.filter_ = elementFilterTool[type_]
        self.value = value

    def run(self):
        """Return the matched element's text.

        Bug fix: the original referenced a global ``driver`` (NameError at
        runtime); use the instance's driver.
        """
        return self.driver.find_element(self.filter_, self.value).text
class SeleniumElementsExtractor(object):
    """Extract data from every element matching a selenium locator.

    Depending on configuration, grabs each element's text (scrolling the
    page until the texts become non-empty), a DOM attribute, or a DOM
    property.
    """

    # Selenium locator strategy (a By.* constant).
    filter_ = None
    attribute = None
    properties = None
    # Scrolling state used by the text-extraction retry loop.
    current_height = 0
    max_height = None
    cycle = 0
    # Extraction mode: one of "text", "attribute", "properties".
    grab = "text"

    def __init__(self, type_, static, value, driver, attribute=None, properties=None):
        """:param type_: locator-type key into ``elementFilterTool``
        :param static: flag consumed by the caller (Extractors.dump_value)
        :param value: locator value passed to ``find_elements``
        :param driver: selenium webdriver instance
        :param attribute: DOM attribute to grab instead of text
        :param properties: DOM property to grab instead of text
        """
        self.driver = driver
        self.attribute = attribute
        type_ = type_.lower()
        self.filter_ = elementFilterTool[type_]
        self.value = value
        self.static = static
        self.properties = properties
        self._define_grab()

    def _define_grab(self):
        # Pick the extraction mode; attribute takes precedence over properties.
        if self.attribute:
            self.grab = "attribute"
        elif self.properties:
            self.grab = "properties"
        else:
            self.grab = "text"
        if self.attribute and self.properties:
            logging.warning("Configuration only allow either properties or attribute")

    def _extract_text(self, driver):
        """Scroll the page and retry (up to 3 passes) until no matched
        element reports empty text; return the list of element texts."""
        if not self.max_height:
            self.get_page_height()
            self.get_steps()
        text_result = []
        self.cycle = 0
        while "" in text_result or not text_result:
            self.scroll_page()
            result = driver.find_elements(self.filter_, self.value)
            text_result = [i.text for i in result]
            if self.cycle < 3:
                self.reset_scroll()
            else:
                logging.warning("Unable To Obtain Data: {}".format(self.value))
                break
        result = [i.text for i in result]
        return result

    def _extract_attributes(self, driver):
        """Return the configured DOM attribute of every matched element."""
        result = driver.find_elements(self.filter_, self.value)
        result = [i.get_attribute(self.attribute) for i in result]
        return result

    def _extract_properties(self, driver):
        """Return the configured DOM property of every matched element."""
        result = driver.find_elements(self.filter_, self.value)
        result = [i.get_property(self.properties) for i in result]
        return result

    def run(self):
        """Dispatch to the extraction method selected by ``self.grab``."""
        driver = self.driver
        # ExtractorsMethods stores plain (unbound) functions, so self is
        # passed explicitly.
        method = self.ExtractorsMethods[self.grab]
        result = method(self, driver)
        return result

    def get_page_height(self):
        """Scroll to the bottom and record the total page height (pixels)."""
        driver = self.driver
        script = "window.scrollTo(0,document.body.scrollHeight);"
        driver.execute_script(script)
        height = driver.execute_script("return window.scrollY")
        self.max_height = int(height)

    def scroll_page(self, cycle=0, startswith=0):
        """Scroll down by one viewport step, clamped to the page bottom.
        (The ``cycle`` and ``startswith`` parameters are unused legacy.)"""
        driver = self.driver
        delta = self.max_height - self.current_height
        step_scroll = self.step_scroll + self.current_height
        if delta < self.step_scroll:
            step_scroll = self.max_height
        script = "document.documentElement.scrollTo(0,{});".format(step_scroll)
        driver.execute_script(script)
        sleep(1)  # give lazily-loaded content time to render
        self.current_height = self.get_current_height()

    def reset_scroll(self):
        """Jump back to the top of the page and count one retry cycle."""
        self.current_height = 0
        driver = self.driver
        script = "window.scrollTo(0,0);"
        driver.execute_script(script)
        self.cycle = self.cycle + 1

    def get_current_height(self):
        """Return the current vertical scroll offset."""
        driver = self.driver
        script = "return document.documentElement.scrollTop"
        return driver.execute_script(script)

    def get_steps(self):
        """Record the viewport height, used as the scroll step size."""
        driver = self.driver
        script = "return window.innerHeight"
        steps = driver.execute_script(script)
        self.step_scroll = steps

    # Dispatch table of plain functions, invoked as method(self, driver)
    # from run().
    ExtractorsMethods = {
        "text": _extract_text,
        "attribute": _extract_attributes,
        "properties": _extract_properties
    }
class PostProcess(object):
    """Dispatch a named post-processing step and bind its keyword arguments.

    Looks up the step class in ``ExtractorPostProcess`` by *type_*, then maps
    the remaining YAML-provided keyword arguments (in insertion order) onto
    the step's ``__init__`` positional parameters.
    """

    def __init__(self, type_, **kwargs):
        extractor = ExtractorPostProcess[type_]
        # NOTE: the original also pre-computed generate_math_args() for the
        # 'math' type, but its result was immediately discarded; the generic
        # mapping below already binds the 'math' key onto 'query'.
        kkwargs = self.parse_arguments(extractor, **kwargs)
        self.extractor = extractor
        self.kkwargs = kkwargs

    def parse_arguments(self, extractor=None, **kwargs):
        """Map *kwargs* (in order) onto the extractor's __init__ parameters.

        'value' is always passed through under its own name; the remaining
        kwargs are matched positionally to the remaining parameter names.
        """
        func = getattr(extractor, '__init__')
        # Fix: inspect.getargspec was removed in Python 3.11; getfullargspec
        # exposes the same .args field.
        argspecs = inspect.getfullargspec(func)
        args = argspecs.args
        args.remove('self')
        extractor_args = {'value': kwargs.pop('value')}
        args.remove('value')
        keys = list(kwargs.keys())
        if args:
            for k1, k2 in zip(args, keys):
                extractor_args[k1] = kwargs.pop(k2)
        return extractor_args

    def generate_math_args(self, **kwargs):
        """Normalize raw 'math' YAML config into {'query', 'value'} form."""
        d = {'query': kwargs['math'], 'value': kwargs['value']}
        return d
class RegexExtractBefore(PostProcess):
    """Keep the part of *value* preceding the last occurrence of *character*;
    if the separator is absent, keep *value* unchanged.

    NOTE(review): the regex is matched against ``repr(value)`` and quote
    characters are stripped afterwards — this also removes quotes that were
    genuinely part of the data; confirm that is intended.
    """

    def __init__(self, value, character):
        # ``character`` is escaped with a leading backslash, so it is
        # expected to be punctuation (a letter would form an escape
        # sequence like \a instead).
        regex = '(.*)\{}'.format(character)
        result = re.search(regex, repr(value))
        if not result:
            self.result = value
        else:
            result = result.group(1)
            result = result.replace("'", "").replace('"', '')
            self.result = result
class RegexExtractAfter(PostProcess):
    """Keep the part of *value* following the first occurrence of *character*;
    if the separator is absent, keep *value* unchanged.

    NOTE(review): as in RegexExtractBefore, the match runs over
    ``repr(value)`` and all quote characters are stripped from the result.
    """

    def __init__(self, value, character):
        regex = "\{}(.*)".format(character)
        result = re.search(regex, repr(value))
        if not result:
            # (The double assignment below mirrors the original's redundant
            # no-match branch.)
            result = value
            self.result = value
        else:
            result = result.group(1)
            result = result.replace("'", "").replace('"', '')
            self.result = result
class RemoveExtendedAscii(PostProcess):
    """Placeholder step: presumably intended to strip extended-ASCII
    characters, but not implemented — instantiating it does nothing and the
    instance has no ``result`` attribute."""

    def __init__(self, value):
        pass
class RegexRaw(PostProcess):
    """Apply a raw regex *query* to *value* via RegexHandler; fall back to
    the original value when the handler fails."""

    def __init__(self, value, query):
        regex_ = RegexHandler(value, query)
        try:
            result_ = regex_.__getvalue__()
        except Exception:
            # Bug fix: the original tested an undefined name ``result`` here,
            # which raised NameError instead of falling back to the input.
            result_ = value
        self.result = result_
class ExtractNumbers(PostProcess):
    """Strip every non-digit character from *value*; if nothing remains,
    keep *value* unchanged.  The result stays a string."""

    def __init__(self, value):
        # Raw string avoids the invalid-escape-sequence warning for "\D".
        digits = re.sub(r"\D", "", value)
        self.result = digits if digits else value
class ExtractFloatNumber(PostProcess):
    """Extract the first decimal number (``digits.digits``) from *value*;
    keep *value* unchanged when none is found.  The result stays a string."""

    def __init__(self, value):
        # Raw string avoids the invalid-escape-sequence warning for '\d'.
        matches = re.findall(r'\d+\.\d+', value)
        self.result = matches[0] if matches else value
class ExtractConvertInt(PostProcess):
    """Strip non-digits from *value* and convert to int; keep *value*
    unchanged if no digits are present."""

    def __init__(self, value):
        # Raw string avoids the invalid-escape-sequence warning for "\D".
        digits = re.sub(r"\D", "", value)
        self.result = int(digits) if digits else value
class ExtractConvertFloat(PostProcess):
    """Strip non-digits from *value* and convert to float; keep *value*
    unchanged if no digits are present.

    NOTE(review): the pattern also removes the decimal point, so '3.14'
    becomes 314.0 — confirm whether fractions should be preserved.
    """

    def __init__(self, value):
        # Raw string avoids the invalid-escape-sequence warning for "\D".
        digits = re.sub(r"\D", "", value)
        self.result = float(digits) if digits else value
class ConvertCurrency(PostProcess):
    """Convert an integer amount from *currency* to IDR via the ``kurs``
    helper; on any failure (non-numeric value, lookup error) keep *value*."""

    def __init__(self, value, currency):
        try:
            tmp = int(value)
            result = kurs(tmp, currency.upper(), "IDR")
            self.result = int(result)
        except Exception:
            # Best-effort conversion: fall back to the raw value.
            self.result = value
class InsertStringAfter(PostProcess):
    """Append a space and *string* after the stringified *value*."""

    def __init__(self, value, string):
        self.result = "{} {}".format(value, string)
class InsertStringBefore(PostProcess):
    """Prepend *string* and a space before the stringified *value*."""

    def __init__(self, value, string):
        self.result = "{} {}".format(string, value)
class MathProcess(PostProcess):
    """Apply a binary arithmetic operation to the extracted value.

    Query model: ``{'operator': one of + - / // * x, 'y': operand}``; the
    extracted value is the left operand.  The result is rounded to two
    decimal places; on any conversion/arithmetic error the original value is
    returned unchanged.
    """

    # Map symbols straight onto the stdlib operator functions (the original
    # wrapped each one in a redundant lambda).
    OPERATIONS = {
        "+": operator.add,
        "-": operator.sub,
        "/": operator.truediv,
        "//": operator.floordiv,
        "*": operator.mul,
        "x": operator.mul,
    }

    def __init__(self, query, value, *args, **kwargs):
        self.operator = query['operator']
        self.x = value
        self.y = query['y']

    @property
    def result(self):
        """Return round(x <op> y, 2), or the raw x if evaluation fails."""
        try:
            lhs = float(self.x)
            rhs = float(self.y)
            # Local name 'op' avoids shadowing the operator module.
            op = self.OPERATIONS[self.operator]
            return round(op(lhs, rhs), 2)
        except Exception:
            return self.x
class RemoveStrings(PostProcess):
    """Remove every occurrence of *character* from *value* (plain
    ``str.replace``), then strip leading spaces.

    NOTE(review): the ``re.sub``/``strip`` results computed first are
    unconditionally overwritten by the ``value.replace`` line below, so the
    case-insensitive removal never takes effect — confirm which behaviour is
    intended before cleaning this up.
    """

    def __init__(self, value, character):
        value = str(value)
        try:
            result = re.sub(character, '', value, flags=re.IGNORECASE)
        except Exception:
            result = value.strip(character)
        result = value.replace(character, "")
        result = result.lstrip(" ")
        self.result = result
# Registry mapping YAML post-process step names to their classes; used by
# PostProcess.__init__ to dispatch.
ExtractorPostProcess = {
    'extract_before': RegexExtractBefore,
    'extract_after': RegexExtractAfter,
    'raw_regex': RegexRaw,
    'extract_numbers': ExtractNumbers,
    'extract_float_numbers': ExtractFloatNumber,
    'extract_convert_int': ExtractConvertInt,
    'extract_convert_float': ExtractConvertFloat,
    'math': MathProcess,
    'convert_currency': ConvertCurrency,
    'remove_strings': RemoveStrings,
    'insert_string_after': InsertStringAfter,
    'insert_string_before': InsertStringBefore
}
class ActionsHandler(object):
    """Build and run a selenium ActionChains sequence from a query list.

    *action* is an ActionChains instance, *driver* the WebDriver, *query*
    a list of single-action dicts (an entry containing a 'run' key carries
    execution configuration instead of an action), *name* a label.
    """

    action = None
    name = 'Default'
    # How many times the chain should be repeated (set from a scalar 'run').
    repeat = 1
    query = None

    def __init__(self, action, driver, query, name="Default"):
        self.action = action
        self.driver = driver
        self.query = query
        self.name = name
        # 'run' entries configure execution rather than describe an action.
        for i in query:
            if 'run' in i:
                self.config_run(i['run'])

    def config_run(self, data):
        # A scalar 'run' value is a repetition count.
        if isinstance(data, str) or isinstance(data, int):
            self.repeat = int(data)
        elif isinstance(data, dict):
            # NOTE(review): the extracted value is never used — dict-style
            # 'run' configuration looks unfinished.
            extractor = data['extractor']
            pass

    @property
    def act(self):
        """The underlying ActionChains object."""
        return self.action

    def run(self):
        """Generate and execute all actions; failures are logged, not raised."""
        try:
            self.generate_actions(self.query)
            self.execute()
        except Exception as e:
            logging.error(str(e))

    def execute(self):
        # Run each parsed action, then clear the accumulated chain state.
        for i in self.action_chains:
            i.run()
        self.action.reset_actions()

    def generate_actions(self, data):
        """Translate each query entry (except 'run' config) into an Actions object."""
        action_chains = list()
        for i in data:
            if 'run' in i:
                pass
            else:
                query = self.parse_action(i)
                act_ = self.generate_action(query)
                action_chains.append(act_)
        self.action_chains = action_chains

    def parse_action(self, data):
        """Flatten ``{action_name: {params}}`` into ``{'action': name, **params}``.

        NOTE(review): if *data* carries several top-level keys, ``d`` is
        rebound on each iteration and only the last key survives —
        presumably entries always hold exactly one action; confirm.
        """
        d = {}
        for key, value in data.items():
            d = {'action': key}
            d_ = {}
            if value:
                for child_key, child_value in value.items():
                    d_[child_key] = child_value
            d.update(d_)
        return d

    def generate_action(self, query):
        # Wrap one parsed query in an executable Actions instance.
        action = self.action
        driver = self.driver
        act_ = Actions(action, driver, query)
        return act_
class Actions(ActionsHandler):
    """One selenium action parsed from a single query dict.

    The query's 'action' key names a method of this class; the remaining
    keys are filtered against that method's signature by parse_arguments()
    and passed as keyword arguments when run() is called.

    Bug fixes vs. the original: click_and_hold, context_click,
    double_click and release referenced an undefined name ``element``
    instead of their ``on_element`` parameter (guaranteed NameError
    whenever a target element was supplied), and the 'UP' entry of
    modifier_key() was missing its closing quote.
    """

    action = None
    query = None
    action_execute = None
    # When truthy, the target element is scrolled towards the window
    # center before the action, waiting move_to_center_delay seconds.
    move_to_center = False
    move_to_center_delay = 1
    run_count = None

    def __init__(self, action, driver, query):
        self.action = action
        self.driver = driver
        self.action_type = query.pop('action')
        if 'run' in query:
            self.run_count = query.pop('run')
        query['driver'] = driver
        query['action'] = action
        # 'move_to_window_center' accepts: bool (on/off), number (on, with
        # that delay), or a mapping with a 'delay' key. A missing or
        # malformed value disables the behaviour.
        try:
            center = query['move_to_window_center']
            if isinstance(center, bool):
                self.move_to_center = center
            elif isinstance(center, int) or isinstance(center, float):
                self.move_to_center = True
                self.move_to_center_delay = center
            elif isinstance(flatten_dictionaries(center), dict):
                node = flatten_dictionaries(center)
                self.move_to_center = True
                self.move_to_center_delay = node['delay']
        except Exception:
            self.move_to_center = False
        self.query = self.parse_arguments(self.action_type, query)

    def run(self):
        """Invoke the configured action method with its parsed arguments."""
        return getattr(self, self.action_type)(**self.query)

    def parse_arguments(self, action_type, query):
        """Project *query* onto the signature of the target action method."""
        func = getattr(self, action_type)
        signature = inspect.signature(func)
        parsed = {}
        for name, spec in signature.parameters.items():
            if spec.default is None:
                # Optional parameter (default None): tolerate a missing value.
                parsed[name] = flatten_dictionaries(query.get(name, {}))
            else:
                parsed[name] = flatten_dictionaries(query[name])
        return parsed

    def click(self, action, on_element=None):
        """Click *on_element*, or the current pointer position when None."""
        if on_element:
            on_element = self.get_element(on_element)
            if self.move_to_center:
                self._move_element_to_center(on_element)
        return action.click(on_element=on_element).perform()

    def click_and_hold(self, action, on_element=None):
        """Press and hold the left button (fixed: NameError on `element`)."""
        if on_element:
            on_element = self.get_element(on_element)
            if self.move_to_center:
                self._move_element_to_center(on_element)
        return action.click_and_hold(on_element=on_element).perform()

    def context_click(self, action, on_element=None):
        """Right-click (fixed: NameError on `element`)."""
        if on_element:
            on_element = self.get_element(on_element)
            if self.move_to_center:
                self._move_element_to_center(on_element)
        return action.context_click(on_element=on_element).perform()

    def double_click(self, action, on_element=None):
        """Double-click (fixed: NameError on `element`)."""
        if on_element:
            on_element = self.get_element(on_element)
            if self.move_to_center:
                self._move_element_to_center(on_element)
        return action.double_click(on_element=on_element).perform()

    def drag_and_drop(self, action, source, target):
        """Drag *source* onto *target*."""
        source = self.get_element(source)
        target = self.get_element(target)
        if self.move_to_center:
            self._move_element_to_center(source)
        return action.drag_and_drop(source=source, target=target).perform()

    def drag_and_drop_by_offset(self, action, source, xoffset, yoffset):
        """Drag *source* by the given pixel offsets."""
        source = self.get_element(source)
        if self.move_to_center:
            self._move_element_to_center(source)
        return action.drag_and_drop_by_offset(
            source=source, xoffset=xoffset, yoffset=yoffset).perform()

    def key_down(self, action, value, element=None):
        """Press a key; unrecognised modifier names are skipped (returns 0)."""
        translated = self.modifier_key(value)
        if element:
            element = self.get_element(element)
            if self.move_to_center:
                self._move_element_to_center(element)
        if value == translated:
            # Not a recognised modifier name: skip — preserved behaviour.
            return 0
        # NOTE(review): the untranslated symbolic name is what gets sent —
        # confirm selenium receives the intended key code here.
        return action.key_down(value=value, element=element).perform()

    def key_up(self, action, value, element=None):
        """Release a key; unrecognised modifier names are skipped (returns 0)."""
        translated = self.modifier_key(value)
        if element:
            element = self.get_element(element)
            if self.move_to_center:
                self._move_element_to_center(element)
        if translated == value:
            return 0
        return action.key_up(value=value, element=element).perform()

    def move_by_offset(self, action, xoffset, yoffset):
        return action.move_by_offset(xoffset=xoffset, yoffset=yoffset).perform()

    def move_to_element(self, action, to_element):
        to_element = self.get_element(to_element)
        if self.move_to_center:
            self._move_element_to_center(to_element)
        return action.move_to_element(to_element=to_element).perform()

    def move_to_element_with_offset(self, action, to_element, xoffset, yoffset):
        to_element = self.get_element(to_element)
        if self.move_to_center:
            self._move_element_to_center(to_element)
        return action.move_to_element_with_offset(
            xoffset=xoffset, yoffset=yoffset, to_element=to_element).perform()

    def pause(self, action, seconds):
        return action.pause(seconds=seconds).perform()

    def release(self, action, on_element=None):
        """Release the held button (fixed: NameError on `element`)."""
        if on_element:
            on_element = self.get_element(on_element)
        return action.release(on_element=on_element).perform()

    def perform(self, action):
        return action.perform()

    def reset_actions(self, action):
        # NOTE(review): returns the chain untouched instead of calling
        # action.reset_actions() — confirm whether this is intentional.
        return action

    def send_keys(self, action, value):
        """Send value['key'] to the focused element; 'modifier' type keys
        are translated through modifier_key() first."""
        if value['type'] == 'modifier':
            key_to_send = self.modifier_key(value['key'])
        else:
            key_to_send = value['key']
        return action.send_keys(key_to_send).perform()

    def send_keys_to_element(self, action, element, value):
        """Send value['key'] to a specific element."""
        if value['type'] == 'modifier':
            key_to_send = self.modifier_key(value['key'])
        else:
            key_to_send = value['key']
        element = self.get_element(element)
        if self.move_to_center:
            self._move_element_to_center(element)
        return action.send_keys_to_element(element, key_to_send).perform()

    def get_element(self, elements):
        """Resolve a {locator_name: locator_value} mapping to a WebElement.

        Each locator is awaited (3s) for presence; a timeout is only
        logged, after which find_element may still raise. When the mapping
        has several entries, the element from the last one wins.
        """
        driver = self.driver
        to_element = None
        for locator_, value in elements.items():
            delay = 3
            type_ = elementFilterTool[locator_]
            try:
                WebDriverWait(driver, delay).until(
                    EC.presence_of_element_located((type_, value)))
            except TimeoutException:
                logging.error("Loading took too much time!")
            to_element = driver.find_element(type_, value)
        return to_element

    def _move_element_to_center(self, element):
        """Scroll so *element* sits roughly a third down the window, then wait."""
        driver = self.driver
        delay = self.move_to_center_delay
        y = element.location['y']
        windowHeight = driver.execute_script("return window.innerHeight")
        centerHeightY = (y - (windowHeight / 3))
        driver.execute_script("return window.scrollTo(0,{});".format(centerHeightY))
        sleep(delay)

    def modifier_key(self, value):
        """Translate a symbolic key name to its mapped value; unknown names
        pass through unchanged.

        NOTE(review): the mapped values are textual repr-like strings
        (quote characters included) rather than the raw key-code
        characters, so selenium would receive them as literal text.
        Preserved as-is; confirm before changing.
        """
        mod_key = {'ADD': "u'\ue025'",
                   'ALT': "u'\ue00a'",
                   'ARROW_DOWN': "u'\ue015'",
                   'ARROW_LEFT': "u'\ue012'",
                   'ARROW_RIGHT': "u'\ue014'",
                   'ARROW_UP': "u'\ue013'",
                   'BACKSPACE': "u'\ue003'",
                   'BACK_SPACE': "u'\ue003'",
                   'CANCEL': "u'\ue001'",
                   'CLEAR': "u'\ue005'",
                   'COMMAND': "u'\ue03d'",
                   'CONTROL': "u'\ue009'",
                   'DECIMAL': "u'\ue028'",
                   'DELETE': "u'\ue017'",
                   'DIVIDE': "u'\ue029'",
                   'DOWN': "u'\ue015'",
                   'END': "u'\ue010'",
                   'ENTER': "u'\ue007'",
                   'EQUALS': "u'\ue019'",
                   'ESCAPE': "u'\ue00c'",
                   'F1': "u'\ue031'",
                   'F10': "u'\ue03a'",
                   'F11': "u'\ue03b'",
                   'F12': "u'\ue03c'",
                   'F2': "u'\ue032'",
                   'F3': "u'\ue033'",
                   'F4': "u'\ue034'",
                   'F5': "u'\ue035'",
                   'F6': "u'\ue036'",
                   'F7': "u'\ue037'",
                   'F8': "u'\ue038'",
                   'F9': "u'\ue039'",
                   'HELP': "u'\ue002'",
                   'HOME': "u'\ue011'",
                   'INSERT': "u'\ue016'",
                   'LEFT': "u'\ue012'",
                   'LEFT_ALT': "u'\ue00a'",
                   'LEFT_CONTROL': "u'\ue009'",
                   'LEFT_SHIFT': "u'\ue008'",
                   'META': "u'\ue03d'",
                   'MULTIPLY': "u'\ue024'",
                   'NULL': "u'\ue000'",
                   'NUMPAD0': "u'\ue01a'",
                   'NUMPAD1': "u'\ue01b'",
                   'NUMPAD2': "u'\ue01c'",
                   'NUMPAD3': "u'\ue01d'",
                   'NUMPAD4': "u'\ue01e'",
                   'NUMPAD5': "u'\ue01f'",
                   'NUMPAD6': "u'\ue020'",
                   'NUMPAD7': "u'\ue021'",
                   'NUMPAD8': "u'\ue022'",
                   'NUMPAD9': "u'\ue023'",
                   'PAGE_DOWN': "u'\ue00f'",
                   'PAGE_UP': "u'\ue00e'",
                   'PAUSE': "u'\ue00b'",
                   'RETURN': "u'\ue006'",
                   'RIGHT': "u'\ue014'",
                   'SEMICOLON': "u'\ue018'",
                   'SEPARATOR': "u'\ue026'",
                   'SHIFT': "u'\ue008'",
                   'SPACE': "u'\ue00d'",
                   'SUBTRACT': "u'\ue027'",
                   # Fixed: the original value was missing its closing quote
                   # (every sibling entry, e.g. ARROW_UP, carries one).
                   'UP': "u'\ue013'"}
        if value in mod_key:
            return mod_key[value]
        else:
            return value
class Counter(object):
    """Track progress of repeated runs: total, completed, and remaining."""

    total_run = None
    count = None
    remains = None

    def __init__(self, total):
        self.total_run = total
        # Initialize the counters so negate() works on the first call — the
        # original left `count` as None, so `self.count + 1` raised TypeError.
        self.count = 0
        self.remains = total

    def negate(self):
        """Record one completed run and recompute the remaining count."""
        self.count = self.count + 1
        self.remains = self.total_run - self.count
5e0b345e85244d00d3751d3404f05d4e2bfd639f | 269 | py | Python | bo/files/applicant/registulang/fileupload/pdf2txt/main.py | isoneday/KMS | 7d27e16af7626afd27a0980735985f5f2590f500 | [
"MIT"
] | null | null | null | bo/files/applicant/registulang/fileupload/pdf2txt/main.py | isoneday/KMS | 7d27e16af7626afd27a0980735985f5f2590f500 | [
"MIT"
] | null | null | null | bo/files/applicant/registulang/fileupload/pdf2txt/main.py | isoneday/KMS | 7d27e16af7626afd27a0980735985f5f2590f500 | [
"MIT"
] | null | null | null | from src import mining
from tkinter import filedialog
from PyQt4 import QtGui  # NOTE(review): imported but never used in this script
from src import mining

# Ask the user which directory of documents to mine.
chosen_dir = filedialog.askdirectory()  # renamed: `dir` shadowed the builtin
direcciones, nomArchivo = mining.path(chosen_dir)
cont = mining.coincidencias(direcciones, nomArchivo)
# Report each file name with its match count; zip() replaces the original
# index-based range(len(...)) loop.
for archivo, coincidencias in zip(nomArchivo, cont):
    print(archivo, coincidencias)
5e0cc7316c8aff667a7e4fcdd2c61cb61f60b410 | 380 | py | Python | src/platform/jboss/fingerprints/JBoss71Manage.py | 0x27/clusterd | 0f04a4955c61aa523274e9ae35d750f4339b1e59 | [
"MIT"
] | 539 | 2015-01-08T23:59:32.000Z | 2022-03-29T17:53:02.000Z | src/platform/jboss/fingerprints/JBoss71Manage.py | M31MOTH/clusterd | d190b2cbaa93820e928a7ce5471c661d4559fb7c | [
"MIT"
] | 21 | 2015-01-17T21:51:21.000Z | 2019-09-20T09:23:18.000Z | src/platform/jboss/fingerprints/JBoss71Manage.py | M31MOTH/clusterd | d190b2cbaa93820e928a7ce5471c661d4559fb7c | [
"MIT"
] | 192 | 2015-01-26T20:44:14.000Z | 2021-12-22T01:39:50.000Z | from src.platform.jboss.interfaces import JINTERFACES
from cprint import FingerPrint
class FPrint(FingerPrint):
    """Fingerprint for the JBoss 7.1 management (web console) interface."""

    def __init__(self):
        self.platform = "jboss"
        self.version = "7.1"
        self.title = JINTERFACES.MM
        # Static console resource probed for identification.
        self.uri = "/console/app/gwt/chrome/chrome_rtl.css"
        self.port = 9990
        # Presumably the MD5 of the expected response body at `uri` —
        # TODO confirm against the fingerprinting engine.
        self.hash = "14755bd918908c2703c57bd1a52046b6"
| 27.142857 | 59 | 0.668421 | 292 | 0.768421 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.226316 |
5e0cdea905b5b41aac70d7b34ce67692a178cd47 | 455 | py | Python | ip_system/models.py | 9dev/django-ip-system | 8d53fea89c0fcef3bdea27a893ce6a484b8b900a | [
"MIT"
] | null | null | null | ip_system/models.py | 9dev/django-ip-system | 8d53fea89c0fcef3bdea27a893ce6a484b8b900a | [
"MIT"
] | null | null | null | ip_system/models.py | 9dev/django-ip-system | 8d53fea89c0fcef3bdea27a893ce6a484b8b900a | [
"MIT"
] | null | null | null | from django.db import models
from .utils import get_ip_from_request
class Ip(models.Model):
    """A single IP address observed by the application."""

    # Unique + indexed: lookups by address are the primary access path.
    address = models.GenericIPAddressField(unique=True, db_index=True)

    @classmethod
    def get_or_create(cls, request):
        """Return (creating if needed) the Ip row for *request*'s client IP.

        Returns None when no usable address can be extracted from the request.
        """
        raw_ip = get_ip_from_request(request)
        if not raw_ip:
            return None
        obj, _ = cls.objects.get_or_create(address=raw_ip)
        return obj

    def __str__(self):
        # str() instead of the original explicit address.__str__() call —
        # same result, idiomatic form.
        return str(self.address)
| 22.75 | 70 | 0.679121 | 383 | 0.841758 | 0 | 0 | 221 | 0.485714 | 0 | 0 | 0 | 0 |
5e0eb2ef8cc5a92c789db67d4485bba124df7161 | 1,640 | py | Python | qs_backend/qs_backend/backend.py | Praneesh/quickstocks | 2ad4f985b7cc11721209cc81c36937e9cf25fb60 | [
"MIT"
] | 2 | 2016-12-28T18:08:23.000Z | 2017-04-01T18:09:55.000Z | qs_backend/qs_backend/backend.py | Praneesh/quickstocks | 2ad4f985b7cc11721209cc81c36937e9cf25fb60 | [
"MIT"
] | null | null | null | qs_backend/qs_backend/backend.py | Praneesh/quickstocks | 2ad4f985b7cc11721209cc81c36937e9cf25fb60 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# __author__ = "Praneesh Kataru"
# __credits__ = []
# __version__ = "0.2.1"
# __maintainer__ = "Praneesh Kataru"
# __email__ = "pranuvitmsse05@gmail.com"
# __status__ = "Prototype"
#
# Responsible for starting the required number of processes and threads
import threading
import time
from qs_backend.workers.worker_fetch_stock import StockWorker
from qs_backend.publisher.publish_stock import PublishStock
from qs_backend.dal.user_stock_pref_dal import UserStockPrefDAL
class Backend:
    """Continuously spawn fetch threads for every user-selected stock."""

    def __init__(self):
        pass

    def start_stock_tickers(self):
        # Fetch all the stocks that users have chosen.
        while True:
            user_stock_pref_dal_obj = UserStockPrefDAL()
            # NOTE(review): the first tuple element looks like an error slot
            # from the DAL call, but it is never inspected here — confirm.
            stock_exception, available_stocks = user_stock_pref_dal_obj.get_all_stock_preferences()
            for stock in available_stocks:
                stock_key = stock
                # Start FetchStock Threads
                stock_worker_obj = StockWorker()
                # Daemon threads: they die with the main process.
                ft_stock_thread = threading.Thread(target=stock_worker_obj.fetch_stock_price, args=(stock_key,))
                ft_stock_thread.daemon = True
                ft_stock_thread.start()
            # NOTE(review): a fresh thread is started for the same stocks on
            # every 60s pass — confirm the workers are expected to exit.
            time.sleep(60)
# Script entry point: run the stock tickers in a background thread, then
# hand the main thread over to the (blocking) publisher.
if __name__ == '__main__':
    backend_process = Backend()
    # Start all the Stock Worker Threads
    stock_ticker_thread = threading.Thread(target=backend_process.start_stock_tickers)
    stock_ticker_thread.start()
    # Give the ticker thread a head start before publishing begins.
    time.sleep(5)
    # Wait for sometime, before you start the publisher.
    # The below makes a blocking call !
    pub_stock = PublishStock()
    pub_stock.start_publishing()
| 30.37037 | 112 | 0.690854 | 714 | 0.435366 | 0 | 0 | 0 | 0 | 0 | 0 | 496 | 0.302439 |
5e0ee7490d2f9d87de60db0d3047b19a4411a85c | 1,267 | py | Python | binary_tree/tests/m_create_from_pre_in_test.py | dhrubach/python-code-recipes | 14356c6adb1946417482eaaf6f42dde4b8351d2f | [
"MIT"
] | null | null | null | binary_tree/tests/m_create_from_pre_in_test.py | dhrubach/python-code-recipes | 14356c6adb1946417482eaaf6f42dde4b8351d2f | [
"MIT"
] | null | null | null | binary_tree/tests/m_create_from_pre_in_test.py | dhrubach/python-code-recipes | 14356c6adb1946417482eaaf6f42dde4b8351d2f | [
"MIT"
] | null | null | null | from binary_tree.m_create_from_pre_in import BinaryTree
class TestBinaryTree:
    """Tests for building a binary tree from preorder + inorder traversals."""

    def test_lc_data_1(self):
        tree = BinaryTree()
        # The plain and the optimized builders must yield the same tree.
        for build in (tree.buildFromPreInOrder, tree.buildFromPreInOrderOptimized):
            root = build(preorder=[3, 9, 20, 15, 7], inorder=[9, 3, 15, 20, 7])
            assert root.val == 3
            assert root.left.val == 9
            assert root.right.left.val == 15

    def test_lc_data_2(self):
        tree = BinaryTree()
        root = tree.buildFromPreInOrder(preorder=[1, 2, 3], inorder=[1, 3, 2])
        assert root.val == 1
        assert root.right.val == 2
        assert root.right.left.val == 3

    def test_lc_data_3(self):
        tree = BinaryTree()
        root = tree.buildFromPreInOrder(
            preorder=[3, 9, 4, 2, 1, 5, 20, 15, 7],
            inorder=[2, 4, 1, 9, 5, 3, 15, 20, 7])
        assert root.val == 3
        assert root.right.val == 20
        assert root.left.left.right.val == 1
| 29.465116 | 81 | 0.573796 | 1,208 | 0.953433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
5e0f2b1bb17bd2fdf8410f34a353e67db6f461bb | 1,111 | py | Python | 3/part_1.py | szabolcs-dekany/advent-of-code-2021 | ff8d5fb864ce81c2d18eeb5e50446729df64b3b4 | [
"MIT"
] | null | null | null | 3/part_1.py | szabolcs-dekany/advent-of-code-2021 | ff8d5fb864ce81c2d18eeb5e50446729df64b3b4 | [
"MIT"
] | null | null | null | 3/part_1.py | szabolcs-dekany/advent-of-code-2021 | ff8d5fb864ce81c2d18eeb5e50446729df64b3b4 | [
"MIT"
] | null | null | null | from collections import Counter
from utils import read_bits
def run_part_one():
    """Advent of Code 2021, day 3 part 1: gamma/epsilon rates from the
    per-column most/least common bits, and their product.

    Reads the puzzle input from 'input.txt' via read_bits and prints the
    binary rates, their decimal values, and the power consumption.
    """
    print("--------------------------------")
    print("Advent of Code 2021 - 3 - Part 1")
    bits = read_bits('input.txt')
    gamma_rate = ""
    epsilon_rate = ""
    for column in range(len(bits[0])):
        most_bit, least_bit = get_relevant_bits_for_row(bits, column)
        gamma_rate += most_bit
        epsilon_rate += least_bit
    # Parse the binary strings once; the original re-parsed each of them
    # on every print line.
    gamma = int(gamma_rate, 2)
    epsilon = int(epsilon_rate, 2)
    print('Binary - Gamma rate: {0}, Epsilon rate: {1}'.format(gamma_rate, epsilon_rate))
    print('Decimal - Gamma rate: {0}, Epsilon rate: {1}'.format(gamma, epsilon))
    print('Power consumption: {0}'.format(gamma * epsilon))
def get_relevant_bits_for_row(bits, row):
    """Return (most_common_bit, least_common_bit) for column *row*.

    *bits* is a sequence of rows of '0'/'1' characters (lists or strings).
    Ties go to '1', matching the original strict `zeros > ones` comparison.

    Fixes vs. the original: counting via list.count instead of
    dict(Counter(...)) avoids the KeyError raised when a column contained
    only one bit value; the malformed `[[str]]` annotation was dropped.
    """
    column = [line[row] for line in bits]
    zeros = column.count('0')
    ones = column.count('1')
    most_significant_bit = '0' if zeros > ones else '1'
    least_significant_bit = '0' if most_significant_bit == '1' else '1'
    return most_significant_bit, least_significant_bit
| 35.83871 | 106 | 0.648065 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.19712 |