Dataset schema (29 columns per row):

| column | type | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 – 1.02M |
| ext | string | 8 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 4 – 209 |
| max_stars_repo_name | string | length 5 – 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64, nullable | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 4 – 209 |
| max_issues_repo_name | string | length 5 – 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64, nullable | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 4 – 209 |
| max_forks_repo_name | string | length 5 – 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64, nullable | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 4 – 1.02M |
| avg_line_length | float64 | 1.07 – 66.1k |
| max_line_length | int64 | 4 – 266k |
| alphanum_fraction | float64 | 0.01 – 1 |
| hexsha: 5cdf5ea0cb38ecd3281d9d8456f1ae7471e2176b | size: 1,546 | ext: py | lang: Python |
| repo_path: demo_setup/utils/InterfaceStats.py | repo: tum-lkn/appaware | head_hexsha: 6335dbca67e3d73a431c1f0433fc3819c45d1b2b | licenses: ["MIT"] | stars: null | issues: null | forks: null |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import logging
import threading
import time
import queue
import socket
from netstatspy.readers.ip import IpStatsReader
IFACE_STATS_MESSAGE_TYPE = 'iface_stats'
class InterfaceStats(threading.Thread):
def __init__(self, iface, comm_queue=None, interval=1):
super(InterfaceStats, self).__init__()
self._logger = logging.getLogger("AppAware." + self.__class__.__name__)
self._iface = iface
self._interval = interval
self._comm_queue = comm_queue
self._running = False
# precompile regex
IpStatsReader.compile_regex()
def run(self):
self._logger.debug("InterfaceStats.run()")
self._running = True
next_call = time.perf_counter()
while self._running:
self._logger.debug("wrote interface stats")
data = IpStatsReader.get_interface_stats(self._iface)
data['host_name'] = socket.gethostname()
msg = {'type': IFACE_STATS_MESSAGE_TYPE,
'data': data}
if self._comm_queue is None:
self._logger.error("No target msg queue available! Msg was: %s" )
else:
self._comm_queue.put(msg)
next_call += self._interval
time_diff = next_call - time.perf_counter()
if time_diff >= 0:
time.sleep(time_diff)
def stop(self):
self._logger.debug("InterfaceStats.stop()")
self._running = False
self.join()
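A minimal usage sketch for the collector above (not part of the original file; it assumes the project's netstatspy reader is importable and that an interface named eth0 exists):

if __name__ == '__main__':
    stats_queue = queue.Queue()
    collector = InterfaceStats("eth0", comm_queue=stats_queue, interval=1)
    collector.start()
    msg = stats_queue.get(timeout=5)  # {'type': 'iface_stats', 'data': {...}}
    collector.stop()                  # stops the loop and joins the thread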
| avg_line_length: 26.655172 | max_line_length: 81 | alphanum_fraction: 0.615783 |

| hexsha: c9b3a9298c9395842d02032a540d848b98c8fd20 | size: 6,438 | ext: py | lang: Python |
| repo_path: flare/kindling/buffers.py | repo: jfpettit/reinforcement-learning | head_hexsha: 20ffac1cbf0f00ec9f0e0cca1fddab6eeba82081 | licenses: ["MIT"] |
| stars: 2 (2020-04-22T03:17:34.000Z to 2021-02-02T14:51:11.000Z) | issues: 3 (2020-04-22T23:17:58.000Z to 2021-04-30T21:18:23.000Z) | forks: null |
import numpy as np
import scipy.signal  # lfilter is used in _discount_cumsum; a bare "import scipy" does not load it
from typing import Optional, Any, Union
from flare.kindling.mpi_tools import mpi_statistics_scalar
import torch
class PGBuffer:
"""
A buffer for storing trajectories experienced by an agent interacting
with the environment, and using Generalized Advantage Estimation (GAE-Lambda)
for calculating the advantages of state-action pairs.
"""
def __init__(
self,
obs_dim: Union[tuple, int],
act_dim: Union[tuple, int],
size: int,
gamma: Optional[float] = 0.99,
lam: Optional[float] = 0.95,
):
self.obs_buf = np.zeros(self._combined_shape(size, obs_dim), dtype=np.float32)
self.act_buf = np.zeros(self._combined_shape(size, act_dim), dtype=np.float32)
self.adv_buf = np.zeros(size, dtype=np.float32)
self.rew_buf = np.zeros(size, dtype=np.float32)
self.ret_buf = np.zeros(size, dtype=np.float32)
self.val_buf = np.zeros(size, dtype=np.float32)
self.logp_buf = np.zeros(size, dtype=np.float32)
self.gamma, self.lam = gamma, lam
self.ptr, self.path_start_idx, self.max_size = 0, 0, size
def store(
self,
obs: np.array,
act: np.array,
rew: Union[int, float, np.array],
val: Union[int, float, np.array],
logp: Union[float, np.array],
):
"""
Append one timestep of agent-environment interaction to the buffer.
"""
assert self.ptr < self.max_size # buffer has to have room so you can store
self.obs_buf[self.ptr] = obs
self.act_buf[self.ptr] = act
self.rew_buf[self.ptr] = rew
self.val_buf[self.ptr] = val
self.logp_buf[self.ptr] = logp
self.ptr += 1
def finish_path(self, last_val: Optional[Union[int, float, np.array]] = 0):
"""
Call this at the end of a trajectory, or when one gets cut off
by an epoch ending. This looks back in the buffer to where the
trajectory started, and uses rewards and value estimates from
the whole trajectory to compute advantage estimates with GAE-Lambda,
as well as compute the rewards-to-go for each state, to use as
the targets for the value function.
The "last_val" argument should be 0 if the trajectory ended
because the agent reached a terminal state (died), and otherwise
should be V(s_T), the value function estimated for the last state.
This allows us to bootstrap the reward-to-go calculation to account
for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
"""
path_slice = slice(self.path_start_idx, self.ptr)
rews = np.append(self.rew_buf[path_slice], last_val)
vals = np.append(self.val_buf[path_slice], last_val)
# the next two lines implement GAE-Lambda advantage calculation
deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
self.adv_buf[path_slice] = self._discount_cumsum(deltas, self.gamma * self.lam)
# the next line computes rewards-to-go, to be targets for the value function
self.ret_buf[path_slice] = self._discount_cumsum(rews, self.gamma)[:-1]
self.path_start_idx = self.ptr
def get(self):
"""
Call this at the end of an epoch to get all of the data from
the buffer, with advantages appropriately normalized (shifted to have
mean zero and std one). Also, resets some pointers in the buffer.
"""
assert self.ptr == self.max_size # buffer has to be full before you can get
self.ptr, self.path_start_idx = 0, 0
# the next two lines implement the advantage normalization trick
adv_mean, adv_std = mpi_statistics_scalar(self.adv_buf)
# adv_mean, adv_std = np.mean(self.adv_buf), np.std(self.adv_buf)
self.adv_buf = (self.adv_buf - adv_mean) / adv_std
return [self.obs_buf, self.act_buf, self.adv_buf, self.ret_buf, self.logp_buf]
def _combined_shape(
self, length: Union[int, np.array], shape: Optional[Union[int, tuple]] = None
):
if shape is None:
return (length,)
return (length, shape) if np.isscalar(shape) else (length, *shape)
def _discount_cumsum(self, x: np.array, discount: float):
"""
magic from rllab for computing discounted cumulative sums of vectors.
input:
vector x,
[x0,
x1,
x2]
output:
[x0 + discount * x1 + discount^2 * x2,
x1 + discount * x2,
x2]
"""
return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
class ReplayBuffer(PGBuffer):
"""
A replay buffer for off-policy RL agents.
"""
def __init__(
self, obs_dim: Union[tuple, int], act_dim: Union[tuple, int], size: int
):
self.obs1_buf = np.zeros(self._combined_shape(size, obs_dim), dtype=np.float32)
self.obs2_buf = np.zeros(self._combined_shape(size, obs_dim), dtype=np.float32)
self.act_buf = np.zeros(self._combined_shape(size, act_dim), dtype=np.float32)
self.rew_buf = np.zeros(size, dtype=np.float32)
self.done_buf = np.zeros(size, dtype=np.float32)
self.ptr, self.size, self.max_size = 0, 0, size
def store(
self,
obs: np.array,
act: Union[float, int, np.array],
rew: Union[float, int],
next_obs: np.array,
done: bool,
):
"""
Append one timestep of agent-environment interaction to the buffer.
"""
self.obs1_buf[self.ptr] = obs
self.obs2_buf[self.ptr] = next_obs
self.act_buf[self.ptr] = act
self.rew_buf[self.ptr] = rew
self.done_buf[self.ptr] = done
self.ptr = (self.ptr + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
def sample_batch(self, batch_size=32):
idxs = np.random.randint(0, self.size, size=batch_size)
batch = dict(
obs=self.obs1_buf[idxs],
obs2=self.obs2_buf[idxs],
act=self.act_buf[idxs],
rew=self.rew_buf[idxs],
done=self.done_buf[idxs],
)
return tuple(torch.as_tensor(v, dtype=torch.float32) for _, v in batch.items())
def get(self):
return [self.obs1_buf, self.obs2_buf, self.act_buf, self.rew_buf, self.done_buf]
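A small standalone sanity check (illustrative values, not part of the original module; it reuses the module's own numpy and scipy.signal imports) showing that the lfilter call in _discount_cumsum matches a plain reverse accumulation out[t] = x[t] + discount * out[t+1]:

if __name__ == "__main__":
    x = np.array([1.0, 2.0, 3.0])
    discount = 0.5
    via_lfilter = scipy.signal.lfilter([1], [1, -discount], x[::-1], axis=0)[::-1]
    manual = np.zeros_like(x)
    running = 0.0
    for t in reversed(range(len(x))):
        running = x[t] + discount * running  # accumulate from the end of the trajectory
        manual[t] = running
    assert np.allclose(via_lfilter, manual)  # both give [2.75, 3.5, 3.0]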
| avg_line_length: 39.256098 | max_line_length: 88 | alphanum_fraction: 0.62333 |

| hexsha: 2325ea2f718a0ac6b17f95c1c61f47189cdb8b85 | size: 867 | ext: py | lang: Python |
| repo_path: ex107/moeda.py | repo: brunocorbetta/exerciciocursoemvideo | head_hexsha: b6ef52f3426f70f211ad70f233f0222c703a2c41 | licenses: ["MIT"] | stars: null | issues: null | forks: null |
def aumentar(preço=0, taxa=0, formato=False):
res = preço + (preço * taxa / 100)
return res if formato is False else moeda(res)
def diminuir(preço=0, taxa=0, formato=False):
res = preço - (preço * taxa / 100)
return res if formato is False else moeda(res)
def dobro(preço=0, formato=False):
res = preço * 2
return res if not formato else moeda(res)
def metade(preço=0, formato=False):
res = preço / 2
return res if not formato else moeda(res)
def moeda(preço=0, moeda='R$'):
return f'{moeda}{preço: >.2f}'.replace('.', ',')
def resumo(preço=0, taxaa=10, taxar=5):
print('-' * 30)
print('RESUMO DO VALOR'.center(30))
print('-' * 30)
print(f'Preço analisado: \t{moeda(preço)}')
print(f'Dobro do preço: \t{dobro(preço, True)}')
print(f'Metade do preço: \t{metade(preço, True)}')
print('-' * 30)
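Illustrative calls (assumed values, not part of the exercise file), guarded so they only run when the module is executed directly:

if __name__ == '__main__':
    print(moeda(1500.5))            # R$1500,50
    print(aumentar(100, 10, True))  # R$110,00
    print(metade(25, True))         # R$12,50
    resumo(127.5)                   # prints the formatted summary box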
| avg_line_length: 26.272727 | max_line_length: 54 | alphanum_fraction: 0.621684 |

| hexsha: d7b0c76ebf0a8d4d8ad73cd66a5a471bc6feb313 | size: 146 | ext: py | lang: Python |
| repo_path: TestGenerate.py | repo: nogw/lisp-to-python | head_hexsha: b77d2c9a5bbdbbc0aa673442f098af7f4b74ffe9 | licenses: ["BSD-3-Clause"] | stars: null | issues: null | forks: null |
def memhelper(pred, op):
    def scheme_lambda_x000(acc, next):
        if not acc and pred(op, next):
            return next
        else:
            return acc
    return scheme_lambda_x000  # the generated helper presumably returns the inner lambda
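A hypothetical usage sketch (find_member is an illustrative wrapper, not part of the generated file): with the inner lambda returned, memhelper acts as the folding step of a Scheme-style member search.

from functools import reduce

def find_member(pred, op, items):
    # Fold the generated helper over the items, starting from an empty accumulator.
    return reduce(memhelper(pred, op), items, None)

if __name__ == '__main__':
    print(find_member(lambda op, x: x == op, 3, [1, 2, 4, 3, 5]))  # -> 3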
| avg_line_length: 24.333333 | max_line_length: 37 | alphanum_fraction: 0.616438 |

| hexsha: ea0929e7d44389a46d6e1a1b54f5a31dec51eff8 | size: 322 | ext: py | lang: Python |
| repo_path: octave_nick.py | repo: brentru/NYCR-CPY | head_hexsha: 1b79b863397288977e17efca97da64da5ce6f17f | licenses: ["MIT"] | stars: null | issues: null | forks: null |
import time
from adafruit_circuitplayground.express import cpx
a = 440
b_flat = 466
b = 494
c = 523
notes = [a, b_flat, b, c]
note = a
while True:
for note_fu in range(0, len(notes)):
print("Playing ", note_fu)
note = note * pow(2, .083333333)
cpx.play_tone(note, 1)
time.sleep(0.5)
| avg_line_length: 17.888889 | max_line_length: 50 | alphanum_fraction: 0.618012 |

| hexsha: 02df97733aad8611030f0f3a6a8a4b8915351e20 | size: 10,233 | ext: py | lang: Python | repo_path: aiida/backends/tests/orm/test_groups.py | licenses: ["BSD-2-Clause"] |
| max_stars: repo lekah/aiida_core @ 54b22a221657b47044483dc9d4f51788ce8ab6b2, 1 star (2019-07-31T04:08:13.000Z) |
| max_issues / max_forks: repo PercivalN/aiida-core @ b215ed5a7ce9342bb7f671b67e95c1f474cc5940, counts null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Test for the Group ORM class."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from aiida import orm
from aiida.backends.testbase import AiidaTestCase
from aiida.common import exceptions
class TestGroups(AiidaTestCase):
"""Test backend entities and their collections"""
def setUp(self):
"""Remove all existing Groups."""
for group in orm.Group.objects.all():
orm.Group.objects.delete(group.id)
def test_count(self):
"""Test the `count` method."""
node_00 = orm.Data().store()
node_01 = orm.Data().store()
nodes = [node_00, node_01]
group = orm.Group(label='label', description='description').store()
group.add_nodes(nodes)
self.assertEqual(group.count(), len(nodes))
def test_creation(self):
"""Test the creation of Groups."""
node = orm.Data()
stored_node = orm.Data().store()
group = orm.Group(label='testgroup')
with self.assertRaises(exceptions.ModificationNotAllowed):
# group unstored
group.add_nodes(node)
with self.assertRaises(exceptions.ModificationNotAllowed):
# group unstored
group.add_nodes(stored_node)
group.store()
with self.assertRaises(ValueError):
# node unstored
group.add_nodes(node)
group.add_nodes(stored_node)
nodes = list(group.nodes)
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].pk, stored_node.pk)
def test_node_iterator(self):
"""Test the indexing and slicing functionality of the node iterator."""
node_00 = orm.Data().store()
node_01 = orm.Data().store()
node_02 = orm.Data().store()
node_03 = orm.Data().store()
nodes = [node_00, node_01, node_02, node_03]
group = orm.Group(label='label', description='description').store()
group.add_nodes(nodes)
# Indexing
node_indexed = group.nodes[0]
self.assertTrue(isinstance(node_indexed, orm.Data))
self.assertIn(node_indexed.uuid, [node.uuid for node in nodes])
# Slicing
nodes_sliced = group.nodes[1:3]
self.assertTrue(isinstance(nodes_sliced, list))
self.assertEqual(len(nodes_sliced), 2)
self.assertTrue(all([isinstance(node, orm.Data) for node in nodes_sliced]))
self.assertTrue(all([node.uuid in set(node.uuid for node in nodes) for node in nodes_sliced]))
def test_description(self):
"""Test the update of the description both for stored and unstored groups."""
node = orm.Data().store()
group_01 = orm.Group(label='testgroupdescription1', description='group_01').store()
group_01.add_nodes(node)
group_02 = orm.Group(label='testgroupdescription2', description='group_02')
# Preliminary checks
self.assertTrue(group_01.is_stored)
self.assertFalse(group_02.is_stored)
self.assertEqual(group_01.description, 'group_01')
self.assertEqual(group_02.description, 'group_02')
# Change
group_01.description = 'new1'
group_02.description = 'new2'
# Test that the groups remained in their proper stored state and that
# the description was updated
self.assertTrue(group_01.is_stored)
self.assertFalse(group_02.is_stored)
self.assertEqual(group_01.description, 'new1')
self.assertEqual(group_02.description, 'new2')
# Store group_02 and check that the description is OK
group_02.store()
self.assertTrue(group_02.is_stored)
self.assertEqual(group_02.description, 'new2')
def test_add_nodes(self):
"""Test different ways of adding nodes."""
node_01 = orm.Data().store()
node_02 = orm.Data().store()
node_03 = orm.Data().store()
nodes = [node_01, node_02, node_03]
group = orm.Group(label='test_adding_nodes').store()
# Single node
group.add_nodes(node_01)
# List of nodes
group.add_nodes([node_02, node_03])
# Check
self.assertEqual(set(_.pk for _ in nodes), set(_.pk for _ in group.nodes))
# Try to add a node that is already present: there should be no problem
group.add_nodes(node_01)
self.assertEqual(set(_.pk for _ in nodes), set(_.pk for _ in group.nodes))
def test_remove_nodes(self):
"""Test node removal."""
node_01 = orm.Data().store()
node_02 = orm.Data().store()
node_03 = orm.Data().store()
node_04 = orm.Data().store()
nodes = [node_01, node_02, node_03]
group = orm.Group(label='test_remove_nodes').store()
# Add initial nodes
group.add_nodes(nodes)
self.assertEqual(set(_.pk for _ in nodes), set(_.pk for _ in group.nodes))
# Remove a node that is not in the group: nothing should happen
group.remove_nodes(node_04)
self.assertEqual(set(_.pk for _ in nodes), set(_.pk for _ in group.nodes))
# Remove one orm.Node
nodes.remove(node_03)
group.remove_nodes(node_03)
self.assertEqual(set(_.pk for _ in nodes), set(_.pk for _ in group.nodes))
# Remove a list of Nodes and check
nodes.remove(node_01)
nodes.remove(node_02)
group.remove_nodes([node_01, node_02])
self.assertEqual(set(_.pk for _ in nodes), set(_.pk for _ in group.nodes))
def test_clear(self):
"""Test the `clear` method to remove all nodes."""
node_01 = orm.Data().store()
node_02 = orm.Data().store()
node_03 = orm.Data().store()
nodes = [node_01, node_02, node_03]
group = orm.Group(label='test_clear_nodes').store()
# Add initial nodes
group.add_nodes(nodes)
self.assertEqual(set(_.pk for _ in nodes), set(_.pk for _ in group.nodes))
group.clear()
self.assertEqual(list(group.nodes), [])
def test_name_desc(self):
"""Test Group description."""
group = orm.Group(label='testgroup2', description='some desc')
self.assertEqual(group.label, 'testgroup2')
self.assertEqual(group.description, 'some desc')
self.assertTrue(group.is_user_defined)
group.store()
# Same checks after storing
self.assertEqual(group.label, 'testgroup2')
self.assertTrue(group.is_user_defined)
self.assertEqual(group.description, 'some desc')
# To avoid to find it in further tests
orm.Group.objects.delete(group.pk)
def test_delete(self):
"""Test Group deletion."""
node = orm.Data().store()
group = orm.Group(label='testgroup3', description='some other desc').store()
group_copy = orm.Group.get(label='testgroup3')
self.assertEqual(group.uuid, group_copy.uuid)
group.add_nodes(node)
self.assertEqual(len(group.nodes), 1)
orm.Group.objects.delete(group.pk)
with self.assertRaises(exceptions.NotExistent):
# The group does not exist anymore
orm.Group.get(label='testgroup3')
def test_rename(self):
"""Test the renaming of a Group."""
label_original = 'groupie'
label_changed = 'nogroupie'
group = orm.Group(label=label_original, description='I will be renamed')
# Check name changes work before storing
self.assertEqual(group.label, label_original)
group.label = label_changed
self.assertEqual(group.label, label_changed)
# Revert the name to its original and store it
group.label = label_original
group.store()
# Check name changes work after storing
self.assertEqual(group.label, label_original)
group.label = label_changed
self.assertEqual(group.label, label_changed)
def test_rename_existing(self):
"""Test that renaming to an already existing name is not permitted."""
label_group_a = 'group_a'
label_group_b = 'group_b'
orm.Group(label=label_group_a, description='I am the Original G').store()
# Before storing everything should be fine
group_b = orm.Group(label=label_group_a, description='They will try to rename me')
# Storing for duplicate group name should trigger UniquenessError
with self.assertRaises(exceptions.IntegrityError):
group_b.store()
# Reverting to unique name before storing
group_b.label = label_group_b
group_b.store()
# After storing name change to existing should raise
with self.assertRaises(exceptions.IntegrityError):
group_b.label = label_group_a
def test_group_uuid_hashing_for_querybuidler(self):
"""QueryBuilder results should be reusable and shouldn't brake hashing."""
group = orm.Group(label='test_group')
group.store()
# Search for the UUID of the stored group
builder = orm.QueryBuilder()
builder.append(orm.Group, project=['uuid'], filters={'label': {'==': 'test_group'}})
[uuid] = builder.first()
        # Look up the group with the previously returned UUID
builder = orm.QueryBuilder()
builder.append(orm.Group, project=['id'], filters={'uuid': {'==': uuid}})
# Check that the query doesn't fail
builder.all()
# And that the results are correct
self.assertEqual(builder.count(), 1)
self.assertEqual(builder.first()[0], group.id)
| avg_line_length: 36.677419 | max_line_length: 102 | alphanum_fraction: 0.620737 |

| hexsha: b58a771c82efea9864a9a3cb742c970e39810cda | size: 276 | ext: py | lang: Python |
| repo_path: partner_ngos/meal/doctype/complain_mechanism_details/complain_mechanism_details.py | repo: AkramMutaher/partner_ngos | head_hexsha: 4a345fb6989ff5a21db7fca07aa4e5174dca8f59 | licenses: ["MIT"] |
| stars: 1 (2021-06-03T17:14:08.000Z) | issues: null | forks: 1 (2021-10-09T16:20:09.000Z) |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Akram Mutaher and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class ComplainMechanismDetails(Document):
pass
| avg_line_length: 25.090909 | max_line_length: 52 | alphanum_fraction: 0.786232 |

| hexsha: afaaa35d5ec57a352e722c9e322d1f8f8f7cc13d | size: 14,164 | ext: py | lang: Python |
| repo_path: pypy/objspace/std/test/test_obj.py | repo: pymtl/pypy-pymtl3 | head_hexsha: d2f66f87686e48aeb1eecabeaa3de1381a149f2c | licenses: ["Apache-2.0", "OpenSSL"] |
| stars: 1 (2021-06-02T23:02:09.000Z) | issues: 1 (2021-03-30T18:08:41.000Z) | forks: null |
from __future__ import with_statement
from pypy.conftest import option
class AppTestObject:
spaceconfig = {'usemodules': ['itertools']}
def setup_class(cls):
from pypy.interpreter import gateway
import sys
space = cls.space
cls.w_cpython_behavior = space.wrap(not option.runappdirect)
cls.w_cpython_version = space.wrap(tuple(sys.version_info))
cls.w_appdirect = space.wrap(option.runappdirect)
cls.w_cpython_apptest = space.wrap(option.runappdirect and not hasattr(sys, 'pypy_translation_info'))
def w_unwrap_wrap_unicode(space, w_obj):
return space.newutf8(space.utf8_w(w_obj), w_obj._length)
cls.w_unwrap_wrap_unicode = space.wrap(gateway.interp2app(w_unwrap_wrap_unicode))
def w_unwrap_wrap_bytes(space, w_obj):
return space.newbytes(space.bytes_w(w_obj))
cls.w_unwrap_wrap_bytes = space.wrap(gateway.interp2app(w_unwrap_wrap_bytes))
def test_hash_builtin(self):
if not self.cpython_behavior:
skip("on pypy-c id == hash is not guaranteed")
if self.cpython_version >= (2, 7):
skip("on CPython >= 2.7, id != hash")
import sys
o = object()
assert (hash(o) & sys.maxsize) == (id(o) & sys.maxsize)
def test_hash_method(self):
o = object()
assert hash(o) == o.__hash__()
def test_hash_list(self):
l = list(range(5))
raises(TypeError, hash, l)
def test_no_getnewargs(self):
o = object()
assert not hasattr(o, '__getnewargs__')
def test_hash_subclass(self):
import sys
class X(object):
pass
x = X()
if self.cpython_behavior and self.cpython_version < (2, 7):
assert (hash(x) & sys.maxsize) == (id(x) & sys.maxsize)
assert hash(x) == object.__hash__(x)
def test_reduce_recursion_bug(self):
class X(object):
def __reduce__(self):
return object.__reduce__(self) + (':-)',)
s = X().__reduce__()
assert s[-1] == ':-)'
def test_getnewargs_ex(self):
class NamedInt(int):
def __new__(cls, name, **kwargs):
if len(kwargs) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, kwargs['value'])
self._name = name
return self
def __getnewargs_ex__(self):
return (self._name,), dict(value=int(self))
import copyreg
for protocol in [2, 3, 4]:
assert NamedInt("Name", value=42).__reduce_ex__(protocol) == (
copyreg.__newobj_ex__,
(NamedInt, ('Name',), dict(value=42)),
dict(_name='Name'), None, None)
def test_reduce_ex_does_getattr(self):
seen = []
class X:
def __getattribute__(self, name):
seen.append(name)
return object.__getattribute__(self, name)
X().__reduce_ex__(2)
# it is the case at least on CPython 3.5.2, like PyPy:
assert '__reduce__' in seen
# but these methods, which are also called, are not looked up
# with getattr:
assert '__getnewargs__' not in seen
assert '__getnewargs_ex__' not in seen
def test_reduce_ex_errors(self):
# cf. lib-python/3/test/test_descr.py::PicklingTests.test_reduce()
args = (-101, "spam")
kwargs = {'bacon': -201, 'fish': -301}
class C2:
def __getnewargs__(self):
return "bad args"
excinfo = raises(TypeError, C2().__reduce_ex__, 4)
assert str(excinfo.value) == \
"__getnewargs__ should return a tuple, not 'str'"
class C4:
def __getnewargs_ex__(self):
return (args, "bad dict")
excinfo = raises(TypeError, C4().__reduce_ex__, 4)
assert str(excinfo.value) == ("second item of the tuple "
"returned by __getnewargs_ex__ must be a dict, not 'str'")
class C5:
def __getnewargs_ex__(self):
return ("bad tuple", kwargs)
excinfo = raises(TypeError, C5().__reduce_ex__, 4)
assert str(excinfo.value) == ("first item of the tuple "
"returned by __getnewargs_ex__ must be a tuple, not 'str'")
class C6:
def __getnewargs_ex__(self):
return ()
excinfo = raises(ValueError, C6().__reduce_ex__, 4)
assert str(excinfo.value) == \
"__getnewargs_ex__ should return a tuple of length 2, not 0"
class C7:
def __getnewargs_ex__(self):
return "bad args"
excinfo = raises(TypeError, C7().__reduce_ex__, 4)
assert str(excinfo.value) == \
"__getnewargs_ex__ should return a tuple, not 'str'"
def test_reduce_state_empty_dict(self):
class X(object):
pass
assert X().__reduce_ex__(2)[2] is None
def test_reduce_arguments(self):
# since python3.7 object.__reduce__ doesn't take an argument anymore
# (used to be proto), and __reduce_ex__ requires one
with raises(TypeError):
object().__reduce__(0)
with raises(TypeError):
object().__reduce_ex__()
def test_default_format(self):
class x(object):
def __str__(self):
return "Pickle"
res = format(x())
assert res == "Pickle"
assert isinstance(res, str)
def test_format(self):
class B:
pass
excinfo = raises(TypeError, format, B(), 's')
assert 'B.__format__' in str(excinfo.value)
def test_subclasshook(self):
class x(object):
pass
assert x().__subclasshook__(object()) is NotImplemented
assert x.__subclasshook__(object()) is NotImplemented
def test_object_init(self):
import warnings
class A(object):
pass
raises(TypeError, A().__init__, 3)
raises(TypeError, A().__init__, a=3)
class B(object):
def __new__(cls):
return super(B, cls).__new__(cls)
def __init__(self):
super(B, self).__init__(a=3)
raises(TypeError, B)
def test_object_init_not_really_overridden(self):
class A(object):
def __new__(cls, value):
return object.__new__(cls)
__init__ = object.__init__ # see issue #3239
assert isinstance(A(1), A)
def test_object_new_not_really_overridden(self):
class A(object):
def __init__(self, value):
self.value = value
__new__ = object.__new__
assert A(42).value == 42
def test_object_init_cant_call_parent_with_args(self):
class A(object):
def __init__(self, value):
object.__init__(self, value)
raises(TypeError, A, 1)
def test_object_new_cant_call_parent_with_args(self):
class A(object):
def __new__(cls, value):
return object.__new__(cls, value)
raises(TypeError, A, 1)
def test_object_init_and_new_overridden(self):
class A(object):
def __new__(cls, value):
result = object.__new__(cls)
result.other_value = value + 1
return result
def __init__(self, value):
self.value = value
assert A(42).value == 42
assert A(42).other_value == 43
def test_object_str(self):
# obscure case: __str__() must delegate to __repr__() without adding
# type checking on its own
class A(object):
def __repr__(self):
return 123456
assert A().__str__() == 123456
def test_object_dir(self):
class A(object):
a_var = None
assert hasattr(object, '__dir__')
obj = A()
obj_items = dir(obj)
assert obj_items == sorted(obj_items)
assert obj_items == sorted(object.__dir__(obj))
def test_is_on_primitives(self):
if self.cpython_apptest:
skip("cpython behaves differently")
assert 1 is 1
x = 1000000
assert x + 1 is int(str(x + 1))
assert 1 is not 1.0
assert 1.1 is 1.1
assert 0.0 is not -0.0
for x in range(10):
assert x + 0.1 is x + 0.1
for x in range(10):
assert x + 1 is x + 1
for x in range(10):
assert x+1j is x+1j
assert 1+x*1j is 1+x*1j
l = [1]
assert l[0] is l[0]
def test_is_on_strs(self):
if self.appdirect:
skip("cannot run this test as apptest")
l = ["a"]
assert l[0] is l[0]
u = "a"
assert self.unwrap_wrap_unicode(u) is u
s = b"a"
assert self.unwrap_wrap_bytes(s) is s
def test_is_on_subclasses(self):
for typ in [int, float, complex, str]:
class mytyp(typ):
pass
if not self.cpython_apptest and typ is not str:
assert typ(42) is typ(42)
assert mytyp(42) is not mytyp(42)
assert mytyp(42) is not typ(42)
assert typ(42) is not mytyp(42)
x = mytyp(42)
assert x is x
assert x is not "43"
assert x is not None
assert "43" is not x
assert None is not x
x = typ(42)
assert x is x
assert x is not "43"
assert x is not None
assert "43" is not x
assert None is not x
def test_id_on_primitives(self):
if self.cpython_apptest:
skip("cpython behaves differently")
assert id(1) == (1 << 4) + 1
class myint(int):
pass
assert id(myint(1)) != id(1)
assert id(1.0) & 7 == 5
assert id(-0.0) != id(0.0)
assert hex(id(2.0)) == '0x40000000000000005'
assert id(0.0) == 5
def test_id_on_strs(self):
if self.appdirect:
skip("cannot run this test as apptest")
for u in [u"", u"a", u"aa"]:
assert id(self.unwrap_wrap_unicode(u)) == id(u)
s = u.encode()
assert id(self.unwrap_wrap_bytes(s)) == id(s)
#
assert id(b'') == (256 << 4) | 11 # always
assert id(u'') == (257 << 4) | 11
assert id(b'a') == (ord('a') << 4) | 11
# we no longer cache unicodes <128
# assert id(u'\u1234') == ((~0x1234) << 4) | 11
def test_id_of_tuples(self):
l = []
x = (l,)
assert id(x) != id((l,)) # no caching at all
if self.appdirect:
skip("cannot run this test as apptest")
assert id(()) == (258 << 4) | 11 # always
def test_id_of_frozensets(self):
x = frozenset([4])
assert id(x) != id(frozenset([4])) # no caching at all
if self.appdirect:
skip("cannot run this test as apptest")
assert id(frozenset()) == (259 << 4) | 11 # always
assert id(frozenset([])) == (259 << 4) | 11 # always
def test_identity_vs_id_primitives(self):
import sys
l = list(range(-10, 10, 2))
for i in [0, 1, 3]:
l.append(float(i))
l.append(i + 0.1)
l.append(i + sys.maxsize)
l.append(i - sys.maxsize)
l.append(i + 1j)
l.append(i - 1j)
l.append(1 + i * 1j)
l.append(1 - i * 1j)
l.append((i,))
l.append(frozenset([i]))
l.append(-0.0)
l.append(None)
l.append(True)
l.append(False)
l.append(())
l.append(tuple([]))
l.append(frozenset())
for i, a in enumerate(l):
for b in l[i:]:
assert (a is b) == (id(a) == id(b))
if a is b:
assert a == b
def test_identity_vs_id_str(self):
if self.appdirect:
skip("cannot run this test as apptest")
l = []
def add(s, u):
l.append(s)
l.append(self.unwrap_wrap_bytes(s))
l.append(s[:1] + s[1:])
l.append(u)
l.append(self.unwrap_wrap_unicode(u))
l.append(u[:1] + u[1:])
for i in range(3, 18):
add(str(i).encode(), str(i))
add(b"s", u"s")
add(b"", u"")
for i, a in enumerate(l):
for b in l[i:]:
assert (a is b) == (id(a) == id(b))
if a is b:
assert a == b
def test_identity_bug(self):
x = 0x4000000000000000
y = 2j
assert id(x) != id(y)
def test_object_hash_immutable(self):
x = 42
y = 40
y += 2
assert object.__hash__(x) == object.__hash__(y)
def test_richcompare(self):
o = object()
o2 = object()
assert o.__eq__(o) is True
assert o.__eq__(o2) is NotImplemented
assert o.__ne__(o) is False
assert o.__ne__(o2) is NotImplemented
assert o.__le__(o2) is NotImplemented
assert o.__lt__(o2) is NotImplemented
assert o.__ge__(o2) is NotImplemented
assert o.__gt__(o2) is NotImplemented
def test_init_subclass(self):
object().__init_subclass__() # does not crash
object.__init_subclass__() # does not crash
raises(TypeError, object.__init_subclass__, 1)
def test_better_error_init(self):
class A: pass
with raises(TypeError) as excinfo:
A(1)
assert "A() takes no arguments" in str(excinfo.value)
def test_isinstance_shortcut():
from pypy.objspace.std import objspace
space = objspace.StdObjSpace()
w_a = space.newtext("a")
space.type = None
# if it crashes, it means that space._type_isinstance didn't go through
# the fast path, and tries to call type() (which is set to None just
# above)
space.isinstance_w(w_a, space.w_text) # does not crash
| avg_line_length: 32.863109 | max_line_length: 109 | alphanum_fraction: 0.546032 |

| hexsha: c11e5c4828bc5fffa4f2bddb42eccc9c9f2fb6e1 | size: 1,683 | ext: py | lang: Python |
| repo_path: src/_cffi_src/openssl/err.py | repo: aganders3/cryptography | head_hexsha: dcf82c6c00bb006b6355c51d02c816769cb534a3 | licenses: ["PSF-2.0", "Apache-2.0", "BSD-3-Clause"] | stars: null | issues: null | forks: null |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
INCLUDES = """
#include <openssl/err.h>
"""
TYPES = """
static const int EVP_F_EVP_ENCRYPTFINAL_EX;
static const int EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH;
static const int EVP_R_BAD_DECRYPT;
static const int EVP_R_UNSUPPORTED_PRIVATE_KEY_ALGORITHM;
static const int PKCS12_R_PKCS12_CIPHERFINAL_ERROR;
static const int PEM_R_UNSUPPORTED_ENCRYPTION;
static const int EVP_R_XTS_DUPLICATED_KEYS;
static const int ERR_LIB_EVP;
static const int ERR_LIB_PEM;
static const int ERR_LIB_PROV;
static const int ERR_LIB_ASN1;
static const int ERR_LIB_PKCS12;
static const int SSL_TLSEXT_ERR_OK;
static const int SSL_TLSEXT_ERR_ALERT_FATAL;
static const int SSL_TLSEXT_ERR_NOACK;
static const int X509_R_CERT_ALREADY_IN_HASH_TABLE;
"""
FUNCTIONS = """
void ERR_error_string_n(unsigned long, char *, size_t);
const char *ERR_lib_error_string(unsigned long);
const char *ERR_func_error_string(unsigned long);
const char *ERR_reason_error_string(unsigned long);
unsigned long ERR_get_error(void);
unsigned long ERR_peek_error(void);
void ERR_clear_error(void);
void ERR_put_error(int, int, int, const char *, int);
int ERR_GET_LIB(unsigned long);
int ERR_GET_REASON(unsigned long);
"""
CUSTOMIZATIONS = """
/* This define is tied to provider support and is conditionally
removed if Cryptography_HAS_PROVIDERS is false */
#ifndef ERR_LIB_PROV
#define ERR_LIB_PROV 0
#endif
#if !CRYPTOGRAPHY_OPENSSL_111D_OR_GREATER || CRYPTOGRAPHY_IS_BORINGSSL
static const int EVP_R_XTS_DUPLICATED_KEYS = 0;
#endif
"""
| avg_line_length: 29.017241 | max_line_length: 79 | alphanum_fraction: 0.81224 |

| hexsha: 06749b1edffd14435a8732d475e69468f6bf6abf | size: 5,335 | ext: py | lang: Python |
| repo_path: tensorflow_privacy/privacy/dp_query/nested_query_test.py | repo: cchoquette/privacy | head_hexsha: 6a48a45afd17c211af4f3c4087316f236e9ccb34 | licenses: ["Apache-2.0"] |
| stars: null | issues: null | forks: 1 (2021-01-13T06:34:19.000Z) |
# Copyright 2018, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for NestedQuery."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_privacy.privacy.dp_query import gaussian_query
from tensorflow_privacy.privacy.dp_query import nested_query
from tensorflow_privacy.privacy.dp_query import test_utils
_basic_query = gaussian_query.GaussianSumQuery(1.0, 0.0)
class NestedQueryTest(tf.test.TestCase, parameterized.TestCase):
def test_nested_gaussian_sum_no_clip_no_noise(self):
with self.cached_session() as sess:
query1 = gaussian_query.GaussianSumQuery(
l2_norm_clip=10.0, stddev=0.0)
query2 = gaussian_query.GaussianSumQuery(
l2_norm_clip=10.0, stddev=0.0)
query = nested_query.NestedQuery([query1, query2])
record1 = [1.0, [2.0, 3.0]]
record2 = [4.0, [3.0, 2.0]]
query_result, _ = test_utils.run_query(query, [record1, record2])
result = sess.run(query_result)
expected = [5.0, [5.0, 5.0]]
self.assertAllClose(result, expected)
def test_nested_gaussian_average_no_clip_no_noise(self):
with self.cached_session() as sess:
query1 = gaussian_query.GaussianAverageQuery(
l2_norm_clip=10.0, sum_stddev=0.0, denominator=5.0)
query2 = gaussian_query.GaussianAverageQuery(
l2_norm_clip=10.0, sum_stddev=0.0, denominator=5.0)
query = nested_query.NestedQuery([query1, query2])
record1 = [1.0, [2.0, 3.0]]
record2 = [4.0, [3.0, 2.0]]
query_result, _ = test_utils.run_query(query, [record1, record2])
result = sess.run(query_result)
expected = [1.0, [1.0, 1.0]]
self.assertAllClose(result, expected)
def test_nested_gaussian_average_with_clip_no_noise(self):
with self.cached_session() as sess:
query1 = gaussian_query.GaussianAverageQuery(
l2_norm_clip=4.0, sum_stddev=0.0, denominator=5.0)
query2 = gaussian_query.GaussianAverageQuery(
l2_norm_clip=5.0, sum_stddev=0.0, denominator=5.0)
query = nested_query.NestedQuery([query1, query2])
record1 = [1.0, [12.0, 9.0]] # Clipped to [1.0, [4.0, 3.0]]
record2 = [5.0, [1.0, 2.0]] # Clipped to [4.0, [1.0, 2.0]]
query_result, _ = test_utils.run_query(query, [record1, record2])
result = sess.run(query_result)
expected = [1.0, [1.0, 1.0]]
self.assertAllClose(result, expected)
def test_complex_nested_query(self):
with self.cached_session() as sess:
query_ab = gaussian_query.GaussianSumQuery(
l2_norm_clip=1.0, stddev=0.0)
query_c = gaussian_query.GaussianAverageQuery(
l2_norm_clip=10.0, sum_stddev=0.0, denominator=2.0)
query_d = gaussian_query.GaussianSumQuery(
l2_norm_clip=10.0, stddev=0.0)
query = nested_query.NestedQuery(
[query_ab, {'c': query_c, 'd': [query_d]}])
record1 = [{'a': 0.0, 'b': 2.71828}, {'c': (-4.0, 6.0), 'd': [-4.0]}]
record2 = [{'a': 3.14159, 'b': 0.0}, {'c': (6.0, -4.0), 'd': [5.0]}]
query_result, _ = test_utils.run_query(query, [record1, record2])
result = sess.run(query_result)
expected = [{'a': 1.0, 'b': 1.0}, {'c': (1.0, 1.0), 'd': [1.0]}]
self.assertAllClose(result, expected)
def test_nested_query_with_noise(self):
with self.cached_session() as sess:
sum_stddev = 2.71828
denominator = 3.14159
query1 = gaussian_query.GaussianSumQuery(
l2_norm_clip=1.5, stddev=sum_stddev)
query2 = gaussian_query.GaussianAverageQuery(
l2_norm_clip=0.5, sum_stddev=sum_stddev, denominator=denominator)
query = nested_query.NestedQuery((query1, query2))
record1 = (3.0, [2.0, 1.5])
record2 = (0.0, [-1.0, -3.5])
query_result, _ = test_utils.run_query(query, [record1, record2])
noised_averages = []
for _ in range(1000):
noised_averages.append(tf.nest.flatten(sess.run(query_result)))
result_stddev = np.std(noised_averages, 0)
avg_stddev = sum_stddev / denominator
expected_stddev = [sum_stddev, avg_stddev, avg_stddev]
self.assertArrayNear(result_stddev, expected_stddev, 0.1)
@parameterized.named_parameters(
('type_mismatch', [_basic_query], (1.0,), TypeError),
('too_many_queries', [_basic_query, _basic_query], [1.0], ValueError),
('query_too_deep', [_basic_query, [_basic_query]], [1.0, 1.0], TypeError))
def test_record_incompatible_with_query(
self, queries, record, error_type):
with self.assertRaises(error_type):
test_utils.run_query(nested_query.NestedQuery(queries), [record])
if __name__ == '__main__':
tf.test.main()
| avg_line_length: 37.048611 | max_line_length: 80 | alphanum_fraction: 0.680975 |

| hexsha: d50ae92e2c4d593a2aefa6688b3edcf1f4faf1bd | size: 3,729 | ext: py | lang: Python |
| repo_path: Profiles/binary_search_tree.py | repo: hemishv111/Hacktoberfest2k19 | head_hexsha: ead7c6c7cdd569e730a48257c7ca1ba2f76dfbae | licenses: ["MIT"] |
| stars: 22 (2019-10-10T12:16:58.000Z to 2020-10-28T09:09:52.000Z) | issues: 49 (2019-10-09T11:23:06.000Z to 2020-10-01T07:26:14.000Z) | forks: 80 (2019-10-06T16:40:06.000Z to 2020-10-22T16:34:58.000Z) |
class BSTNode:
def __init__(self, key):
self.key = key
self.left = None
self.right = None
self.parent = None
def insert(self, node):
if self.key > node.key:
if self.left is None:
self.left = node
node.parent = self
else:
self.left.insert(node)
elif self.key < node.key:
if self.right is None:
self.right = node
node.parent = self
else:
self.right.insert(node)
def inorder(self):
if self.left is not None:
self.left.inorder()
print(self.key, end=' ')
if self.right is not None:
self.right.inorder()
def replace_node_of_parent(self, new_node):
if self.parent is not None:
if new_node is not None:
new_node.parent = self.parent
if self.parent.left == self:
self.parent.left = new_node
elif self.parent.right == self:
self.parent.right = new_node
else:
self.key = new_node.key
self.left = new_node.left
self.right = new_node.right
if new_node.left is not None:
new_node.left.parent = self
if new_node.right is not None:
new_node.right.parent = self
def find_min(self):
current = self
while current.left is not None:
current = current.left
return current
def remove(self):
if (self.left is not None and self.right is not None):
successor = self.right.find_min()
self.key = successor.key
successor.remove()
elif self.left is not None:
self.replace_node_of_parent(self.left)
elif self.right is not None:
self.replace_node_of_parent(self.right)
else:
self.replace_node_of_parent(None)
def search(self, key):
if self.key > key:
if self.left is not None:
return self.left.search(key)
else:
return None
elif self.key < key:
if self.right is not None:
return self.right.search(key)
else:
return None
return self
class BSTree:
def __init__(self):
self.root = None
def inorder(self):
if self.root is not None:
self.root.inorder()
def add(self, key):
new_node = BSTNode(key)
if self.root is None:
self.root = new_node
else:
self.root.insert(new_node)
def remove(self, key):
to_remove = self.search(key)
if (self.root == to_remove
and self.root.left is None and self.root.right is None):
self.root = None
else:
to_remove.remove()
def search(self, key):
if self.root is not None:
return self.root.search(key)
bstree = BSTree()
print('Menu (this assumes no duplicate keys)')
print('add <key>')
print('remove <key>')
print('inorder')
print('quit')
while True:
do = input('What would you like to do? ').split()
operation = do[0].strip().lower()
if operation == 'add':
key = int(do[1])
bstree.add(key)
elif operation == 'remove':
key = int(do[1])
bstree.remove(key)
elif operation == 'inorder':
print('Inorder traversal: ', end='')
bstree.inorder()
print()
elif operation == 'quit':
break
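An illustrative session (example input and output, not part of the original file):

# What would you like to do? add 5
# What would you like to do? add 3
# What would you like to do? add 7
# What would you like to do? inorder
# Inorder traversal: 3 5 7
# What would you like to do? quit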
| avg_line_length: 28.906977 | max_line_length: 69 | alphanum_fraction: 0.507911 |

| hexsha: 584f7908f47f90c7cfc6eeb27a6bac18c77d778f | size: 7,872 | ext: py | lang: Python |
| repo_path: pytests/eventing/eventing_logging.py | repo: couchbaselabs/testrunner-bharath | head_hexsha: 96af90070da2140cc11c549db7403f5ea3b76d34 | licenses: ["Apache-2.0"] |
| stars: 1 (2020-08-31T18:51:45.000Z) | issues: null | forks: 2 (2020-07-24T07:12:01.000Z to 2022-03-17T23:43:28.000Z) |
from pytests.eventing.eventing_base import EventingBaseTest
from logredaction.log_redaction_base import LogRedactionBase
from pytests.security.auditmain import audit
from lib.testconstants import STANDARD_BUCKET_PORT
import logging
import copy
import json
from lib.couchbase_helper.tuq_helper import N1QLHelper
from lib.membase.api.rest_client import RestConnection
from lib.remote.remote_util import RemoteMachineShellConnection
from lib.testconstants import STANDARD_BUCKET_PORT
from lib.memcached.helper.data_helper import MemcachedClientHelper
from pytests.eventing.eventing_constants import HANDLER_CODE
log = logging.getLogger()
class EventingLogging(EventingBaseTest, LogRedactionBase):
def setUp(self):
super(EventingLogging, self).setUp()
self.rest.set_service_memoryQuota(service='memoryQuota', memoryQuota=1200)
if self.create_functions_buckets:
self.bucket_size = 100
log.info(self.bucket_size)
bucket_params = self._create_bucket_params(server=self.server, size=self.bucket_size,
replicas=self.num_replicas)
self.cluster.create_standard_bucket(name=self.src_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.src_bucket = RestConnection(self.master).get_buckets()
self.cluster.create_standard_bucket(name=self.dst_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.cluster.create_standard_bucket(name=self.metadata_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.buckets = RestConnection(self.master).get_buckets()
self.gens_load = self.generate_docs(self.docs_per_day)
self.expiry = 3
auditing = audit(host=self.master)
log.info("Enabling Audit")
auditing.setAuditEnable('true')
self.sleep(30)
def tearDown(self):
super(EventingLogging, self).tearDown()
def check_config(self, event_id, host, expected_results):
auditing = audit(eventID=event_id, host=host)
_, value_verification = auditing.validateEvents(expected_results)
self.assertTrue(value_verification, "Values for one of the fields is not matching")
def test_eventing_audit_logging(self):
eventing_node = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=False)
self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
batch_size=self.batch_size)
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_UPDATE)
self.deploy_function(body)
expected_results_deploy = {"real_userid:source": "builtin", "real_userid:user": "Administrator",
"context": self.function_name, "id": 32768, "name": "Create Function",
"description": "Eventing function definition was created or updated"}
# check audit log if the deploy operation is present in audit log
self.check_config(32768, eventing_node, expected_results_deploy)
# Wait for eventing to catch up with all the create mutations and verify results
self.verify_eventing_results(self.function_name, self.docs_per_day * 2016)
self.undeploy_and_delete_function(body)
expected_results_undeploy = {"real_userid:source": "builtin", "real_userid:user": "Administrator",
"context": self.function_name, "id": 32779, "name": "Set Settings",
"description": "Save settings for a given app"}
expected_results_delete_draft = {"real_userid:source": "builtin", "real_userid:user": "Administrator",
"context": self.function_name, "id": 32773, "name": "Delete Drafts",
"description": "Eventing function draft definitions were deleted"}
expected_results_delete = {"real_userid:source": "builtin", "real_userid:user": "Administrator",
"context": self.function_name, "id": 32769, "name": "Delete Function",
"description": "Eventing function definition was deleted"}
# check audit log if the un deploy operation is present in audit log
self.check_config(32779, eventing_node, expected_results_undeploy)
# check audit log if the delete operation is present in audit log
self.check_config(32773, eventing_node, expected_results_delete_draft)
self.check_config(32769, eventing_node, expected_results_delete)
        # intentionally added, as it requires some time for eventing-consumers to shut down
self.sleep(60)
self.assertTrue(self.check_if_eventing_consumers_are_cleaned_up(),
msg="eventing-consumer processes are not cleaned up even after undeploying the function")
def test_eventing_with_log_redaction(self):
self.log_redaction_level = self.input.param("redaction_level", "partial")
eventing_node = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=False)
log.info("eventing_node : {0}".format(eventing_node))
self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
batch_size=self.batch_size)
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_UPDATE)
self.deploy_function(body)
self.verify_eventing_results(self.function_name, self.docs_per_day * 2016)
self.undeploy_and_delete_function(body)
self.set_redaction_level()
self.start_logs_collection()
result = self.monitor_logs_collection()
self.log.info("cb collect result: {}".format(result))
node = "ns_1@"+eventing_node.ip
if result["perNode"][node]["path"] == "failed":
raise Exception("log collection failed")
logs_path = result["perNode"][node]["path"]
redactFileName = logs_path.split('/')[-1]
nonredactFileName = logs_path.split('/')[-1].replace('-redacted', '')
remotepath = logs_path[0:logs_path.rfind('/') + 1]
log.info("redactFileName : {0}".format(redactFileName))
log.info("nonredactFileName : {0}".format(nonredactFileName))
log.info("remotepath : {0}".format(remotepath))
self.sleep(120)
self.verify_log_files_exist(remotepath=remotepath,
redactFileName=redactFileName,
nonredactFileName=nonredactFileName)
self.verify_log_redaction(remotepath=remotepath,
redactFileName=redactFileName,
nonredactFileName=nonredactFileName,
logFileName="ns_server.eventing.log")
def test_log_rotation(self):
self.load_sample_buckets(self.server, "travel-sample")
self.src_bucket_name="travel-sample"
body = self.create_save_function_body(self.function_name, "handler_code/logger.js")
body['settings']['app_log_max_size']=3768300
# deploy a function without any alias
self.deploy_function(body)
self.verify_eventing_results(self.function_name, 31591)
number=self.check_number_of_files()
if number ==1:
raise Exception("Files not rotated")
matched, count=self.check_word_count_eventing_log(self.function_name,"docId:",31591)
self.skip_metabucket_check = True
if not matched:
raise Exception("Not all data logged in file")
| avg_line_length: 58.311111 | max_line_length: 113 | alphanum_fraction: 0.668445 |

| hexsha: 05fd96d9673eebff3bb99a88dd1ef0b167adc45d | size: 1,651 | ext: py | lang: Python |
| repo_path: baseStation/src/ui/domain/indicator/charge.py | repo: olgam4/design3 | head_hexsha: 6e05d123a24deae7dda646df535844a158ef5cc0 | licenses: ["WTFPL"] | stars: null | issues: null | forks: null |
import tkinter as tk
from tkinter import ttk
from application.domain.iObserver import IObserver
from prehensor.service.prehensorService import PrehensorService
class Charge(ttk.Frame, IObserver):
def __init__(self, master, prehensor_service: PrehensorService, **kwargs) -> None:
super().__init__(master, **kwargs)
self._prehensor_service = prehensor_service
self._prehensor_service.attach(self)
self._create_widgets()
def _create_widgets(self) -> None:
# self._label_zero = ttk.Label(self, text="empty")
# self._label_full = ttk.Label(self, text="full")
# self._slider = ttk.Scale(self, from_=0, to=1, orient=tk.HORIZONTAL)
# self._slider.state(['disabled'])
self._voltage_text_var = tk.StringVar(value=" 0V")
self._voltage = ttk.Label(self, textvariable=self._voltage_text_var)
def draw(self, **kwargs) -> None:
self.columnconfigure(0, weight=1)
self.columnconfigure(1, weight=1)
self.columnconfigure(2, weight=1)
self.columnconfigure(3, weight=1)
self.rowconfigure(0, weight=1)
self.grid(**kwargs)
# self._label_zero.grid(row=0, column=0, sticky=tk.E)
# self._slider.grid(row=0, column=1)
# self._label_full.grid(row=0, column=2, sticky=tk.W)
self._voltage.grid(row=0, column=3)
def update(self) -> None:
charge = self._prehensor_service.get_prehensor().charge
# self._slider.state(['!disabled'])
# self._slider.set(charge * 100)
# self._slider.state(['disabled'])
self._voltage_text_var.set(" {: 2.3f}V".format(charge * 35))
| avg_line_length: 40.268293 | max_line_length: 86 | alphanum_fraction: 0.6596 |

| hexsha: 23147e1d53a7726c421e053b48a8e216c8fb17fb | size: 848 | ext: py | lang: Python |
| repo_path: examples.py | repo: mfdeux/aionewsapi | head_hexsha: c42f3ca42cfb5ccf986eb8d8dba644d60ee04e5f | licenses: ["MIT"] |
| stars: 4 (2019-10-03T20:58:41.000Z to 2019-10-04T01:20:07.000Z) | issues: null | forks: null |
import asyncio
from aionewsapi import NewsAPI
news_api = NewsAPI()
async def sources():
return await news_api.sources()
async def many():
sources = await news_api.sources()
fetch_tasks = []
for source in sources:
fetch_tasks.append(
asyncio.ensure_future(
news_api.latest_articles(
source.get('id'),
source.get('sorts_available')[0])))
for index, task in enumerate(asyncio.as_completed(fetch_tasks)):
result = await task
print('FETCH Task ret {}: {}'.format(index, result))
async def latest_articles():
return await news_api.latest_articles('the-washington-post', 'top')
if __name__ == '__main__':
loop = asyncio.get_event_loop()
task = loop.create_task(latest_articles())
print(loop.run_until_complete(task))
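A possible alternative entry point (sketch, assuming the same event-loop pattern as above) would run the bulk fetch instead:

# loop = asyncio.get_event_loop()
# loop.run_until_complete(many())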
| avg_line_length: 24.941176 | max_line_length: 71 | alphanum_fraction: 0.645047 |

| hexsha: 230e2144091dbf05e6eba775a1e59bbb7fac127e | size: 384 | ext: py | lang: Python | repo_path: Inventory_Home/adminInv/migrations/0006_auto_20200615_1434.py | licenses: ["Apache-2.0"] |
| max_stars: repo shaykhsiddique/Inventory-management-with-Django @ 2313c5b460dafe817399213cabf91b66dd2c0b24, 1 star (2020-06-18T23:42:55.000Z) |
| max_issues: same repo/head, 11 issues (2020-06-20T09:12:11.000Z to 2020-06-20T09:33:07.000Z) |
| max_forks: repo MarzanulHoque/Inventory-management-with-Django @ 2313c5b460dafe817399213cabf91b66dd2c0b24, count null |
# Generated by Django 3.0.5 on 2020-06-15 14:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('adminInv', '0005_user_sessionid'),
]
operations = [
migrations.AlterField(
model_name='user',
name='sessionid',
field=models.CharField(max_length=50),
),
]
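# To apply this migration in a standard Django project (workflow note, not part of the generated file):
#   python manage.py migrate adminInv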
| avg_line_length: 20.210526 | max_line_length: 50 | alphanum_fraction: 0.596354 |

| hexsha: 86cb960dd468b6dbae48912132fa9f1e7cf57d75 | size: 98 | ext: py | lang: Python |
| repo_path: tests/tests/test_brew_control_dash.py | repo: mcannamela/brew-control-dash | head_hexsha: 04dcd1bad2b82a7432c766100ab62fb41f247545 | licenses: ["MIT"] |
| stars: null | issues: 4 (2020-12-05T20:57:21.000Z to 2021-01-10T22:27:24.000Z) | forks: null |
from brew_control_dash import __version__
def test_version():
assert __version__ == '0.1.0'
| avg_line_length: 16.333333 | max_line_length: 41 | alphanum_fraction: 0.744898 |

| hexsha: 5fc7639f56fbdf82491625ae12d96548e965f738 | size: 17,932 | ext: py | lang: Python |
| repo_path: public/Python27/Lib/gzip.py | repo: NingrumFadillah/cekmutasi | head_hexsha: 1fccb6cafb874c2a80ece9b71d7c682fd44dbd48 | licenses: ["MIT"] |
| stars: 1 (2020-11-26T18:53:46.000Z) | issues: null | forks: 3 (2017-04-07T12:02:22.000Z to 2020-03-23T12:11:55.000Z) |
"""Functions that read and write gzipped files.
The user of the file doesn't have to worry about the compression,
but random access is not allowed."""
# based on Andrew Kuchling's minigzip.py distributed with the zlib module
import struct, sys, time, os
import zlib
import io
import __builtin__
__all__ = ["GzipFile","open"]
FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
READ, WRITE = 1, 2
def write32u(output, value):
# The L format writes the bit pattern correctly whether signed
# or unsigned.
output.write(struct.pack("<L", value))
def read32(input):
return struct.unpack("<I", input.read(4))[0]
def open(filename, mode="rb", compresslevel=9):
"""Shorthand for GzipFile(filename, mode, compresslevel).
The filename argument is required; mode defaults to 'rb'
and compresslevel defaults to 9.
"""
return GzipFile(filename, mode, compresslevel)
class GzipFile(io.BufferedIOBase):
"""The GzipFile class simulates most of the methods of a file object with
the exception of the readinto() and truncate() methods.
"""
myfileobj = None
max_read_chunk = 10 * 1024 * 1024 # 10Mb
def __init__(self, filename=None, mode=None,
compresslevel=9, fileobj=None, mtime=None):
"""Constructor for the GzipFile class.
At least one of fileobj and filename must be given a
non-trivial value.
The new class instance is based on fileobj, which can be a regular
file, a StringIO object, or any other object which simulates a file.
It defaults to None, in which case filename is opened to provide
a file object.
When fileobj is not None, the filename argument is only used to be
included in the gzip file header, which may include the original
filename of the uncompressed file. It defaults to the filename of
fileobj, if discernible; otherwise, it defaults to the empty string,
and in this case the original filename is not included in the header.
The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb',
depending on whether the file will be read or written. The default
is the mode of fileobj if discernible; otherwise, the default is 'rb'.
Be aware that only the 'rb', 'ab', and 'wb' values should be used
for cross-platform portability.
The compresslevel argument is an integer from 1 to 9 controlling the
level of compression; 1 is fastest and produces the least compression,
and 9 is slowest and produces the most compression. The default is 9.
The mtime argument is an optional numeric timestamp to be written
to the stream when compressing. All gzip compressed streams
are required to contain a timestamp. If omitted or None, the
current time is used. This module ignores the timestamp when
decompressing; however, some programs, such as gunzip, make use
of it. The format of the timestamp is the same as that of the
return value of time.time() and of the st_mtime member of the
object returned by os.stat().
"""
# guarantee the file is opened in binary mode on platforms
# that care about that sort of thing
if mode and 'b' not in mode:
mode += 'b'
if fileobj is None:
fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
if filename is None:
if hasattr(fileobj, 'name'): filename = fileobj.name
else: filename = ''
if mode is None:
if hasattr(fileobj, 'mode'): mode = fileobj.mode
else: mode = 'rb'
if mode[0:1] == 'r':
self.mode = READ
# Set flag indicating start of a new member
self._new_member = True
# Buffer data read from gzip file. extrastart is offset in
# stream where buffer starts. extrasize is number of
# bytes remaining in buffer from current stream position.
self.extrabuf = ""
self.extrasize = 0
self.extrastart = 0
self.name = filename
# Starts small, scales exponentially
self.min_readsize = 100
elif mode[0:1] == 'w' or mode[0:1] == 'a':
self.mode = WRITE
self._init_write(filename)
self.compress = zlib.compressobj(compresslevel,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
else:
raise IOError, "Mode " + mode + " not supported"
self.fileobj = fileobj
self.offset = 0
self.mtime = mtime
if self.mode == WRITE:
self._write_gzip_header()
@property
def filename(self):
import warnings
warnings.warn("use the name attribute", DeprecationWarning, 2)
if self.mode == WRITE and self.name[-3:] != ".gz":
return self.name + ".gz"
return self.name
def __repr__(self):
s = repr(self.fileobj)
return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
def _init_write(self, filename):
self.name = filename
self.crc = zlib.crc32("") & 0xffffffffL
self.size = 0
self.writebuf = []
self.bufsize = 0
def _write_gzip_header(self):
self.fileobj.write('\037\213') # magic header
self.fileobj.write('\010') # compression method
fname = os.path.basename(self.name)
if fname.endswith(".gz"):
fname = fname[:-3]
flags = 0
if fname:
flags = FNAME
self.fileobj.write(chr(flags))
mtime = self.mtime
if mtime is None:
mtime = time.time()
write32u(self.fileobj, long(mtime))
self.fileobj.write('\002')
self.fileobj.write('\377')
if fname:
self.fileobj.write(fname + '\000')
def _init_read(self):
self.crc = zlib.crc32("") & 0xffffffffL
self.size = 0
def _read_gzip_header(self):
magic = self.fileobj.read(2)
if magic != '\037\213':
raise IOError, 'Not a gzipped file'
method = ord( self.fileobj.read(1) )
if method != 8:
raise IOError, 'Unknown compression method'
flag = ord( self.fileobj.read(1) )
self.mtime = read32(self.fileobj)
# extraflag = self.fileobj.read(1)
# os = self.fileobj.read(1)
self.fileobj.read(2)
if flag & FEXTRA:
# Read & discard the extra field, if present
xlen = ord(self.fileobj.read(1))
xlen = xlen + 256*ord(self.fileobj.read(1))
self.fileobj.read(xlen)
if flag & FNAME:
# Read and discard a null-terminated string containing the filename
while True:
s = self.fileobj.read(1)
if not s or s=='\000':
break
if flag & FCOMMENT:
# Read and discard a null-terminated string containing a comment
while True:
s = self.fileobj.read(1)
if not s or s=='\000':
break
if flag & FHCRC:
self.fileobj.read(2) # Read & discard the 16-bit header CRC
def write(self,data):
if self.mode != WRITE:
import errno
raise IOError(errno.EBADF, "write() on read-only GzipFile object")
if self.fileobj is None:
raise ValueError, "write() on closed GzipFile object"
# Convert data type if called by io.BufferedWriter.
if isinstance(data, memoryview):
data = data.tobytes()
if len(data) > 0:
self.size = self.size + len(data)
self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
self.fileobj.write( self.compress.compress(data) )
self.offset += len(data)
return len(data)
def read(self, size=-1):
if self.mode != READ:
import errno
raise IOError(errno.EBADF, "read() on write-only GzipFile object")
if self.extrasize <= 0 and self.fileobj is None:
return ''
readsize = 1024
if size < 0: # get the whole thing
try:
while True:
self._read(readsize)
readsize = min(self.max_read_chunk, readsize * 2)
except EOFError:
size = self.extrasize
else: # just get some more of it
try:
while size > self.extrasize:
self._read(readsize)
readsize = min(self.max_read_chunk, readsize * 2)
except EOFError:
if size > self.extrasize:
size = self.extrasize
offset = self.offset - self.extrastart
chunk = self.extrabuf[offset: offset + size]
self.extrasize = self.extrasize - size
self.offset += size
return chunk
def _unread(self, buf):
self.extrasize = len(buf) + self.extrasize
self.offset -= len(buf)
def _read(self, size=1024):
if self.fileobj is None:
raise EOFError, "Reached EOF"
if self._new_member:
# If the _new_member flag is set, we have to
# jump to the next member, if there is one.
#
# First, check if we're at the end of the file;
# if so, it's time to stop; no more members to read.
pos = self.fileobj.tell() # Save current position
self.fileobj.seek(0, 2) # Seek to end of file
if pos == self.fileobj.tell():
raise EOFError, "Reached EOF"
else:
self.fileobj.seek( pos ) # Return to original position
self._init_read()
self._read_gzip_header()
self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
self._new_member = False
# Read a chunk of data from the file
buf = self.fileobj.read(size)
# If the EOF has been reached, flush the decompression object
# and mark this object as finished.
if buf == "":
uncompress = self.decompress.flush()
self._read_eof()
self._add_read_data( uncompress )
raise EOFError, 'Reached EOF'
uncompress = self.decompress.decompress(buf)
self._add_read_data( uncompress )
if self.decompress.unused_data != "":
# Ending case: we've come to the end of a member in the file,
# so seek back to the start of the unused data, finish up
# this member, and read a new gzip header.
# (The number of bytes to seek back is the length of the unused
# data, minus 8 because _read_eof() will rewind a further 8 bytes)
self.fileobj.seek( -len(self.decompress.unused_data)+8, 1)
# Check the CRC and file size, and set the flag so we read
# a new member on the next call
self._read_eof()
self._new_member = True
def _add_read_data(self, data):
self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
offset = self.offset - self.extrastart
self.extrabuf = self.extrabuf[offset:] + data
self.extrasize = self.extrasize + len(data)
self.extrastart = self.offset
self.size = self.size + len(data)
def _read_eof(self):
# We've read to the end of the file, so we have to rewind in order
# to reread the 8 bytes containing the CRC and the file size.
# We check that the computed CRC and size of the
# uncompressed data match the stored values. Note that the size
# stored is the true file size mod 2**32.
self.fileobj.seek(-8, 1)
crc32 = read32(self.fileobj)
isize = read32(self.fileobj) # may exceed 2GB
if crc32 != self.crc:
raise IOError("CRC check failed %s != %s" % (hex(crc32),
hex(self.crc)))
elif isize != (self.size & 0xffffffffL):
raise IOError, "Incorrect length of data produced"
# Gzip files can be padded with zeroes and still have archives.
# Consume all zero bytes and set the file position to the first
# non-zero byte. See http://www.gzip.org/#faq8
c = "\x00"
while c == "\x00":
c = self.fileobj.read(1)
if c:
self.fileobj.seek(-1, 1)
@property
def closed(self):
return self.fileobj is None
def close(self):
if self.fileobj is None:
return
if self.mode == WRITE:
self.fileobj.write(self.compress.flush())
write32u(self.fileobj, self.crc)
# self.size may exceed 2GB, or even 4GB
write32u(self.fileobj, self.size & 0xffffffffL)
self.fileobj = None
elif self.mode == READ:
self.fileobj = None
if self.myfileobj:
self.myfileobj.close()
self.myfileobj = None
def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH):
if self.mode == WRITE:
# Ensure the compressor's buffer is flushed
self.fileobj.write(self.compress.flush(zlib_mode))
self.fileobj.flush()
def fileno(self):
"""Invoke the underlying file object's fileno() method.
This will raise AttributeError if the underlying file object
doesn't support fileno().
"""
return self.fileobj.fileno()
def rewind(self):
'''Return the uncompressed stream file position indicator to the
beginning of the file'''
if self.mode != READ:
raise IOError("Can't rewind in write mode")
self.fileobj.seek(0)
self._new_member = True
self.extrabuf = ""
self.extrasize = 0
self.extrastart = 0
self.offset = 0
def readable(self):
return self.mode == READ
def writable(self):
return self.mode == WRITE
def seekable(self):
return True
def seek(self, offset, whence=0):
if whence:
if whence == 1:
offset = self.offset + offset
else:
raise ValueError('Seek from end not supported')
if self.mode == WRITE:
if offset < self.offset:
raise IOError('Negative seek in write mode')
count = offset - self.offset
for i in range(count // 1024):
self.write(1024 * '\0')
self.write((count % 1024) * '\0')
elif self.mode == READ:
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
return self.offset
def readline(self, size=-1):
if size < 0:
# Shortcut common case - newline found in buffer.
offset = self.offset - self.extrastart
i = self.extrabuf.find('\n', offset) + 1
if i > 0:
self.extrasize -= i - offset
self.offset += i - offset
return self.extrabuf[offset: i]
size = sys.maxint
readsize = self.min_readsize
else:
readsize = size
bufs = []
while size != 0:
c = self.read(readsize)
i = c.find('\n')
# We set i=size to break out of the loop under two
# conditions: 1) there's no newline, and the chunk is
# larger than size, or 2) there is a newline, but the
# resulting line would be longer than 'size'.
if (size <= i) or (i == -1 and len(c) > size):
i = size - 1
if i >= 0 or c == '':
bufs.append(c[:i + 1]) # Add portion of last chunk
self._unread(c[i + 1:]) # Push back rest of chunk
break
# Append chunk to list, decrease 'size',
bufs.append(c)
size = size - len(c)
readsize = min(size, readsize * 2)
if readsize > self.min_readsize:
self.min_readsize = min(readsize, self.min_readsize * 2, 512)
return ''.join(bufs) # Return resulting line
def _test():
# Act like gzip; with -d, act like gunzip.
# The input file is not deleted, however, nor are any other gzip
# options or features supported.
args = sys.argv[1:]
decompress = args and args[0] == "-d"
if decompress:
args = args[1:]
if not args:
args = ["-"]
for arg in args:
if decompress:
if arg == "-":
f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
g = sys.stdout
else:
if arg[-3:] != ".gz":
print "filename doesn't end in .gz:", repr(arg)
continue
f = open(arg, "rb")
g = __builtin__.open(arg[:-3], "wb")
else:
if arg == "-":
f = sys.stdin
g = GzipFile(filename="", mode="wb", fileobj=sys.stdout)
else:
f = __builtin__.open(arg, "rb")
g = open(arg + ".gz", "wb")
while True:
chunk = f.read(1024)
if not chunk:
break
g.write(chunk)
if g is not sys.stdout:
g.close()
if f is not sys.stdin:
f.close()
if __name__ == '__main__':
_test()
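As a quick illustration of the module above (under Python 2, which this version of gzip.py targets; the file name below is arbitrary), a round-trip write/read sketch using the module-level open() shorthand:
import gzip

# Write a compressed file...
out = gzip.open('example.txt.gz', 'wb')
out.write('hello gzip\n' * 3)
out.close()

# ...then read it back and check the contents survived the round trip.
inp = gzip.open('example.txt.gz', 'rb')
data = inp.read()
inp.close()
assert data == 'hello gzip\n' * 3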
| 35.864
| 79
| 0.553257
|
318416d6293fd3d1f70d972a2d4d4d6131476729
| 10,622
|
py
|
Python
|
src/common/utils.py
|
askme765cs/macro_pack
|
9acfa610e215b4ba5812e48a3b452bdf92a8664f
|
[
"Apache-2.0"
] | 1
|
2021-07-06T23:21:04.000Z
|
2021-07-06T23:21:04.000Z
|
src/common/utils.py
|
askme765cs/macro_pack
|
9acfa610e215b4ba5812e48a3b452bdf92a8664f
|
[
"Apache-2.0"
] | null | null | null |
src/common/utils.py
|
askme765cs/macro_pack
|
9acfa610e215b4ba5812e48a3b452bdf92a8664f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
from random import choice
import string
import logging
from termcolor import colored
import os, sys
import socket
from collections import OrderedDict
import importlib.util
import psutil
from datetime import datetime
class ColorLogFiler(logging.StreamHandler):
""" Override logging class to enable terminal colors """
def emit(self, record):
try:
msg = self.format(record)
msg = msg.replace("[+]",colored("[+]", "green"))
msg = msg.replace("[-]",colored("[-]", "green"))
msg = msg.replace("[!]",colored("[!]", "red"))
stream = self.stream
stream.write(msg)
stream.write(self.terminator)
self.flush()
except Exception:
self.handleError(record)
def randomAlpha(length):
""" Returns a random alphabetic string of length 'length' """
key = ''
for i in range(length): # @UnusedVariable
key += choice(string.ascii_lowercase)
return key
def extractWordInString(strToParse, index):
""" Exract word (space separated ) at current index"""
i = index
while i!=0 and strToParse[i-1] not in " \t\n&|":
i = i-1
leftPart = strToParse[i:index]
i = index
while i!=len(strToParse) and strToParse[i] not in " \t\n&|":
i = i+1
rightPart = strToParse[index:i]
extractedWord = leftPart+rightPart
#logging.debug(" [-] extracted Word: %s" % extractedWord)
return extractedWord
def extractPreviousWordInString(strToParse, index):
""" Exract the word (space separated ) preceding the one at current index"""
# Look for beginning or word
i = index
if strToParse[i] not in " \t\n":
while i!=0 and strToParse[i-1] not in " \t\n&|":
i = i-1
if i > 2:
while i!=0 and strToParse[i-1] in " \t\n\",;": # Skip spaces and special chars before the previous word
i = i-1
if i > 2:
previousWord = extractWordInString(strToParse, i)
else:
previousWord = ""
logging.debug(" [-] extracted previous Word: %s" % previousWord)
return previousWord
def extractNextWordInString(strToParse, index):
""" Exract the word (space separated ) following the one at current index"""
# Look for beginning or word
i = index
while i!=len(strToParse) and strToParse[i] not in " \t\n&|":
i = i+1
if len(strToParse)-i > 2:
while i!=0 and strToParse[i] in " \t\n\",;": # Skip spaces and special chars before the next word
i = i+1
if len(strToParse)-i > 2:
nextWord = extractWordInString(strToParse, i)
else:
nextWord = ""
logging.debug(" [-] Extracted next Word: %s" % nextWord)
return nextWord
def getHostIp():
""" returne current facing IP address """
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
def getRunningApp():
if getattr(sys, 'frozen', False):
return sys.executable
else:
import __main__ as main # @UnresolvedImport To get the real origin of the script not the location of current file
return os.path.abspath(main.__file__)
def checkIfProcessRunning(processName):
'''
Check if there is any running process that contains the given name processName.
'''
#Iterate over the all the running process
for proc in psutil.process_iter():
try:
# Check if process name contains the given name string.
if processName.lower() in proc.name().lower():
return True
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
return False
def yesOrNo(question):
answer = input(question + "(y/n): ").lower().strip()
print("")
while not(answer == "y" or answer == "yes" or \
answer == "n" or answer == "no"):
print("Input yes or no")
answer = input(question + "(y/n):").lower().strip()
print("")
if answer[0] == "y":
return True
else:
return False
def forceProcessKill(processName):
'''
Force kill a process (only work on windows)
'''
os.system("taskkill /f /im %s" % processName)
def checkModuleExist(name):
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
spec = importlib.util.find_spec(name)
return spec is not None
def validateDate(date_text):
try:
if date_text != datetime.strptime(date_text, "%Y-%m-%d").strftime('%Y-%m-%d'):
raise ValueError
return True
except ValueError:
return False
class MPParam():
def __init__(self,name,optional=False):
self.name = name
self.value = ""
self.optional = optional
def getParamValue(paramArray, paramName):
result = ""
i = 0
while i < len(paramArray):
if paramArray[i].name == paramName:
result = paramArray[i].value
break
i += 1
return result
class MSTypes():
XL="Excel"
XL97="Excel97"
WD="Word"
WD97="Word97"
PPT="PowerPoint"
PPT97="PowerPoint97"
MPP = "MSProject"
PUB="Publisher"
VSD="Visio"
VSD97="Visio97"
ACC="Access"
VBA="VBA"
VBS="Visual Basic Script"
HTA="HTML Application"
SCT="Windows Script Component"
WSF="Windows Script File"
LNK="Shell Link"
GLK = "Groove Shortcut"
SCF="Explorer Command File"
XSL="XSLT Stylesheet"
URL="URL Shortcut"
IQY="Excel Web Query"
SETTINGS_MS="Settings Shortcut"
SYLK="SYmbolic LinK"
CHM="Compressed HTML Help"
LIBRARY_MS="MS Library"
INF="Setup Information"
CSPROJ="Visual Studio Project"
CMD="Command line"
EXE="Portable Executable"
DLL="Portable Executable (DLL)"
MSI="Installer"
UNKNOWN = "Unknown"
WORD_AND_EXCEL_FORMATS = [ XL, XL97, WD, WD97]
MS_OFFICE_BASIC_FORMATS = WORD_AND_EXCEL_FORMATS + [PPT]
MS_OFFICE_FORMATS = MS_OFFICE_BASIC_FORMATS + [ MPP, VSD, VSD97, ACC] # Formats supported by macro_pack
VBSCRIPTS_BASIC_FORMATS = [VBS, HTA, SCT, WSF ]
VBSCRIPTS_FORMATS = VBSCRIPTS_BASIC_FORMATS + [XSL]
VB_FORMATS = VBSCRIPTS_FORMATS + MS_OFFICE_FORMATS
Shortcut_FORMATS = [LNK, GLK, SCF, URL, SETTINGS_MS, LIBRARY_MS, INF, IQY, SYLK, CHM, CMD, CSPROJ]
ProMode_FORMATS = [SYLK, CHM]
HtaMacro_FORMATS = [LNK, CHM, INF, SYLK, CSPROJ]
Trojan_FORMATS = MS_OFFICE_BASIC_FORMATS + [MPP, VSD, VSD97,CHM, CSPROJ]
PE_FORMATS = [EXE, DLL]
# OrderedDict([("target_url",None),("download_path",None)])
EXTENSION_DICT = OrderedDict([ (LNK,".lnk"),( GLK,".glk"),( SCF,".scf"),( URL,".url"), (SETTINGS_MS,".SettingContent-ms"),(LIBRARY_MS,".library-ms"),(INF,".inf"),(IQY, ".iqy"),
(SYLK,".slk"),(CHM,".chm"),(CMD,".cmd"),(CSPROJ,".csproj"),
( XL,".xlsm"),( XL97,".xls"),( WD,".docm"),
(WD97,".doc"),( PPT,".pptm"),( PPT97,".ppt"),( MPP,".mpp"),( PUB,".pub"),( VSD,".vsdm"),( VSD97,".vsd"),
(VBA,".vba"),( VBS,".vbs"),( HTA,".hta"),( SCT,".sct"),( WSF,".wsf"),( XSL,".xsl"),( ACC,".accdb"), ( ACC,".mdb" ),
(EXE,".exe"),( DLL,".dll"),(MSI,".msi")])
@classmethod
def guessApplicationType(self, documentPath):
""" Guess MS application type based on extension """
result = ""
extension = os.path.splitext(documentPath)[1]
if ".xls" == extension.lower():
result = self.XL97
elif extension.lower() in (".xlsx", ".xlsm", ".xltm"):
result = self.XL
elif ".doc" == extension.lower():
result = self.WD97
elif extension.lower() in (".docx", ".docm", ".dotm"):
result = self.WD
elif ".hta" == extension.lower():
result = self.HTA
elif ".mpp" == extension.lower():
result = self.MPP
elif ".ppt" == extension.lower():
result = self.PPT97
elif extension.lower() in (".pptx", ".pptm", ".potm"):
result = self.PPT
elif ".vsd" == extension.lower():
result = self.VSD97
elif ".vsdm" == extension.lower() or extension.lower() == ".vsdx":
result = self.VSD
elif extension.lower() in (".accdb", ".accde", ".mdb"):
result = self.ACC
elif ".pub" == extension.lower():
result = self.PUB
elif ".vba" == extension.lower():
result = self.VBA
elif ".vbs" == extension.lower():
result = self.VBS
elif ".sct" == extension.lower() or extension.lower() == ".wsc":
result = self.SCT
elif ".wsf" == extension.lower():
result = self.WSF
elif ".url" == extension.lower():
result = self.URL
elif ".glk" == extension.lower():
result = self.GLK
elif ".lnk" == extension.lower():
result = self.LNK
elif ".settingcontent-ms" == extension.lower():
result = self.SETTINGS_MS
elif ".library-ms" == extension.lower():
result = self.LIBRARY_MS
elif ".inf" == extension.lower():
result = self.INF
elif ".scf" == extension.lower():
result = self.SCF
elif ".xsl" == extension.lower():
result = self.XSL
elif ".iqy" == extension.lower():
result = self.IQY
elif ".slk" == extension.lower():
result = self.SYLK
elif ".chm" == extension.lower():
result = self.CHM
elif ".csproj" == extension.lower():
result = self.CSPROJ
elif ".cmd" == extension.lower() or extension.lower() == ".bat":
result = self.CMD
elif extension.lower() in (".dll", ".ocx"):
result = self.DLL
elif extension.lower() in (".exe"):
result = self.EXE
elif extension.lower() in (".msi"):
result = self.MSI
else:
result = self.UNKNOWN
return result
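A short usage sketch for the helpers above (it assumes the module is importable as common.utils, matching the repository path shown; the sample file names and parameter values are made up):
from common.utils import MSTypes, MPParam, getParamValue, randomAlpha

# Extension-based application type guessing.
print(MSTypes.guessApplicationType('invoice.xlsm'))   # Excel
print(MSTypes.guessApplicationType('report.doc'))     # Word97
print(MSTypes.guessApplicationType('unknown.bin'))    # Unknown

# Random lowercase identifier, e.g. for generated names.
print(randomAlpha(8))

# Parameter list lookup by name.
params = [MPParam('target_url'), MPParam('download_path', optional=True)]
params[0].value = 'http://example.com/payload'
print(getParamValue(params, 'target_url'))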
| 33.19375
| 180
| 0.571267
|
8e978f07f8fdcc773f7964e7ad31a8f52dad9318
| 2,552
|
py
|
Python
|
indico/modules/events/management/controllers/posters.py
|
uxmaster/indico
|
ecd19f17ef6fdc9f5584f59c87ec647319ce5d31
|
[
"MIT"
] | 1
|
2019-11-03T11:34:16.000Z
|
2019-11-03T11:34:16.000Z
|
indico/modules/events/management/controllers/posters.py
|
NP-compete/indico
|
80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549
|
[
"MIT"
] | null | null | null |
indico/modules/events/management/controllers/posters.py
|
NP-compete/indico
|
80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
import uuid
from flask import request
from werkzeug.exceptions import Forbidden, NotFound
from indico.legacy.common.cache import GenericCache
from indico.modules.designer.models.templates import DesignerTemplate
from indico.modules.events.management.controllers import RHManageEventBase
from indico.modules.events.management.forms import PosterPrintingForm
from indico.modules.events.posters import PosterPDF
from indico.util.i18n import _
from indico.web.flask.util import send_file, url_for
from indico.web.util import jsonify_data, jsonify_form
poster_cache = GenericCache('poster-printing')
class RHPosterPrintSettings(RHManageEventBase):
ALLOW_LOCKED = True
def _process_args(self):
RHManageEventBase._process_args(self)
self.template_id = request.args.get('template_id')
def _process(self):
self.commit = False
form = PosterPrintingForm(self.event, template=self.template_id)
if form.validate_on_submit():
data = dict(form.data)
template_id = data.pop('template')
key = unicode(uuid.uuid4())
poster_cache.set(key, data, time=1800)
download_url = url_for('.print_poster', self.event, template_id=template_id, uuid=key)
return jsonify_data(flash=False, redirect=download_url, redirect_no_loading=True)
return jsonify_form(form, disabled_until_change=False, back=_('Cancel'), submit=_('Download PDF'))
class RHPrintEventPoster(RHManageEventBase):
def _process_args(self):
RHManageEventBase._process_args(self)
self.template = DesignerTemplate.get_one(request.view_args['template_id'])
def _check_access(self):
RHManageEventBase._check_access(self)
# Check that template belongs to this event or a category that
# is a parent
if self.template.owner != self.event and self.template.owner.id not in self.event.category_chain:
raise Forbidden
def _process(self):
self.commit = False
config_params = poster_cache.get(request.view_args['uuid'])
if not config_params:
raise NotFound
pdf = PosterPDF(self.template, config_params, self.event)
return send_file('Poster-{}.pdf'.format(self.event.id), pdf.get_pdf(), 'application/pdf')
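The two handlers above pass the poster settings from the form POST to the later PDF download through a short-lived cache keyed by a random UUID embedded in the download URL. A dependency-free sketch of that handoff pattern, with a plain dict standing in for GenericCache and made-up setting names (illustration only, not Indico's API):
import uuid

_poster_cache = {}  # stand-in for GenericCache('poster-printing'); no expiry here

def store_settings(settings):
    key = str(uuid.uuid4())
    _poster_cache[key] = settings
    return key  # would be embedded in the download URL

def load_settings(key):
    return _poster_cache.get(key)  # None would map to a NotFound response

token = store_settings({'page_size': 'A4', 'margin': 10})
print(load_settings(token))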
| 36.985507
| 106
| 0.726489
|
730a3dc313c65c3367dce97e29750204d707a860
| 1,105
|
py
|
Python
|
release/stubs.min/Rhino/DocObjects/__init___parts/ObjectColorSource.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 182
|
2017-06-27T02:26:15.000Z
|
2022-03-30T18:53:43.000Z
|
release/stubs.min/Rhino/DocObjects/__init___parts/ObjectColorSource.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 28
|
2017-06-27T13:38:23.000Z
|
2022-03-15T11:19:44.000Z
|
release/stubs.min/Rhino/DocObjects/__init___parts/ObjectColorSource.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 67
|
2017-06-28T09:43:59.000Z
|
2022-03-20T21:17:10.000Z
|
class ObjectColorSource(Enum,IComparable,IFormattable,IConvertible):
"""
Defines enumerated values for the source of display color of single objects.
enum ObjectColorSource,values: ColorFromLayer (0),ColorFromMaterial (2),ColorFromObject (1),ColorFromParent (3)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
ColorFromLayer=None
ColorFromMaterial=None
ColorFromObject=None
ColorFromParent=None
value__=None
| 29.078947
| 215
| 0.690498
|
2a40bfe70437e87b8b15ef3913df36cf9873835a
| 15,136
|
py
|
Python
|
homeassistant/components/google/__init__.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/google/__init__.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 20
|
2021-11-03T06:22:03.000Z
|
2022-03-31T06:21:17.000Z
|
homeassistant/components/google/__init__.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | null | null | null |
"""Support for Google - Calendar Event Devices."""
from __future__ import annotations
import asyncio
from collections.abc import Mapping
from datetime import datetime, timedelta
import logging
from typing import Any
import aiohttp
from gcal_sync.api import GoogleCalendarService
from gcal_sync.exceptions import ApiException
from gcal_sync.model import Calendar, DateOrDatetime, Event
from oauth2client.file import Storage
import voluptuous as vol
from voluptuous.error import Error as VoluptuousError
import yaml
from homeassistant import config_entries
from homeassistant.components.application_credentials import (
ClientCredential,
async_import_client_credential,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_DEVICE_ID,
CONF_ENTITIES,
CONF_NAME,
CONF_OFFSET,
)
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.exceptions import (
ConfigEntryAuthFailed,
ConfigEntryNotReady,
HomeAssistantError,
)
from homeassistant.helpers import config_entry_oauth2_flow
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.typing import ConfigType
from .api import ApiAuthImpl, get_feature_access
from .const import (
CONF_CALENDAR_ACCESS,
DATA_CONFIG,
DATA_SERVICE,
DEVICE_AUTH_IMPL,
DISCOVER_CALENDAR,
DOMAIN,
FeatureAccess,
)
_LOGGER = logging.getLogger(__name__)
ENTITY_ID_FORMAT = DOMAIN + ".{}"
CONF_TRACK_NEW = "track_new_calendar"
CONF_CAL_ID = "cal_id"
CONF_TRACK = "track"
CONF_SEARCH = "search"
CONF_IGNORE_AVAILABILITY = "ignore_availability"
CONF_MAX_RESULTS = "max_results"
DEFAULT_CONF_OFFSET = "!!"
EVENT_CALENDAR_ID = "calendar_id"
EVENT_DESCRIPTION = "description"
EVENT_END_CONF = "end"
EVENT_END_DATE = "end_date"
EVENT_END_DATETIME = "end_date_time"
EVENT_IN = "in"
EVENT_IN_DAYS = "days"
EVENT_IN_WEEKS = "weeks"
EVENT_START_CONF = "start"
EVENT_START_DATE = "start_date"
EVENT_START_DATETIME = "start_date_time"
EVENT_SUMMARY = "summary"
EVENT_TYPES_CONF = "event_types"
NOTIFICATION_ID = "google_calendar_notification"
NOTIFICATION_TITLE = "Google Calendar Setup"
GROUP_NAME_ALL_CALENDARS = "Google Calendar Sensors"
SERVICE_SCAN_CALENDARS = "scan_for_calendars"
SERVICE_ADD_EVENT = "add_event"
YAML_DEVICES = f"{DOMAIN}_calendars.yaml"
TOKEN_FILE = f".{DOMAIN}.token"
PLATFORMS = ["calendar"]
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
vol.Optional(CONF_TRACK_NEW, default=True): cv.boolean,
vol.Optional(CONF_CALENDAR_ACCESS, default="read_write"): cv.enum(
FeatureAccess
),
}
)
},
),
extra=vol.ALLOW_EXTRA,
)
_SINGLE_CALSEARCH_CONFIG = vol.All(
cv.deprecated(CONF_MAX_RESULTS),
vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_DEVICE_ID): cv.string,
vol.Optional(CONF_IGNORE_AVAILABILITY, default=True): cv.boolean,
vol.Optional(CONF_OFFSET): cv.string,
vol.Optional(CONF_SEARCH): cv.string,
vol.Optional(CONF_TRACK): cv.boolean,
vol.Optional(CONF_MAX_RESULTS): cv.positive_int, # Now unused
}
),
)
DEVICE_SCHEMA = vol.Schema(
{
vol.Required(CONF_CAL_ID): cv.string,
vol.Required(CONF_ENTITIES, None): vol.All(
cv.ensure_list, [_SINGLE_CALSEARCH_CONFIG]
),
},
extra=vol.ALLOW_EXTRA,
)
_EVENT_IN_TYPES = vol.Schema(
{
vol.Exclusive(EVENT_IN_DAYS, EVENT_TYPES_CONF): cv.positive_int,
vol.Exclusive(EVENT_IN_WEEKS, EVENT_TYPES_CONF): cv.positive_int,
}
)
ADD_EVENT_SERVICE_SCHEMA = vol.Schema(
{
vol.Required(EVENT_CALENDAR_ID): cv.string,
vol.Required(EVENT_SUMMARY): cv.string,
vol.Optional(EVENT_DESCRIPTION, default=""): cv.string,
vol.Exclusive(EVENT_START_DATE, EVENT_START_CONF): cv.date,
vol.Exclusive(EVENT_END_DATE, EVENT_END_CONF): cv.date,
vol.Exclusive(EVENT_START_DATETIME, EVENT_START_CONF): cv.datetime,
vol.Exclusive(EVENT_END_DATETIME, EVENT_END_CONF): cv.datetime,
vol.Exclusive(EVENT_IN, EVENT_START_CONF, EVENT_END_CONF): _EVENT_IN_TYPES,
}
)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the Google component."""
if DOMAIN not in config:
return True
conf = config.get(DOMAIN, {})
hass.data[DOMAIN] = {DATA_CONFIG: conf}
if CONF_CLIENT_ID in conf and CONF_CLIENT_SECRET in conf:
await async_import_client_credential(
hass,
DOMAIN,
ClientCredential(
conf[CONF_CLIENT_ID],
conf[CONF_CLIENT_SECRET],
),
DEVICE_AUTH_IMPL,
)
# Import credentials from the old token file into the new way as
# a ConfigEntry managed by home assistant.
storage = Storage(hass.config.path(TOKEN_FILE))
creds = await hass.async_add_executor_job(storage.get)
if creds and get_feature_access(hass).scope in creds.scopes:
_LOGGER.debug("Importing configuration entry with credentials")
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={
"creds": creds,
},
)
)
_LOGGER.warning(
"Configuration of Google Calendar in YAML in configuration.yaml is "
"is deprecated and will be removed in a future release; Your existing "
"OAuth Application Credentials and access settings have been imported "
"into the UI automatically and can be safely removed from your "
"configuration.yaml file"
)
if conf.get(CONF_TRACK_NEW) is False:
# Setting track_new to False would previously result in new entries
# in google_calendars.yaml with track set to False, which is
# handled at calendar entity creation time.
_LOGGER.warning(
"You must manually set the integration System Options in the "
"UI to disable newly discovered entities going forward"
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Google from a config entry."""
hass.data.setdefault(DOMAIN, {})
implementation = (
await config_entry_oauth2_flow.async_get_config_entry_implementation(
hass, entry
)
)
session = config_entry_oauth2_flow.OAuth2Session(hass, entry, implementation)
# Force a token refresh to fix a bug where tokens were persisted with
# expires_in (relative time delta) and expires_at (absolute time) swapped.
# A google session token typically only lasts a few days between refresh.
now = datetime.now()
if session.token["expires_at"] >= (now + timedelta(days=365)).timestamp():
session.token["expires_in"] = 0
session.token["expires_at"] = now.timestamp()
try:
await session.async_ensure_token_valid()
except aiohttp.ClientResponseError as err:
if 400 <= err.status < 500:
raise ConfigEntryAuthFailed from err
raise ConfigEntryNotReady from err
except aiohttp.ClientError as err:
raise ConfigEntryNotReady from err
if not async_entry_has_scopes(hass, entry):
raise ConfigEntryAuthFailed(
"Required scopes are not available, reauth required"
)
calendar_service = GoogleCalendarService(
ApiAuthImpl(async_get_clientsession(hass), session)
)
hass.data[DOMAIN][DATA_SERVICE] = calendar_service
await async_setup_services(hass, calendar_service)
# Only expose the add event service if we have the correct permissions
if get_feature_access(hass, entry) is FeatureAccess.read_write:
await async_setup_add_event_service(hass, calendar_service)
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
entry.async_on_unload(entry.add_update_listener(async_reload_entry))
return True
def async_entry_has_scopes(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Verify that the config entry desired scope is present in the oauth token."""
access = get_feature_access(hass, entry)
token_scopes = entry.data.get("token", {}).get("scope", [])
return access.scope in token_scopes
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
async def async_reload_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Reload config entry if the access options change."""
if not async_entry_has_scopes(hass, entry):
await hass.config_entries.async_reload(entry.entry_id)
async def async_setup_services(
hass: HomeAssistant,
calendar_service: GoogleCalendarService,
) -> None:
"""Set up the service listeners."""
calendars = await hass.async_add_executor_job(
load_config, hass.config.path(YAML_DEVICES)
)
calendars_file_lock = asyncio.Lock()
async def _found_calendar(calendar_item: Calendar) -> None:
calendar = get_calendar_info(
hass,
calendar_item.dict(exclude_unset=True),
)
calendar_id = calendar_item.id
# If the google_calendars.yaml file already exists, populate it for
# backwards compatibility, but otherwise do not create it if it does
# not exist.
if calendars:
if calendar_id not in calendars:
calendars[calendar_id] = calendar
async with calendars_file_lock:
await hass.async_add_executor_job(
update_config, hass.config.path(YAML_DEVICES), calendar
)
else:
# Prefer entity/name information from yaml, overriding api
calendar = calendars[calendar_id]
async_dispatcher_send(hass, DISCOVER_CALENDAR, calendar)
created_calendars = set()
async def _scan_for_calendars(call: ServiceCall) -> None:
"""Scan for new calendars."""
try:
result = await calendar_service.async_list_calendars()
except ApiException as err:
raise HomeAssistantError(str(err)) from err
tasks = []
for calendar_item in result.items:
if calendar_item.id in created_calendars:
continue
created_calendars.add(calendar_item.id)
tasks.append(_found_calendar(calendar_item))
await asyncio.gather(*tasks)
hass.services.async_register(DOMAIN, SERVICE_SCAN_CALENDARS, _scan_for_calendars)
async def async_setup_add_event_service(
hass: HomeAssistant,
calendar_service: GoogleCalendarService,
) -> None:
"""Add the service to add events."""
async def _add_event(call: ServiceCall) -> None:
"""Add a new event to calendar."""
start: DateOrDatetime | None = None
end: DateOrDatetime | None = None
if EVENT_IN in call.data:
if EVENT_IN_DAYS in call.data[EVENT_IN]:
now = datetime.now()
start_in = now + timedelta(days=call.data[EVENT_IN][EVENT_IN_DAYS])
end_in = start_in + timedelta(days=1)
start = DateOrDatetime(date=start_in)
end = DateOrDatetime(date=end_in)
elif EVENT_IN_WEEKS in call.data[EVENT_IN]:
now = datetime.now()
start_in = now + timedelta(weeks=call.data[EVENT_IN][EVENT_IN_WEEKS])
end_in = start_in + timedelta(days=1)
start = DateOrDatetime(date=start_in)
end = DateOrDatetime(date=end_in)
elif EVENT_START_DATE in call.data:
start = DateOrDatetime(date=call.data[EVENT_START_DATE])
end = DateOrDatetime(date=call.data[EVENT_END_DATE])
elif EVENT_START_DATETIME in call.data:
start_dt = call.data[EVENT_START_DATETIME]
end_dt = call.data[EVENT_END_DATETIME]
start = DateOrDatetime(
date_time=start_dt, timezone=str(hass.config.time_zone)
)
end = DateOrDatetime(date_time=end_dt, timezone=str(hass.config.time_zone))
if start is None or end is None:
raise ValueError(
"Missing required fields to set start or end date/datetime"
)
await calendar_service.async_create_event(
call.data[EVENT_CALENDAR_ID],
Event(
summary=call.data[EVENT_SUMMARY],
description=call.data[EVENT_DESCRIPTION],
start=start,
end=end,
),
)
hass.services.async_register(
DOMAIN, SERVICE_ADD_EVENT, _add_event, schema=ADD_EVENT_SERVICE_SCHEMA
)
def get_calendar_info(
hass: HomeAssistant, calendar: Mapping[str, Any]
) -> dict[str, Any]:
"""Convert data from Google into DEVICE_SCHEMA."""
calendar_info: dict[str, Any] = DEVICE_SCHEMA(
{
CONF_CAL_ID: calendar["id"],
CONF_ENTITIES: [
{
CONF_NAME: calendar["summary"],
CONF_DEVICE_ID: generate_entity_id(
"{}", calendar["summary"], hass=hass
),
}
],
}
)
return calendar_info
def load_config(path: str) -> dict[str, Any]:
"""Load the google_calendar_devices.yaml."""
calendars = {}
try:
with open(path, encoding="utf8") as file:
data = yaml.safe_load(file)
for calendar in data:
try:
calendars.update({calendar[CONF_CAL_ID]: DEVICE_SCHEMA(calendar)})
except VoluptuousError as exception:
# keep going
_LOGGER.warning("Calendar Invalid Data: %s", exception)
except FileNotFoundError as err:
_LOGGER.debug("Error reading calendar configuration: %s", err)
# When YAML file could not be loaded/did not contain a dict
return {}
return calendars
def update_config(path: str, calendar: dict[str, Any]) -> None:
"""Write the google_calendar_devices.yaml."""
try:
with open(path, "a", encoding="utf8") as out:
out.write("\n")
yaml.dump([calendar], out, default_flow_style=False)
except FileNotFoundError as err:
_LOGGER.debug("Error persisting calendar configuration: %s", err)
| 34.244344
| 87
| 0.663385
|
301c568f2b9e3791041a5b9192a5a3ee48a51f41
| 51,038
|
py
|
Python
|
intelhex/__init__.py
|
debian-janitor/pkg-intelhex
|
65fd28427f2c3d699a60b766d646d5d61719a79a
|
[
"BSD-3-Clause"
] | null | null | null |
intelhex/__init__.py
|
debian-janitor/pkg-intelhex
|
65fd28427f2c3d699a60b766d646d5d61719a79a
|
[
"BSD-3-Clause"
] | 1
|
2021-08-31T22:17:27.000Z
|
2021-08-31T22:17:27.000Z
|
intelhex/__init__.py
|
debian-janitor/pkg-intelhex
|
65fd28427f2c3d699a60b766d646d5d61719a79a
|
[
"BSD-3-Clause"
] | 1
|
2020-12-17T21:30:20.000Z
|
2020-12-17T21:30:20.000Z
|
# Copyright (c) 2005-2016, Alexander Belchenko
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# * Redistributions of source code must retain
# the above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce
# the above copyright notice, this list of conditions
# and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the author nor the names
# of its contributors may be used to endorse
# or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Intel HEX format manipulation library.'''
__docformat__ = "javadoc"
from array import array
from binascii import hexlify, unhexlify
from bisect import bisect_right
import os
import sys
from intelhex.compat import (
IntTypes,
StrType,
StringIO,
array_tobytes,
asbytes,
asstr,
dict_items_g,
dict_keys,
dict_keys_g,
range_g,
range_l,
)
from intelhex.getsizeof import total_size
class _DeprecatedParam(object):
pass
_DEPRECATED = _DeprecatedParam()
class IntelHex(object):
''' Intel HEX file reader. '''
def __init__(self, source=None):
''' Constructor. If source specified, object will be initialized
with the contents of source. Otherwise the object will be empty.
@param source source for initialization
(file name of HEX file, file object, addr dict or
other IntelHex object)
'''
# public members
self.padding = 0x0FF
# Start Address
self.start_addr = None
# private members
self._buf = {}
self._offset = 0
if source is not None:
if isinstance(source, StrType) or getattr(source, "read", None):
# load hex file
self.loadhex(source)
elif isinstance(source, dict):
self.fromdict(source)
elif isinstance(source, IntelHex):
self.padding = source.padding
if source.start_addr:
self.start_addr = source.start_addr.copy()
self._buf = source._buf.copy()
else:
raise ValueError("source: bad initializer type")
def _decode_record(self, s, line=0):
'''Decode one record of HEX file.
@param s line with HEX record.
@param line line number (for error messages).
@raise EndOfFile if EOF record encountered.
'''
s = s.rstrip('\r\n')
if not s:
return # empty line
if s[0] == ':':
try:
bin = array('B', unhexlify(asbytes(s[1:])))
except (TypeError, ValueError):
# this might be raised by unhexlify when odd hexascii digits
raise HexRecordError(line=line)
length = len(bin)
if length < 5:
raise HexRecordError(line=line)
else:
raise HexRecordError(line=line)
record_length = bin[0]
if length != (5 + record_length):
raise RecordLengthError(line=line)
addr = bin[1]*256 + bin[2]
record_type = bin[3]
if not (0 <= record_type <= 5):
raise RecordTypeError(line=line)
crc = sum(bin)
crc &= 0x0FF
if crc != 0:
raise RecordChecksumError(line=line)
if record_type == 0:
# data record
addr += self._offset
for i in range_g(4, 4+record_length):
if not self._buf.get(addr, None) is None:
raise AddressOverlapError(address=addr, line=line)
self._buf[addr] = bin[i]
addr += 1 # FIXME: addr should be wrapped
# BUT after 02 record (at 64K boundary)
# and after 04 record (at 4G boundary)
elif record_type == 1:
# end of file record
if record_length != 0:
raise EOFRecordError(line=line)
raise _EndOfFile
elif record_type == 2:
# Extended 8086 Segment Record
if record_length != 2 or addr != 0:
raise ExtendedSegmentAddressRecordError(line=line)
self._offset = (bin[4]*256 + bin[5]) * 16
elif record_type == 4:
# Extended Linear Address Record
if record_length != 2 or addr != 0:
raise ExtendedLinearAddressRecordError(line=line)
self._offset = (bin[4]*256 + bin[5]) * 65536
elif record_type == 3:
# Start Segment Address Record
if record_length != 4 or addr != 0:
raise StartSegmentAddressRecordError(line=line)
if self.start_addr:
raise DuplicateStartAddressRecordError(line=line)
self.start_addr = {'CS': bin[4]*256 + bin[5],
'IP': bin[6]*256 + bin[7],
}
elif record_type == 5:
# Start Linear Address Record
if record_length != 4 or addr != 0:
raise StartLinearAddressRecordError(line=line)
if self.start_addr:
raise DuplicateStartAddressRecordError(line=line)
self.start_addr = {'EIP': (bin[4]*16777216 +
bin[5]*65536 +
bin[6]*256 +
bin[7]),
}
def loadhex(self, fobj):
"""Load hex file into internal buffer. This is not necessary
if object was initialized with source set. This will overwrite
addresses if object was already initialized.
@param fobj file name or file-like object
"""
if getattr(fobj, "read", None) is None:
fobj = open(fobj, "r")
fclose = fobj.close
else:
fclose = None
self._offset = 0
line = 0
try:
decode = self._decode_record
try:
for s in fobj:
line += 1
decode(s, line)
except _EndOfFile:
pass
finally:
if fclose:
fclose()
def loadbin(self, fobj, offset=0):
"""Load bin file into internal buffer. Not needed if source set in
constructor. This will overwrite addresses without warning
if object was already initialized.
@param fobj file name or file-like object
@param offset starting address offset
"""
fread = getattr(fobj, "read", None)
if fread is None:
f = open(fobj, "rb")
fread = f.read
fclose = f.close
else:
fclose = None
try:
self.frombytes(array('B', asbytes(fread())), offset=offset)
finally:
if fclose:
fclose()
def loadfile(self, fobj, format):
"""Load data file into internal buffer. Preferred wrapper over
loadbin or loadhex.
@param fobj file name or file-like object
@param format file format ("hex" or "bin")
"""
if format == "hex":
self.loadhex(fobj)
elif format == "bin":
self.loadbin(fobj)
else:
raise ValueError('format should be either "hex" or "bin";'
' got %r instead' % format)
# alias (to be consistent with method tofile)
fromfile = loadfile
def fromdict(self, dikt):
"""Load data from dictionary. Dictionary should contain int keys
representing addresses. Values should be the data to be stored in
those addresses in unsigned char form (i.e. not strings).
The dictionary may contain the key, ``start_addr``
to indicate the starting address of the data as described in README.
The contents of the dict will be merged with this object and will
overwrite any conflicts. This function is not necessary if the
object was initialized with source specified.
"""
s = dikt.copy()
start_addr = s.get('start_addr')
if start_addr is not None:
del s['start_addr']
for k in dict_keys_g(s):
if type(k) not in IntTypes or k < 0:
raise ValueError('Source dictionary should have only int keys')
self._buf.update(s)
if start_addr is not None:
self.start_addr = start_addr
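# Usage sketch for fromdict (illustrative only, assuming IntelHex is importable
# from the intelhex package; the addresses and byte values below are made up):
#   ih = IntelHex()
#   ih.fromdict({0x00: 0x01, 0x01: 0x02, 'start_addr': {'EIP': 0x0000}})
#   ih.write_hex_file('out.hex')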
def frombytes(self, bytes, offset=0):
"""Load data from array or list of bytes.
Similar to loadbin() method but works directly with iterable bytes.
"""
for b in bytes:
self._buf[offset] = b
offset += 1
def _get_start_end(self, start=None, end=None, size=None):
"""Return default values for start and end if they are None.
If this IntelHex object is empty then it's an error to
invoke this method with both start and end as None.
"""
if (start,end) == (None,None) and self._buf == {}:
raise EmptyIntelHexError
if size is not None:
if None not in (start, end):
raise ValueError("tobinarray: you can't use start,end and size"
" arguments in the same time")
if (start, end) == (None, None):
start = self.minaddr()
if start is not None:
end = start + size - 1
else:
start = end - size + 1
if start < 0:
raise ValueError("tobinarray: invalid size (%d) "
"for given end address (%d)" % (size,end))
else:
if start is None:
start = self.minaddr()
if end is None:
end = self.maxaddr()
if start > end:
start, end = end, start
return start, end
def tobinarray(self, start=None, end=None, pad=_DEPRECATED, size=None):
''' Convert this object to binary form as array. If start and end
unspecified, they will be inferred from the data.
@param start start address of output bytes.
@param end end address of output bytes (inclusive).
@param pad [DEPRECATED PARAMETER, please use self.padding instead]
fill empty spaces with this value
(if pad is None then this method uses self.padding).
@param size size of the block, used with start or end parameter.
@return array of unsigned char data.
'''
if not isinstance(pad, _DeprecatedParam):
print ("IntelHex.tobinarray: 'pad' parameter is deprecated.")
if pad is not None:
print ("Please, use IntelHex.padding attribute instead.")
else:
print ("Please, don't pass it explicitly.")
print ("Use syntax like this: ih.tobinarray(start=xxx, end=yyy, size=zzz)")
else:
pad = None
return self._tobinarray_really(start, end, pad, size)
def _tobinarray_really(self, start, end, pad, size):
"""Return binary array."""
if pad is None:
pad = self.padding
bin = array('B')
if self._buf == {} and None in (start, end):
return bin
if size is not None and size <= 0:
raise ValueError("tobinarray: wrong value for size")
start, end = self._get_start_end(start, end, size)
for i in range_g(start, end+1):
bin.append(self._buf.get(i, pad))
return bin
def tobinstr(self, start=None, end=None, pad=_DEPRECATED, size=None):
''' Convert to binary form and return as binary string.
@param start start address of output bytes.
@param end end address of output bytes (inclusive).
@param pad [DEPRECATED PARAMETER, please use self.padding instead]
fill empty spaces with this value
(if pad is None then this method uses self.padding).
@param size size of the block, used with start or end parameter.
@return bytes string of binary data.
'''
if not isinstance(pad, _DeprecatedParam):
print ("IntelHex.tobinstr: 'pad' parameter is deprecated.")
if pad is not None:
print ("Please, use IntelHex.padding attribute instead.")
else:
print ("Please, don't pass it explicitly.")
print ("Use syntax like this: ih.tobinstr(start=xxx, end=yyy, size=zzz)")
else:
pad = None
return self._tobinstr_really(start, end, pad, size)
def _tobinstr_really(self, start, end, pad, size):
return array_tobytes(self._tobinarray_really(start, end, pad, size))
def tobinfile(self, fobj, start=None, end=None, pad=_DEPRECATED, size=None):
'''Convert to binary and write to file.
@param fobj file name or file object for writing output bytes.
@param start start address of output bytes.
@param end end address of output bytes (inclusive).
@param pad [DEPRECATED PARAMETER, please use self.padding instead]
fill empty spaces with this value
(if pad is None then this method uses self.padding).
@param size size of the block, used with start or end parameter.
'''
if not isinstance(pad, _DeprecatedParam):
print ("IntelHex.tobinfile: 'pad' parameter is deprecated.")
if pad is not None:
print ("Please, use IntelHex.padding attribute instead.")
else:
print ("Please, don't pass it explicitly.")
print ("Use syntax like this: ih.tobinfile(start=xxx, end=yyy, size=zzz)")
else:
pad = None
if getattr(fobj, "write", None) is None:
fobj = open(fobj, "wb")
close_fd = True
else:
close_fd = False
fobj.write(self._tobinstr_really(start, end, pad, size))
if close_fd:
fobj.close()
def todict(self):
'''Convert to python dictionary.
@return dict suitable for initializing another IntelHex object.
'''
r = {}
r.update(self._buf)
if self.start_addr:
r['start_addr'] = self.start_addr
return r
def addresses(self):
'''Returns all used addresses in sorted order.
@return list of occupied data addresses in sorted order.
'''
aa = dict_keys(self._buf)
aa.sort()
return aa
def minaddr(self):
'''Get minimal address of HEX content.
@return minimal address or None if no data
'''
aa = dict_keys(self._buf)
if aa == []:
return None
else:
return min(aa)
def maxaddr(self):
'''Get maximal address of HEX content.
@return maximal address or None if no data
'''
aa = dict_keys(self._buf)
if aa == []:
return None
else:
return max(aa)
def __getitem__(self, addr):
''' Get requested byte from address.
@param addr address of byte.
@return byte if address exists in HEX file, or self.padding
if no data found.
'''
t = type(addr)
if t in IntTypes:
if addr < 0:
raise TypeError('Address should be >= 0.')
return self._buf.get(addr, self.padding)
elif t == slice:
addresses = dict_keys(self._buf)
ih = IntelHex()
if addresses:
addresses.sort()
start = addr.start or addresses[0]
stop = addr.stop or (addresses[-1]+1)
step = addr.step or 1
for i in range_g(start, stop, step):
x = self._buf.get(i)
if x is not None:
ih[i] = x
return ih
else:
raise TypeError('Address has unsupported type: %s' % t)
def __setitem__(self, addr, byte):
"""Set byte at address."""
t = type(addr)
if t in IntTypes:
if addr < 0:
raise TypeError('Address should be >= 0.')
self._buf[addr] = byte
elif t == slice:
if not isinstance(byte, (list, tuple)):
raise ValueError('Slice operation expects sequence of bytes')
start = addr.start
stop = addr.stop
step = addr.step or 1
if None not in (start, stop):
ra = range_l(start, stop, step)
if len(ra) != len(byte):
raise ValueError('Length of bytes sequence does not match '
'address range')
elif (start, stop) == (None, None):
raise TypeError('Unsupported address range')
elif start is None:
start = stop - len(byte)
elif stop is None:
stop = start + len(byte)
if start < 0:
raise TypeError('start address cannot be negative')
if stop < 0:
raise TypeError('stop address cannot be negative')
j = 0
for i in range_g(start, stop, step):
self._buf[i] = byte[j]
j += 1
else:
raise TypeError('Address has unsupported type: %s' % t)
def __delitem__(self, addr):
"""Delete byte at address."""
t = type(addr)
if t in IntTypes:
if addr < 0:
raise TypeError('Address should be >= 0.')
del self._buf[addr]
elif t == slice:
addresses = dict_keys(self._buf)
if addresses:
addresses.sort()
start = addr.start or addresses[0]
stop = addr.stop or (addresses[-1]+1)
step = addr.step or 1
for i in range_g(start, stop, step):
x = self._buf.get(i)
if x is not None:
del self._buf[i]
else:
raise TypeError('Address has unsupported type: %s' % t)
def __len__(self):
"""Return count of bytes with real values."""
return len(dict_keys(self._buf))
def _get_eol_textfile(eolstyle, platform):
if eolstyle == 'native':
return '\n'
elif eolstyle == 'CRLF':
if platform != 'win32':
return '\r\n'
else:
return '\n'
else:
raise ValueError("wrong eolstyle %s" % repr(eolstyle))
_get_eol_textfile = staticmethod(_get_eol_textfile)
def write_hex_file(self, f, write_start_addr=True, eolstyle='native'):
"""Write data to file f in HEX format.
@param f filename or file-like object for writing
@param write_start_addr enable or disable writing start address
record to file (enabled by default).
If there is no start address in obj, nothing
will be written regardless of this setting.
@param eolstyle can be used to force CRLF line-endings
for output file on different platforms.
Supported eol styles: 'native', 'CRLF'.
"""
fwrite = getattr(f, "write", None)
if fwrite:
fobj = f
fclose = None
else:
fobj = open(f, 'w')
fwrite = fobj.write
fclose = fobj.close
eol = IntelHex._get_eol_textfile(eolstyle, sys.platform)
# Translation table for uppercasing hex ascii string.
# timeit shows that using hexstr.translate(table)
# is faster than hexstr.upper():
# 0.452ms vs. 0.652ms (translate vs. upper)
if sys.version_info[0] >= 3:
# Python 3
table = bytes(range_l(256)).upper()
else:
# Python 2
table = ''.join(chr(i).upper() for i in range_g(256))
# start address record if any
if self.start_addr and write_start_addr:
keys = dict_keys(self.start_addr)
keys.sort()
bin = array('B', asbytes('\0'*9))
if keys == ['CS','IP']:
# Start Segment Address Record
bin[0] = 4 # reclen
bin[1] = 0 # offset msb
bin[2] = 0 # offset lsb
bin[3] = 3 # rectyp
cs = self.start_addr['CS']
bin[4] = (cs >> 8) & 0x0FF
bin[5] = cs & 0x0FF
ip = self.start_addr['IP']
bin[6] = (ip >> 8) & 0x0FF
bin[7] = ip & 0x0FF
bin[8] = (-sum(bin)) & 0x0FF # chksum
fwrite(':' +
asstr(hexlify(array_tobytes(bin)).translate(table)) +
eol)
elif keys == ['EIP']:
# Start Linear Address Record
bin[0] = 4 # reclen
bin[1] = 0 # offset msb
bin[2] = 0 # offset lsb
bin[3] = 5 # rectyp
eip = self.start_addr['EIP']
bin[4] = (eip >> 24) & 0x0FF
bin[5] = (eip >> 16) & 0x0FF
bin[6] = (eip >> 8) & 0x0FF
bin[7] = eip & 0x0FF
bin[8] = (-sum(bin)) & 0x0FF # chksum
fwrite(':' +
asstr(hexlify(array_tobytes(bin)).translate(table)) +
eol)
else:
if fclose:
fclose()
raise InvalidStartAddressValueError(start_addr=self.start_addr)
# data
addresses = dict_keys(self._buf)
addresses.sort()
addr_len = len(addresses)
if addr_len:
minaddr = addresses[0]
maxaddr = addresses[-1]
if maxaddr > 65535:
need_offset_record = True
else:
need_offset_record = False
high_ofs = 0
cur_addr = minaddr
cur_ix = 0
while cur_addr <= maxaddr:
if need_offset_record:
bin = array('B', asbytes('\0'*7))
bin[0] = 2 # reclen
bin[1] = 0 # offset msb
bin[2] = 0 # offset lsb
bin[3] = 4 # rectyp
high_ofs = int(cur_addr>>16)
b = divmod(high_ofs, 256)
bin[4] = b[0] # msb of high_ofs
bin[5] = b[1] # lsb of high_ofs
bin[6] = (-sum(bin)) & 0x0FF # chksum
fwrite(':' +
asstr(hexlify(array_tobytes(bin)).translate(table)) +
eol)
while True:
# produce one record
low_addr = cur_addr & 0x0FFFF
# chain_len off by 1
chain_len = min(15, 65535-low_addr, maxaddr-cur_addr)
# search continuous chain
stop_addr = cur_addr + chain_len
if chain_len:
ix = bisect_right(addresses, stop_addr,
cur_ix,
min(cur_ix+chain_len+1, addr_len))
chain_len = ix - cur_ix # real chain_len
# there could be small holes in the chain
# but we will catch them by try-except later
# so for big continuous files we will work
# at maximum possible speed
else:
chain_len = 1 # real chain_len
bin = array('B', asbytes('\0'*(5+chain_len)))
b = divmod(low_addr, 256)
bin[1] = b[0] # msb of low_addr
bin[2] = b[1] # lsb of low_addr
bin[3] = 0 # rectype
try: # if there is small holes we'll catch them
for i in range_g(chain_len):
bin[4+i] = self._buf[cur_addr+i]
except KeyError:
# we catch a hole so we should shrink the chain
chain_len = i
bin = bin[:5+i]
bin[0] = chain_len
bin[4+chain_len] = (-sum(bin)) & 0x0FF # chksum
fwrite(':' +
asstr(hexlify(array_tobytes(bin)).translate(table)) +
eol)
# adjust cur_addr/cur_ix
cur_ix += chain_len
if cur_ix < addr_len:
cur_addr = addresses[cur_ix]
else:
cur_addr = maxaddr + 1
break
high_addr = int(cur_addr>>16)
if high_addr > high_ofs:
break
# end-of-file record
fwrite(":00000001FF"+eol)
if fclose:
fclose()
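# Illustrative usage sketch for write_hex_file() (not part of the original
# module; the helper name is hypothetical and never called by the library):
# write a small object to an in-memory text stream.
def _example_write_hex_file():
    from io import StringIO
    ih = IntelHex()
    ih.puts(0, b'\x01\x02\x03\x04')          # four data bytes at address 0
    sio = StringIO()
    ih.write_hex_file(sio, eolstyle='CRLF')
    text = sio.getvalue()
    assert ':00000001FF' in text             # one data record plus the EOF record
    return text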
def tofile(self, fobj, format):
"""Write data to hex or bin file. Preferred method over tobin or tohex.
@param fobj file name or file-like object
@param format file format ("hex" or "bin")
"""
if format == 'hex':
self.write_hex_file(fobj)
elif format == 'bin':
self.tobinfile(fobj)
else:
raise ValueError('format should be either "hex" or "bin";'
' got %r instead' % format)
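# Illustrative usage sketch for tofile() (not part of the original module);
# the output file names below are hypothetical.
def _example_tofile():
    ih = IntelHex()
    ih[0] = 0x3F
    ih.tofile('demo.hex', format='hex')      # same output as write_hex_file('demo.hex')
    ih.tofile('demo.bin', format='bin')      # raw binary image instead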
def gets(self, addr, length):
"""Get string of bytes from given address. If any entries are blank
from addr through addr+length, a NotEnoughDataError exception will
be raised. Padding is not used.
"""
a = array('B', asbytes('\0'*length))
try:
for i in range_g(length):
a[i] = self._buf[addr+i]
except KeyError:
raise NotEnoughDataError(address=addr, length=length)
return array_tobytes(a)
def puts(self, addr, s):
"""Put string of bytes at given address. Will overwrite any previous
entries.
"""
a = array('B', asbytes(s))
for i in range_g(len(a)):
self._buf[addr+i] = a[i]
def getsz(self, addr):
"""Get zero-terminated bytes string from given address. Will raise
NotEnoughDataError exception if a hole is encountered before a 0.
"""
i = 0
try:
while True:
if self._buf[addr+i] == 0:
break
i += 1
except KeyError:
raise NotEnoughDataError(msg=('Bad access at 0x%X: '
'not enough data to read zero-terminated string') % addr)
return self.gets(addr, i)
def putsz(self, addr, s):
"""Put bytes string in object at addr and append terminating zero at end."""
self.puts(addr, s)
self._buf[addr+len(s)] = 0
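# Illustrative usage sketch for the byte-string accessors above (not part of
# the original module).
def _example_byte_strings():
    ih = IntelHex()
    ih.puts(0x10, b'ABC')                    # bytes at 0x10..0x12
    assert ih.gets(0x10, 3) == b'ABC'
    ih.putsz(0x20, b'hi')                    # b'hi' plus a terminating 0x00
    assert ih.getsz(0x20) == b'hi'
    assert ih[0x22] == 0                     # the terminator itself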
def dump(self, tofile=None, width=16, withpadding=False):
"""Dump object content to specified file object or to stdout if None.
Format is a hexdump with some header information at the beginning,
addresses on the left, and data on right.
@param tofile file-like object to dump to
@param width number of bytes per line (i.e. columns)
@param withpadding print padding character instead of '--'
@raise ValueError if width is not a positive integer
"""
if not isinstance(width,int) or width < 1:
raise ValueError('width must be a positive integer.')
# The integer can be of float type - does not work with bit operations
width = int(width)
if tofile is None:
tofile = sys.stdout
# start addr possibly
if self.start_addr is not None:
cs = self.start_addr.get('CS')
ip = self.start_addr.get('IP')
eip = self.start_addr.get('EIP')
if eip is not None and cs is None and ip is None:
tofile.write('EIP = 0x%08X\n' % eip)
elif eip is None and cs is not None and ip is not None:
tofile.write('CS = 0x%04X, IP = 0x%04X\n' % (cs, ip))
else:
tofile.write('start_addr = %r\n' % self.start_addr)
# actual data
addresses = dict_keys(self._buf)
if addresses:
addresses.sort()
minaddr = addresses[0]
maxaddr = addresses[-1]
startaddr = (minaddr // width) * width
endaddr = ((maxaddr // width) + 1) * width
maxdigits = max(len(hex(endaddr)) - 2, 4) # Less 2 to exclude '0x'
templa = '%%0%dX' % maxdigits
rangewidth = range_l(width)
if withpadding:
pad = self.padding
else:
pad = None
for i in range_g(startaddr, endaddr, width):
tofile.write(templa % i)
tofile.write(' ')
s = []
for j in rangewidth:
x = self._buf.get(i+j, pad)
if x is not None:
tofile.write(' %02X' % x)
if 32 <= x < 127: # GNU less does not like 0x7F (127 decimal) so we'd better show it as dot
s.append(chr(x))
else:
s.append('.')
else:
tofile.write(' --')
s.append(' ')
tofile.write(' |' + ''.join(s) + '|\n')
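# Illustrative usage sketch for dump() (not part of the original module):
# holes show up as '--' and the right-hand column is the ASCII view.
def _example_dump():
    from io import StringIO
    ih = IntelHex()
    ih.puts(0, b'Hello')
    out = StringIO()
    ih.dump(tofile=out, width=8)
    # e.g. '0000  48 65 6C 6C 6F -- -- -- |Hello   |'
    return out.getvalue()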
def merge(self, other, overlap='error'):
"""Merge content of other IntelHex object into current object (self).
@param other other IntelHex object.
@param overlap action on overlap of data or starting addr:
- error: raising OverlapError;
- ignore: ignore other data and keep current data
in overlapping region;
- replace: replace data with other data
in overlapping region.
@raise TypeError if other is not instance of IntelHex
@raise ValueError if other is the same object as self
(it can't merge itself)
@raise ValueError if overlap argument has incorrect value
@raise AddressOverlapError on overlapped data
"""
# check args
if not isinstance(other, IntelHex):
raise TypeError('other should be IntelHex object')
if other is self:
raise ValueError("Can't merge itself")
if overlap not in ('error', 'ignore', 'replace'):
raise ValueError("overlap argument should be either "
"'error', 'ignore' or 'replace'")
# merge data
this_buf = self._buf
other_buf = other._buf
for i in other_buf:
if i in this_buf:
if overlap == 'error':
raise AddressOverlapError(
'Data overlapped at address 0x%X' % i)
elif overlap == 'ignore':
continue
this_buf[i] = other_buf[i]
# merge start_addr
if self.start_addr != other.start_addr:
if self.start_addr is None: # set start addr from other
self.start_addr = other.start_addr
elif other.start_addr is None: # keep existing start addr
pass
else: # conflict
if overlap == 'error':
raise AddressOverlapError(
'Starting addresses are different')
elif overlap == 'replace':
self.start_addr = other.start_addr
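# Illustrative usage sketch for merge() (not part of the original module):
# two objects overlap at address 0x01 and the other object's data wins.
def _example_merge():
    a = IntelHex()
    a.puts(0x00, b'\xAA\xBB')
    b = IntelHex()
    b.puts(0x01, b'\xCC\xDD')                # overlaps `a` at 0x01
    a.merge(b, overlap='replace')            # overlap='error' would raise AddressOverlapError
    assert a[0x01] == 0xCC and a[0x02] == 0xDD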
def segments(self):
"""Return a list of ordered tuple objects, representing contiguous occupied data addresses.
Each tuple has a length of two and follows the semantics of the range and xrange objects.
The second entry of the tuple is always an integer greater than the first entry.
"""
addresses = self.addresses()
if not addresses:
return []
elif len(addresses) == 1:
return([(addresses[0], addresses[0]+1)])
adjacent_differences = [(b - a) for (a, b) in zip(addresses[:-1], addresses[1:])]
breaks = [i for (i, x) in enumerate(adjacent_differences) if x > 1]
endings = [addresses[b] for b in breaks]
endings.append(addresses[-1])
beginings = [addresses[b+1] for b in breaks]
beginings.insert(0, addresses[0])
return [(a, b+1) for (a, b) in zip(beginings, endings)]
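# Illustrative usage sketch for segments() (not part of the original module):
# two separate occupied regions come back as two (start, stop) tuples.
def _example_segments():
    ih = IntelHex()
    ih.puts(0x00, b'\x01\x02\x03')           # occupies 0x00..0x02
    ih[0x10] = 0xFF                          # isolated second region
    assert ih.segments() == [(0x00, 0x03), (0x10, 0x11)]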
def get_memory_size(self):
"""Returns the approximate memory footprint for data."""
n = sys.getsizeof(self)
n += sys.getsizeof(self.padding)
n += total_size(self.start_addr)
n += total_size(self._buf)
n += sys.getsizeof(self._offset)
return n
#/IntelHex
class IntelHex16bit(IntelHex):
"""Access to data as 16-bit words. Intended to use with Microchip HEX files."""
def __init__(self, source=None):
"""Construct class from HEX file
or from instance of ordinary IntelHex class. If IntelHex object
is passed as source, the original IntelHex object should not be used
again because this class will alter it. This class leaves padding
alone unless it was precisely 0xFF. In that instance it is sign
extended to 0xFFFF.
@param source file name of HEX file or file object
or instance of ordinary IntelHex class.
Will also accept dictionary from todict method.
"""
if isinstance(source, IntelHex):
# from ihex8
self.padding = source.padding
self.start_addr = source.start_addr
# private members
self._buf = source._buf
self._offset = source._offset
elif isinstance(source, dict):
raise IntelHexError("IntelHex16bit does not support initialization from dictionary yet.\n"
"Patches are welcome.")
else:
IntelHex.__init__(self, source)
if self.padding == 0x0FF:
self.padding = 0x0FFFF
def __getitem__(self, addr16):
"""Get 16-bit word from address.
Raise error if only one byte from the pair is set.
We assume a Little Endian interpretation of the hex file.
@param addr16 address of word (addr8 = 2 * addr16).
@return word if bytes exist in HEX file, or self.padding
if no data found.
"""
addr1 = addr16 * 2
addr2 = addr1 + 1
byte1 = self._buf.get(addr1, None)
byte2 = self._buf.get(addr2, None)
if byte1 != None and byte2 != None:
return byte1 | (byte2 << 8) # low endian
if byte1 == None and byte2 == None:
return self.padding
raise BadAccess16bit(address=addr16)
def __setitem__(self, addr16, word):
"""Sets the address at addr16 to word assuming Little Endian mode.
"""
addr_byte = addr16 * 2
b = divmod(word, 256)
self._buf[addr_byte] = b[1]
self._buf[addr_byte+1] = b[0]
def minaddr(self):
'''Get minimal address of HEX content in 16-bit mode.
@return minimal address used in this object
'''
aa = dict_keys(self._buf)
if aa == []:
return 0
else:
return min(aa)>>1
def maxaddr(self):
'''Get maximal address of HEX content in 16-bit mode.
@return maximal address used in this object
'''
aa = dict_keys(self._buf)
if aa == []:
return 0
else:
return max(aa)>>1
def tobinarray(self, start=None, end=None, size=None):
'''Convert this object to binary form as array (of 2-bytes word data).
If start and end unspecified, they will be inferred from the data.
@param start start address of output data.
@param end end address of output data (inclusive).
@param size size of the block (number of words),
used with start or end parameter.
@return array of unsigned short (uint16_t) data.
'''
bin = array('H')
if self._buf == {} and None in (start, end):
return bin
if size is not None and size <= 0:
raise ValueError("tobinarray: wrong value for size")
start, end = self._get_start_end(start, end, size)
for addr in range_g(start, end+1):
bin.append(self[addr])
return bin
#/class IntelHex16bit
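# Illustrative usage sketch for IntelHex16bit (not part of the original
# module): words are stored little-endian, and unset words read back as the
# 16-bit padding value.
def _example_intelhex16bit():
    ih16 = IntelHex16bit()
    ih16[0] = 0x1234                         # bytes 0x34, 0x12 at addresses 0 and 1
    assert ih16[0] == 0x1234
    assert ih16.tobinarray(start=0, end=0)[0] == 0x1234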
def hex2bin(fin, fout, start=None, end=None, size=None, pad=None):
"""Hex-to-Bin convertor engine.
@return 0 if all OK
@param fin input hex file (filename or file-like object)
@param fout output bin file (filename or file-like object)
@param start start of address range (optional)
@param end end of address range (inclusive; optional)
@param size size of resulting file (in bytes) (optional)
@param pad padding byte (optional)
"""
try:
h = IntelHex(fin)
except HexReaderError:
e = sys.exc_info()[1] # current exception
txt = "ERROR: bad HEX file: %s" % str(e)
print(txt)
return 1
# start, end, size
if size != None and size != 0:
if end == None:
if start == None:
start = h.minaddr()
end = start + size - 1
else:
if (end+1) >= size:
start = end + 1 - size
else:
start = 0
try:
if pad is not None:
# using .padding attribute rather than pad argument to function call
h.padding = pad
h.tobinfile(fout, start, end)
except IOError:
e = sys.exc_info()[1] # current exception
txt = "ERROR: Could not write to file: %s: %s" % (fout, str(e))
print(txt)
return 1
return 0
#/def hex2bin
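# Illustrative usage sketch for hex2bin() (not part of the original module);
# the file names are hypothetical and pad fills holes in the address range.
def _example_hex2bin():
    return hex2bin('firmware.hex', 'firmware.bin', pad=0xFF)   # 0 on success, 1 on error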
def bin2hex(fin, fout, offset=0):
"""Simple bin-to-hex convertor.
@return 0 if all OK
@param fin input bin file (filename or file-like object)
@param fout output hex file (filename or file-like object)
@param offset starting address offset for loading bin
"""
h = IntelHex()
try:
h.loadbin(fin, offset)
except IOError:
e = sys.exc_info()[1] # current exception
txt = "ERROR: unable to load bin file: %s" % str(e)
print(txt)
return 1
try:
h.tofile(fout, format='hex')
except IOError:
e = sys.exc_info()[1] # current exception
txt = "ERROR: Could not write to file: %s: %s" % (fout, str(e))
print(txt)
return 1
return 0
#/def bin2hex
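# Illustrative usage sketch for bin2hex() (not part of the original module);
# the file names and load offset are hypothetical.
def _example_bin2hex():
    return bin2hex('firmware.bin', 'firmware.hex', offset=0x8000)   # 0 on success, 1 on error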
def diff_dumps(ih1, ih2, tofile=None, name1="a", name2="b", n_context=3):
"""Diff 2 IntelHex objects and produce unified diff output for their
hex dumps.
@param ih1 first IntelHex object to compare
@param ih2 second IntelHex object to compare
@param tofile file-like object to write output
@param name1 name of the first hex file to show in the diff header
@param name2 name of the second hex file to show in the diff header
@param n_context number of context lines in the unidiff output
"""
def prepare_lines(ih):
sio = StringIO()
ih.dump(sio)
dump = sio.getvalue()
lines = dump.splitlines()
return lines
a = prepare_lines(ih1)
b = prepare_lines(ih2)
import difflib
result = list(difflib.unified_diff(a, b, fromfile=name1, tofile=name2, n=n_context, lineterm=''))
if tofile is None:
tofile = sys.stdout
output = '\n'.join(result)+'\n'
tofile.write(output)
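# Illustrative usage sketch for diff_dumps() (not part of the original
# module): produce a unified diff of the hex dumps of two objects that
# differ at address 0. The file names are only labels for the diff header.
def _example_diff_dumps():
    from io import StringIO
    a = IntelHex()
    a[0] = 0x00
    b = IntelHex()
    b[0] = 0xFF
    out = StringIO()
    diff_dumps(a, b, tofile=out, name1='old.hex', name2='new.hex')
    return out.getvalue()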
class Record(object):
"""Helper methods to build valid ihex records."""
def _from_bytes(bytes):
"""Takes a list of bytes, computes the checksum, and outputs the entire
record as a string. bytes should be the hex record without the colon
or final checksum.
@param bytes list of byte values so far to pack into record.
@return String representation of one HEX record
"""
assert len(bytes) >= 4
# calculate checksum
s = (-sum(bytes)) & 0x0FF
bin = array('B', bytes + [s])
return ':' + asstr(hexlify(array_tobytes(bin))).upper()
_from_bytes = staticmethod(_from_bytes)
def data(offset, bytes):
"""Return Data record. This constructs the full record, including
the length information, the record type (0x00), the
checksum, and the offset.
@param offset load offset of first byte.
@param bytes list of byte values to pack into record.
@return String representation of one HEX record
"""
assert 0 <= offset < 65536
assert 0 < len(bytes) < 256
b = [len(bytes), (offset>>8)&0x0FF, offset&0x0FF, 0x00] + bytes
return Record._from_bytes(b)
data = staticmethod(data)
def eof():
"""Return End of File record as a string.
@return String representation of Intel Hex EOF record
"""
return ':00000001FF'
eof = staticmethod(eof)
def extended_segment_address(usba):
"""Return Extended Segment Address Record.
@param usba Upper Segment Base Address.
@return String representation of Intel Hex USBA record.
"""
b = [2, 0, 0, 0x02, (usba>>8)&0x0FF, usba&0x0FF]
return Record._from_bytes(b)
extended_segment_address = staticmethod(extended_segment_address)
def start_segment_address(cs, ip):
"""Return Start Segment Address Record.
@param cs 16-bit value for CS register.
@param ip 16-bit value for IP register.
@return String representation of Intel Hex SSA record.
"""
b = [4, 0, 0, 0x03, (cs>>8)&0x0FF, cs&0x0FF,
(ip>>8)&0x0FF, ip&0x0FF]
return Record._from_bytes(b)
start_segment_address = staticmethod(start_segment_address)
def extended_linear_address(ulba):
"""Return Extended Linear Address Record.
@param ulba Upper Linear Base Address.
@return String representation of Intel Hex ELA record.
"""
b = [2, 0, 0, 0x04, (ulba>>8)&0x0FF, ulba&0x0FF]
return Record._from_bytes(b)
extended_linear_address = staticmethod(extended_linear_address)
def start_linear_address(eip):
"""Return Start Linear Address Record.
@param eip 32-bit linear address for the EIP register.
@return String representation of Intel Hex SLA record.
"""
b = [4, 0, 0, 0x05, (eip>>24)&0x0FF, (eip>>16)&0x0FF,
(eip>>8)&0x0FF, eip&0x0FF]
return Record._from_bytes(b)
start_linear_address = staticmethod(start_linear_address)
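# Illustrative usage sketch for Record (not part of the original module):
# hand-build the data record for four bytes at offset 0 plus the EOF record.
def _example_record():
    assert Record.data(0x0000, [0x01, 0x02, 0x03, 0x04]) == ':0400000001020304F2'
    assert Record.eof() == ':00000001FF'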
class _BadFileNotation(Exception):
"""Special error class to use with _get_file_and_addr_range."""
pass
def _get_file_and_addr_range(s, _support_drive_letter=None):
"""Special method for hexmerge.py script to split file notation
into 3 parts: (filename, start, end)
@raise _BadFileNotation when string cannot be safely split.
"""
if _support_drive_letter is None:
_support_drive_letter = (os.name == 'nt')
drive = ''
if _support_drive_letter:
if s[1:2] == ':' and s[0].upper() in ''.join([chr(i) for i in range_g(ord('A'), ord('Z')+1)]):
drive = s[:2]
s = s[2:]
parts = s.split(':')
n = len(parts)
if n == 1:
fname = parts[0]
fstart = None
fend = None
elif n != 3:
raise _BadFileNotation
else:
fname = parts[0]
def ascii_hex_to_int(ascii):
if ascii is not None:
try:
return int(ascii, 16)
except ValueError:
raise _BadFileNotation
return ascii
fstart = ascii_hex_to_int(parts[1] or None)
fend = ascii_hex_to_int(parts[2] or None)
return drive+fname, fstart, fend
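# Illustrative usage sketch (not part of the original module): the
# hexmerge.py notation 'file.hex:START:END' uses hexadecimal addresses.
def _example_file_notation():
    assert _get_file_and_addr_range('data.hex:0100:0200') == ('data.hex', 0x100, 0x200)
    assert _get_file_and_addr_range('data.hex') == ('data.hex', None, None)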
##
# IntelHex Errors Hierarchy:
#
# IntelHexError - basic error
# HexReaderError - general hex reader error
# AddressOverlapError - data for the same address overlap
# HexRecordError - hex record decoder base error
# RecordLengthError - record has invalid length
# RecordTypeError - record has invalid type (RECTYP)
# RecordChecksumError - record checksum mismatch
# EOFRecordError - invalid EOF record (type 01)
# ExtendedAddressRecordError - extended address record base error
# ExtendedSegmentAddressRecordError - invalid extended segment address record (type 02)
# ExtendedLinearAddressRecordError - invalid extended linear address record (type 04)
# StartAddressRecordError - start address record base error
# StartSegmentAddressRecordError - invalid start segment address record (type 03)
# StartLinearAddressRecordError - invalid start linear address record (type 05)
# DuplicateStartAddressRecordError - start address record appears twice
# InvalidStartAddressValueError - invalid value of start addr record
# _EndOfFile - it's not real error, used internally by hex reader as signal that EOF record found
# BadAccess16bit - not enough data to read 16 bit value (deprecated, see NotEnoughDataError)
# NotEnoughDataError - not enough data to read N contiguous bytes
# EmptyIntelHexError - requested operation cannot be performed with empty object
class IntelHexError(Exception):
'''Base Exception class for IntelHex module'''
_fmt = 'IntelHex base error' #: format string
def __init__(self, msg=None, **kw):
"""Initialize the Exception with the given message.
"""
self.msg = msg
for key, value in dict_items_g(kw):
setattr(self, key, value)
def __str__(self):
"""Return the message in this Exception."""
if self.msg:
return self.msg
try:
return self._fmt % self.__dict__
except (NameError, ValueError, KeyError):
e = sys.exc_info()[1] # current exception
return 'Unprintable exception %s: %s' \
% (repr(e), str(e))
class _EndOfFile(IntelHexError):
"""Used for internal needs only."""
_fmt = 'EOF record reached -- signal to stop read file'
class HexReaderError(IntelHexError):
_fmt = 'Hex reader base error'
class AddressOverlapError(HexReaderError):
_fmt = 'Hex file has data overlap at address 0x%(address)X on line %(line)d'
# class NotAHexFileError was removed in trunk.revno.54 because it's not used
class HexRecordError(HexReaderError):
_fmt = 'Hex file contains invalid record at line %(line)d'
class RecordLengthError(HexRecordError):
_fmt = 'Record at line %(line)d has invalid length'
class RecordTypeError(HexRecordError):
_fmt = 'Record at line %(line)d has invalid record type'
class RecordChecksumError(HexRecordError):
_fmt = 'Record at line %(line)d has invalid checksum'
class EOFRecordError(HexRecordError):
_fmt = 'File has invalid End-of-File record'
class ExtendedAddressRecordError(HexRecordError):
_fmt = 'Base class for extended address exceptions'
class ExtendedSegmentAddressRecordError(ExtendedAddressRecordError):
_fmt = 'Invalid Extended Segment Address Record at line %(line)d'
class ExtendedLinearAddressRecordError(ExtendedAddressRecordError):
_fmt = 'Invalid Extended Linear Address Record at line %(line)d'
class StartAddressRecordError(HexRecordError):
_fmt = 'Base class for start address exceptions'
class StartSegmentAddressRecordError(StartAddressRecordError):
_fmt = 'Invalid Start Segment Address Record at line %(line)d'
class StartLinearAddressRecordError(StartAddressRecordError):
_fmt = 'Invalid Start Linear Address Record at line %(line)d'
class DuplicateStartAddressRecordError(StartAddressRecordError):
_fmt = 'Start Address Record appears twice at line %(line)d'
class InvalidStartAddressValueError(StartAddressRecordError):
_fmt = 'Invalid start address value: %(start_addr)s'
class NotEnoughDataError(IntelHexError):
_fmt = ('Bad access at 0x%(address)X: '
'not enough data to read %(length)d contiguous bytes')
class BadAccess16bit(NotEnoughDataError):
_fmt = 'Bad access at 0x%(address)X: not enough data to read 16 bit value'
class EmptyIntelHexError(IntelHexError):
_fmt = "Requested operation cannot be executed with empty object"
| 37.75
| 117
| 0.550139
|
8310926db5ab9fc50319b558c9643a8d83d3fb0a
| 8,691
|
py
|
Python
|
test/functional/tl_managed.py
|
patrickdugan/BlockPo-to-Tradelayer
|
ba1ebf3c329751d414302577a09481ba28db1815
|
[
"MIT"
] | null | null | null |
test/functional/tl_managed.py
|
patrickdugan/BlockPo-to-Tradelayer
|
ba1ebf3c329751d414302577a09481ba28db1815
|
[
"MIT"
] | 5
|
2021-06-21T21:21:53.000Z
|
2021-06-22T20:10:16.000Z
|
test/functional/tl_managed.py
|
patrickdugan/BlockPo-to-Tradelayer
|
ba1ebf3c329751d414302577a09481ba28db1815
|
[
"MIT"
] | 1
|
2021-06-21T21:14:45.000Z
|
2021-06-21T21:14:45.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test basic for Creating tokens ."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import os
import json
import http.client
import urllib.parse
class ManagedBasicsTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [["-txindex=1"]]
def setup_chain(self):
super().setup_chain()
#Append rpcauth to bitcoin.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcuser = "rpcuser=rpcuser💻"
rpcpassword = "rpcpassword=rpcpassword🔑"
with open(os.path.join(self.options.tmpdir+"/node0", "litecoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
def run_test(self):
self.log.info("Preparing the workspace...")
# mining 200 blocks
self.nodes[0].generate(200)
################################################################################
# Checking RPC tl_sendissuancemanaged and tl_sendgrant (in the first 200 blocks of the chain) #
################################################################################
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
addresses = []
accounts = ["john", "doe", "another"]
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
self.log.info("Creating sender address")
addresses = tradelayer_createAddresses(accounts, conn, headers)
self.log.info("Funding addresses with LTC")
amount = 0.1
tradelayer_fundingAddresses(addresses, amount, conn, headers)
self.log.info("Checking the LTC balance in every account")
tradelayer_checkingBalance(accounts, amount, conn, headers)
self.log.info("Self Attestation for addresses")
tradelayer_selfAttestation(addresses,conn, headers)
self.log.info("Checking attestations")
out = tradelayer_HTTP(conn, headers, False, "tl_list_attestation")
# self.log.info(out)
result = []
registers = out['result']
for addr in addresses:
for i in registers:
if i['att sender'] == addr and i['att receiver'] == addr and i['kyc_id'] == 0:
result.append(True)
assert_equal(result, [True, True, True])
self.log.info("Creating new tokens (sendissuancemanaged)")
array = [0]
params = str([addresses[0], 2, 0, "lihki", "", "", array]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_sendissuancemanaged",params)
# self.log.info(out)
self.nodes[0].generate(1)
self.log.info("Checking the property")
params = str([4])
out = tradelayer_HTTP(conn, headers, True, "tl_getproperty",params)
assert_equal(out['result']['propertyid'],4)
assert_equal(out['result']['name'],'lihki')
assert_equal(out['result']['issuer'], addresses[0])
assert_equal(out['result']['data'],'')
assert_equal(out['result']['url'],'')
assert_equal(out['result']['divisible'],True)
assert_equal(out['result']['totaltokens'],'0.00000000')
self.log.info("Checking token balance equal zero in every address")
for addr in addresses:
params = str([addr, 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'0.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
self.log.info("Sending 2000 tokens to receiver (sendgrant)")
params = str([addresses[0], addresses[1], 4, "2000"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_sendgrant",params)
# self.log.info(out)
self.nodes[0].generate(1)
self.log.info("Checking tokens in receiver address")
params = str([addresses[1], 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'2000.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
self.log.info("Sending 1000 tokens to from receiver to last address (using tl_send)")
params = str([addresses[1], addresses[2], 4,"1000"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_send",params)
# self.log.info(out)
assert_equal(out['error'], None)
self.nodes[0].generate(1)
self.log.info("Checking tokens in receiver address and last address")
for i in range(1, 3):
params = str([addresses[i], 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'1000.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
self.log.info("Trying to change the issuer")
params = str([addresses[0], addresses[1], 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_sendchangeissuer",params)
# self.log.info(out)
self.nodes[0].generate(1)
self.log.info("Checking the property (with new issuer)")
params = str([4])
out = tradelayer_HTTP(conn, headers, True, "tl_getproperty",params)
assert_equal(out['result']['propertyid'],4)
assert_equal(out['result']['name'],'lihki')
assert_equal(out['result']['issuer'], addresses[1])
assert_equal(out['result']['data'],'')
assert_equal(out['result']['url'],'')
assert_equal(out['result']['divisible'],True)
assert_equal(out['result']['totaltokens'],'2000.00000000')
self.log.info("Sending 264 tokens from issuer to himself (tl_sendgrant)")
params = str([addresses[1], addresses[1], 4, "264"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_sendgrant",params)
# self.log.info(out)
assert_equal(out['error'], None)
self.nodes[0].generate(1)
self.log.info("Checking issuer's balance")
params = str([addresses[1], 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'1264.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
self.log.info("Sending 888 tokens from issuer to himself (tl_send)")
params = str([addresses[1], addresses[1], 4, "888"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_send",params)
# self.log.info(out)
assert_equal(out['error']['message'], 'sending tokens to same address')
self.nodes[0].generate(1)
self.log.info("Checking issuer's balance")
params = str([addresses[1], 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'1264.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
self.log.info("Revoking tokens in issuer")
params = str([addresses[1], 4, "264"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_sendrevoke",params)
# self.log.info(out)
assert_equal(out['error'], None)
self.nodes[0].generate(1)
self.log.info("Checking issuer's balance now")
params = str([addresses[1], 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'1000.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
conn.close()
self.stop_nodes()
if __name__ == '__main__':
ManagedBasicsTest ().main ()
| 39.148649
| 128
| 0.603843
|
619b8ad9824d2b6c55555f876fde6be4dce0c856
| 5,980
|
py
|
Python
|
Project/src/Modules/House/Lighting/Outlets/_test/test_outlets.py
|
DBrianKimmel/PyHouse
|
a100fc67761a22ae47ed6f21f3c9464e2de5d54f
|
[
"MIT"
] | 3
|
2016-11-16T00:37:58.000Z
|
2019-11-10T13:10:19.000Z
|
Project/src/Modules/House/Lighting/Outlets/_test/test_outlets.py
|
DBrianKimmel/PyHouse
|
a100fc67761a22ae47ed6f21f3c9464e2de5d54f
|
[
"MIT"
] | null | null | null |
Project/src/Modules/House/Lighting/Outlets/_test/test_outlets.py
|
DBrianKimmel/PyHouse
|
a100fc67761a22ae47ed6f21f3c9464e2de5d54f
|
[
"MIT"
] | 1
|
2020-07-19T22:06:52.000Z
|
2020-07-19T22:06:52.000Z
|
"""
@name: /home/briank/workspace/PyHouse/Project/src/Modules/House/Lighting/_test/test_outlets.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2013-2020 by D. Brian Kimmel
@license: MIT License
@note: Created on Dec 7, 2019
@summary:
Passed all 8 tests - DBK - 2019-12-08
"""
__updated__ = '2020-02-09'
# Import system type stuff
from twisted.trial import unittest
from ruamel.yaml import YAML
# Import PyMh files and modules.
from _test.testing_mixin import SetupPyHouseObj
from Modules.House.Lighting.Outlets.outlets import LocalConfig as outletsConfig
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
TEST_YAML = """\
Outlets:
- Name: Musicroom Lamp
Room: Music
Comment: This is the music room lamp
Family:
Name: Insteon
Address: 11.11.11
- Name: Christmas
Comment: ??
Family:
Name: Insteon
Address: 22.22.22
- Name: Gameroom Lamp
Room: Game
Comment: Fireplace end
Family:
Name: Insteon
Address: 33.33.33
- Name: Curio
Family:
Name: Insteon
Address: 44.44.44
- Name: China Cabinet
Family:
Name: Insteon
Address: 55.55.55
"""
class SetupMixin(object):
"""
"""
def setUp(self):
self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj()
l_yaml = YAML()
self.m_test_config = l_yaml.load(TEST_YAML)
class A0(unittest.TestCase):
def test_00_Print(self):
_x = PrettyFormatAny.form('_x', 'title') # so it is defined when printing is cleaned up.
print('Id: test_outlets')
class A1_Setup(SetupMixin, unittest.TestCase):
"""
This section tests the above setup for things we will need further down in the tests.
"""
def setUp(self):
SetupMixin.setUp(self)
def test_01_Config(self):
""" Be sure that the config contains the right stuff.
"""
# print(PrettyFormatAny.form(self.m_test_config, 'A1-01-A - Config'))
# print(PrettyFormatAny.form(self.m_pyhouse_obj.House, 'PyHouse House'))
self.assertIsNotNone(self.m_test_config['Outlets'])
class C1_Read(SetupMixin, unittest.TestCase):
"""
This section tests the reading and writing of config used by lighting_lights.
"""
def setUp(self):
SetupMixin.setUp(self)
self.m_config = outletsConfig(self.m_pyhouse_obj)
def test_01_Outlet0(self):
""" Test loading outlet 0
"""
l_yaml = self.m_test_config['Outlets'][0]
# print('C1-01-A - Yaml: ', l_yaml)
l_outlet = self.m_config._extract_one_outlet(l_yaml)
# print(PrettyFormatAny.form(l_outlet, 'C1-01-B - Family'))
# print(PrettyFormatAny.form(l_outlet.Family, 'C1-01-C - Family'))
# print(PrettyFormatAny.form(l_outlet.Room, 'C1-01-d - Room'))
self.assertEqual(l_outlet.Name, 'Musicroom Lamp')
self.assertEqual(l_outlet.Comment, 'This is the music room lamp')
self.assertEqual(l_outlet.DeviceType, 'Lighting')
self.assertEqual(l_outlet.DeviceSubType, 'Outlet')
self.assertEqual(l_outlet.Family.Name, 'Insteon')
self.assertEqual(l_outlet.Family.Address, '11.11.11')
def test_02_Outlet1(self):
""" Test loading outlet 1
"""
l_yaml = self.m_test_config['Outlets'][1]
# print('C1-02-A - Yaml: ', l_yaml)
l_outlet = self.m_config._extract_one_outlet(l_yaml)
# print(PrettyFormatAny.form(l_light, 'C1-02-B - Light'))
self.assertEqual(l_outlet.Name, 'Christmas')
self.assertEqual(l_outlet.Comment, '??')
self.assertEqual(l_outlet.DeviceType, 'Lighting')
self.assertEqual(l_outlet.DeviceSubType, 'Outlet')
self.assertEqual(l_outlet.Family.Name, 'Insteon')
self.assertEqual(l_outlet.Family.Address, '22.22.22')
def test_03_Outlet2(self):
""" Test loading outlet 2
"""
l_yaml = self.m_test_config['Outlets'][2]
# print('C1-03-A - Yaml: ', l_yaml)
l_outlet = self.m_config._extract_one_outlet(l_yaml)
# print(PrettyFormatAny.form(l_outlet, 'C1-03-B - Outlet'))
self.assertEqual(l_outlet.Name, 'Gameroom Lamp')
self.assertEqual(l_outlet.Comment, 'Fireplace end')
self.assertEqual(l_outlet.DeviceType, 'Lighting')
self.assertEqual(l_outlet.DeviceSubType, 'Outlet')
self.assertEqual(l_outlet.Family.Name, 'Insteon')
self.assertEqual(l_outlet.Family.Address, '33.33.33')
def test_04_Outlets(self):
""" Test loading all outlets
"""
l_yaml = self.m_test_config['Outlets']
# print('C1-04-A - Yaml: ', l_yaml)
l_outlets = self.m_config._extract_all_outlets(l_yaml)
# print(PrettyFormatAny.form(l_outlets, 'C1-04-B - Outlets'))
self.assertEqual(l_outlets[0].Name, 'Musicroom Lamp')
self.assertEqual(l_outlets[1].Name, 'Christmas')
self.assertEqual(l_outlets[2].Name, 'Gameroom Lamp')
class C2_YamlWrite(SetupMixin, unittest.TestCase):
"""
This section tests the reading and writing of the Yaml config file used by lighting_lights.
"""
def setUp(self):
SetupMixin.setUp(self)
# self.m_obj = lightsXML().read_all_lights_xml(self.m_pyhouse_obj)
def test_01_(self):
"""Test the write for proper XML Base elements
"""
print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Lighting.Lights, 'C2-01-A - Node'))
class Z9_YamlWrite(SetupMixin, unittest.TestCase):
"""
This section tests the reading and writing of the Yaml config file used by lighting_lights.
"""
def setUp(self):
SetupMixin.setUp(self)
def test_01_(self):
"""Test the write for proper XML Base elements
"""
# print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Lighting.Lights, 'C2-01-A - Node'))
pass
# ## END DBK
| 32.5
| 99
| 0.648495
|
b21bd5733aac45bbe746c8e28bb73fa3f68b402e
| 1,767
|
py
|
Python
|
monero/backends/offline.py
|
lialsoftlab/monero-python
|
18fa9aec11bf9300e80bba21f7f22472b3972483
|
[
"BSD-3-Clause"
] | null | null | null |
monero/backends/offline.py
|
lialsoftlab/monero-python
|
18fa9aec11bf9300e80bba21f7f22472b3972483
|
[
"BSD-3-Clause"
] | null | null | null |
monero/backends/offline.py
|
lialsoftlab/monero-python
|
18fa9aec11bf9300e80bba21f7f22472b3972483
|
[
"BSD-3-Clause"
] | null | null | null |
from .. import exceptions
from ..account import Account
from ..address import Address
from ..seed import Seed
class WalletIsOffline(exceptions.BackendException):
pass
class OfflineWallet(object):
"""
Offline backend for Monero wallet. Provides support for address generation.
"""
_address = None
_svk = None
_ssk = None
def __init__(self, address, view_key=None, spend_key=None):
self._address = Address(address)
self._svk = view_key or self._svk
self._ssk = spend_key or self._ssk
def height(self):
raise WalletIsOffline()
def spend_key(self):
return self._ssk
def view_key(self):
return self._svk
def seed(self):
return Seed(self._ssk)
def accounts(self):
return [Account(self, 0)]
def new_account(self, label=None):
raise WalletIsOffline()
def addresses(self, account=0):
if account == 0:
return [self._address]
raise WalletIsOffline()
def new_address(self, account=0, label=None):
raise WalletIsOffline()
def balances(self, account=0):
raise WalletIsOffline()
def transfers_in(self, account, pmtfilter):
raise WalletIsOffline()
def transfers_out(self, account, pmtfilter):
raise WalletIsOffline()
def export_outputs(self):
raise WalletIsOffline()
def import_outputs(self, outputs_hex):
raise WalletIsOffline()
def export_key_images(self):
raise WalletIsOffline()
def import_key_images(self, key_images):
raise WalletIsOffline()
def transfer(self, *args, **kwargs):
raise WalletIsOffline()
def get_transfer_by_txid(self, account, txid):
raise WalletIsOffline()
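# Illustrative usage sketch (not part of the original module). The view key
# below is a placeholder and `address` must be a valid Monero address string;
# the backend is normally handed to a monero.wallet.Wallet instance.
def _example_offline_wallet(address):
    backend = OfflineWallet(address, view_key='0' * 64)   # placeholder view key
    print(backend.addresses())            # [Address(address)] for account 0
    try:
        backend.height()                  # anything needing a node is unavailable
    except WalletIsOffline:
        print('wallet is offline')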
| 22.948052
| 79
| 0.653084
|
3dfff51d61ad41eae0d6a875193eea22433d37d7
| 14,921
|
py
|
Python
|
tests/db_tests/test_reimporting_activities.py
|
gotiniens/workoutizer
|
a190e8f8d782b57e319cf123e9fe5e59ba6c9340
|
[
"MIT"
] | 45
|
2020-10-23T13:01:34.000Z
|
2022-03-07T08:48:16.000Z
|
tests/db_tests/test_reimporting_activities.py
|
gotiniens/workoutizer
|
a190e8f8d782b57e319cf123e9fe5e59ba6c9340
|
[
"MIT"
] | 62
|
2020-10-06T19:18:12.000Z
|
2022-01-13T19:25:34.000Z
|
tests/db_tests/test_reimporting_activities.py
|
gotiniens/workoutizer
|
a190e8f8d782b57e319cf123e9fe5e59ba6c9340
|
[
"MIT"
] | 12
|
2020-10-07T12:44:00.000Z
|
2021-09-13T06:36:24.000Z
|
import datetime
import shutil
from pathlib import Path
import pytest
import pytz
from wkz import configuration, models
from wkz.best_sections.generic import activity_suitable_for_awards
from wkz.demo import prepare_import_of_demo_activities
from wkz.io.file_importer import run_importer__dask
def test_reimport_of_activities(tracks_in_tmpdir, client):
"""
Test reimporter in following steps:
1. import demo activities
2. modify some attributes of a given activity
3. trigger reimporter
4. check that attributes have been overwritten with the original values
5. check that activity page is accessible
"""
# 1. import one cycling and one hiking activities
activity_1 = "hike_with_coordinates_muggenbrunn.fit"
prepare_import_of_demo_activities(
models,
list_of_files_to_copy=[
activity_1,
"cycling_bad_schandau.fit",
],
)
assert len(models.Sport.objects.all()) == 5
assert len(models.Settings.objects.all()) == 1
run_importer__dask(models, importing_demo_data=True)
all_activities = models.Activity.objects.all()
assert len(all_activities) == 11
assert len(models.Activity.objects.filter(sport__slug="swimming")) == 9
assert len(models.Activity.objects.filter(sport__slug="jogging")) == 0
all_cycling = models.Activity.objects.filter(sport__slug="cycling")
assert len(all_cycling) == 1
cycling = all_cycling[0]
orig_cycling_distance = cycling.distance
orig_cycling_duration = cycling.duration
orig_cycling_name = cycling.name
orig_cycling_date = cycling.date
cycling_best_sections = models.BestSection.objects.filter(activity=cycling.pk)
orig_number_of_cycling_best_sections = len(cycling_best_sections)
all_hiking = models.Activity.objects.filter(sport__slug="hiking")
assert len(all_hiking) == 1
hiking = all_hiking[0]
orig_hiking_distance = hiking.distance
orig_hiking_duration = hiking.duration
orig_hiking_name = hiking.name
# check that min and max altitude got imported, related to bug fix
assert hiking.trace_file.max_altitude is not None
assert hiking.trace_file.min_altitude is not None
hiking_fastest_sections = models.BestSection.objects.get(activity=hiking.pk, distance=1000, kind="fastest")
orig_1km_fastest_start = hiking_fastest_sections.start
orig_1km_velocity = hiking_fastest_sections.max_value
hiking_climb_sections = models.BestSection.objects.get(activity=hiking.pk, distance=200, kind="climb")
orig_1km_climb_start = hiking_climb_sections.start
orig_1km_climb = hiking_climb_sections.max_value
# 2. modify some attributes of a given activity
new_date = datetime.datetime(1999, 1, 1, 19, 19, 19, tzinfo=pytz.utc)
hiking.distance = 5_000.0
hiking.duration = datetime.timedelta(hours=500)
hiking.name = "some arbitrary hiking name"
# remove the demo activity flag of hiking activity
hiking.is_demo_activity = False
hiking.date = new_date
hiking.save()
hiking_fastest_sections.start = 50_000_000
hiking_fastest_sections.max_value = 999.999
hiking_fastest_sections.save()
hiking_climb_sections.start = 70_000_000
hiking_climb_sections.max_value = 12345.6789
hiking_climb_sections.save()
# verify values got changed
hiking_climb_sections_modified = models.BestSection.objects.get(activity=hiking.pk, distance=200, kind="climb")
assert hiking_climb_sections_modified.start == 70_000_000
assert hiking_climb_sections_modified.max_value == 12345.6789
cycling.distance = 9_000.0
cycling.duration = datetime.timedelta(hours=900)
cycling.name = "some arbitrary cycling name"
cycling.date = new_date
cycling.save()
# verify that cycling is a demo activity
assert cycling.is_demo_activity is True
# get lap values to verify reimporter handles updating of lap data correctly
lap_data = models.Lap.objects.filter(trace=cycling.trace_file)
orig_lap_speeds = [lap.speed for lap in lap_data]
# modify lap speed
for lap in lap_data:
lap.speed = 123456.789
lap.save()
# verify it got changed
assert [lap.speed for lap in models.Lap.objects.filter(trace=cycling.trace_file)] != orig_lap_speeds
# delete all cycling best sections
for best_section in cycling_best_sections:
best_section.delete()
assert len(models.BestSection.objects.filter(activity=cycling.pk)) == 0
# move activity file to new dir to ensure path got changed
path_to_traces = Path(tracks_in_tmpdir.path_to_trace_dir)
new_dir = path_to_traces / "new_dir"
new_dir.mkdir()
assert new_dir.is_dir()
old_path = path_to_traces / activity_1
assert hiking.trace_file.path_to_file == str(old_path)
new_path = new_dir / activity_1
assert old_path.is_file()
assert new_path.is_file() is False
shutil.move(old_path, new_path)
assert old_path.is_file() is False
assert new_path.is_file()
# 3. trigger reimport to update values
run_importer__dask(models, reimporting=True)
all_activities = models.Activity.objects.all()
assert len(all_activities) == 11
# 4. check that attributes have been overwritten with the original values
updated_hiking = models.Activity.objects.get(sport__slug="hiking")
assert updated_hiking.distance == orig_hiking_distance
assert updated_hiking.duration == orig_hiking_duration
# names should not be overwritten
assert updated_hiking.name != orig_hiking_name
assert updated_hiking.name == "some arbitrary hiking name"
# the date should not stay at its new value: since the hiking activity is no longer a demo activity its date
# should rather be updated back to its original value, but that is hardly checkable here because the dates of
# demo activities are adjusted to reflect the current time
assert updated_hiking.date != new_date
# verify that attributes of best section got overwritten
updated_hiking_fastest_sections = models.BestSection.objects.get(
activity=updated_hiking.pk, distance=1000, kind="fastest"
)
assert updated_hiking_fastest_sections.start == orig_1km_fastest_start
assert updated_hiking_fastest_sections.max_value == orig_1km_velocity
# verify that the path to trace file got updated
assert updated_hiking.trace_file.path_to_file == str(new_path)
updated_hiking_climb_sections = models.BestSection.objects.get(
activity=updated_hiking.pk, distance=200, kind="climb"
)
assert updated_hiking_climb_sections.start == orig_1km_climb_start
assert updated_hiking_climb_sections.max_value == orig_1km_climb
updated_cycling = models.Activity.objects.get(sport__slug="cycling")
assert updated_cycling.distance == orig_cycling_distance
assert updated_cycling.duration == orig_cycling_duration
# names should not be overwritten
assert updated_cycling.name != orig_cycling_name
assert updated_cycling.name == "some arbitrary cycling name"
# the date of a demo activity should also not be updated back to its original value
assert updated_cycling.date != orig_cycling_date
assert updated_cycling.date == new_date
# verify that cycling is still a demo activity
assert updated_cycling.is_demo_activity is True
# verify that all cycling best sections got reimported and created again
assert len(models.BestSection.objects.filter(activity=cycling.pk)) == orig_number_of_cycling_best_sections
# verify lap data is back to original speed values
updated_lap_data = models.Lap.objects.filter(trace=cycling.trace_file)
updated_lap_speeds = [lap.speed for lap in updated_lap_data]
assert updated_lap_speeds == orig_lap_speeds
# 5. verify that the activity pages are accessible after reimporting
activities = all_activities
for activity in activities:
response = client.get(f"/activity/{activity.pk}")
assert response.status_code == 200
@pytest.mark.parametrize("kind", ["fastest", "climb"])
def test_reimporting_of_best_sections(import_one_activity, kind):
# import one cycling activity
import_one_activity("cycling_bad_schandau.fit")
assert models.Activity.objects.count() == 1
assert models.Settings.objects.count() == 1
run_importer__dask(models)
assert models.Activity.objects.count() == 1
activity = models.Activity.objects.get()
bs = models.BestSection.objects.filter(activity=activity, kind=kind)
# there should never be more best sections of kind 'fastest' than configured possible fastest sections
assert len(bs) <= len(configuration.fastest_distances)
# store original values
orig_start = [section.start for section in bs]
orig_end = [section.end for section in bs]
orig_max_values = [section.max_value for section in bs]
orig_number_of_best_sections = len(bs)
# modify values
for section in bs:
section.start = 10_000
section.end = 20_000
section.max_value = 33_333.3
section.save()
# verify that the data got changed
changed_bs = models.BestSection.objects.filter(activity=activity, kind=kind)
assert [section.start for section in changed_bs] != orig_start
assert [section.end for section in changed_bs] != orig_end
assert [section.max_value for section in changed_bs] != orig_max_values
# also add another dummy best section which should be removed again by the reimport
dummy_section = models.BestSection(
activity=activity,
kind=kind,
distance=12345,
start=42,
end=84,
max_value=999.999,
)
dummy_section.save()
# verify number of sections has increased
assert len(models.BestSection.objects.filter(activity=activity, kind=kind)) == orig_number_of_best_sections + 1
# now trigger reimport to update modified values
run_importer__dask(models, reimporting=True)
# check that dummy section was deleted because it is not present in the configured fastest sections
assert len(models.BestSection.objects.filter(activity=activity, kind=kind)) == orig_number_of_best_sections
# verify that the modified values are back to their original values
# verify that the data got changed
updated_bs = models.BestSection.objects.filter(activity=activity, kind=kind)
assert [section.start for section in updated_bs] == orig_start
assert [section.end for section in updated_bs] == orig_end
assert [section.max_value for section in updated_bs] == orig_max_values
if kind == "fastest":
for section in updated_bs:
assert section.distance in configuration.fastest_distances
elif kind == "climb":
for section in updated_bs:
assert section.distance in configuration.climb_distances
def test_reimport__not_evaluates_for_awards__changing_sport_flag(import_one_activity):
# Changed behaviour of this test to check that best sections do not get removed when changing evaluates_for_awards
import_one_activity("cycling_bad_schandau.fit")
# verify activity is suitable for best sections
assert models.Activity.objects.count() == 1
activity = models.Activity.objects.get()
assert activity_suitable_for_awards(activity) is True
assert models.BestSection.objects.filter(activity=activity).count() > 0
# change sport flag for evaluates_for_awards to False
sport = activity.sport
sport.evaluates_for_awards = False
sport.save()
assert activity_suitable_for_awards(activity) is False
# reimport activity
run_importer__dask(models, reimporting=True)
assert models.Activity.objects.count() == 1
activity = models.Activity.objects.get()
assert activity_suitable_for_awards(activity) is False
# check that best sections did not get removed
assert models.BestSection.objects.filter(activity=activity).count() > 0
# now change everything back and verify that the best sections get saved to db again by reimporting
sport = activity.sport
sport.evaluates_for_awards = True
sport.save()
assert activity_suitable_for_awards(activity) is True
# reimport activity
run_importer__dask(models, reimporting=True)
assert models.Activity.objects.count() == 1
activity = models.Activity.objects.get()
assert activity_suitable_for_awards(activity) is True
# check that best sections are present again after reimporting
assert models.BestSection.objects.filter(activity=activity).count() > 0
def test_reimport__not_evaluates_for_awards__changing_activity_flag(import_one_activity):
# Changed behaviour of this test to check that best sections do not get removed when changing evaluates_for_awards
import_one_activity("cycling_bad_schandau.fit")
# verify activity is suitable for best sections
assert models.Activity.objects.count() == 1
activity = models.Activity.objects.get()
assert activity_suitable_for_awards(activity) is True
assert models.BestSection.objects.filter(activity=activity).count() > 0
# change activity flag for evaluates_for_awards to False
activity.evaluates_for_awards = False
activity.save()
assert activity_suitable_for_awards(activity) is False
# reimport activity
run_importer__dask(models, reimporting=True)
assert models.Activity.objects.count() == 1
activity = models.Activity.objects.get()
assert activity_suitable_for_awards(activity) is False
# check that best sections did not get removed
assert models.BestSection.objects.filter(activity=activity).count() > 0
# now change everything back and verify that the best sections get saved to db again by reimporting
activity.evaluates_for_awards = True
activity.save()
assert activity_suitable_for_awards(activity) is True
# reimport activity
run_importer__dask(models, reimporting=True)
assert models.Activity.objects.count() == 1
activity = models.Activity.objects.get()
assert activity_suitable_for_awards(activity) is True
# check that best sections are present again after reimporting
assert models.BestSection.objects.filter(activity=activity).count() > 0
def test_run_importer__dask__reimporting(tmp_path, import_one_activity):
import_one_activity("cycling_bad_schandau.fit")
assert models.Activity.objects.count() == 1
activity = models.Activity.objects.get()
original_distance = activity.distance
# change some values
activity.name = "Foo"
activity.distance = 999.9
activity.save()
activity = models.Activity.objects.get()
assert activity.name == "Foo"
run_importer__dask(models, reimporting=True)
# the name should not have changed during reimporting
assert models.Activity.objects.filter(name="Foo").count() == 1
# the distance however should have been reverted to the original value
assert models.Activity.objects.filter(distance=original_distance).count() == 1
| 39.895722
| 119
| 0.749145
|
11b95e0f9e7afe8543bf0c3e7be151865cf4b771
| 5,394
|
py
|
Python
|
tests/serve/mock/end-to-end/opbank/test_opbank.py
|
dfioravanti/hmt
|
df79404076ec7acea0cfb12b636d58e3ffc83bc5
|
[
"MIT"
] | 25
|
2020-05-14T13:25:42.000Z
|
2021-11-09T10:09:27.000Z
|
tests/serve/mock/end-to-end/opbank/test_opbank.py
|
dfioravanti/hmt
|
df79404076ec7acea0cfb12b636d58e3ffc83bc5
|
[
"MIT"
] | 19
|
2020-05-05T19:47:41.000Z
|
2021-02-05T17:06:53.000Z
|
tests/serve/mock/end-to-end/opbank/test_opbank.py
|
dfioravanti/hmt
|
df79404076ec7acea0cfb12b636d58e3ffc83bc5
|
[
"MIT"
] | 6
|
2020-05-16T10:02:48.000Z
|
2021-10-04T08:03:49.000Z
|
import json
import pytest
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from hmt.serve.mock.log import Log
from hmt.serve.mock.scope import Scope
from hmt.serve.mock.specs import load_specs
from hmt.serve.utils.routing import HeaderRouting
@pytest.fixture
def app(mocking_app):
return mocking_app(
"tests/serve/mock/end-to-end/opbank/callbacks",
load_specs("tests/serve/mock/end-to-end/opbank/schemas"),
HeaderRouting(),
Log(Scope()),
)
ACCOUNTS_HEADERS = {
"Host": "sandbox.apis.op-palvelut.fi",
"x-api-key": "ZoStul8nNuwq1SYCzSrLcO1wAj4Tyf7x",
"x-request-id": "12345",
"x-session-id": "12345",
"authorization": "Bearer 6c18c234b1b18b1d97c7043e2e41135c293d0da9",
"x-authorization": "6c18c234b1b18b1d97c7043e2e41135c293d0da9",
}
#
PAYMENTS_HEADERS = {
"Host": "sandbox.apis.op-palvelut.fi",
"x-api-key": "ZoStul8nNuwq1SYCzSrLcO1wAj4Tyf7x",
"x-request-id": "12345",
"x-session-id": "12345",
# 'authorization': "Bearer 6c18c234b1b18b1d97c7043e2e41135c293d0da9",
"x-authorization": "6c18c234b1b18b1d97c7043e2e41135c293d0da9",
}
"""
def get_accounts(http_client: AsyncHTTPClient, base_url: str):
req = HTTPRequest(base_url+'/accounts/v3/accounts', headers=ACCOUNTS_HEADERS)
ret = yield http_client.fetch(req)
return json.loads(ret.body)['accounts']
"""
"""
def init_payment(payer_iban, receiver_iban, amount, http_client, base_url):
body = {
"amount": amount,
"subject": "Client Test",
"currency": "EUR",
"payerIban": payer_iban,
"valueDate": "2020-01-27T22:59:34Z",
"receiverBic": "string",
"receiverIban": receiver_iban,
"receiverName": "string"
}
url = base_url + '/v1/payments/initiate'
req = HTTPRequest(url, method='POST', headers=PAYMENTS_HEADERS, body=json.dumps(body))
res = yield http_client.fetch(req)
return json.loads(res.body)
"""
"""
def confirm_payment(payment_id, http_client: AsyncHTTPClient, base_url: str):
body = {
'paymentId': payment_id
}
url = base_url + '/v1/payments/confirm'
req = HTTPRequest(url, headers=PAYMENTS_HEADERS, body=json.dumps(body))
response = yield http_client.fetch(req)
return json.loads(response)
"""
@pytest.mark.gen_test
def test_opbank(http_client: AsyncHTTPClient, base_url: str):
# eventually, we will want to test the line below
# currently, however, pytest.tornado only supports creating
# one fixture for a mock
# requests.delete("http://localhost:8888/admin/storage")
# payer_iban = 'FI8359986950002741'
# receiver_iban = 'FI4859986920215738'
payer_iban = "FI3959986920207073"
receiver_iban = "FI2350009421535899"
amount = 5
### get account
req = HTTPRequest(base_url + "/accounts/v3/accounts", headers=ACCOUNTS_HEADERS)
ret = yield http_client.fetch(req)
accounts = json.loads(ret.body)["accounts"]
print("Account list before payment: {}".format(accounts))
payer_account = next(
(account for account in accounts if account["identifier"] == payer_iban)
)
receiver_account = next(
(account for account in accounts if account["identifier"] == receiver_iban)
)
assert 2215.81 == payer_account["balance"]
assert 0 == receiver_account["balance"]
### init account
body = {
"amount": amount,
"subject": "Client Test",
"currency": "EUR",
"payerIban": payer_iban,
"valueDate": "2020-01-27T22:59:34Z",
"receiverBic": "string",
"receiverIban": receiver_iban,
"receiverName": "string",
}
url = base_url + "/v1/payments/initiate"
req = HTTPRequest(
url, method="POST", headers=PAYMENTS_HEADERS, body=json.dumps(body)
)
res = yield http_client.fetch(req)
payment = json.loads(res.body)
payment_id: str = payment["paymentId"]
print("Created payment {}".format(payment))
### get account
req = HTTPRequest(base_url + "/accounts/v3/accounts", headers=ACCOUNTS_HEADERS)
ret = yield http_client.fetch(req)
accounts = json.loads(ret.body)["accounts"]
print("Account list after payment initiated: {}".format(accounts))
payer_account = next(
(account for account in accounts if account["identifier"] == payer_iban)
)
receiver_account = next(
(account for account in accounts if account["identifier"] == receiver_iban)
)
assert 2215.81 == payer_account["balance"]
assert 0 == receiver_account["balance"]
### confirm payment
body = {"paymentId": payment_id}
url = base_url + "/v1/payments/confirm"
req = HTTPRequest(
url, method="POST", headers=PAYMENTS_HEADERS, body=json.dumps(body)
)
yield http_client.fetch(req)
### get accounts
req = HTTPRequest(base_url + "/accounts/v3/accounts", headers=ACCOUNTS_HEADERS)
ret = yield http_client.fetch(req)
accounts = json.loads(ret.body)["accounts"]
print("Account list after payment confirmed: {}".format(accounts))
payer_account = next(
(account for account in accounts if account["identifier"] == payer_iban)
)
receiver_account = next(
(account for account in accounts if account["identifier"] == receiver_iban)
)
assert 2210.81 == payer_account["balance"]
assert 5 == receiver_account["balance"]
| 32.493976
| 90
| 0.670931
|
9a3c8f8c9c0ddfb0b0abad46ef94eea555519355
| 854
|
py
|
Python
|
wrappers/QuASAR/quasar_split_by_chromosomes_qc.py
|
JSegueni/3DChromatin_ReplicateQC
|
02d028bc56d8850cbef3192a813eaff8b35d2a8f
|
[
"MIT"
] | 30
|
2017-09-29T00:38:45.000Z
|
2021-04-19T11:27:29.000Z
|
wrappers/QuASAR/quasar_split_by_chromosomes_qc.py
|
StayHungryStayFool/3DChromatin_ReplicateQC
|
942949bb4d4111715cee83605fa0a51ee1acd91a
|
[
"MIT"
] | 16
|
2017-09-29T22:30:03.000Z
|
2021-05-24T15:07:33.000Z
|
wrappers/QuASAR/quasar_split_by_chromosomes_qc.py
|
StayHungryStayFool/3DChromatin_ReplicateQC
|
942949bb4d4111715cee83605fa0a51ee1acd91a
|
[
"MIT"
] | 14
|
2017-09-24T15:11:53.000Z
|
2021-08-21T03:47:53.000Z
|
#!/usr/bin/env python
import sys
import glob
import argparse
import re
import os
def main():
scorefile = open( sys.argv[1], 'r')
samplename=sys.argv[2]
lines=scorefile.readlines()
header = None
for i in range(len(lines)):
if lines[i] == 'Quality Score Results\n':
header = i + 2
break
header_line = lines[header].rstrip('\n').split('\t')
data_line = lines[header + 1].rstrip('\n').split('\t')
d={}
for i in range(2, len(header_line)):
d[header_line[i]]=data_line[i]
for chromo, score in d.iteritems():
outfile=open(os.path.dirname(sys.argv[1])+'/'+'chr'+chromo+'.'+re.sub('.QuASAR-QC','',re.sub('.QuASAR-Rep.','',os.path.basename(sys.argv[1]))),'w')
outfile.write(samplename+'\t'+score+'\n')
outfile.close()
if __name__ == "__main__":
main()
| 28.466667
| 155
| 0.588993
|
63203c0b485c8afa2e0aa099ffc610eeb1e49662
| 1,783
|
py
|
Python
|
custom/ilsgateway/management/commands/test_report_data_generation.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | null | null | null |
custom/ilsgateway/management/commands/test_report_data_generation.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | 1
|
2022-03-12T01:03:25.000Z
|
2022-03-12T01:03:25.000Z
|
custom/ilsgateway/management/commands/test_report_data_generation.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | null | null | null |
from datetime import datetime
from django.core.management import BaseCommand
from corehq.apps.locations.models import SQLLocation
from custom.ilsgateway.models import ILSGatewayConfig, ReportRun, SupplyPointStatus, DeliveryGroupReport, \
SupplyPointWarehouseRecord, OrganizationSummary, ProductAvailabilityData, Alert
from custom.ilsgateway.tasks import report_run
from custom.ilsgateway.tanzania.warehouse import updater
class Command(BaseCommand):
"""
Manually test the stock data migration.
"""
def handle(self, domain, *args, **options):
if len(args) == 1:
ilsgateway_id = args[0]
else:
ilsgateway_id = 1166 # defaults to bondenzi: http://ilsgateway.com/tz/facility/1166/
# monkey patch the default start date to cover less data
updater.default_start_date = lambda: datetime(2015, 1, 1)
config = ILSGatewayConfig.for_domain(domain)
assert config.enabled, 'ilsgateway sync must be configured for this domain'
locations = _get_locations_from_ilsgateway_id(domain, ilsgateway_id)
_clear_data(domain)
report_run(domain, locations, strict=False)
def _clear_data(domain):
ReportRun.objects.filter(domain=domain).delete()
SupplyPointStatus.objects.all().delete()
DeliveryGroupReport.objects.all().delete()
SupplyPointWarehouseRecord.objects.all().delete()
OrganizationSummary.objects.all().delete()
ProductAvailabilityData.objects.all().delete()
Alert.objects.all().delete()
def _get_locations_from_ilsgateway_id(domain, ilsgateway_id):
facility = SQLLocation.objects.get(domain=domain, external_id=ilsgateway_id)
return [facility.couch_location] + [facility.parent.couch_location] + [facility.parent.parent.couch_location]
| 40.522727
| 113
| 0.747616
|
d5b9a8c80d2128c28c8e4b0fcc537b5e32a968ff
| 287
|
py
|
Python
|
output/models/nist_data/atomic/g_month_day/schema_instance/nistschema_sv_iv_atomic_g_month_day_max_exclusive_5_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/nist_data/atomic/g_month_day/schema_instance/nistschema_sv_iv_atomic_g_month_day_max_exclusive_5_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/nist_data/atomic/g_month_day/schema_instance/nistschema_sv_iv_atomic_g_month_day_max_exclusive_5_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from output.models.nist_data.atomic.g_month_day.schema_instance.nistschema_sv_iv_atomic_g_month_day_max_exclusive_5_xsd.nistschema_sv_iv_atomic_g_month_day_max_exclusive_5 import NistschemaSvIvAtomicGMonthDayMaxExclusive5
__all__ = [
"NistschemaSvIvAtomicGMonthDayMaxExclusive5",
]
| 47.833333
| 221
| 0.905923
|
d8d5ddae7aa8b40b8a5647b82b295acba5a71bfb
| 1,838
|
py
|
Python
|
authors/apps/notify/views.py
|
dev-jey/ah-the-phoenix
|
985d216210c0c81ec06e223c6952b0c69fabdcfa
|
[
"BSD-3-Clause"
] | 1
|
2019-04-04T23:49:42.000Z
|
2019-04-04T23:49:42.000Z
|
authors/apps/notify/views.py
|
dev-jey/ah-the-phoenix
|
985d216210c0c81ec06e223c6952b0c69fabdcfa
|
[
"BSD-3-Clause"
] | 21
|
2019-01-29T17:41:36.000Z
|
2022-03-11T23:43:20.000Z
|
authors/apps/notify/views.py
|
dev-jey/ah-the-phoenix
|
985d216210c0c81ec06e223c6952b0c69fabdcfa
|
[
"BSD-3-Clause"
] | 1
|
2019-11-23T18:27:55.000Z
|
2019-11-23T18:27:55.000Z
|
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from django.http import Http404
from .serializers import NotificationSerializer
# from .signal import *
from .renderers import NotificationJSONRenderer
class NotificationViewList(APIView):
'''
get all notifications where the receiver was the current user
'''
permission_classes = (IsAuthenticated,)
renderer_classes = (NotificationJSONRenderer,)
serializer_class = NotificationSerializer
def get(self, request):
queryset = self.request.user.notifications.all().order_by('-timestamp')
serializer = self.serializer_class(
queryset,
many=True,
context={'request': request}
)
return Response(serializer.data, status=status.HTTP_200_OK)
class SingleNotification(APIView):
"get single notification"
permission_classes = (IsAuthenticated,)
renderer_classes = (NotificationJSONRenderer,)
serializer_class = NotificationSerializer
def get(self, request, id):
try:
notification = self.request.user.notifications.all().filter(
id=id).first()
except Exception:
raise Http404('Error when retrieving notification')
if not notification:
return Response({'error': 'Notification not found'},
status.HTTP_404_NOT_FOUND)
else:
if notification.unread:
notification.mark_as_read()
serializer = self.serializer_class(
notification,
many=False,
context={'request': request}
)
return Response(serializer.data, status=status.HTTP_200_OK)
| 31.689655
| 79
| 0.67247
|
058d19c2d13efa66a8ded640d89c1589d6ede936
| 515
|
py
|
Python
|
Leetcoding-Actions/Explore-Monthly-Challenges/2020-11/05-Minimum-Cost-to-Move-Chips-to-the-Same-Position.py
|
shoaibur/SWE
|
1e114a2750f2df5d6c50b48c8e439224894d65da
|
[
"MIT"
] | 1
|
2020-11-14T18:28:13.000Z
|
2020-11-14T18:28:13.000Z
|
Leetcoding-Actions/Explore-Monthly-Challenges/2020-11/05-Minimum-Cost-to-Move-Chips-to-the-Same-Position.py
|
shoaibur/SWE
|
1e114a2750f2df5d6c50b48c8e439224894d65da
|
[
"MIT"
] | null | null | null |
Leetcoding-Actions/Explore-Monthly-Challenges/2020-11/05-Minimum-Cost-to-Move-Chips-to-the-Same-Position.py
|
shoaibur/SWE
|
1e114a2750f2df5d6c50b48c8e439224894d65da
|
[
"MIT"
] | null | null | null |
class Solution:
def minCostToMoveChips(self, position: List[int]) -> int:
'''
Idea: Move all even positioned chips to 0, i.e., count them and
all odd positioned chips to 1, i.e., count them; take min of two counts
T: O(n) and S: O(1)
'''
zeroCost, oneCost = 0, 0
for i in range(len(position)):
if position[i] % 2:
oneCost += 1
else:
zeroCost += 1
return min(zeroCost, oneCost)
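# Illustrative usage (not part of the original submission; assumes the LeetCode judge
# provides `from typing import List` for the annotation above):
#   Solution().minCostToMoveChips([1, 2, 3])        -> 1  (move the single even-positioned chip)
#   Solution().minCostToMoveChips([2, 2, 2, 3, 3])  -> 2  (move the two odd-positioned chips)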
| 32.1875
| 79
| 0.506796
|
aee0fe124065c9ab1b63422b93948719c9f4f7e0
| 974
|
py
|
Python
|
utils/layer_utils.py
|
alexchungio/CGAN-MNIST
|
9da907f7902cc8dd470ac63f08a00b6395b23fc3
|
[
"Apache-2.0"
] | null | null | null |
utils/layer_utils.py
|
alexchungio/CGAN-MNIST
|
9da907f7902cc8dd470ac63f08a00b6395b23fc3
|
[
"Apache-2.0"
] | null | null | null |
utils/layer_utils.py
|
alexchungio/CGAN-MNIST
|
9da907f7902cc8dd470ac63f08a00b6395b23fc3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#------------------------------------------------------
# @ File : layer_utils.py
# @ Description:
# @ Author : Alex Chung
# @ Contact : yonganzhong@outlook.com
# @ License : Copyright (c) 2017-2018
# @ Time : 2020/10/2 6:56 PM
# @ Software : PyCharm
#-------------------------------------------------------
import tensorflow as tf
from tensorflow.python.ops import array_ops
class ConvConcat(tf.keras.layers.Layer):
def __init__(self, axis=-1, **kwargs):
super(ConvConcat, self).__init__(**kwargs)
self.axis = axis
def build(self, axis):
pass
def call(self, inputs):
x_shape = inputs[0].get_shape()
y_shape = inputs[1].get_shape()
z = tf.keras.layers.concatenate([inputs[0], inputs[1] * tf.ones(x_shape[:3] + [y_shape[3]])],
axis=self.axis)
return z
if __name__ == "__main__":
pass
| 27.828571
| 101
| 0.51232
|
a9e823b296dfa2dda74329da067651a420982ccf
| 4,666
|
py
|
Python
|
auto_naming_screenshot.py
|
akame-bottle/auto-naming-screenshot
|
c507d0e331e61e61ce6742a7fd1c96b2014675c8
|
[
"MIT"
] | null | null | null |
auto_naming_screenshot.py
|
akame-bottle/auto-naming-screenshot
|
c507d0e331e61e61ce6742a7fd1c96b2014675c8
|
[
"MIT"
] | 2
|
2021-02-28T05:59:00.000Z
|
2021-02-28T06:06:06.000Z
|
auto_naming_screenshot.py
|
akame-bottle/auto-naming-screenshot
|
c507d0e331e61e61ce6742a7fd1c96b2014675c8
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 赤目ボトル(Akame Bottle).
#
# This software is released under the MIT License.
# http://opensource.org/licenses/mit-license.php
import bpy
import os
import glob
import re
bl_info = {
    "name": "Auto Naming Screenshot",
    "author": "Akame Bottle",
    "version": (0, 1, 0),
    "blender": (2, 79, 0),
    "location": "Properties -> Render -> Auto Naming Screenshot",
    "description": "スクリーンショットを指定したディレクトリに連番ファイル名で保存します",
    "warning": "",
    "wiki_url": "https://github.com/akame-bottle/auto-naming-screenshot/wiki",
    "tracker_url": "",
    "category": "Render"
}
class ANSS_PG_Properties(bpy.types.PropertyGroup):
    def path_check(self, context):
        abs_path = bpy.path.abspath(self.dirpath)
        # Record the result so the operator and the panel can react to an invalid path.
        self.path_ok = os.path.exists(abs_path)
dirpath = bpy.props.StringProperty(subtype="DIR_PATH", default="//", description="デフォルトはDesktop", update=path_check)
filename = bpy.props.StringProperty(subtype="FILE_NAME", default="#.png", description="#を連番に変換")
full_screen = bpy.props.BoolProperty(default = False)
path_ok = bpy.props.BoolProperty(default = True)
class ANSS_OT_Screenshot(bpy.types.Operator):
bl_idname = "scene.auto_naming_screenshot"
bl_label = "Save Screenshot"
def execute(self, context):
props = context.scene.ANSS_props
if not props.path_ok:
return {'CANCELLED'}
dir = bpy.path.abspath(props.dirpath)
# file number check
num = r"\d+"
sharp = r"#+"
filename = props.filename + (".png" if os.path.splitext(props.filename)[1] == '' else "")
target_pattern = re.sub(sharp, r"*", filename)
file_number = 0
def get_num(name):
result = re.search(num, name)
return int(result.group()) if result else 0
if glob.glob(os.path.join(dir, target_pattern)):
result = max([f for f in glob.glob(os.path.join(dir, target_pattern))], key=get_num)
fn = re.search(num, result)
if fn:
file_number = int(fn.group())
# merge path
replaced_filename = re.sub(sharp, str(int(file_number) + 1), filename)
comp_path = os.path.join(props.dirpath, replaced_filename)
bpy.ops.screen.screenshot(filepath=comp_path, full=props.full_screen)
self.report({'INFO'}, "Save Screenshot! " + comp_path)
return {'FINISHED'}
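# For illustration (hypothetical files): with filename "#.png" and "1.png", "2.png" already
# in the chosen directory, the glob pattern becomes "*.png", the highest existing number (2)
# is found, and the next screenshot is saved as "3.png" (assuming the directory path itself
# contains no digits, since the numbering regex scans the full matched path).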
class ANSS_PT_Properties(bpy.types.Panel):
"""Creates a Panel in the scene context of the properties editor"""
bl_label = "Auto Naming Screenshot"
bl_idname = "ANSS_PT_auto_naming_screenshot"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "render"
def draw(self, context):
layout = self.layout
scene = context.scene
# Auto Naming Screenshot
row = layout.row()
row.operator("scene.auto_naming_screenshot")
props = scene.ANSS_props
row = layout.row()
row.prop(props, "dirpath", text="")
if not props.path_ok:
layout.label(text="Invalid Path", icon="ERROR")
row = layout.row()
row.prop(props, "filename", text="File Name")
row = layout.row()
row.prop(props, "full_screen", text="Full Screen")
classes = [
ANSS_PG_Properties,
ANSS_OT_Screenshot,
ANSS_PT_Properties
]
addon_keymaps = []
def register_shortcut():
wm = bpy.context.window_manager
kc = wm.keyconfigs.addon
if kc:
km = kc.keymaps.new(name="Auto Naming Screenshot", space_type="PROPERTIES")
        # Register the shortcut key (Ctrl+Alt+F3)
kmi = km.keymap_items.new(
idname=ANSS_OT_Screenshot.bl_idname,
type="F3",
value="PRESS",
shift=False,
ctrl=True,
alt=True
)
        # Keep track of the keymap entry so it can be removed on unregister
addon_keymaps.append((km, kmi))
def unregister_shortcut():
for km, kmi in addon_keymaps:
        # Unregister the shortcut key
km.keymap_items.remove(kmi)
    # Clear the list of registered shortcuts
addon_keymaps.clear()
def register():
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.Scene.ANSS_props = bpy.props.PointerProperty(type=ANSS_PG_Properties)
register_shortcut()
def unregister():
unregister_shortcut()
    for cls in classes:
bpy.utils.unregister_class(cls)
del bpy.types.Scene.ANSS_props
# if __name__ == "__main__":
# register()
| 31.958904
| 125
| 0.596871
|
a9859f3841031de125e3c671ae5636f440ba4fa2
| 1,711
|
py
|
Python
|
pythf/error.py
|
alipniczkij/pythf
|
732c481491674d8fa17676262d8eb32c69e4465b
|
[
"MIT"
] | 5
|
2021-04-06T06:55:06.000Z
|
2022-02-17T14:22:37.000Z
|
pythf/error.py
|
alipniczkij/pythf
|
732c481491674d8fa17676262d8eb32c69e4465b
|
[
"MIT"
] | null | null | null |
pythf/error.py
|
alipniczkij/pythf
|
732c481491674d8fa17676262d8eb32c69e4465b
|
[
"MIT"
] | 5
|
2021-02-11T16:36:02.000Z
|
2022-01-11T14:49:02.000Z
|
class ApiError(Exception):
"""Base class for all Group-IB THF API errors; also raised for generic internal errors."""
def __init__(self, message=None, original_exception=None):
"""
Initialize the ApiError.
Args:
message: The actual error message.
original_exception: The exception that caused this one to be raised.
"""
self.original_exception = original_exception
self.message = str(message)
def __str__(self):
"""
Convert the exception to a string.
Returns:
str: String equivalent of the exception.
"""
return self.message
class ObjectNotFoundError(ApiError):
pass
class ClientError(ApiError):
TITLE = "Client Error"
def __init__(self, uri, status_code, message, original_exception=None):
self.uri = uri
self.status_code = status_code
super().__init__(message, original_exception)
def __str__(self):
return "{}: {} answered with {}. Message: {}".format(self.TITLE,
self.uri,
self.status_code,
self.message)
class ServerError(ClientError):
TITLE = "Server Error"
class AuthenticationError(ClientError):
TITLE = "Authentication Error"
class ServerIsBeingUpdatedError(ClientError):
TITLE = "Server is being updated"
class BadRequestError(ClientError):
TITLE = "Bad Request"
class BadResponseError(ClientError):
TITLE = "Bad Response"
class ConnectionError(ClientError):
TITLE = "Connection Error"
| 26.734375
| 94
| 0.593805
|
f418852098c9c143d037b962d3437b77f4a60dc4
| 6,432
|
py
|
Python
|
git_split_commit/command.py
|
mik3y/git-split-commit
|
04f82dee1bd1b303e6626331766bc54ae25787be
|
[
"MIT"
] | 4
|
2020-05-22T19:34:36.000Z
|
2021-10-05T03:59:18.000Z
|
git_split_commit/command.py
|
mik3y/git-split-commit
|
04f82dee1bd1b303e6626331766bc54ae25787be
|
[
"MIT"
] | 2
|
2020-05-22T19:06:44.000Z
|
2020-12-22T08:29:30.000Z
|
git_split_commit/command.py
|
mik3y/git-split-commit
|
04f82dee1bd1b303e6626331766bc54ae25787be
|
[
"MIT"
] | null | null | null |
import os
import click
import git
import coloredlogs
import logging
import tempfile
from simple_term_menu import TerminalMenu
logger = logging.getLogger("main")
def print_error(msg):
click.echo(click.style(msg, fg="red"))
def setup_logging(debug):
if debug:
coloredlogs.install(level="DEBUG")
else:
coloredlogs.install(level="INFO")
def build_rebase_script(repo, sha):
lines = [f"edit {sha}"]
for commit in repo.iter_commits(f"{sha}..HEAD", reverse=True):
lines.append(f"pick {str(commit)}")
return "\n".join(lines)
def show_menu(options, title):
selected_options = set()
while True:
all_options = []
for idx, opt in enumerate(options):
if idx in selected_options:
prefix = "[*]"
else:
prefix = "[ ]"
all_options.append(f'{prefix} {opt["option_name"]}')
all_options.append("❌ Cancel")
all_options.append("✅ Done")
menu = TerminalMenu(all_options, title=title)
idx = menu.show()
if idx < len(options):
if idx in selected_options:
selected_options.remove(idx)
else:
selected_options.add(idx)
elif idx == len(options):
return None
else:
return [options[idx]["file_diff"] for idx in selected_options]
def pick_the_split(repo, base_sha):
"""Interactively split the operations in `base_sha` into two lists of operations.
This method is non-destructive.
"""
previous_commit = repo.commit(f"{base_sha}~1")
all_operations = previous_commit.diff(base_sha)
options = []
for d in all_operations:
option_name = f"{d.change_type} {d.a_path}"
option = {
"option_name": option_name,
"file_diff": d,
}
options.append(option)
click.echo("")
first_diffs = show_menu(options, title="Select operations to keep in first commit")
if first_diffs is None:
raise click.Abort()
elif not first_diffs:
print_error(f"Error: Must select at least one operation for first commit.")
raise click.Abort()
elif len(first_diffs) == len(options):
print_error(f"Error: Must leave at least one operation for second commit.")
raise click.Abort()
second_diffs = set(all_operations).symmetric_difference(first_diffs)
return first_diffs, second_diffs
def review_split(first_diffs, second_diffs):
"""Prompts the user to review and confirm a split.
This method is non-destructive.
"""
click.echo("Ready to split! Please review:")
click.echo("")
for name, diffs in (("First", first_diffs), ("Second", second_diffs)):
click.echo(f"{name} commit:")
click.echo("")
for d in diffs:
click.echo(f" {d.change_type} {d.a_path}")
click.echo()
proceed = click.confirm("Proceed?")
if not proceed:
raise click.Abort()
def execute_split(
output_branch_name, repo, source_commit, first_diffs, second_diffs, second_commit_message
):
"""Executes the split. This method is destructive!"""
base_sha = str(source_commit)
logging.debug(f'Creating branch "{output_branch_name}" at HEAD')
branch_head = repo.create_head(output_branch_name)
logging.debug(f"Switching into branch")
repo.head.reference = branch_head
assert not repo.head.is_detached
repo.head.reset(index=True, working_tree=True)
script = build_rebase_script(repo, base_sha)
logger.debug(f"rebase script:\n{script}")
temp_file = tempfile.NamedTemporaryFile(delete=False)
logger.debug(f"Writing rebase script to {temp_file.name}")
temp_file.write(script.encode())
temp_file.close()
first_commit_message_file = tempfile.NamedTemporaryFile(delete=False)
logger.debug(f"Writing commit message to {first_commit_message_file.name}")
first_commit_message_file.write(str(source_commit.message).encode())
first_commit_message_file.close()
# Faking an interactive rebase requires overriding `GIT_SEQUENCE_EDITOR`,
# so we do that here.
#
# NOTE(mikey): We're creating the rebase script on our own, and
# feeding it directly to `git rebase -i`. We could alternatively read
# the default rebase script, and edit the first line. This might be
# safer, our implementation of `build_rebase_script()` could be buggy.
custom_env = {
"GIT_SEQUENCE_EDITOR": f"cat '{temp_file.name}' >",
}
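    # For illustration (hypothetical temp path): git invokes the sequence editor with the
    # todo file as its argument, so the rebase effectively runs
    #   cat '/tmp/tmpXXXXXX' > <todo-file>
    # overwriting git's generated todo list with the one built above.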
repo_git = repo.git
with repo_git.custom_environment(**custom_env):
repo_git.rebase("-i", f"{base_sha}^")
repo_git.reset("HEAD^")
for d in first_diffs:
stage_diff(repo_git, d)
repo_git.commit("-F", first_commit_message_file.name)
for d in second_diffs:
stage_diff(repo_git, d)
repo_git.commit("-m", second_commit_message)
repo_git.rebase("--continue")
def stage_diff(repo_git, diff):
"""Given a diff object, run `git add` or `git rm` for it within `repo_git`."""
if diff.change_type == "A":
repo_git.add(diff.a_path)
elif diff.change_type == "D":
repo_git.rm(diff.a_path)
else:
repo_git.add("-u", diff.a_path)
@click.command()
@click.argument("sha")
@click.option("--debug/--no-debug", default=False)
@click.option("--output_branch_name", default="split-commit-tmp", prompt=True)
def split(sha, debug, output_branch_name):
setup_logging(debug)
repo = git.Repo(search_parent_directories=True)
logger.debug(f"Initialized repo: {repo.working_tree_dir}")
try:
commit = repo.commit(sha)
except git.exc.BadName:
print_error(f"Error: Commit {sha} not found")
raise click.Abort()
try:
existing_branch = repo.commit(output_branch_name)
except git.exc.BadName:
pass
else:
print_error(f"Error: The branch {output_branch_name} already exists")
raise click.Abort()
base_sha = str(commit)
first_diffs, second_diffs = pick_the_split(repo, base_sha)
second_commit_message = click.prompt(
"Message for second commit?", default="Split from previous commit"
)
review_split(first_diffs, second_diffs)
execute_split(
output_branch_name, repo, commit, first_diffs, second_diffs, second_commit_message
)
click.echo("🍌 Split complete! Enjoy your day.")
| 31.841584
| 93
| 0.657338
|
14b692e8dd221953ab2053b9b845c92c1f355352
| 1,258
|
py
|
Python
|
mnm/bot/stats.py
|
agateblue/mnm
|
adf45c5f86611851ef3f51b60c30150095dba638
|
[
"MIT"
] | 2
|
2021-02-09T06:49:24.000Z
|
2021-07-19T00:44:39.000Z
|
mnm/bot/stats.py
|
agateblue/mnm
|
adf45c5f86611851ef3f51b60c30150095dba638
|
[
"MIT"
] | null | null | null |
mnm/bot/stats.py
|
agateblue/mnm
|
adf45c5f86611851ef3f51b60c30150095dba638
|
[
"MIT"
] | 1
|
2021-06-07T10:39:07.000Z
|
2021-06-07T10:39:07.000Z
|
import collections
from mnm.instances.influxdb_client import get_client
class Stat(object):
def get(self, **kwargs):
raise NotImplementedError
def get_results(self, query):
return get_client().query(query)
class InstanceFieldStat(Stat):
query = """
SELECT sum("{field}")
FROM instances_hourly
WHERE time > now() - 24h
GROUP BY time(1h)"""
def get(self):
query = self.query.format(field=self.field)
results = list(self.get_results(query)['instances_hourly'])
return {
'total': results[-1]['sum'],
'1h': results[-1]['sum'] - results[-2]['sum'],
'24h': results[-1]['sum'] - results[1]['sum'],
}
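# For example (hypothetical sums): if the second hourly bucket of the window is 101 and the
# last two buckets are 140 and 142, get() returns {'total': 142, '1h': 2, '24h': 41};
# note that the 24h delta is computed against bucket index 1, not the first bucket.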
class UsersStat(InstanceFieldStat):
code = 'users'
field = "users"
description = 'Users-related metrics'
class StatusesStat(InstanceFieldStat):
code = 'statuses'
field = "statuses"
description = 'Statuses-related metrics'
class InstancesStat(InstanceFieldStat):
code = 'instances'
field = "instances"
description = 'Instances-related metrics'
stats = collections.OrderedDict()
stats['users'] = UsersStat()
# stats['instances'] = InstancesStat()
stats['statuses'] = StatusesStat()
| 22.464286
| 67
| 0.627186
|
666e1590a35424faa8e8152da8e2a9efca17422b
| 1,515
|
py
|
Python
|
mysite/verde/management/commands/plotly.py
|
induts/Capstone
|
d539a2de929cd05ff913e8c8731fd22894b38dc6
|
[
"Apache-2.0"
] | null | null | null |
mysite/verde/management/commands/plotly.py
|
induts/Capstone
|
d539a2de929cd05ff913e8c8731fd22894b38dc6
|
[
"Apache-2.0"
] | null | null | null |
mysite/verde/management/commands/plotly.py
|
induts/Capstone
|
d539a2de929cd05ff913e8c8731fd22894b38dc6
|
[
"Apache-2.0"
] | null | null | null |
import plotly.plotly as py
import plotly.graph_objs as go
import plotly
import pandas as pd
plotly.tools.set_credentials_file(username='cheryllewman', api_key='FHe2FrQ6UQXs6hvv8StD')
# Load the 2011 US agriculture exports sample dataset from plotly's hosted CSV
import numpy as np
df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/2011_us_ag_exports.csv')
for col in df.columns:
df[col] = df[col].astype(str)
scl = [[0.0, 'rgb(242,240,247)'], [0.2, 'rgb(218,218,235)'], [0.4, 'rgb(188,189,220)'], \
[0.6, 'rgb(158,154,200)'], [0.8, 'rgb(117,107,177)'], [1.0, 'rgb(84,39,143)']]
df['text'] = df['state'] + '<br>' + \
'Beef ' + df['beef'] + ' Dairy ' + df['dairy'] + '<br>' + \
'Fruits ' + df['total fruits'] + ' Veggies ' + df['total veggies'] + '<br>' + \
'Wheat ' + df['wheat'] + ' Corn ' + df['corn']
data = [dict(
type='choropleth',
colorscale=scl,
autocolorscale=False,
locations=df['code'],
z=df['total exports'].astype(float),
locationmode='USA-states',
text=df['text'],
marker=dict(
line=dict(
color='rgb(255,255,255)',
width=2
)),
colorbar=dict(
title="Millions USD")
)]
layout = dict(
title='2011 US Agriculture Exports by State<br>(Hover for breakdown)',
geo=dict(
scope='usa',
projection=dict(type='albers usa'),
showlakes=True,
lakecolor='rgb(255, 255, 255)'),
)
fig = dict(data=data, layout=layout)
py.iplot(fig, filename='d3-cloropleth-map')
| 30.3
| 99
| 0.590759
|
da94410293435762256cadbe933ad7be07049699
| 1,609
|
py
|
Python
|
changes/api/plan_details.py
|
vault-the/changes
|
37e23c3141b75e4785cf398d015e3dbca41bdd56
|
[
"Apache-2.0"
] | 443
|
2015-01-03T16:28:39.000Z
|
2021-04-26T16:39:46.000Z
|
changes/api/plan_details.py
|
vault-the/changes
|
37e23c3141b75e4785cf398d015e3dbca41bdd56
|
[
"Apache-2.0"
] | 12
|
2015-07-30T19:07:16.000Z
|
2016-11-07T23:11:21.000Z
|
changes/api/plan_details.py
|
vault-the/changes
|
37e23c3141b75e4785cf398d015e3dbca41bdd56
|
[
"Apache-2.0"
] | 47
|
2015-01-09T10:04:00.000Z
|
2020-11-18T17:58:19.000Z
|
from __future__ import absolute_import, division, unicode_literals
from flask.ext.restful import reqparse
from changes.api.auth import get_project_slug_from_plan_id, requires_project_admin
from changes.api.base import APIView
from changes.config import db
from changes.models.plan import Plan, PlanStatus
import uuid
STATUS_CHOICES = ('active', 'inactive')
class PlanDetailsAPIView(APIView):
post_parser = reqparse.RequestParser()
post_parser.add_argument('name', type=unicode)
post_parser.add_argument('status', choices=STATUS_CHOICES)
post_parser.add_argument('snapshot_plan_id', type=unicode)
def get(self, plan_id):
plan = Plan.query.get(plan_id)
if plan is None:
return '', 404
context = self.serialize(plan)
context['steps'] = list(plan.steps)
return self.respond(context)
@requires_project_admin(get_project_slug_from_plan_id)
def post(self, plan_id):
plan = Plan.query.get(plan_id)
if plan is None:
return '', 404
args = self.post_parser.parse_args()
if args.name:
plan.label = args.name
if args.status:
plan.status = PlanStatus[args.status]
if args.snapshot_plan_id:
# Use snapshot from another plan to allow sharing snapshots.
#
# TODO(jukka): Verify that the plans belong to the same project.
# We can't share plans between projects.
plan.snapshot_plan_id = uuid.UUID(hex=args.snapshot_plan_id)
db.session.commit()
return self.respond(plan)
| 29.796296
| 82
| 0.674332
|
4a0684c27499199b41e43043a1b220001105e346
| 7,180
|
py
|
Python
|
tests/test_apiv2_district_controller.py
|
tervay/the-blue-alliance
|
e14c15cb04b455f90a2fcfdf4c1cdbf8454e17f8
|
[
"MIT"
] | 266
|
2015-01-04T00:10:48.000Z
|
2022-03-28T18:42:05.000Z
|
tests/test_apiv2_district_controller.py
|
gregmarra/the-blue-alliance
|
5bedaf5c80b4623984760d3da3289640639112f9
|
[
"MIT"
] | 2,673
|
2015-01-01T20:14:33.000Z
|
2022-03-31T18:17:16.000Z
|
tests/test_apiv2_district_controller.py
|
gregmarra/the-blue-alliance
|
5bedaf5c80b4623984760d3da3289640639112f9
|
[
"MIT"
] | 230
|
2015-01-04T00:10:48.000Z
|
2022-03-26T18:12:04.000Z
|
import unittest2
import webtest
import json
import webapp2
from datetime import datetime
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.event_type import EventType
from controllers.api.api_district_controller import ApiDistrictListController, ApiDistrictEventsController
from models.district import District
from models.event import Event
from models.event_details import EventDetails
class TestListDistrictsController(unittest2.TestCase):
def setUp(self):
app = webapp2.WSGIApplication([webapp2.Route(r'/<year:>', ApiDistrictListController, methods=['GET'])], debug=True)
self.testapp = webtest.TestApp(app)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.testbed.init_taskqueue_stub(root_path=".")
self.district = District(
id='2010ne',
year=2010,
abbreviation='ne',
display_name='New England',
)
self.district.put()
self.event = Event(
id="2010sc",
name="Palmetto Regional",
event_type_enum=EventType.DISTRICT_CMP,
district_key=ndb.Key(District, '2010ne'),
short_name="Palmetto",
event_short="sc",
year=2010,
end_date=datetime(2010, 03, 27),
official=True,
city="Clemson",
state_prov="SC",
country="USA",
venue="Long Beach Arena",
venue_address="Long Beach Arena\r\n300 East Ocean Blvd\r\nLong Beach, CA 90802\r\nUSA",
start_date=datetime(2010, 03, 24),
webcast_json="[{\"type\": \"twitch\", \"channel\": \"frcgamesense\"}]",
website="http://www.firstsv.org"
)
self.event.put()
self.event_details = EventDetails(
id=self.event.key.id(),
alliance_selections=[
{"declines": [], "picks": ["frc971", "frc254", "frc1662"]},
{"declines": [], "picks": ["frc1678", "frc368", "frc4171"]},
{"declines": [], "picks": ["frc2035", "frc192", "frc4990"]},
{"declines": [], "picks": ["frc1323", "frc846", "frc2135"]},
{"declines": [], "picks": ["frc2144", "frc1388", "frc668"]},
{"declines": [], "picks": ["frc1280", "frc604", "frc100"]},
{"declines": [], "picks": ["frc114", "frc852", "frc841"]},
{"declines": [], "picks": ["frc2473", "frc3256", "frc1868"]}
]
)
self.event_details.put()
def tearDown(self):
self.testbed.deactivate()
def assertDistrictKeys(self, district):
self.assertEqual(district["key"], self.district.abbreviation)
self.assertEqual(district["name"], self.district.display_name)
def test_district_api(self):
response = self.testapp.get('/{}'.format(self.event.year), headers={"X-TBA-App-Id": "tba-tests:disstrict-controller-test:v01"})
districts = json.loads(response.body)
self.assertDistrictKeys(districts[0])
class TestListDistrictEventsController(unittest2.TestCase):
def setUp(self):
app = webapp2.WSGIApplication([webapp2.Route(r'/<district_abbrev:>/<year:>', ApiDistrictEventsController, methods=['GET'])], debug=True)
self.testapp = webtest.TestApp(app)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.testbed.init_taskqueue_stub(root_path=".")
self.district = District(
id='2010ne',
year=2010,
abbreviation='ne',
display_name='New England',
)
self.district.put()
self.event = Event(
id="2010sc",
name="Palmetto Regional",
event_type_enum=EventType.DISTRICT_CMP,
district_key=ndb.Key(District, '2010ne'),
short_name="Palmetto",
event_short="sc",
year=2010,
end_date=datetime(2010, 03, 27),
official=True,
city="Clemson",
state_prov="SC",
country="USA",
venue="Long Beach Arena",
venue_address="Long Beach Arena\r\n300 East Ocean Blvd\r\nLong Beach, CA 90802\r\nUSA",
start_date=datetime(2010, 03, 24),
webcast_json="[{\"type\": \"twitch\", \"channel\": \"frcgamesense\"}]",
website="http://www.firstsv.org"
)
self.event.put()
self.event_details = EventDetails(
id=self.event.key.id(),
alliance_selections=[
{"declines": [], "picks": ["frc971", "frc254", "frc1662"]},
{"declines": [], "picks": ["frc1678", "frc368", "frc4171"]},
{"declines": [], "picks": ["frc2035", "frc192", "frc4990"]},
{"declines": [], "picks": ["frc1323", "frc846", "frc2135"]},
{"declines": [], "picks": ["frc2144", "frc1388", "frc668"]},
{"declines": [], "picks": ["frc1280", "frc604", "frc100"]},
{"declines": [], "picks": ["frc114", "frc852", "frc841"]},
{"declines": [], "picks": ["frc2473", "frc3256", "frc1868"]}
]
)
self.event_details.put()
def tearDown(self):
self.testbed.deactivate()
def assertDistrictEvent(self, event):
self.assertEqual(event["key"], self.event.key_name)
self.assertEqual(event["name"], self.event.name)
self.assertEqual(event["short_name"], self.event.short_name)
self.assertEqual(event["official"], self.event.official)
self.assertEqual(event["event_type_string"], self.event.event_type_str)
self.assertEqual(event["event_type"], self.event.event_type_enum)
self.assertEqual(event["event_district_string"], self.event.event_district_str)
self.assertEqual(event["event_district"], self.event.event_district_enum)
self.assertEqual(event["start_date"], self.event.start_date.date().isoformat())
self.assertEqual(event["end_date"], self.event.end_date.date().isoformat())
self.assertEqual(event["location"], self.event.location)
self.assertEqual(event["venue_address"], self.event.venue_address.replace('\r\n', '\n'))
self.assertEqual(event["webcast"], json.loads(self.event.webcast_json))
self.assertEqual(event["alliances"], self.event.alliance_selections)
self.assertEqual(event["website"], self.event.website)
def test_event_api(self):
response = self.testapp.get("/{}/2010".format(self.district.abbreviation), headers={"X-TBA-App-Id": "tba-tests:disstrict-controller-test:v01"})
events = json.loads(response.body)
self.assertDistrictEvent(events[0])
| 41.264368
| 151
| 0.59805
|
385102019f5c4e52ed208d6d60720471d7db9ae5
| 9,831
|
py
|
Python
|
PyFuscation.py
|
tdefise/PyFuscation
|
1e73417c0cdfcada1ee6080785d399e9f8dfa586
|
[
"BSD-3-Clause"
] | null | null | null |
PyFuscation.py
|
tdefise/PyFuscation
|
1e73417c0cdfcada1ee6080785d399e9f8dfa586
|
[
"BSD-3-Clause"
] | 12
|
2021-02-10T20:53:08.000Z
|
2022-03-28T04:56:06.000Z
|
PyFuscation.py
|
tdefise/PyFuscation
|
1e73417c0cdfcada1ee6080785d399e9f8dfa586
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
"""
PyFuscation.py
This python3 script obfuscates powershell function, variable and parameters in an attempt to bypass AV blacklists
"""
import ast
import configparser
import os
import random
import re
import shlex
import shutil
import string
import subprocess
import sys
import time
from argparse import ArgumentParser
import banner
def printR(out):
print("\033[91m{}\033[00m".format("[!] " + out))
def printG(out):
print("\033[92m{}\033[00m".format("[*] " + out))
def printY(out):
print("\033[93m{}\033[00m".format("[+] " + out))
def printP(out):
print("\033[95m{}\033[00m".format("[-] " + out))
def realTimeMuxER(command):
# command is not controllable by an external resource
p = subprocess.Popen(
shlex.split(command), # nosemgrep: dangerous-subprocess-use
stdout=subprocess.PIPE,
)
while True:
output = p.stdout.readline().decode()
if output == "" and p.poll() is not None:
break
if output:
print(output.strip())
def removeJunk(oF):
# general cleanup
cmd = "sed -i -e '/<#/,/#>/c\\\\' " + oF
realTimeMuxER(cmd)
cmd = "sed -i -e 's/^[[:space:]]*#.*$//g' " + oF
realTimeMuxER(cmd)
cmd = "sed -i '/^$/d' " + oF
realTimeMuxER(cmd)
def useSED(DICT, oF):
for var in DICT:
new = str(DICT.get(var))
cmd = "sed -i -e 's/" + var + "\\b" + "/" + new + "/g' " + oF
realTimeMuxER(cmd)
def THEreplacER(DICT, iF, oF):
iFHandle = open(iF, "r")
ofHandle = open(oF, "w")
regex = r"(\$\w{3,})"
lower_DICT = list(map(lambda x: x.lower(), DICT))
# For var replace with Dictionary value
for line in iFHandle:
v = re.findall(regex, line)
if not v:
ofHandle.write(line + "\n")
ofHandle.flush()
else:
for var in v:
if var.lower() in lower_DICT:
new = str(DICT.get(var))
ofHandle.write(line.replace(var, new) + "\n")
ofHandle.flush()
else:
ofHandle.write(line + "\n")
ofHandle.flush()
iFHandle.close()
ofHandle.close()
def findCustomParams(iFile, oFile, VARs):
PARAMs = {}
READ = False
start = 0
end = 0
regex = r"([\$-]\w{4,})"
ofHandle = open(oFile, "w")
with open(iFile, "r") as f:
for line in f:
line = line.strip()
if re.search(r"\bparam\b", line, re.I):
                # OK, we are at the beginning of a custom parameter block
READ = True
# The open paren is on another line so move until we find it
start = start + line.count("(")
if start == 0:
continue
end = end + line.count(")")
v = re.findall(regex, line)
for i in v:
if i.lower() not in lower_Reserverd and i not in PARAMs:
                        # Let's check whether this one has already been replaced
new = VARs.get(i)
if not new:
continue
new = " -" + new[1:]
old = " -" + i[1:]
PARAMs[old] = new
ofHandle.write("Replacing: " + old + " with: " + new + "\n")
                # If the params are all on one line, we're done here
if start != 0 and start == end:
start = 0
end = 0
READ = False
continue
# These are the custom parameters
elif READ:
v = re.findall(regex, line)
for i in v:
if i.lower() not in lower_Reserverd and i not in PARAMs:
new = VARs.get(i)
if not new:
continue
new = " -" + new[1:]
old = " -" + i[1:]
PARAMs[old] = new
ofHandle.write("Replacing: " + old + " with: " + new + "\n")
start = start + line.count("(")
end = end + line.count(")")
if start != 0 and start == end:
start = 0
end = 0
READ = False
# Keep moving until we have work
else:
continue
printY("Parameters Replaced : " + str(len(PARAMs)))
ofHandle.close()
return PARAMs
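# For illustration (hypothetical names): if a param() block declares $ComputerName and
# findVARs already mapped $ComputerName to $AbCdEfGh99, this function records the
# replacement " -ComputerName" -> " -AbCdEfGh99" so call sites keep matching the
# renamed variable.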
def findVARs(iFile, lFile):
VARs = {}
vNum = 9999
regex = r"(\$\w{6,})"
ofHandle = open(lFile, "w")
with open(iFile, "r") as f:
for line in f:
v = re.findall(regex, line)
for i in v:
if i in VARs:
continue
elif i.lower() not in lower_Reserverd:
# Powershell vars are case insensitive
lowerVARS = {k.lower(): v for k, v in VARs.items()}
if i.lower() in lowerVARS:
new = lowerVARS.get(i.lower())
ofHandle.write("Replacing: " + i + " with: " + new + "\n")
VARs[i] = new
else:
vNum = 99
new = "$" + "".join(
[random.choice(string.ascii_letters) for n in range(8)]
)
VARs[i] = new + str(vNum)
ofHandle.write("Replacing: " + i + " with: " + new + "\n")
vNum += 1
# return dict of variable and their replacements
printY("Variables Replaced : " + str(len(VARs)))
ofHandle.close()
return VARs
def findFUNCs(iFile, lFile):
FUNCs = {}
ofHandle = open(lFile, "w")
with open(iFile, "r") as f:
for line in f:
funcMatch = re.search(
r"^\s*Function ([a-zA-Z0-9_-]{6,})[\s\{]+$", line, re.IGNORECASE
)
if funcMatch and funcMatch.group(1) not in FUNCs:
if funcMatch.group(1) == "main":
continue
vNum = 9999
new = randomString(wordList)
FUNCs[funcMatch.group(1)] = new
ofHandle.write(
"Replacing: " + funcMatch.group(1) + " with: " + str(new) + "\n"
)
vNum += 1
# return dict of variable and their replacements
printY("Functions Replaced : " + str(len(FUNCs)))
ofHandle.close()
return FUNCs
def randomString(iFile):
with open(iFile, "r") as f:
line = next(f)
for num, aline in enumerate(f, 2):
if random.randrange(num):
continue
line = aline
string = "".join(e for e in line if e.isalnum())
return string
def main():
iFile = args.script
printR("Obfuscating: " + iFile)
ts = time.strftime("%m%d%Y_%H_%M_%S", time.gmtime())
oDir = os.path.dirname(args.script) + "/" + ts
os.mkdir(oDir)
oFile = oDir + "/" + ts + ".ps1"
vFile = oDir + "/" + ts + ".variables"
fFile = oDir + "/" + ts + ".functions"
pFile = oDir + "/" + ts + ".parameters"
shutil.copy(args.script, oFile)
obfuVAR = dict()
obfuPARMS = dict()
obfuFUNCs = dict()
# Remove White space and comments
removeJunk(oFile)
# Obfuscate Variables
if args.var:
obfuVAR = findVARs(iFile, vFile)
useSED(obfuVAR, oFile)
printP("Obfuscated Variables located : " + vFile)
# Obfuscate custom parameters
if args.par:
obfuPARMS = findCustomParams(iFile, pFile, obfuVAR)
useSED(obfuPARMS, oFile)
printP("Obfuscated Parameters located : " + pFile)
# Obfuscate Functions
if args.func:
obfuFUNCs = findFUNCs(iFile, fFile)
useSED(obfuFUNCs, oFile)
# Print the Functions
print("")
print("Obfuscated Function Names")
print("-------------------------")
sorted_list = sorted(obfuFUNCs)
for i in sorted_list:
printG("Replaced " + i + " With: " + obfuFUNCs[i])
print("")
printP("Obfuscated Functions located : " + fFile)
printP("Obfuscated script located at : " + oFile)
if __name__ == "__main__":
if sys.version_info <= (3, 0):
sys.stdout.write("This script requires Python 3.x\n")
sys.exit(1)
banner.banner()
banner.title()
config = configparser.ConfigParser()
parser = ArgumentParser()
parser.add_argument(
"-f", dest="func", help="Obfuscate functions", action="store_true"
)
parser.add_argument(
"-v", dest="var", help="Obfuscate variables", action="store_true"
)
parser.add_argument(
"-p", dest="par", help="Obfuscate parameters", action="store_true"
)
parser.add_argument("--ps", dest="script", help="Obfuscate powershell")
args = parser.parse_args()
# Powershell script
if args.script is None:
parser.print_help()
sys.exit()
else:
# Check if the input file is valid:
if not os.path.isfile(args.script):
printR("Check File: " + args.script)
sys.exit()
else:
PSconfigFile = os.path.abspath(os.path.dirname(__file__)) + "/PSconfig.ini"
config.read(PSconfigFile)
Reseverd = ast.literal_eval(config.get("PS_Reserverd", "f"))
lower_Reserverd = list(map(lambda x: x.lower(), Reseverd))
wordList = os.path.abspath(os.path.dirname(__file__)) + "/wordList.txt"
if not os.path.isfile(wordList):
printR("Check wordList: " + wordList)
sys.exit()
main()
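# Example invocation (hypothetical path), enabling all three obfuscation passes:
#   python3 PyFuscation.py -f -v -p --ps /path/to/script.ps1
# Output is written to a timestamped directory next to the input script, together with
# .variables/.functions/.parameters files that record each replacement that was made.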
| 29.611446
| 113
| 0.498118
|
d0990e88a60da6ea581d316ff03e07a774e54083
| 2,443
|
py
|
Python
|
deeplabv3plus.py
|
Lin-boy/data
|
5bb3c21e6d85af79168b13b242281448e130f494
|
[
"MIT"
] | null | null | null |
deeplabv3plus.py
|
Lin-boy/data
|
5bb3c21e6d85af79168b13b242281448e130f494
|
[
"MIT"
] | null | null | null |
deeplabv3plus.py
|
Lin-boy/data
|
5bb3c21e6d85af79168b13b242281448e130f494
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from net.sync_batchnorm import SynchronizedBatchNorm2d
from torch.nn import init
from net.backbone import build_backbone
from net.ASPP import ASPP
class deeplabv3plus(nn.Module):
def __init__(self, cfg):
super(deeplabv3plus, self).__init__()
self.backbone = None
self.backbone_layers = None
input_channel = 2048
self.aspp = ASPP(dim_in=input_channel,
dim_out=cfg.MODEL_ASPP_OUTDIM,
rate=16//cfg.MODEL_OUTPUT_STRIDE,
bn_mom = cfg.TRAIN_BN_MOM)
self.dropout1 = nn.Dropout(0.5)
self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
self.upsample_sub = nn.UpsamplingBilinear2d(scale_factor=cfg.MODEL_OUTPUT_STRIDE//4)
indim = 256
self.shortcut_conv = nn.Sequential(
nn.Conv2d(indim, cfg.MODEL_SHORTCUT_DIM, cfg.MODEL_SHORTCUT_KERNEL, 1, padding=cfg.MODEL_SHORTCUT_KERNEL//2,bias=True),
SynchronizedBatchNorm2d(cfg.MODEL_SHORTCUT_DIM, momentum=cfg.TRAIN_BN_MOM),
nn.ReLU(inplace=True),
)
self.cat_conv = nn.Sequential(
nn.Conv2d(cfg.MODEL_ASPP_OUTDIM+cfg.MODEL_SHORTCUT_DIM, cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1,bias=True),
SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1,bias=True),
SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM),
nn.ReLU(inplace=True),
nn.Dropout(0.1),
)
self.cls_conv = nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_NUM_CLASSES, 1, 1, padding=0)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, SynchronizedBatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
self.backbone = build_backbone(cfg.MODEL_BACKBONE, os=cfg.MODEL_OUTPUT_STRIDE)
self.backbone_layers = self.backbone.get_layers()
def forward(self, x):
x_bottom = self.backbone(x)
layers = self.backbone.get_layers()
feature_aspp = self.aspp(layers[-1])
feature_aspp = self.dropout1(feature_aspp)
feature_aspp = self.upsample_sub(feature_aspp)
feature_shallow = self.shortcut_conv(layers[0])
feature_cat = torch.cat([feature_aspp,feature_shallow],1)
result = self.cat_conv(feature_cat)
result = self.cls_conv(result)
result = self.upsample4(result)
return result
| 38.171875
| 123
| 0.76054
|
5c9a4ae0501bba9fd8b0dde853853c6f5f812809
| 10,911
|
py
|
Python
|
tests/providers/google/suite/hooks/test_sheets.py
|
IGIT-CN/airflow
|
a6e5bcd59198afe5716813e84ebc4c59eade532c
|
[
"Apache-2.0"
] | 3
|
2019-12-11T15:54:13.000Z
|
2021-05-24T20:21:08.000Z
|
tests/providers/google/suite/hooks/test_sheets.py
|
IGIT-CN/airflow
|
a6e5bcd59198afe5716813e84ebc4c59eade532c
|
[
"Apache-2.0"
] | 1
|
2021-09-29T17:37:13.000Z
|
2021-09-29T17:37:13.000Z
|
tests/providers/google/suite/hooks/test_sheets.py
|
IGIT-CN/airflow
|
a6e5bcd59198afe5716813e84ebc4c59eade532c
|
[
"Apache-2.0"
] | 2
|
2021-01-11T13:53:03.000Z
|
2021-10-02T05:06:34.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Unit Tests for the GSheets Hook
"""
import unittest
import mock
from airflow.exceptions import AirflowException
from airflow.providers.google.suite.hooks.sheets import GSheetsHook
from tests.providers.google.cloud.utils.base_gcp_mock import mock_base_gcp_hook_default_project_id
GCP_CONN_ID = 'test'
SPREADHSEET_ID = '1234567890'
RANGE_ = 'test!A:E'
RANGES = ['test!A:Q', 'test!R:Z']
VALUES = [[1, 2, 3]]
VALUES_BATCH = [[[1, 2, 3]], [[4, 5, 6]]]
MAJOR_DIMENSION = 'ROWS'
VALUE_RENDER_OPTION = 'FORMATTED_VALUE'
DATE_TIME_RENDER_OPTION = 'SERIAL_NUMBER'
INCLUDE_VALUES_IN_RESPONSE = True
VALUE_INPUT_OPTION = 'RAW'
INSERT_DATA_OPTION = 'OVERWRITE'
NUM_RETRIES = 5
API_RESPONSE = {'test': 'response'}
class TestGSheetsHook(unittest.TestCase):
def setUp(self):
with mock.patch('airflow.providers.google.cloud.hooks.base.CloudBaseHook.__init__',
new=mock_base_gcp_hook_default_project_id):
self.hook = GSheetsHook(gcp_conn_id=GCP_CONN_ID, spreadsheet_id=SPREADHSEET_ID)
@mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook._authorize")
@mock.patch("airflow.providers.google.suite.hooks.sheets.build")
def test_gsheets_client_creation(self, mock_build, mock_authorize):
result = self.hook.get_conn()
mock_build.assert_called_once_with(
'sheets', 'v4', http=mock_authorize.return_value, cache_discovery=False
)
self.assertEqual(mock_build.return_value, result)
@mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_conn")
def test_get_values(self, get_conn):
get_method = get_conn.return_value.spreadsheets.return_value.values.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = API_RESPONSE
result = self.hook.get_values(
range_=RANGE_,
major_dimension=MAJOR_DIMENSION,
value_render_option=VALUE_RENDER_OPTION,
date_time_render_option=DATE_TIME_RENDER_OPTION)
self.assertIs(result, API_RESPONSE)
execute_method.assert_called_once_with(num_retries=NUM_RETRIES)
get_method.assert_called_once_with(
spreadsheetId=SPREADHSEET_ID,
range=RANGE_,
majorDimension=MAJOR_DIMENSION,
valueRenderOption=VALUE_RENDER_OPTION,
dateTimeRenderOption=DATE_TIME_RENDER_OPTION
)
@mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_conn")
def test_batch_get_values(self, get_conn):
batch_get_method = get_conn.return_value.spreadsheets.return_value.values.return_value.batchGet
execute_method = batch_get_method.return_value.execute
execute_method.return_value = API_RESPONSE
result = self.hook.batch_get_values(
ranges=RANGES,
major_dimension=MAJOR_DIMENSION,
value_render_option=VALUE_RENDER_OPTION,
date_time_render_option=DATE_TIME_RENDER_OPTION)
self.assertIs(result, API_RESPONSE)
execute_method.assert_called_once_with(num_retries=NUM_RETRIES)
batch_get_method.assert_called_once_with(
spreadsheetId=SPREADHSEET_ID,
ranges=RANGES,
majorDimension=MAJOR_DIMENSION,
valueRenderOption=VALUE_RENDER_OPTION,
dateTimeRenderOption=DATE_TIME_RENDER_OPTION
)
@mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_conn")
def test_update_values(self, get_conn):
update_method = get_conn.return_value.spreadsheets.return_value.values.return_value.update
execute_method = update_method.return_value.execute
execute_method.return_value = API_RESPONSE
result = self.hook.update_values(
range_=RANGE_,
values=VALUES,
major_dimension=MAJOR_DIMENSION,
value_input_option=VALUE_INPUT_OPTION,
include_values_in_response=INCLUDE_VALUES_IN_RESPONSE,
value_render_option=VALUE_RENDER_OPTION,
date_time_render_option=DATE_TIME_RENDER_OPTION)
body = {
"range": RANGE_,
"majorDimension": MAJOR_DIMENSION,
"values": VALUES
}
self.assertIs(result, API_RESPONSE)
execute_method.assert_called_once_with(num_retries=NUM_RETRIES)
update_method.assert_called_once_with(
spreadsheetId=SPREADHSEET_ID,
range=RANGE_,
valueInputOption=VALUE_INPUT_OPTION,
includeValuesInResponse=INCLUDE_VALUES_IN_RESPONSE,
responseValueRenderOption=VALUE_RENDER_OPTION,
responseDateTimeRenderOption=DATE_TIME_RENDER_OPTION,
body=body
)
@mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_conn")
def test_batch_update_values(self, get_conn):
batch_update_method = get_conn.return_value.spreadsheets.return_value.values.return_value.batchUpdate
execute_method = batch_update_method.return_value.execute
execute_method.return_value = API_RESPONSE
result = self.hook.batch_update_values(
ranges=RANGES,
values=VALUES_BATCH,
major_dimension=MAJOR_DIMENSION,
value_input_option=VALUE_INPUT_OPTION,
include_values_in_response=INCLUDE_VALUES_IN_RESPONSE,
value_render_option=VALUE_RENDER_OPTION,
date_time_render_option=DATE_TIME_RENDER_OPTION)
data = []
for idx, range_ in enumerate(RANGES):
value_range = {
"range": range_,
"majorDimension": MAJOR_DIMENSION,
"values": VALUES_BATCH[idx]
}
data.append(value_range)
body = {
"valueInputOption": VALUE_INPUT_OPTION,
"data": data,
"includeValuesInResponse": INCLUDE_VALUES_IN_RESPONSE,
"responseValueRenderOption": VALUE_RENDER_OPTION,
"responseDateTimeRenderOption": DATE_TIME_RENDER_OPTION
}
self.assertIs(result, API_RESPONSE)
execute_method.assert_called_once_with(num_retries=NUM_RETRIES)
batch_update_method.assert_called_once_with(
spreadsheetId=SPREADHSEET_ID,
body=body
)
@mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_conn")
def test_batch_update_values_with_bad_data(self, get_conn):
batch_update_method = get_conn.return_value.spreadsheets.return_value.values.return_value.batchUpdate
execute_method = batch_update_method.return_value.execute
execute_method.return_value = API_RESPONSE
with self.assertRaises(AirflowException) as cm:
self.hook.batch_update_values(
ranges=['test!A1:B2', 'test!C1:C2'],
values=[[1, 2, 3]], # bad data
major_dimension=MAJOR_DIMENSION,
value_input_option=VALUE_INPUT_OPTION,
include_values_in_response=INCLUDE_VALUES_IN_RESPONSE,
value_render_option=VALUE_RENDER_OPTION,
date_time_render_option=DATE_TIME_RENDER_OPTION)
batch_update_method.assert_not_called()
execute_method.assert_not_called()
err = cm.exception
self.assertIn("must be of equal length.", str(err))
@mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_conn")
def test_append_values(self, get_conn):
append_method = get_conn.return_value.spreadsheets.return_value.values.return_value.append
execute_method = append_method.return_value.execute
execute_method.return_value = API_RESPONSE
result = self.hook.append_values(
range_=RANGE_,
values=VALUES,
major_dimension=MAJOR_DIMENSION,
value_input_option=VALUE_INPUT_OPTION,
insert_data_option=INSERT_DATA_OPTION,
include_values_in_response=INCLUDE_VALUES_IN_RESPONSE,
value_render_option=VALUE_RENDER_OPTION,
date_time_render_option=DATE_TIME_RENDER_OPTION)
body = {
"range": RANGE_,
"majorDimension": MAJOR_DIMENSION,
"values": VALUES
}
self.assertIs(result, API_RESPONSE)
execute_method.assert_called_once_with(num_retries=NUM_RETRIES)
append_method.assert_called_once_with(
spreadsheetId=SPREADHSEET_ID,
range=RANGE_,
valueInputOption=VALUE_INPUT_OPTION,
insertDataOption=INSERT_DATA_OPTION,
includeValuesInResponse=INCLUDE_VALUES_IN_RESPONSE,
responseValueRenderOption=VALUE_RENDER_OPTION,
responseDateTimeRenderOption=DATE_TIME_RENDER_OPTION,
body=body
)
@mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_conn")
def test_clear_values(self, get_conn):
clear_method = get_conn.return_value.spreadsheets.return_value.values.return_value.clear
execute_method = clear_method.return_value.execute
execute_method.return_value = API_RESPONSE
result = self.hook.clear(range_=RANGE_)
self.assertIs(result, API_RESPONSE)
execute_method.assert_called_once_with(num_retries=NUM_RETRIES)
clear_method.assert_called_once_with(
spreadsheetId=SPREADHSEET_ID,
range=RANGE_
)
@mock.patch("airflow.providers.google.suite.hooks.sheets.GSheetsHook.get_conn")
def test_batch_clear_values(self, get_conn):
batch_clear_method = get_conn.return_value.spreadsheets.return_value.values.return_value.batchClear
execute_method = batch_clear_method.return_value.execute
execute_method.return_value = API_RESPONSE
result = self.hook.batch_clear(ranges=RANGES)
body = {"ranges": RANGES}
self.assertIs(result, API_RESPONSE)
execute_method.assert_called_once_with(num_retries=NUM_RETRIES)
batch_clear_method.assert_called_once_with(
spreadsheetId=SPREADHSEET_ID,
body=body
)
| 44.534694
| 109
| 0.706993
|
656df0e16a61e9d53bc1deea75be7d354cd024a7
| 1,022
|
py
|
Python
|
LeetCode/0051_N-Queen_1.py
|
ankitpandey0/PythonAlgorithms
|
48efed4664fdf2a14b2eb7ce917226a30b34ff53
|
[
"MIT"
] | null | null | null |
LeetCode/0051_N-Queen_1.py
|
ankitpandey0/PythonAlgorithms
|
48efed4664fdf2a14b2eb7ce917226a30b34ff53
|
[
"MIT"
] | null | null | null |
LeetCode/0051_N-Queen_1.py
|
ankitpandey0/PythonAlgorithms
|
48efed4664fdf2a14b2eb7ce917226a30b34ff53
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# A recursive, backtracking approach prunes invalid placements early, reducing the number of
# candidate permutations that have to be examined.
# Define a function that decides whether the current partial permutation may be extended:
# two queens attack along a diagonal exactly when their row distance equals their column
# distance, which is the distance-formula check below.
def whether_to_extend(perm):
i = len(perm) - 1
for j in range(i):
if i-j == abs(perm[i] - perm[j]):
return False
return True
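# Worked example: with perm = [1, 3, 0] and column 2 appended (perm = [1, 3, 0, 2]), the
# checks are |2-1|=1 vs 3-0=3, |2-3|=1 vs 3-1=2 and |2-0|=2 vs 3-2=1; nothing matches,
# so the placement is safe, and in fact [1, 3, 0, 2] is a valid 4-queens solution.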
# Define a function to generate the permutations recursively while applying the
# backtracking checks above
def extend(perm, n):
#write the base case
if len(perm) == n:
print(perm)
        # Comment out the following line to get all possible permutations
exit()
for i in range(n):
if i not in perm:
perm.append(i)
if whether_to_extend(perm):
extend(perm, n)
perm.pop()
if __name__ == "__main__":
n = int(input("Enter the number of rows on the chess board:"))
extend([], n)
| 29.2
| 123
| 0.624266
|
e6e44eb1afb5487a7ca4c82df7bdccf4eb65ff75
| 735
|
py
|
Python
|
test/unit/rules/templates/test_limitsize_description.py
|
tomislacker/cfn-python-lint
|
f209ddfef9bcc1a005adfebcfcc16220b18deddb
|
[
"MIT-0"
] | 1
|
2020-05-08T20:12:31.000Z
|
2020-05-08T20:12:31.000Z
|
test/unit/rules/templates/test_limitsize_description.py
|
tomislacker/cfn-python-lint
|
f209ddfef9bcc1a005adfebcfcc16220b18deddb
|
[
"MIT-0"
] | null | null | null |
test/unit/rules/templates/test_limitsize_description.py
|
tomislacker/cfn-python-lint
|
f209ddfef9bcc1a005adfebcfcc16220b18deddb
|
[
"MIT-0"
] | 1
|
2020-12-01T14:54:28.000Z
|
2020-12-01T14:54:28.000Z
|
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from test.unit.rules import BaseRuleTestCase
from cfnlint.rules.templates.LimitDescription import LimitDescription # pylint: disable=E0401
class TestTemplateLimitDescription(BaseRuleTestCase):
"""Test template limit size"""
def setUp(self):
"""Setup"""
super(TestTemplateLimitDescription, self).setUp()
self.collection.register(LimitDescription())
def test_file_positive(self):
"""Test Positive"""
self.helper_file_positive()
def test_file_negative(self):
"""Test failure"""
self.helper_file_negative('test/fixtures/templates/bad/limit_size.yaml', 1)
| 30.625
| 94
| 0.714286
|
37853c87924721c22817ba497e5450b69044f0d3
| 21,202
|
py
|
Python
|
electrum_axe/plugins/safe_t/qt.py
|
ddude1/electrum-axe
|
b2d030dfc2afb2ca27739b9169ac610c68421dbb
|
[
"MIT"
] | 336
|
2018-03-15T04:12:19.000Z
|
2021-05-20T06:26:42.000Z
|
electrum_axe/plugins/safe_t/qt.py
|
ddude1/electrum-axe
|
b2d030dfc2afb2ca27739b9169ac610c68421dbb
|
[
"MIT"
] | 13
|
2018-05-13T09:40:18.000Z
|
2022-03-20T09:15:50.000Z
|
electrum_axe/plugins/safe_t/qt.py
|
ddude1/electrum-axe
|
b2d030dfc2afb2ca27739b9169ac610c68421dbb
|
[
"MIT"
] | 73
|
2018-03-15T04:12:21.000Z
|
2020-07-19T04:01:18.000Z
|
from functools import partial
import threading
from PyQt5.QtCore import Qt, pyqtSignal, QRegExp
from PyQt5.QtGui import QRegExpValidator
from PyQt5.QtWidgets import (QVBoxLayout, QLabel, QGridLayout, QPushButton,
QHBoxLayout, QButtonGroup, QGroupBox,
QTextEdit, QLineEdit, QRadioButton, QCheckBox, QWidget,
QMessageBox, QFileDialog, QSlider, QTabWidget)
from electrum_axe.gui.qt.util import (WindowModalDialog, WWLabel, Buttons, CancelButton,
OkButton, CloseButton)
from electrum_axe.i18n import _
from electrum_axe.plugin import hook
from electrum_axe.util import bh2u
from ..hw_wallet.qt import QtHandlerBase, QtPluginBase
from ..hw_wallet.plugin import only_hook_if_libraries_available
from .safe_t import SafeTPlugin, TIM_NEW, TIM_RECOVER, TIM_MNEMONIC
PASSPHRASE_HELP_SHORT =_(
"Passphrases allow you to access new wallets, each "
"hidden behind a particular case-sensitive passphrase.")
PASSPHRASE_HELP = PASSPHRASE_HELP_SHORT + " " + _(
"You need to create a separate Axe Electrum wallet for each passphrase "
"you use as they each generate different addresses. Changing "
"your passphrase does not lose other wallets, each is still "
"accessible behind its own passphrase.")
RECOMMEND_PIN = _(
"You should enable PIN protection. Your PIN is the only protection "
"for your Axe coins if your device is lost or stolen.")
PASSPHRASE_NOT_PIN = _(
"If you forget a passphrase you will be unable to access any "
"Axe coins in the wallet behind it. A passphrase is not a PIN. "
"Only change this if you are sure you understand it.")
class QtHandler(QtHandlerBase):
pin_signal = pyqtSignal(object)
def __init__(self, win, pin_matrix_widget_class, device):
super(QtHandler, self).__init__(win, device)
self.pin_signal.connect(self.pin_dialog)
self.pin_matrix_widget_class = pin_matrix_widget_class
def get_pin(self, msg):
self.done.clear()
self.pin_signal.emit(msg)
self.done.wait()
return self.response
def pin_dialog(self, msg):
# Needed e.g. when resetting a device
self.clear_dialog()
dialog = WindowModalDialog(self.top_level_window(), _("Enter PIN"))
matrix = self.pin_matrix_widget_class()
vbox = QVBoxLayout()
vbox.addWidget(QLabel(msg))
vbox.addWidget(matrix)
vbox.addLayout(Buttons(CancelButton(dialog), OkButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
self.response = str(matrix.get_value())
self.done.set()
class QtPlugin(QtPluginBase):
# Derived classes must provide the following class-static variables:
# icon_file
# pin_matrix_widget_class
def create_handler(self, window):
return QtHandler(window, self.pin_matrix_widget_class(), self.device)
@only_hook_if_libraries_available
@hook
def receive_menu(self, menu, addrs, wallet):
if len(addrs) != 1:
return
for keystore in wallet.get_keystores():
if type(keystore) == self.keystore_class:
def show_address(keystore=keystore):
keystore.thread.add(partial(self.show_address, wallet, addrs[0], keystore))
device_name = "{} ({})".format(self.device, keystore.label)
menu.addAction(_("Show on {}").format(device_name), show_address)
def show_settings_dialog(self, window, keystore):
device_id = self.choose_device(window, keystore)
if device_id:
SettingsDialog(window, self, keystore, device_id).exec_()
def request_safe_t_init_settings(self, wizard, method, device):
vbox = QVBoxLayout()
next_enabled = True
label = QLabel(_("Enter a label to name your device:"))
name = QLineEdit()
hl = QHBoxLayout()
hl.addWidget(label)
hl.addWidget(name)
hl.addStretch(1)
vbox.addLayout(hl)
def clean_text(widget):
text = widget.toPlainText().strip()
return ' '.join(text.split())
if method in [TIM_NEW, TIM_RECOVER]:
gb = QGroupBox()
hbox1 = QHBoxLayout()
gb.setLayout(hbox1)
vbox.addWidget(gb)
gb.setTitle(_("Select your seed length:"))
bg = QButtonGroup()
for i, count in enumerate([12, 18, 24]):
rb = QRadioButton(gb)
rb.setText(_("{:d} words").format(count))
bg.addButton(rb)
bg.setId(rb, i)
hbox1.addWidget(rb)
rb.setChecked(True)
cb_pin = QCheckBox(_('Enable PIN protection'))
cb_pin.setChecked(True)
else:
text = QTextEdit()
text.setMaximumHeight(60)
if method == TIM_MNEMONIC:
msg = _("Enter your BIP39 mnemonic:")
else:
msg = _("Enter the master private key beginning with xprv:")
def set_enabled():
from electrum_axe.bip32 import is_xprv
wizard.next_button.setEnabled(is_xprv(clean_text(text)))
text.textChanged.connect(set_enabled)
next_enabled = False
vbox.addWidget(QLabel(msg))
vbox.addWidget(text)
pin = QLineEdit()
pin.setValidator(QRegExpValidator(QRegExp('[1-9]{0,9}')))
pin.setMaximumWidth(100)
hbox_pin = QHBoxLayout()
hbox_pin.addWidget(QLabel(_("Enter your PIN (digits 1-9):")))
hbox_pin.addWidget(pin)
hbox_pin.addStretch(1)
if method in [TIM_NEW, TIM_RECOVER]:
vbox.addWidget(WWLabel(RECOMMEND_PIN))
vbox.addWidget(cb_pin)
else:
vbox.addLayout(hbox_pin)
passphrase_msg = WWLabel(PASSPHRASE_HELP_SHORT)
passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
passphrase_warning.setStyleSheet("color: red")
cb_phrase = QCheckBox(_('Enable passphrases'))
cb_phrase.setChecked(False)
vbox.addWidget(passphrase_msg)
vbox.addWidget(passphrase_warning)
vbox.addWidget(cb_phrase)
wizard.exec_layout(vbox, next_enabled=next_enabled)
if method in [TIM_NEW, TIM_RECOVER]:
item = bg.checkedId()
pin = cb_pin.isChecked()
else:
item = ' '.join(str(clean_text(text)).split())
pin = str(pin.text())
return (item, name.text(), pin, cb_phrase.isChecked())
class Plugin(SafeTPlugin, QtPlugin):
icon_unpaired = "safe-t_unpaired.png"
icon_paired = "safe-t.png"
@classmethod
def pin_matrix_widget_class(self):
# We use a local updated copy of pinmatrix.py until safetlib
# releases a new version that includes https://github.com/archos-safe-t/python-safet/commit/b1eab3dba4c04fdfc1fcf17b66662c28c5f2380e
# from safetlib.qt.pinmatrix import PinMatrixWidget
from .pinmatrix import PinMatrixWidget
return PinMatrixWidget
class SettingsDialog(WindowModalDialog):
'''This dialog doesn't require a device be paired with a wallet.
We want users to be able to wipe a device even if they've forgotten
their PIN.'''
def __init__(self, window, plugin, keystore, device_id):
title = _("{} Settings").format(plugin.device)
super(SettingsDialog, self).__init__(window, title)
self.setMaximumWidth(540)
devmgr = plugin.device_manager()
config = devmgr.config
handler = keystore.handler
thread = keystore.thread
hs_cols, hs_rows = (128, 64)
def invoke_client(method, *args, **kw_args):
unpair_after = kw_args.pop('unpair_after', False)
def task():
client = devmgr.client_by_id(device_id)
if not client:
raise RuntimeError("Device not connected")
if method:
getattr(client, method)(*args, **kw_args)
if unpair_after:
devmgr.unpair_id(device_id)
return client.features
thread.add(task, on_success=update)
def update(features):
self.features = features
set_label_enabled()
if features.bootloader_hash:
bl_hash = bh2u(features.bootloader_hash)
bl_hash = "\n".join([bl_hash[:32], bl_hash[32:]])
else:
bl_hash = "N/A"
noyes = [_("No"), _("Yes")]
endis = [_("Enable Passphrases"), _("Disable Passphrases")]
disen = [_("Disabled"), _("Enabled")]
setchange = [_("Set a PIN"), _("Change PIN")]
version = "%d.%d.%d" % (features.major_version,
features.minor_version,
features.patch_version)
device_label.setText(features.label)
pin_set_label.setText(noyes[features.pin_protection])
passphrases_label.setText(disen[features.passphrase_protection])
bl_hash_label.setText(bl_hash)
label_edit.setText(features.label)
device_id_label.setText(features.device_id)
initialized_label.setText(noyes[features.initialized])
version_label.setText(version)
clear_pin_button.setVisible(features.pin_protection)
clear_pin_warning.setVisible(features.pin_protection)
pin_button.setText(setchange[features.pin_protection])
pin_msg.setVisible(not features.pin_protection)
passphrase_button.setText(endis[features.passphrase_protection])
language_label.setText(features.language)
def set_label_enabled():
label_apply.setEnabled(label_edit.text() != self.features.label)
def rename():
invoke_client('change_label', label_edit.text())
def toggle_passphrase():
title = _("Confirm Toggle Passphrase Protection")
currently_enabled = self.features.passphrase_protection
if currently_enabled:
msg = _("After disabling passphrases, you can only pair this "
"Axe Electrum wallet if it had an empty passphrase. "
"If its passphrase was not empty, you will need to "
"create a new wallet with the install wizard. You "
"can use this wallet again at any time by re-enabling "
"passphrases and entering its passphrase.")
else:
msg = _("Your current Axe Electrum wallet can only be used with "
"an empty passphrase. You must create a separate "
"wallet with the install wizard for other passphrases "
"as each one generates a new set of addresses.")
msg += "\n\n" + _("Are you sure you want to proceed?")
if not self.question(msg, title=title):
return
invoke_client('toggle_passphrase', unpair_after=currently_enabled)
def change_homescreen():
dialog = QFileDialog(self, _("Choose Homescreen"))
filename, __ = dialog.getOpenFileName()
if not filename:
return # user cancelled
if filename.endswith('.toif'):
img = open(filename, 'rb').read()
if img[:8] != b'TOIf\x90\x00\x90\x00':
handler.show_error('File is not a TOIF file with size of 144x144')
return
else:
from PIL import Image # FIXME
im = Image.open(filename)
if im.size != (128, 64):
handler.show_error('Image must be 128 x 64 pixels')
return
im = im.convert('1')
pix = im.load()
img = bytearray(1024)
for j in range(64):
for i in range(128):
if pix[i, j]:
o = (i + j * 128)
img[o // 8] |= (1 << (7 - o % 8))
img = bytes(img)
invoke_client('change_homescreen', img)
def clear_homescreen():
invoke_client('change_homescreen', b'\x00')
def set_pin():
invoke_client('set_pin', remove=False)
def clear_pin():
invoke_client('set_pin', remove=True)
def wipe_device():
wallet = window.wallet
if wallet and sum(wallet.get_balance()):
title = _("Confirm Device Wipe")
msg = _("Are you SURE you want to wipe the device?\n"
"Your wallet still has Axe coins in it!")
if not self.question(msg, title=title,
icon=QMessageBox.Critical):
return
invoke_client('wipe_device', unpair_after=True)
def slider_moved():
mins = timeout_slider.sliderPosition()
timeout_minutes.setText(_("{:2d} minutes").format(mins))
def slider_released():
config.set_session_timeout(timeout_slider.sliderPosition() * 60)
# Information tab
info_tab = QWidget()
info_layout = QVBoxLayout(info_tab)
info_glayout = QGridLayout()
info_glayout.setColumnStretch(2, 1)
device_label = QLabel()
pin_set_label = QLabel()
passphrases_label = QLabel()
version_label = QLabel()
device_id_label = QLabel()
bl_hash_label = QLabel()
bl_hash_label.setWordWrap(True)
language_label = QLabel()
initialized_label = QLabel()
rows = [
(_("Device Label"), device_label),
(_("PIN set"), pin_set_label),
(_("Passphrases"), passphrases_label),
(_("Firmware Version"), version_label),
(_("Device ID"), device_id_label),
(_("Bootloader Hash"), bl_hash_label),
(_("Language"), language_label),
(_("Initialized"), initialized_label),
]
for row_num, (label, widget) in enumerate(rows):
info_glayout.addWidget(QLabel(label), row_num, 0)
info_glayout.addWidget(widget, row_num, 1)
info_layout.addLayout(info_glayout)
# Settings tab
settings_tab = QWidget()
settings_layout = QVBoxLayout(settings_tab)
settings_glayout = QGridLayout()
# Settings tab - Label
label_msg = QLabel(_("Name this {}. If you have multiple devices "
"their labels help distinguish them.")
.format(plugin.device))
label_msg.setWordWrap(True)
label_label = QLabel(_("Device Label"))
label_edit = QLineEdit()
label_edit.setMinimumWidth(150)
label_edit.setMaxLength(plugin.MAX_LABEL_LEN)
label_apply = QPushButton(_("Apply"))
label_apply.clicked.connect(rename)
label_edit.textChanged.connect(set_label_enabled)
settings_glayout.addWidget(label_label, 0, 0)
settings_glayout.addWidget(label_edit, 0, 1, 1, 2)
settings_glayout.addWidget(label_apply, 0, 3)
settings_glayout.addWidget(label_msg, 1, 1, 1, -1)
# Settings tab - PIN
pin_label = QLabel(_("PIN Protection"))
pin_button = QPushButton()
pin_button.clicked.connect(set_pin)
settings_glayout.addWidget(pin_label, 2, 0)
settings_glayout.addWidget(pin_button, 2, 1)
pin_msg = QLabel(_("PIN protection is strongly recommended. "
"A PIN is your only protection against someone "
"stealing your Axe coins if they obtain physical "
"access to your {}.").format(plugin.device))
pin_msg.setWordWrap(True)
pin_msg.setStyleSheet("color: red")
settings_glayout.addWidget(pin_msg, 3, 1, 1, -1)
# Settings tab - Homescreen
homescreen_label = QLabel(_("Homescreen"))
homescreen_change_button = QPushButton(_("Change..."))
homescreen_clear_button = QPushButton(_("Reset"))
homescreen_change_button.clicked.connect(change_homescreen)
try:
import PIL
except ImportError:
homescreen_change_button.setDisabled(True)
homescreen_change_button.setToolTip(
_("Required package 'PIL' is not available - Please install it.")
)
homescreen_clear_button.clicked.connect(clear_homescreen)
homescreen_msg = QLabel(_("You can set the homescreen on your "
"device to personalize it. You must "
"choose a {} x {} monochrome black and "
"white image.").format(hs_cols, hs_rows))
homescreen_msg.setWordWrap(True)
settings_glayout.addWidget(homescreen_label, 4, 0)
settings_glayout.addWidget(homescreen_change_button, 4, 1)
settings_glayout.addWidget(homescreen_clear_button, 4, 2)
settings_glayout.addWidget(homescreen_msg, 5, 1, 1, -1)
# Settings tab - Session Timeout
timeout_label = QLabel(_("Session Timeout"))
timeout_minutes = QLabel()
timeout_slider = QSlider(Qt.Horizontal)
timeout_slider.setRange(1, 60)
timeout_slider.setSingleStep(1)
timeout_slider.setTickInterval(5)
timeout_slider.setTickPosition(QSlider.TicksBelow)
timeout_slider.setTracking(True)
timeout_msg = QLabel(
_("Clear the session after the specified period "
"of inactivity. Once a session has timed out, "
"your PIN and passphrase (if enabled) must be "
"re-entered to use the device."))
timeout_msg.setWordWrap(True)
timeout_slider.setSliderPosition(config.get_session_timeout() // 60)
slider_moved()
timeout_slider.valueChanged.connect(slider_moved)
timeout_slider.sliderReleased.connect(slider_released)
settings_glayout.addWidget(timeout_label, 6, 0)
settings_glayout.addWidget(timeout_slider, 6, 1, 1, 3)
settings_glayout.addWidget(timeout_minutes, 6, 4)
settings_glayout.addWidget(timeout_msg, 7, 1, 1, -1)
settings_layout.addLayout(settings_glayout)
settings_layout.addStretch(1)
# Advanced tab
advanced_tab = QWidget()
advanced_layout = QVBoxLayout(advanced_tab)
advanced_glayout = QGridLayout()
# Advanced tab - clear PIN
clear_pin_button = QPushButton(_("Disable PIN"))
clear_pin_button.clicked.connect(clear_pin)
clear_pin_warning = QLabel(
_("If you disable your PIN, anyone with physical access to your "
"{} device can spend your Axe coins.").format(plugin.device))
clear_pin_warning.setWordWrap(True)
clear_pin_warning.setStyleSheet("color: red")
advanced_glayout.addWidget(clear_pin_button, 0, 2)
advanced_glayout.addWidget(clear_pin_warning, 1, 0, 1, 5)
# Advanced tab - toggle passphrase protection
passphrase_button = QPushButton()
passphrase_button.clicked.connect(toggle_passphrase)
passphrase_msg = WWLabel(PASSPHRASE_HELP)
passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
passphrase_warning.setStyleSheet("color: red")
advanced_glayout.addWidget(passphrase_button, 3, 2)
advanced_glayout.addWidget(passphrase_msg, 4, 0, 1, 5)
advanced_glayout.addWidget(passphrase_warning, 5, 0, 1, 5)
# Advanced tab - wipe device
wipe_device_button = QPushButton(_("Wipe Device"))
wipe_device_button.clicked.connect(wipe_device)
wipe_device_msg = QLabel(
_("Wipe the device, removing all data from it. The firmware "
"is left unchanged."))
wipe_device_msg.setWordWrap(True)
wipe_device_warning = QLabel(
_("Only wipe a device if you have the recovery seed written down "
"and the device wallet(s) are empty, otherwise the Axe coins "
"will be lost forever."))
wipe_device_warning.setWordWrap(True)
wipe_device_warning.setStyleSheet("color: red")
advanced_glayout.addWidget(wipe_device_button, 6, 2)
advanced_glayout.addWidget(wipe_device_msg, 7, 0, 1, 5)
advanced_glayout.addWidget(wipe_device_warning, 8, 0, 1, 5)
advanced_layout.addLayout(advanced_glayout)
advanced_layout.addStretch(1)
tabs = QTabWidget(self)
tabs.addTab(info_tab, _("Information"))
tabs.addTab(settings_tab, _("Settings"))
tabs.addTab(advanced_tab, _("Advanced"))
dialog_vbox = QVBoxLayout(self)
dialog_vbox.addWidget(tabs)
dialog_vbox.addLayout(Buttons(CloseButton(self)))
# Update information
invoke_client(None)
| 42.319361
| 140
| 0.609659
|
307cf8ef7c06be2eb85e391360aa01149a2f9237
| 1,244
|
py
|
Python
|
teste/migrations/0001_initial.py
|
pussbb/django_teste
|
6674cb96b8ff1a2e62f868d91f397c25ae5f8027
|
[
"WTFPL"
] | null | null | null |
teste/migrations/0001_initial.py
|
pussbb/django_teste
|
6674cb96b8ff1a2e62f868d91f397c25ae5f8027
|
[
"WTFPL"
] | 6
|
2020-06-05T23:19:35.000Z
|
2022-02-10T10:50:13.000Z
|
teste/migrations/0001_initial.py
|
pussbb/django_teste
|
6674cb96b8ff1a2e62f868d91f397c25ae5f8027
|
[
"WTFPL"
] | null | null | null |
# Generated by Django 2.2.5 on 2019-09-08 07:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, unique=True)),
('slug', models.SlugField(max_length=200, unique=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('content', models.TextField()),
('created_on', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='blog_posts',
to=settings.AUTH_USER_MODEL)
),
],
options={
'ordering': ['-created_on'],
},
),
]
| 32.736842
| 77
| 0.54582
|
db9d784932acef67a1839ac5b21271c01e271840
| 2,387
|
py
|
Python
|
nilearn/tests/test_segmentation.py
|
Qin-Ming/nilearn
|
82f4075d8a8ea9aec25e66bd87ebb79a6be6d32f
|
[
"BSD-2-Clause"
] | null | null | null |
nilearn/tests/test_segmentation.py
|
Qin-Ming/nilearn
|
82f4075d8a8ea9aec25e66bd87ebb79a6be6d32f
|
[
"BSD-2-Clause"
] | null | null | null |
nilearn/tests/test_segmentation.py
|
Qin-Ming/nilearn
|
82f4075d8a8ea9aec25e66bd87ebb79a6be6d32f
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Testing functions for random walker segmentation from scikit-image 0.11.3.
Thanks to scikit image.
"""
import numpy as np
import pytest
from nilearn._utils.segmentation import _random_walker
def test_modes_in_random_walker():
img = np.zeros((30, 30, 30)) + 0.1 * np.random.RandomState(
42
).standard_normal(size=(30, 30, 30))
img[9:21, 9:21, 9:21] = 1
img[10:20, 10:20, 10:20] = 0
labels = np.zeros_like(img)
labels[6, 6, 6] = 1
labels[14, 15, 16] = 2
# default mode = cg
random_walker_cg = _random_walker(img, labels, beta=90)
assert (random_walker_cg.reshape(img.shape)[6, 6, 6] == 1).all()
assert img.shape == random_walker_cg.shape
# test `mask` strategy of sub function _mask_edges_weights in laplacian
labels[5:25, 26:29, 26:29] = -1
random_walker_inactive = _random_walker(img, labels, beta=30)
def test_trivial_cases():
# When all voxels are labeled
img = np.ones((10, 10, 10))
labels = np.ones((10, 10, 10))
# It returns same labels which are provided
pass_through = _random_walker(img, labels)
np.testing.assert_array_equal(pass_through, labels)
def test_bad_inputs():
# Too few dimensions
img = np.ones(10)
labels = np.arange(10)
with pytest.raises(ValueError):
_random_walker(img, labels)
# Too many dimensions
rng = np.random.RandomState(42)
img = rng.normal(size=(3, 3, 3, 3, 3))
labels = np.arange(3 ** 5).reshape(img.shape)
with pytest.raises(ValueError):
_random_walker(img, labels)
# Spacing incorrect length
img = rng.normal(size=(10, 10))
labels = np.zeros((10, 10))
labels[2, 4] = 2
labels[6, 8] = 5
with pytest.raises(ValueError):
_random_walker(img, labels, spacing=(1,))
def test_reorder_labels():
# When labels have non-consecutive integers, we make them consecutive
# by reordering them to make no gaps/differences between integers. We expect
# labels to be of same shape even if they are reordered.
# Issue #938, comment #14.
data = np.zeros((5, 5)) + 0.1 * np.random.RandomState(42).standard_normal(
size=(5, 5)
)
data[1:5, 1:5] = 1
labels = np.zeros_like(data)
labels[3, 3] = 1
labels[1, 4] = 4 # giving integer which is non-consecutive
labels = _random_walker(data, labels)
assert data.shape == labels.shape
| 29.469136
| 80
| 0.655216
|
6f731b39dd21b0d9403abf7ba3b88cf200aad014
| 13,471
|
py
|
Python
|
run/completion/trav_trans/train.py
|
CGCL-codes/naturalcc
|
7bab9a97331fafac1235fb32de829ff8d572320f
|
[
"MIT"
] | 71
|
2020-12-04T02:18:13.000Z
|
2022-03-30T15:19:50.000Z
|
run/completion/trav_trans/train.py
|
CGCL-codes/naturalcc
|
7bab9a97331fafac1235fb32de829ff8d572320f
|
[
"MIT"
] | 4
|
2021-03-10T17:48:50.000Z
|
2022-03-13T10:42:22.000Z
|
run/completion/trav_trans/train.py
|
CGCL-codes/naturalcc
|
7bab9a97331fafac1235fb32de829ff8d572320f
|
[
"MIT"
] | 11
|
2020-12-09T12:17:44.000Z
|
2022-03-30T09:02:13.000Z
|
import math
import os
import random
import torch
from ncc import LOGGER
from ncc import tasks
from ncc.data import iterators
from ncc.trainers.ncc_trainers import Trainer
from ncc.utils import checkpoint_utils, distributed_utils
from ncc.utils import set_seed
from ncc.utils import utils
from ncc.utils.file_ops.yaml_io import load_yaml
from ncc.utils.logging import meters
from ncc.utils.logging import metrics, progress_bar
from ncc.utils.path_manager import PathManager
@metrics.aggregate('train')
def train(args, trainer, task, epoch_itr):
"""Train the model for one epoch."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=args['distributed_training']['fix_batches_to_gpus'],
shuffle=(epoch_itr.next_epoch_idx > args['dataset']['curriculum']),
)
update_freq = (
args['optimization']['update_freq'][epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(args['optimization']['update_freq'])
else args['optimization']['update_freq'][-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
progress = progress_bar.progress_bar(
itr,
log_format=args['common']['log_format'],
log_interval=args['common']['log_interval'],
epoch=epoch_itr.epoch,
tensorboard_logdir=(
args['common']['tensorboard_logdir'] if distributed_utils.is_master(args) else None
),
default_log_format=('tqdm' if not args['common']['no_progress_bar'] else 'simple'),
)
# task specific setup per epoch
task.begin_epoch(epoch_itr.epoch, trainer.get_model())
valid_subsets = args['dataset']['valid_subset'].split(',')
max_update = args['optimization']['max_update'] or math.inf
for samples in progress:
with metrics.aggregate('train_inner'):
log_output = trainer.train_step(samples)
if log_output is None: # OOM, overflow, ...
continue
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % args['common']['log_interval'] == 0:
stats = get_training_stats(metrics.get_smoothed_values('train_inner'))
progress.log(stats, tag='train_inner', step=num_updates)
# reset epoch-level meters
metrics.reset_meters('train_inner')
if (
not args['dataset']['disable_validation']
and args['checkpoint']['save_interval_updates'] > 0
and num_updates % args['checkpoint']['save_interval_updates'] == 0
and num_updates > 0
):
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
if num_updates >= max_update:
break
# log end-of-epoch stats
stats = get_training_stats(metrics.get_smoothed_values('train'))
progress.print(stats, tag='train', step=num_updates)
# reset epoch-level meters
metrics.reset_meters('train')
def validate(args, trainer, task, epoch_itr, subsets):
"""Evaluate the model on the validation set(s) and return the losses."""
if args['dataset']['fixed_validation_seed'] is not None:
# set fixed seed for every validation
set_seed.set_torch_seed(args['dataset']['fixed_validation_seed'])
valid_losses = []
for subset in subsets:
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=task.dataset(subset),
max_tokens=args['dataset']['max_tokens_valid'],
max_sentences=args['dataset']['max_sentences_valid'],
max_positions=utils.resolve_max_positions(
task.max_positions(),
trainer.get_model().max_positions(),
),
ignore_invalid_inputs=args['dataset']['skip_invalid_size_inputs_valid_test'],
required_batch_size_multiple=args['dataset']['required_batch_size_multiple'],
seed=args['common']['seed'],
num_shards=args['distributed_training']['distributed_world_size'],
shard_id=args['distributed_training']['distributed_rank'],
num_workers=args['dataset']['num_workers'],
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=args['common']['log_format'],
log_interval=args['common']['log_interval'],
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
args['common']['tensorboard_logdir'] if distributed_utils.is_master(args) else None
),
default_log_format=('tqdm' if not args['common']['no_progress_bar'] else 'simple'),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for sample in progress:
trainer.valid_step(sample)
# log validation stats
stats = get_valid_stats(args, trainer, agg.get_smoothed_values())
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[args['checkpoint']['best_checkpoint_metric']])
return valid_losses
def get_valid_stats(args, trainer, stats):
if 'nll_loss' in stats and 'ppl' not in stats:
stats['ppl'] = utils.get_perplexity(stats['nll_loss'])
stats['num_updates'] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, 'best'):
key = 'best_{0}'.format(args['checkpoint']['best_checkpoint_metric'])
best_function = max if args['checkpoint']['maximize_best_checkpoint_metric'] else min
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best,
stats[args['checkpoint']['best_checkpoint_metric']],
)
return stats
def get_training_stats(stats):
if 'nll_loss' in stats and 'ppl' not in stats:
stats['ppl'] = utils.get_perplexity(stats['nll_loss'])
stats['wall'] = round(metrics.get_meter('default', 'wall').elapsed_time, 0)
return stats
def should_stop_early(args, valid_loss):
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if args['checkpoint']['patience'] <= 0:
return False
def is_better(a, b):
return a > b if args['checkpoint']['maximize_best_checkpoint_metric'] else a < b
prev_best = getattr(should_stop_early, 'best', None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= args['checkpoint']['patience']:
LOGGER.info('early stop since valid performance hasn\'t improved for last {} runs'.format(
args['checkpoint']['patience']))
return should_stop_early.num_runs >= args['checkpoint']['patience']
def single_main(args, init_distributed=False):
assert args['dataset']['max_tokens'] is not None or args['dataset']['max_sentences'] is not None, \
'Must specify batch size either with --max-tokens or --max-sentences'
metrics.reset()
# 0. Initialize CUDA and distributed training
if torch.cuda.is_available() and not args['common']['cpu']:
torch.cuda.set_device(args['distributed_training']['device_id'])
set_seed.set_seed(args['common']['seed'])
if init_distributed:
args['distributed_training']['distributed_rank'] = distributed_utils.distributed_init(args)
# Verify checkpoint directory
if distributed_utils.is_master(args):
save_dir = args['checkpoint']['save_dir']
checkpoint_utils.verify_checkpoint_directory(save_dir)
PathManager.rm(os.path.join(save_dir, '*.pt')) # this code will remove pre-trained models
# Print args
LOGGER.info(args)
# 1. Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args)
# 2. Load valid dataset (we load training data below, based on the latest checkpoint)
task.load_dataset(args['dataset']['valid_subset'], combine=False, epoch=1)
# 3. Build model and criterion
model = task.build_model(args)
criterion = task.build_criterion(args)
LOGGER.info(model)
LOGGER.info('model {}, criterion {}'.format(args['model']['arch'], criterion.__class__.__name__))
LOGGER.info('num. model params: {} (num. trained: {})'.format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
))
# 4. Build trainer
trainer = Trainer(args, task, model, criterion)
LOGGER.info('training on {} GPUs'.format(args['distributed_training']['distributed_world_size']))
LOGGER.info('max tokens per GPU = {} and max sentences per GPU = {}'.format(
args['dataset']['max_tokens'],
args['dataset']['max_sentences'],
))
# 5. Load the latest checkpoint if one is available and restore the corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer, combine=False)
# 6. Train until the learning rate gets too small
max_epoch = args['optimization']['max_epoch'] or math.inf
max_update = args['optimization']['max_update'] or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
valid_subsets = args['dataset']['valid_subset'].split(',')
while (
lr > args['optimization']['min_lr']
and epoch_itr.next_epoch_idx <= max_epoch
and trainer.get_num_updates() < max_update
):
# train for one epoch
train(args, trainer, task, epoch_itr)
if not args['dataset']['disable_validation'] and epoch_itr.epoch % args['dataset']['validate_interval'] == 0:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
else:
valid_losses = [None]
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
# save checkpoint
if epoch_itr.epoch % args['checkpoint']['save_interval'] == 0:
checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
# early stop
if should_stop_early(args, valid_losses[0]):
LOGGER.info('early stop since valid performance hasn\'t improved for last {} runs'.format(
args['checkpoint']['patience']))
break
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
combine=False, # TODO to be checked
# sharded data: get train iterator for next epoch
load_dataset=(os.pathsep in args['task']['data']),
)
train_meter.stop()
LOGGER.info('done training in {:.1f} seconds'.format(train_meter.sum))
def distributed_main(i, args, start_rank=0):
args['distributed_training']['device_id'] = i
if args['distributed_training']['distributed_rank'] is None: # torch.multiprocessing.spawn
args['distributed_training']['distributed_rank'] = start_rank + i
single_main(args, init_distributed=True)
def cli_main():
import argparse
parser = argparse.ArgumentParser(
description="Downloading/Decompressing code_search_net dataset(s) or Tree-Sitter Library(ies)")
parser.add_argument(
"--yaml_file", "-f", type=str, help="load {yaml_file}.yml for train",
default='config/py150/python',
)
args = parser.parse_args()
yaml_file = os.path.join(os.path.dirname(__file__), '{}.yml'.format(args.yaml_file))
LOGGER.info('Load arguments in {}'.format(yaml_file))
args = load_yaml(yaml_file)
LOGGER.info(args)
if args['distributed_training']['distributed_init_method'] is None:
distributed_utils.infer_init_method(args)
if args['distributed_training']['distributed_init_method'] is not None:
# distributed training
if torch.cuda.device_count() > 1 and not args['distributed_training']['distributed_no_spawn']:
start_rank = args['distributed_training']['distributed_rank']
args['distributed_training']['distributed_rank'] = None # assign automatically
torch.multiprocessing.spawn(
fn=distributed_main,
args=(args, start_rank),
nprocs=torch.cuda.device_count(),
)
else:
distributed_main(args['distributed_training']['device_id'], args)
elif args['distributed_training']['distributed_world_size'] > 1:
# fallback for single node with multiple GPUs
assert args['distributed_training']['distributed_world_size'] <= torch.cuda.device_count()
port = random.randint(10000, 20000)
args['distributed_training']['distributed_init_method'] = 'tcp://localhost:{port}'.format(port=port)
args['distributed_training']['distributed_rank'] = None # set based on device id
torch.multiprocessing.spawn(
fn=distributed_main,
args=(args,),
nprocs=args['distributed_training']['distributed_world_size'],
)
else:
LOGGER.info('single GPU training...')
single_main(args)
if __name__ == '__main__':
cli_main()
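# Illustrative invocation (not part of the original file; the config path below is the
# script's own default and is resolved relative to this script's directory):
#   python run/completion/trav_trans/train.py -f config/py150/python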
| 41.070122
| 117
| 0.661644
|
0815443dafd21bc8ef9eff44f0bb743c2d8581de
| 403
|
py
|
Python
|
grab-cores.py
|
wking/cpython-extension
|
d87eeb05813add0c407be4b8a19905cb51c16588
|
[
"CC0-1.0"
] | 1
|
2019-06-11T00:06:50.000Z
|
2019-06-11T00:06:50.000Z
|
grab-cores.py
|
wking/cpython-extension
|
d87eeb05813add0c407be4b8a19905cb51c16588
|
[
"CC0-1.0"
] | null | null | null |
grab-cores.py
|
wking/cpython-extension
|
d87eeb05813add0c407be4b8a19905cb51c16588
|
[
"CC0-1.0"
] | 1
|
2020-03-26T13:21:47.000Z
|
2020-03-26T13:21:47.000Z
|
#!/usr/bin/env python
import threading
import spam
def grab_cores(threads=1, count=int(1e9)):
_threads = []
for i in range(threads):
thread = threading.Thread(target=spam.busy, args=(count,))
_threads.append(thread)
thread.start()
for thread in _threads:
thread.join()
if __name__ == '__main__':
import sys
grab_cores(threads=int(sys.argv[1]))
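# Illustrative usage (not part of the original script; assumes the compiled `spam`
# extension exposing busy(count) is importable):
#   python grab-cores.py 4   # starts 4 threads, each spinning through spam.busy(10**9)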
| 18.318182
| 66
| 0.637717
|
9669c7c868c60fb0d3f1f1fbd1d2667c7149b80b
| 1,393
|
py
|
Python
|
samples/test/placeholder_if_test.py
|
votti/pipelines
|
1c3e2768e6177d5d6e3f4b8eff8fafb9a3b76c1f
|
[
"Apache-2.0"
] | 1
|
2022-03-30T05:22:19.000Z
|
2022-03-30T05:22:19.000Z
|
samples/test/placeholder_if_test.py
|
votti/pipelines
|
1c3e2768e6177d5d6e3f4b8eff8fafb9a3b76c1f
|
[
"Apache-2.0"
] | 1
|
2020-02-06T12:53:44.000Z
|
2020-02-06T12:53:44.000Z
|
samples/test/placeholder_if_test.py
|
votti/pipelines
|
1c3e2768e6177d5d6e3f4b8eff8fafb9a3b76c1f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp.deprecated as kfp
from .placeholder_if import pipeline_both, pipeline_none
# from .placeholder_if_v2 import pipeline_both as pipeline_both_v2, pipeline_none as pipeline_none_v2
from kfp.samples.test.utils import run_pipeline_func, TestCase
run_pipeline_func([
# TODO(chesu): fix compile failure, https://github.com/kubeflow/pipelines/issues/6966
# TestCase(
# pipeline_func=pipeline_none_v2,
# mode=kfp.dsl.PipelineExecutionMode.V2_ENGINE),
# TestCase(
# pipeline_func=pipeline_both_v2,
# mode=kfp.dsl.PipelineExecutionMode.V2_ENGINE),
TestCase(
pipeline_func=pipeline_none,
mode=kfp.dsl.PipelineExecutionMode.V1_LEGACY),
TestCase(
pipeline_func=pipeline_both,
mode=kfp.dsl.PipelineExecutionMode.V1_LEGACY),
])
| 39.8
| 101
| 0.753769
|
3c392c17091f2798f07c06af44d2de7bfd0705c2
| 862
|
py
|
Python
|
switchrule.py
|
Lin-UN/shadowsocks-mod
|
1061c3d899ec40b66c84cecb4c8f985bedc94016
|
[
"Apache-2.0"
] | 4
|
2020-03-20T01:13:52.000Z
|
2020-12-08T06:13:51.000Z
|
switchrule.py
|
aiastia/testssr
|
070e72964ab3ebcd6c63c3ada39ee188b1fadf1d
|
[
"Apache-2.0"
] | 14
|
2021-01-05T06:58:04.000Z
|
2022-02-04T19:06:02.000Z
|
switchrule.py
|
aiastia/testssr
|
070e72964ab3ebcd6c63c3ada39ee188b1fadf1d
|
[
"Apache-2.0"
] | 8
|
2020-02-22T12:37:17.000Z
|
2021-04-10T14:27:29.000Z
|
from configloader import get_config
def getKeys():
key_list = ["id", "port", "u", "d", "transfer_enable", "passwd", "enable"]
if get_config().API_INTERFACE == "sspanelv3":
key_list += ["method"]
elif get_config().API_INTERFACE == "sspanelv3ssr":
key_list += ["method", "obfs", "protocol"]
elif get_config().API_INTERFACE == "glzjinmod":
key_list += [
"method",
"obfs",
"obfs_param",
"protocol",
"protocol_param",
"id",
"node_speedlimit",
"forbidden_ip",
"forbidden_port",
"disconnect_ip",
"is_multi_user",
]
return key_list
# return key_list + ['plan'] # append the column name 'plan'
def isTurnOn(row):
return True
# return row['plan'] == 'B' # then judge here
| 27.806452
| 78
| 0.532483
|
6f80ef7041b59359fc7a790ede395f9c46c058e0
| 1,829
|
py
|
Python
|
is_pto/preprocess_message.py
|
rohinigopalqxf2/practice-testing-ai-ml
|
ba8c8e14e24048653d8b00819f5337e3dc34757d
|
[
"MIT"
] | 2
|
2022-02-14T12:32:18.000Z
|
2022-03-09T10:57:28.000Z
|
is_pto/preprocess_message.py
|
rohinigopalqxf2/practice-testing-ai-ml
|
ba8c8e14e24048653d8b00819f5337e3dc34757d
|
[
"MIT"
] | 3
|
2021-05-28T09:02:30.000Z
|
2021-07-13T11:17:07.000Z
|
is_pto/preprocess_message.py
|
rohinigopalqxf2/practice-testing-ai-ml
|
ba8c8e14e24048653d8b00819f5337e3dc34757d
|
[
"MIT"
] | 1
|
2022-01-24T03:53:13.000Z
|
2022-01-24T03:53:13.000Z
|
"""
Preprocess incoming message to match what the training model uses
a) Clean the unwanted portions of the Skype message and SQS formatting
b) Mimic the training model - remove stop words and stem the words
"""
import re
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
NOT_STOP_WORDS = ['not','off','be','will','before','after','out']
ADD_STOP_WORDS = ['today', 'tomorrow', 'yesterday']
def clean_sqs_skype_formatting(message):
"Clean up unwanted Skype and SQS formatting"
cleaned_message = re.sub(r'<quote.*</quote>', '', message)
cleaned_message = re.sub(r'<.*</.*?>', '', cleaned_message) #quoted message
cleaned_message = re.sub(r'\B@\w+', '', cleaned_message) #@mentions
cleaned_message = re.sub(r'&.*?;', '', cleaned_message) #encoded strings
    cleaned_message = re.sub(r'^(\s)*$\n', '', cleaned_message) #empty lines
cleaned_message = cleaned_message.replace('<legacyquote>', '')
cleaned_message = cleaned_message.replace(',', ' ')
cleaned_message = cleaned_message.replace('.', ' ')
cleaned_message = cleaned_message.replace('"', ' ')
cleaned_message = cleaned_message.replace("'", ' ')
cleaned_message = cleaned_message.replace('\\', ' ')
return cleaned_message
def preprocess_message(message):
"Preprocess the message"
stemmer = SnowballStemmer('english')
words = stopwords.words("english")
for word in NOT_STOP_WORDS:
words.remove(word)
for word in ADD_STOP_WORDS:
words.append(word)
clean_message = " ".join([stemmer.stem(i) for i in re.sub("[^a-zA-Z]", " ", message).split() if i not in words]).lower()
return clean_message
def get_clean_message(message):
"Clean the message"
message = clean_sqs_skype_formatting(message)
message = preprocess_message(message)
return message
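# Illustrative example (not part of the original module; exact output depends on the
# installed NLTK stopword list and stemmer version):
#   get_clean_message("<legacyquote>hi team</legacyquote> i will be off tomorrow")
#   # the quoted markup, the stop word 'i' and the added stop word 'tomorrow' are
#   # dropped, leaving roughly 'will be off'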
| 39.76087
| 124
| 0.693275
|
350184c1f200436b507f860ff048a9194624f094
| 699
|
py
|
Python
|
basico/core/migrations/0001_initial.py
|
ErickFernan/Django-Estudos
|
c42b9b00e57109389fef60235497d5ef903a7298
|
[
"MIT"
] | null | null | null |
basico/core/migrations/0001_initial.py
|
ErickFernan/Django-Estudos
|
c42b9b00e57109389fef60235497d5ef903a7298
|
[
"MIT"
] | null | null | null |
basico/core/migrations/0001_initial.py
|
ErickFernan/Django-Estudos
|
c42b9b00e57109389fef60235497d5ef903a7298
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.12 on 2022-03-18 15:10
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Produto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=100, verbose_name='Nome')),
('preco', models.DecimalField(decimal_places=2, max_digits=8, verbose_name='Preço')),
('estoque', models.IntegerField(verbose_name='Quantidade em Estoque')),
],
),
]
| 29.125
| 114
| 0.599428
|
8a6af26441bdaf97b16cbde6c075c10a1a6a54c6
| 1,094
|
py
|
Python
|
tests/063.py
|
abawchen/leetcode
|
41d3b172a7694a46a860fbcb0565a3acccd000f2
|
[
"MIT"
] | null | null | null |
tests/063.py
|
abawchen/leetcode
|
41d3b172a7694a46a860fbcb0565a3acccd000f2
|
[
"MIT"
] | null | null | null |
tests/063.py
|
abawchen/leetcode
|
41d3b172a7694a46a860fbcb0565a3acccd000f2
|
[
"MIT"
] | null | null | null |
import unittest
import sys
sys.path.append('./')
solutions = __import__('solutions.063_unique_paths_ii', fromlist='*')
class Test(unittest.TestCase):
def test_uniquePathsWithObstacles(self):
s = solutions.Solution()
grid = [
[0,0,0],
[0,1,0],
[0,0,0]
]
self.assertEquals(s.uniquePathsWithObstacles(grid), 2)
grid = [
[1,0,0],
[0,1,0],
[0,0,0]
]
self.assertEquals(s.uniquePathsWithObstacles(grid), 0)
grid = [
[0,0,1],
[0,0,0]
]
self.assertEquals(s.uniquePathsWithObstacles(grid), 2)
grid = [
[0,0,0],
[0,0,1]
]
self.assertEquals(s.uniquePathsWithObstacles(grid), 0)
grid = [
[0,0],
[1,1],
[0,0]
]
self.assertEquals(s.uniquePathsWithObstacles(grid), 0)
grid = [[0]]
self.assertEquals(s.uniquePathsWithObstacles(grid), 1)
if __name__ == '__main__':
unittest.main()
| 19.890909
| 69
| 0.490859
|
e57bd668fbe1495c0c1f3aa8a7bcba0147ee624e
| 43
|
py
|
Python
|
idiovec/__init__.py
|
bgutter/idiovec
|
65fc2f4803b022d1a4f2dc63c761f39b138bb484
|
[
"MIT"
] | 3
|
2019-08-06T22:50:29.000Z
|
2022-02-07T02:11:31.000Z
|
idiovec/__init__.py
|
bgutter/idiovec
|
65fc2f4803b022d1a4f2dc63c761f39b138bb484
|
[
"MIT"
] | null | null | null |
idiovec/__init__.py
|
bgutter/idiovec
|
65fc2f4803b022d1a4f2dc63c761f39b138bb484
|
[
"MIT"
] | null | null | null |
from .idiovec import *
from . import cruft
| 14.333333
| 22
| 0.744186
|
4881f8db3f6b2565da5731ee49a5465bbaffecbc
| 875
|
py
|
Python
|
sentiment.py
|
MSJawad/Sentiment-Analysis
|
f39edb71051eeb4d781ece967aac5cea5de141ea
|
[
"MIT"
] | null | null | null |
sentiment.py
|
MSJawad/Sentiment-Analysis
|
f39edb71051eeb4d781ece967aac5cea5de141ea
|
[
"MIT"
] | null | null | null |
sentiment.py
|
MSJawad/Sentiment-Analysis
|
f39edb71051eeb4d781ece967aac5cea5de141ea
|
[
"MIT"
] | null | null | null |
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.datasets import imdb
train, test , _ = imdb.load_data(path='imdb.pkl',n_words=10000 , valid_portion=0.1)
trainx,trainy= train
testx, testy = test
trainx=pad_sequences(trainx,maxlen=100,value=0.)
testx = pad_sequences(testx, maxlen=100, value=0.)
trainy = to_categorical(trainy, nb_classes=2)
testy = to_categorical(testy, nb_classes=2)
net = tflearn.input_data([None, 100])
net = tflearn.embedding(net, input_dim= 10000, output_dim= 128)
net = tflearn.lstm(net, 128,dropout= 0.8)
net = tflearn.fully_connected(net, 2, activation = 'softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate= 0.0001,loss = 'categorical_crossentropy')
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(trainx, trainy, validation_set=(testx, testy), show_metric=True, batch_size=32)
| 32.407407
| 104
| 0.778286
|
218f927bf1f6289f6a9a547072ab987fbc589ee8
| 1,833
|
py
|
Python
|
U3M1_sc/acme.py
|
chadeowen/DS-Unit-3-Sprint-1-Software-Engineering
|
a6459295debc0eb010f5982c7c7ae24b2c3ea8d3
|
[
"MIT"
] | null | null | null |
U3M1_sc/acme.py
|
chadeowen/DS-Unit-3-Sprint-1-Software-Engineering
|
a6459295debc0eb010f5982c7c7ae24b2c3ea8d3
|
[
"MIT"
] | null | null | null |
U3M1_sc/acme.py
|
chadeowen/DS-Unit-3-Sprint-1-Software-Engineering
|
a6459295debc0eb010f5982c7c7ae24b2c3ea8d3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Python module for Acme Products organization.
"""
import numpy as np
class Product:
"""Example class to model an Acme Product
"""
    def __init__(self, name=None, price=10, weight=20, flammability=0.5, identifier=None):
        self.name = name
        self.price = price
        self.weight = weight
        self.flammability = flammability
        # Default arguments are evaluated once at class-definition time, so draw the
        # random identifier here to give each instance its own value.
        self.identifier = identifier if identifier is not None else np.random.randint(1000000, high=10000000)
def stealability(self):
"""Determines how easily stolen product is given price over weight."""
if (self.price / self.weight) < 0.5:
print("Not so stealable...")
elif (self.price / self.weight) < 1:
print("Kinda stealable.")
else:
print("Very stealable!")
def explode(self):
"""Determines how explodable product is given flammability times weight."""
if (self.flammability * self.weight) < 10:
print("...fizzle.")
elif (self.flammability * self.weight) < 50:
print("...boom!")
else:
print("...BABOOM!!")
class BoxingGlove(Product):
"""Class to represent a boxing glove."""
    def __init__(self, name=None, price=10, weight=10, flammability=0.5, identifier=None):
        # Reuse Product's initialiser; only the default weight differs.
        super().__init__(name=name, price=price, weight=weight,
                         flammability=flammability, identifier=identifier)
def explode(self):
"""If boxing glove override explode statement."""
print("...it's a glove.")
def punch(self):
"""Determines punch effect given glove weight."""
if self.weight < 5:
print("That tickles.")
elif self.weight < 15:
print("Hey that hurt!")
else:
print("OUCH!")
| 30.55
| 127
| 0.590835
|
7ee06a5ebdb3d01d540568fa97d6683152dce109
| 6,814
|
py
|
Python
|
kaztron/runner.py
|
Laogeodritt/KazTron
|
42f35e520875b458ffde7c2729865c95de606aca
|
[
"MIT"
] | 6
|
2018-07-04T20:41:01.000Z
|
2021-09-08T08:10:34.000Z
|
kaztron/runner.py
|
Laogeodritt/KazTron
|
42f35e520875b458ffde7c2729865c95de606aca
|
[
"MIT"
] | 259
|
2018-05-01T22:41:32.000Z
|
2022-02-08T23:25:00.000Z
|
kaztron/runner.py
|
Laogeodritt/KazTron
|
42f35e520875b458ffde7c2729865c95de606aca
|
[
"MIT"
] | 6
|
2019-04-16T22:13:15.000Z
|
2021-12-15T08:06:38.000Z
|
import asyncio
import logging
import random
import sys
import time
from discord.ext import commands
import kaztron
from kaztron import KazCog
from kaztron.config import get_kaztron_config, KaztronConfig, get_runtime_config
from kaztron.discord_patches import apply_patches
from kaztron.help_formatter import CoreHelpParser, DiscordHelpFormatter
from kaztron.scheduler import Scheduler
logger = logging.getLogger("kaztron.bootstrap")
class ErrorCodes:
OK = 0
ERROR = 1
DAEMON_RUNNING = 4
DAEMON_NOT_RUNNING = 5
EXTENSION_LOAD = 7
RETRY_MAX_ATTEMPTS = 8
CFG_FILE = 17
def run(loop: asyncio.AbstractEventLoop):
"""
Run the bot once.
"""
config = get_kaztron_config()
state = get_runtime_config()
kaztron.KazCog.static_init(config, state)
# custom help formatters
kaz_help_parser = CoreHelpParser({
'name': config.core.get('name')
})
# create bot instance (+ some custom hacks)
client = commands.Bot(
command_prefix='.',
formatter=DiscordHelpFormatter(kaz_help_parser, show_check_failure=True),
        description='This is an automated bot for the /r/worldbuilding discord server',
pm_help=True)
apply_patches(client)
# KazTron-specific extension classes
client.scheduler = Scheduler(client)
client.kaz_help_parser = kaz_help_parser
# Load core extension (core + rolemanager)
client.load_extension("kaztron.core")
# Load extensions
startup_extensions = config.get("core", "extensions")
for extension in startup_extensions:
logger.debug("Loading extension: {}".format(extension))
# noinspection PyBroadException
try:
client.load_extension("kaztron.cog." + extension)
except Exception:
logger.exception('Failed to load extension {}'.format(extension))
sys.exit(ErrorCodes.EXTENSION_LOAD)
# noinspection PyBroadException
try:
loop.run_until_complete(client.login(config.get("discord", "token")))
loop.run_until_complete(client.connect())
except KeyboardInterrupt:
logger.info("Interrupted by user")
logger.debug("Waiting for client to close...")
loop.run_until_complete(client.close())
logger.info("Client closed.")
sys.exit(ErrorCodes.OK)
except Exception:
logger.exception("Uncaught exception during bot execution")
logger.debug("Waiting for client to close...")
loop.run_until_complete(client.close())
logger.info("Client closed.")
# Let the external retry reboot the bot - attempt recovery from errors
# sys.exit(ErrorCodes.ERROR)
return
finally:
logger.debug("Cancelling pending tasks...")
# BEGIN CONTRIB
# Modified from code from discord.py.
#
# Source: https://github.com/Rapptz/discord.py/blob/
# 09bd2f4de7cccbd5d33f61e5257e1d4dc96b5caa/discord/client.py#L517
#
# Original code Copyright (c) 2015-2016 Rapptz. MIT licence.
pending = asyncio.Task.all_tasks(loop=loop)
gathered = asyncio.gather(*pending, loop=loop, return_exceptions=True)
# noinspection PyBroadException
try:
gathered.cancel()
loop.run_until_complete(gathered)
gathered.exception()
except Exception:
pass
# END CONTRIB
KazCog.state.write()
def run_reboot_loop(loop: asyncio.AbstractEventLoop):
"""
    Run the bot, and re-run it if it fails or disconnects. The bot still stops if an error
    bubbles up outside the event loop, if KeyboardInterrupt is raised (Ctrl+C/SIGINT),
    or if sys.exit() is called.
"""
def reset_backoff(backoff: Backoff, sequence):
if sequence == backoff.n: # don't do it if we had a retry in the meantime
backoff.reset()
logger.info("Welcome to KazTron v{}, booting up...".format(kaztron.__version__))
# noinspection PyBroadException
try:
bo_timer = Backoff(initial_time=3.0, base=1.58, max_attempts=12)
wait_time = 0
while True:
reset_task = loop.call_later(wait_time, reset_backoff, bo_timer, bo_timer.n)
run(loop)
logger.error("Bot halted unexpectedly.")
reset_task.cancel()
wait_time = bo_timer.next()
logger.info("Restarting bot in {:.1f} seconds...".format(wait_time))
time.sleep(wait_time)
logger.info("Restarting bot...")
except StopIteration:
logger.error("Too many failed attempts. Exiting.")
sys.exit(ErrorCodes.RETRY_MAX_ATTEMPTS)
except KeyboardInterrupt: # outside of runner.run
logger.info("Interrupted by user. Exiting.")
except Exception:
logger.exception("Exception in reboot loop.")
raise
finally:
logger.info("Exiting.")
loop.close()
def get_daemon_context(config: KaztronConfig):
import os
import pwd
import grp
from pathlib import Path
# noinspection PyPackageRequirements
from daemon import DaemonContext, pidfile
bot_dir = Path(sys.modules['__main__'].__file__).resolve().parent
pid = pidfile.TimeoutPIDLockFile(config.get('core', 'daemon_pidfile'))
daemon_log = open(config.get('core', 'daemon_log'), 'w+')
daemon_context = DaemonContext(
working_directory=str(bot_dir),
umask=0o002,
pidfile=pid,
stdout=daemon_log,
stderr=daemon_log
)
username = config.get('core', 'daemon_user', None)
group = config.get('core', 'daemon_group', None)
if username:
pw = pwd.getpwnam(username)
daemon_context.uid = pw.pw_uid
daemon_context.gid = pw.pw_gid
os.environ['HOME'] = pw.pw_dir
if group:
daemon_context.gid = grp.getgrnam(group).gr_gid
return daemon_context
class Backoff:
"""
Exponential backoff driver. Doubles retry time every failure.
:param initial_time: Retry time after first failure.
:param base: Exponential base. Default 2.0.
:param max_attempts: Maximum number of attempts before giving up.
"""
def __init__(self, initial_time=1.0, base=2.0, max_attempts=8):
self.t0 = initial_time
self.max = max_attempts
self.base = base
self.n = 0
self.reset()
def next(self):
""" Return the next wait time in seconds. Raises a RuntimeError if max attempts exceeded."""
if self.n < self.max:
tn = self.t0 * (self.base ** self.n) + (random.randint(0, 1000) / 1000)
self.n += 1
return tn
else:
raise StopIteration("Maximum attempts exceeded")
def reset(self):
""" Reset the number of attempts. """
self.n = 0
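# Note (not part of the original module): with the constructor defaults
# (initial_time=1.0, base=2.0, max_attempts=8), successive next() calls wait roughly
# 1s, 2s, 4s, ... plus up to 1s of random jitter, and StopIteration is raised once
# max_attempts draws have been made.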
| 33.239024
| 100
| 0.656149
|
b894f4bfe00469063bd424980cd52c20a87d16f5
| 1,036
|
py
|
Python
|
Talla_Global_Home/migrations/0001_initial.py
|
denis254/talla
|
c776d5365db6d82b7ae14c8995d601c364594481
|
[
"BSD-3-Clause"
] | null | null | null |
Talla_Global_Home/migrations/0001_initial.py
|
denis254/talla
|
c776d5365db6d82b7ae14c8995d601c364594481
|
[
"BSD-3-Clause"
] | 7
|
2020-06-05T18:14:27.000Z
|
2022-03-11T23:21:13.000Z
|
Talla_Global_Home/migrations/0001_initial.py
|
denis254/talla
|
c776d5365db6d82b7ae14c8995d601c364594481
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 2.0.3 on 2018-05-26 03:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Point_Balance', models.CharField(default=0.0, max_length=30)),
('Cash_Balance', models.CharField(default=0.0, max_length=30)),
('User_Type', models.CharField(choices=[('Free', 'Free'), ('Starter', 'Starter'), ('Business', 'Business'), ('Professional', 'Professional')], default='Free', max_length=100)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 37
| 192
| 0.63417
|
dee9ecff5e77c5965268624f9f03e2a29d285f2f
| 1,067
|
py
|
Python
|
sa-fastapi/otp-maker-atom/scripted-atom/scripts/redis_client.py
|
piximos/scripted-atoms
|
6156803e2512bec11ed0403121b9e7f9bf019d99
|
[
"MIT"
] | 2
|
2020-12-21T20:49:26.000Z
|
2021-12-23T16:04:48.000Z
|
sa-fastapi/otp-maker-atom/scripted-atom/scripts/redis_client.py
|
piximos/scripted-atoms
|
6156803e2512bec11ed0403121b9e7f9bf019d99
|
[
"MIT"
] | null | null | null |
sa-fastapi/otp-maker-atom/scripted-atom/scripts/redis_client.py
|
piximos/scripted-atoms
|
6156803e2512bec11ed0403121b9e7f9bf019d99
|
[
"MIT"
] | null | null | null |
import os
import redis
from .opt_generator import OPTGenerator
class RedisClient:
def __init__(self):
self.host = os.getenv('SA_REDIS_HOST')
self.port = os.getenv('SA_REDIS_PORT')
self.password = os.getenv('SA_REDIS_PASSWORD')
self.db = os.getenv('SA_REDIS_DB')
self.ttl = os.getenv('SA_OPT_TTL')
self.prefix = os.getenv('SA_REDIS_KEY_PREFIX')
self.opt_generator = OPTGenerator()
self.redis_client = redis.Redis(host=self.host, port=self.port, db=self.db, password=self.password,
decode_responses=True)
def save_opt(self, user_id: str, ttl: int = None):
opt = self.opt_generator.generate_code()
self.redis_client.set(self.rc_key(user_id), opt)
        self.redis_client.expire(self.rc_key(user_id), ttl if ttl is not None else int(self.ttl))
return opt
def get_opt(self, user_id: str):
return self.redis_client.get(self.rc_key(user_id))
def rc_key(self, user_id: str):
return "{}-{}".format(self.prefix, user_id)
| 34.419355
| 107
| 0.645736
|
2bbc2a910d5a57d0b1885f030c6ccf2bd0da7525
| 5,325
|
py
|
Python
|
maskrcnn_benchmark/modeling/rpn/retinanet/retinanet.py
|
amsword/maskrcnn-benchmark
|
660457d5f28c5d7d7887829486a20c60976b1dd8
|
[
"MIT"
] | 2
|
2020-08-18T05:14:58.000Z
|
2020-08-20T05:13:36.000Z
|
maskrcnn_benchmark/modeling/rpn/retinanet/retinanet.py
|
jacobswan1/maskrcnn-benchmark
|
660457d5f28c5d7d7887829486a20c60976b1dd8
|
[
"MIT"
] | null | null | null |
maskrcnn_benchmark/modeling/rpn/retinanet/retinanet.py
|
jacobswan1/maskrcnn-benchmark
|
660457d5f28c5d7d7887829486a20c60976b1dd8
|
[
"MIT"
] | 1
|
2020-08-18T05:15:08.000Z
|
2020-08-18T05:15:08.000Z
|
import math
import torch
import torch.nn.functional as F
from torch import nn
from .inference import make_retinanet_postprocessor
from .loss import make_retinanet_loss_evaluator
from ..anchor_generator import make_anchor_generator_retinanet
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
class RetinaNetHead(torch.nn.Module):
"""
    Adds a RetinaNet head with classification and regression branches
"""
def __init__(self, cfg, in_channels):
"""
Arguments:
in_channels (int): number of channels of the input feature
num_anchors (int): number of anchors to be predicted
"""
super(RetinaNetHead, self).__init__()
# TODO: Implement the sigmoid version first.
num_classes = cfg.MODEL.RETINANET.NUM_CLASSES - 1
num_anchors = len(cfg.MODEL.RETINANET.ASPECT_RATIOS) \
* cfg.MODEL.RETINANET.SCALES_PER_OCTAVE
cls_tower = []
bbox_tower = []
for i in range(cfg.MODEL.RETINANET.NUM_CONVS):
cls_tower.append(
nn.Conv2d(
in_channels,
in_channels,
kernel_size=3,
stride=1,
padding=1
)
)
cls_tower.append(nn.ReLU(inplace=True))
bbox_tower.append(
nn.Conv2d(
in_channels,
in_channels,
kernel_size=3,
stride=1,
padding=1
)
)
bbox_tower.append(nn.ReLU(inplace=True))
self.add_module('cls_tower', nn.Sequential(*cls_tower))
self.add_module('bbox_tower', nn.Sequential(*bbox_tower))
self.cls_logits = nn.Conv2d(
in_channels, num_anchors * num_classes, kernel_size=3, stride=1,
padding=1
)
self.bbox_pred = nn.Conv2d(
in_channels, num_anchors * 4, kernel_size=3, stride=1,
padding=1
)
# Initialization
for modules in [self.cls_tower, self.bbox_tower, self.cls_logits,
self.bbox_pred]:
for l in modules.modules():
if isinstance(l, nn.Conv2d):
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
# retinanet_bias_init
prior_prob = cfg.MODEL.RETINANET.PRIOR_PROB
bias_value = -math.log((1 - prior_prob) / prior_prob)
torch.nn.init.constant_(self.cls_logits.bias, bias_value)
def forward(self, x):
logits = []
bbox_reg = []
for feature in x:
logits.append(self.cls_logits(self.cls_tower(feature)))
bbox_reg.append(self.bbox_pred(self.bbox_tower(feature)))
return logits, bbox_reg
class RetinaNetModule(torch.nn.Module):
"""
    Module for RetinaNet computation. Takes feature maps from the backbone and
    computes RetinaNet outputs and losses. Only tested on FPN for now.
"""
def __init__(self, cfg, in_channels):
super(RetinaNetModule, self).__init__()
self.cfg = cfg.clone()
anchor_generator = make_anchor_generator_retinanet(cfg)
head = RetinaNetHead(cfg, in_channels)
box_coder = BoxCoder(weights=(10., 10., 5., 5.))
box_selector_test = make_retinanet_postprocessor(cfg, box_coder, is_train=False)
loss_evaluator = make_retinanet_loss_evaluator(cfg, box_coder)
self.anchor_generator = anchor_generator
self.head = head
self.box_selector_test = box_selector_test
self.loss_evaluator = loss_evaluator
def forward(self, images, features, targets=None):
"""
Arguments:
images (ImageList): images for which we want to compute the predictions
features (list[Tensor]): features computed from the images that are
used for computing the predictions. Each tensor in the list
                corresponds to a different feature level
            targets (list[BoxList]): ground-truth boxes present in the image (optional)
Returns:
boxes (list[BoxList]): the predicted boxes from the RPN, one BoxList per
image.
losses (dict[Tensor]): the losses for the model during training. During
testing, it is an empty dict.
"""
box_cls, box_regression = self.head(features)
anchors = self.anchor_generator(images, features)
if self.training:
return self._forward_train(anchors, box_cls, box_regression, targets)
else:
return self._forward_test(anchors, box_cls, box_regression)
def _forward_train(self, anchors, box_cls, box_regression, targets):
loss_box_cls, loss_box_reg = self.loss_evaluator(
anchors, box_cls, box_regression, targets
)
losses = {
"loss_retina_cls": loss_box_cls,
"loss_retina_reg": loss_box_reg,
}
return anchors, losses
def _forward_test(self, anchors, box_cls, box_regression):
boxes = self.box_selector_test(anchors, box_cls, box_regression)
return boxes, {}
def build_retinanet(cfg, in_channels):
return RetinaNetModule(cfg, in_channels)
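# A small, self-contained sketch (plain torch, no config object needed) of the
# per-level output shapes produced by the head above: for a feature map of
# shape [N, C, H, W], cls_logits has num_anchors * num_classes channels and
# bbox_pred has num_anchors * 4 channels, while H and W are preserved by the
# 3x3 / stride 1 / padding 1 convolutions. The sizes below are illustrative.
if __name__ == "__main__":
    _num_anchors, _num_classes, _channels = 9, 80, 256
    _cls = nn.Conv2d(_channels, _num_anchors * _num_classes, kernel_size=3, stride=1, padding=1)
    _reg = nn.Conv2d(_channels, _num_anchors * 4, kernel_size=3, stride=1, padding=1)
    _feat = torch.randn(2, _channels, 32, 32)    # one FPN level
    print(_cls(_feat).shape)                     # torch.Size([2, 720, 32, 32])
    print(_reg(_feat).shape)                     # torch.Size([2, 36, 32, 32])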
| 34.803922
| 88
| 0.611455
|
533af6574f1473e2150ec1af1a13494bab3f0bbf
| 2,052
|
py
|
Python
|
src/models/grid_search.py
|
ds-praveenkumar/m5-accuracy-prediction
|
20255adc95c3e0fe6c6acec9fd16ac88c6e95908
|
[
"MIT"
] | null | null | null |
src/models/grid_search.py
|
ds-praveenkumar/m5-accuracy-prediction
|
20255adc95c3e0fe6c6acec9fd16ac88c6e95908
|
[
"MIT"
] | null | null | null |
src/models/grid_search.py
|
ds-praveenkumar/m5-accuracy-prediction
|
20255adc95c3e0fe6c6acec9fd16ac88c6e95908
|
[
"MIT"
] | null | null | null |
# github link: https://github.com/ds-praveenkumar/kaggle
# Author: ds-praveenkumar
# file: forcasting/grid_search.py/
# Created by ds-praveenkumar at 14-06-2020 10 52
# feature:
from sklearn.model_selection import ParameterGrid
from fbprophet import Prophet
import pandas as pd
import numpy as np
from fbprophet.diagnostics import cross_validation,performance_metrics
from multiprocessing import Pool
import os
import time
params_grid = {
'growth' : ('linear','logistic'),
'changepoint_prior_scale' : [0.1,0.15,0.3,0.5,0.7],
'seasonality_mode':('multiplicative','additive'),
'yearly_seasonality':[10,20,30]
}
def set_parms():
grid = ParameterGrid(params_grid)
print([p for p in grid])
return grid
def search_params(grid):
df = pd.read_csv('H:\\forcasting\\data\\training\\11306.csv')
df['y'] = np.log1p(df.y.astype(float) + 1)
df['cap'] = df.y.max()
df['floor'] = df.y.min()
print(df)
metric_dict = dict()
for g in grid:
model = Prophet(**g)
model.add_country_holidays('US')
model.fit(df)
df_cv = cross_validation(model,initial='300 days',period='30 days',horizon='10 days')
#print(df_cv)
df_p = performance_metrics(df_cv)
print('*'* 50)
print('grid: ', g)
print('rmse: ', df_p.rmse.mean())
print('*' * 50)
metric_dict[str(g)]=df_p.rmse.mean()
return metric_dict
def main():
grid = set_parms()
start = time.time()
# gs_res = search_params(grid)
# for el in gs_res:
# for key in el.keys():
# print(f"{key}:{el[key]}", end='\n')
# print('end time(mins):', float((time.time() -start)/60),end='\n')
pool = Pool(processes=os.cpu_count() )
result = pool.map(search_params, [grid] )
for el in result:
for key in el.keys():
print(f"{key}:{el[key]}", end='\n')
print('end time(mins):', float((time.time() - start) / 60), end='\n')
if __name__ == '__main__':
main()
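# A quick illustration (commented out so it does not run alongside the grid
# search above) of what ParameterGrid does with params_grid: it yields the
# cross product of all value lists, i.e. 2 * 5 * 2 * 3 = 60 parameter
# combinations, each a plain dict that can be splatted into Prophet(**g) as
# done in search_params().
#
# combos = list(ParameterGrid(params_grid))
# print(len(combos))   # 60
# print(combos[0])     # e.g. {'changepoint_prior_scale': 0.1, 'growth': 'linear', ...}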
| 26.649351
| 93
| 0.601365
|
a75cf39252c7332e09692c7a1f23e05dd24e6dd0
| 2,355
|
py
|
Python
|
app/database.py
|
Gichia/questioner-v2
|
b93ffdc521e364c191b770bf1bcb93964e7fa1f3
|
[
"MIT"
] | null | null | null |
app/database.py
|
Gichia/questioner-v2
|
b93ffdc521e364c191b770bf1bcb93964e7fa1f3
|
[
"MIT"
] | 6
|
2019-01-22T17:35:28.000Z
|
2022-01-13T01:01:48.000Z
|
app/database.py
|
Gichia/questioner-v2
|
b93ffdc521e364c191b770bf1bcb93964e7fa1f3
|
[
"MIT"
] | null | null | null |
"""Queries to initialize app database"""
def db_tables():
"""Queries to create app tables"""
tbl1 = """CREATE TABLE IF NOT EXISTS meetups (
meetup_id serial PRIMARY KEY NOT NULL,
created_by INT NOT NULL,
location CHAR(50) NOT NULL,
topic CHAR(50) NOT NULL,
createdOn CHAR(50) NOT NULL,
images CHAR(50) NULL,
tags CHAR(150) NULL
)"""
tbl2 = """CREATE TABLE IF NOT EXISTS questions (
question_id serial PRIMARY KEY NOT NULL,
meetup_id INT NOT NULL,
user_id INT NOT NULL,
createdOn CHAR(150) NOT NULL,
title CHAR(100) NOT NULL,
body CHAR(150) NOT NULL
)"""
tbl3 = """CREATE TABLE IF NOT EXISTS comments (
comment_id serial PRIMARY KEY NOT NULL,
question_id INT NOT NULL,
user_id INT NOT NULL,
createdOn CHAR(100) NOT NULL,
comment CHAR(150) NOT NULL
)"""
tbl4 = """CREATE TABLE IF NOT EXISTS rsvp (
rsvp_id serial PRIMARY KEY NOT NULL,
meetup_id INT NOT NULL,
user_id INT NOT NULL,
response CHAR(10) NOT NULL,
createdOn CHAR(100) NOT NULL
)"""
tbl5 = """CREATE TABLE IF NOT EXISTS users (
user_id serial PRIMARY KEY NOT NULL,
firstname CHAR(40) NOT NULL,
lastname CHAR(45) NOT NULL,
email CHAR(45) NOT NULL,
created_on CHAR(50) NOT NULL,
is_admin BOOLEAN NOT NULL DEFAULT FALSE,
password CHAR(150) NOT NULL,
username CHAR(20) NULL,
phoneNumber INT NULL
)"""
tbl6 = """CREATE TABLE IF NOT EXISTS votes (
vote_id serial PRIMARY KEY NOT NULL,
user_id INT NOT NULL,
question_id INT NOT NULL,
createdOn CHAR(100) NOT NULL,
is_like BOOLEAN NOT NULL
)"""
tables = [tbl1, tbl2, tbl3, tbl4, tbl5, tbl6]
return tables
def drop_tables():
"""Function to drop all tables after tests"""
tbl1 = """DROP TABLE IF EXISTS users CASCADE"""
tbl2 = """DROP TABLE IF EXISTS meetups CASCADE"""
tbl3 = """DROP TABLE IF EXISTS questions CASCADE"""
tbl4 = """DROP TABLE IF EXISTS comments CASCADE"""
tbl5 = """DROP TABLE IF EXISTS rsvp CASCADE"""
tbl6 = """DROP TABLE IF EXISTS votes CASCADE"""
tables = [tbl1, tbl2, tbl3, tbl4, tbl5, tbl6]
return tables
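# A minimal sketch of how these query lists might be executed; it assumes
# psycopg2 is installed and a DATABASE_URL environment variable points at a
# reachable PostgreSQL instance (both are assumptions, not provided here).
def create_all_tables():
    """Run every CREATE TABLE statement returned by db_tables()."""
    import os
    import psycopg2
    conn = psycopg2.connect(os.getenv('DATABASE_URL'))
    cur = conn.cursor()
    for query in db_tables():
        cur.execute(query)
    conn.commit()
    cur.close()
    conn.close()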
| 31.824324
| 55
| 0.600425
|
77da2c0934bedbc1b1e455c6fc615c01791cba46
| 1,252
|
py
|
Python
|
pbsmrtpipe/external_tools.py
|
PacificBiosciences/pbsmrtpipe
|
4d532c823d3a46b82c2eb20b9d46e63544c8ba83
|
[
"BSD-3-Clause"
] | 26
|
2015-08-06T02:09:51.000Z
|
2021-02-26T02:25:01.000Z
|
pbsmrtpipe/external_tools.py
|
mpkocher/pbsmrtpipe
|
4d532c823d3a46b82c2eb20b9d46e63544c8ba83
|
[
"BSD-3-Clause"
] | 181
|
2015-08-07T18:13:02.000Z
|
2021-04-13T16:24:32.000Z
|
pbsmrtpipe/external_tools.py
|
PacificBiosciences/pbsmrtpipe
|
4d532c823d3a46b82c2eb20b9d46e63544c8ba83
|
[
"BSD-3-Clause"
] | 29
|
2015-08-07T17:29:42.000Z
|
2021-09-15T18:22:37.000Z
|
"""External/subprocess calls.
The dot-to-image conversion should be done with pygraphviz if the dependency
isn't too difficult to install consistently.
"""
import os
import logging
import functools
from pbsmrtpipe.exceptions import RequiredExeNotFoundError
from pbsmrtpipe.engine import backticks
from pbcommand.utils import which
log = logging.getLogger(__name__)
_SUPPORTED_IMAGE_TYPES = 'png svg eps'.split()
DOT_EXE = 'dot'
def _dot_to_image(image_type, dot_file, image_file):
assert image_type.lower() in _SUPPORTED_IMAGE_TYPES
if not os.path.exists(dot_file):
raise IOError("Unable to find {f}".format(f=dot_file))
if which(DOT_EXE) is None:
raise RequiredExeNotFoundError("Unable to find required external exe '{x}'".format(x=DOT_EXE))
cmd_str = "{e} -T{t} {i} -o {o}"
d = dict(e=DOT_EXE, t=image_type, i=dot_file, o=image_file)
cmd = cmd_str.format(**d)
rcode, stdout, stderr, run_time = backticks(cmd)
    state = rcode == 0
return state
# For backward compatibility
dot_to_image = _dot_to_image
dot_file_to_png = functools.partial(_dot_to_image, 'png')
dot_file_to_svg = functools.partial(_dot_to_image, 'svg')
dot_file_to_eps = functools.partial(_dot_to_image, 'eps')
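# A short usage sketch for the partials above (kept as comments because the
# input file is illustrative): "pipeline.dot" / "pipeline.png" are placeholder
# names, and the call assumes Graphviz's `dot` binary is on PATH, otherwise
# RequiredExeNotFoundError is raised.
#
# ok = dot_file_to_png("pipeline.dot", "pipeline.png")
# if not ok:
#     log.error("dot -> png conversion failed")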
| 29.116279
| 102
| 0.744409
|
62b652d67bb8183829d4af0af24ede2be62f0dff
| 853
|
py
|
Python
|
env/Lib/site-packages/OpenGL/GL/SGIX/ycrcb_subsample.py
|
5gconnectedbike/Navio2
|
8c3f2b5d8bbbcea1fc08739945183c12b206712c
|
[
"BSD-3-Clause"
] | 210
|
2016-04-09T14:26:00.000Z
|
2022-03-25T18:36:19.000Z
|
env/Lib/site-packages/OpenGL/GL/SGIX/ycrcb_subsample.py
|
5gconnectedbike/Navio2
|
8c3f2b5d8bbbcea1fc08739945183c12b206712c
|
[
"BSD-3-Clause"
] | 72
|
2016-09-04T09:30:19.000Z
|
2022-03-27T17:06:53.000Z
|
env/Lib/site-packages/OpenGL/GL/SGIX/ycrcb_subsample.py
|
5gconnectedbike/Navio2
|
8c3f2b5d8bbbcea1fc08739945183c12b206712c
|
[
"BSD-3-Clause"
] | 64
|
2016-04-09T14:26:49.000Z
|
2022-03-21T11:19:47.000Z
|
'''OpenGL extension SGIX.ycrcb_subsample
This module customises the behaviour of the
OpenGL.raw.GL.SGIX.ycrcb_subsample to provide a more
Python-friendly API
Overview (from the spec)
(Need to construct a real extension spec based on this)
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/SGIX/ycrcb_subsample.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.SGIX.ycrcb_subsample import *
from OpenGL.raw.GL.SGIX.ycrcb_subsample import _EXTENSION_NAME
def glInitYcrcbSubsampleSGIX():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
| 31.592593
| 71
| 0.801876
|
0d72b52ab743a3dc3ef9f5bee0b49e31afa62fbc
| 1,284
|
py
|
Python
|
thesis/utils.py
|
alki22/thesis
|
079649cee05c1f6aa6b533ef4a41e6658b29b324
|
[
"MIT"
] | null | null | null |
thesis/utils.py
|
alki22/thesis
|
079649cee05c1f6aa6b533ef4a41e6658b29b324
|
[
"MIT"
] | null | null | null |
thesis/utils.py
|
alki22/thesis
|
079649cee05c1f6aa6b533ef4a41e6658b29b324
|
[
"MIT"
] | 1
|
2018-09-13T15:44:25.000Z
|
2018-09-13T15:44:25.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import fnmatch
import os
import numpy as np
def find(path, pattern):
"""
Implementation of unix `find`
:param path: Path to traverse
:param pattern: File pattern to look for
:return: Generator traversing the path yielding files matching the pattern
"""
for root, _, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, pattern):
yield os.path.join(root, filename)
def try_number(item):
try:
return int(item)
except ValueError:
pass
try:
return float(item)
except ValueError:
return item
def cumulative_index_split(target, splits=3, min_count=2):
    # Ensure there are at least 'min_count' items per class in the first split
split_begin = np.concatenate([np.where(target == label)[0][:min_count] for label in np.unique(target)])
    mask = np.ones_like(target, dtype=bool)
mask[split_begin] = False
index_accumulator = []
# Yield each split appended to the previous ones
for spi in np.array_split(np.concatenate((split_begin, np.arange(target.shape[0])[mask])), splits):
index_accumulator.extend(spi)
yield index_accumulator
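# A small worked example for cumulative_index_split(); the target array below
# is illustrative. With two classes of four samples each, min_count=2 and
# splits=2, the first yielded index list already holds two samples of each
# class and the second covers the whole array.
if __name__ == "__main__":
    demo_target = np.array([0, 0, 0, 0, 1, 1, 1, 1])
    for split_indices in cumulative_index_split(demo_target, splits=2, min_count=2):
        print([int(i) for i in split_indices])
    # -> [0, 1, 4, 5]
    # -> [0, 1, 4, 5, 2, 3, 6, 7]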
| 27.913043
| 107
| 0.683801
|
55d8d2b0ecf4b92346df0ca7cd637cf123e721b9
| 7,049
|
py
|
Python
|
indico/modules/rb/notifications/reservations.py
|
CrownedSilverFox/conference-platform
|
1858a2908763dc7e4c29d3157369e9aab6064933
|
[
"MIT"
] | 1
|
2021-02-24T10:20:14.000Z
|
2021-02-24T10:20:14.000Z
|
indico/modules/rb/notifications/reservations.py
|
CrownedSilverFox/conference-platform
|
1858a2908763dc7e4c29d3157369e9aab6064933
|
[
"MIT"
] | 2
|
2021-05-26T09:16:52.000Z
|
2021-05-26T09:28:59.000Z
|
indico/modules/rb/notifications/reservations.py
|
andrea-guarino-sonarsource/indico
|
2dada1f293daea913dec85ebb33c29a9f2cb92ac
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask import render_template
from sqlalchemy import and_
from sqlalchemy.orm import load_only
from indico.core.notifications import email_sender, make_email
from indico.modules.rb.settings import RoomEmailMode, rb_user_settings
from indico.modules.users import User, UserSetting
from indico.util.date_time import format_datetime
from indico.web.flask.templating import get_template_module
def get_manager_emails(room):
emails = set(room.notification_emails)
if (rb_user_settings.get(room.owner, 'email_mode') in (RoomEmailMode.owned, RoomEmailMode.all)
and room not in rb_user_settings.get(room.owner, 'email_blacklist')):
emails.add(room.owner.email)
# skip people who don't want manager emails
email_mode_filter = and_(
UserSetting.name == 'email_mode',
UserSetting.value[()].astext.in_([RoomEmailMode.none.name, RoomEmailMode.owned.name])
)
# skip people who don't want emails for the room
room_blacklist_filter = and_(
UserSetting.name == 'email_blacklist',
UserSetting.value.contains(str(room.id))
)
query = (User.query
.join(UserSetting)
.options(load_only('id'))
.filter(UserSetting.module == 'roombooking',
email_mode_filter | room_blacklist_filter))
disabled_emails = {u.email for u in query}
emails |= room.get_manager_emails() - disabled_emails
return emails
class ReservationNotification:
def __init__(self, reservation):
self.reservation = reservation
self.start_dt = format_datetime(reservation.start_dt)
def _get_email_subject(self, **mail_params):
return '{prefix}[{room}] {subject} ({date}) {suffix}'.format(
prefix=mail_params.get('subject_prefix', ''),
room=self.reservation.room.full_name,
subject=mail_params.get('subject', ''),
date=self.start_dt,
suffix=mail_params.get('subject_suffix', '')
).strip()
def _make_body(self, mail_params, **body_params):
from indico.modules.rb.models.reservations import RepeatFrequency, RepeatMapping
template_params = dict(mail_params, **body_params)
template_params['RepeatFrequency'] = RepeatFrequency
template_params['RepeatMapping'] = RepeatMapping
return render_template('rb/emails/reservations/{}.txt'.format(mail_params['template_name']), **template_params)
def compose_email_to_user(self, **mail_params):
creator = self.reservation.created_by_user
to_list = {creator.email}
if self.reservation.contact_email:
to_list.add(self.reservation.contact_email)
subject = self._get_email_subject(**mail_params)
body = self._make_body(mail_params, reservation=self.reservation)
return make_email(to_list=to_list, subject=subject, body=body)
def compose_email_to_manager(self, **mail_params):
room = self.reservation.room
subject = self._get_email_subject(**mail_params)
body = self._make_body(mail_params, reservation=self.reservation)
return make_email(to_list=get_manager_emails(room), subject=subject, body=body)
@email_sender
def notify_reset_approval(reservation):
notification = ReservationNotification(reservation)
return [_f for _f in [
notification.compose_email_to_user(
subject='Booking approval changed state on',
template_name='change_state_email_to_user'
),
notification.compose_email_to_manager(
subject='Booking approval changed state on',
template_name='change_state_email_to_manager'
)
] if _f]
@email_sender
def notify_cancellation(reservation):
if not reservation.is_cancelled:
raise ValueError('Reservation is not cancelled')
notification = ReservationNotification(reservation)
return [_f for _f in [
notification.compose_email_to_user(
subject='Booking cancelled',
template_name='cancellation_email_to_user'
),
notification.compose_email_to_manager(
subject='Booking cancelled',
template_name='cancellation_email_to_manager'
),
] if _f]
@email_sender
def notify_confirmation(reservation, reason=None):
if not reservation.is_accepted:
raise ValueError('Reservation is not confirmed')
notification = ReservationNotification(reservation)
return [_f for _f in [
notification.compose_email_to_user(
subject='Booking confirmed',
template_name='confirmation_email_to_user',
reason=reason
),
notification.compose_email_to_manager(
subject='Booking confirmed',
template_name='confirmation_email_to_manager',
reason=reason
),
] if _f]
@email_sender
def notify_creation(reservation):
notification = ReservationNotification(reservation)
return [_f for _f in [
notification.compose_email_to_user(
subject='New Booking' if reservation.is_accepted else 'Pre-Booking awaiting acceptance',
template_name='creation_email_to_user' if reservation.is_accepted else 'creation_pre_email_to_user'
),
notification.compose_email_to_manager(
subject='New booking on' if reservation.is_accepted else 'New Pre-Booking on',
template_name='creation_email_to_manager' if reservation.is_accepted else 'creation_pre_email_to_manager'
),
] if _f]
@email_sender
def notify_rejection(reservation):
if not reservation.is_rejected:
raise ValueError('Reservation is not rejected')
notification = ReservationNotification(reservation)
return [_f for _f in [
notification.compose_email_to_user(
subject='Booking rejected',
template_name='rejection_email_to_user',
),
notification.compose_email_to_manager(
subject='Booking rejected',
template_name='rejection_email_to_manager',
)
] if _f]
@email_sender
def notify_modification(reservation, changes):
notification = ReservationNotification(reservation)
return [_f for _f in [
notification.compose_email_to_user(
subject='Booking modified',
template_name='modification_email_to_user'
),
notification.compose_email_to_manager(
subject='Booking modified',
template_name='modification_email_to_manager'
),
] if _f]
@email_sender
def notify_about_finishing_bookings(user, reservations):
tpl = get_template_module('rb/emails/reservations/reminders/finishing_bookings.html',
reservations=reservations, user=user)
return make_email(to_list={user.email}, template=tpl, html=True)
| 38.309783
| 119
| 0.695276
|
c6c0e74050dd072a3797b3c582d8a32b9a41c7f3
| 15,378
|
py
|
Python
|
tests/components/zha/test_cover.py
|
inishchith/core
|
90892d275c259088ed302bdaa8838303a6ef4094
|
[
"Apache-2.0"
] | 6
|
2016-11-25T06:36:27.000Z
|
2021-11-16T11:20:23.000Z
|
tests/components/zha/test_cover.py
|
SicAriuSx83/core
|
162c39258e68ae42fe4e1560ae91ed54f5662409
|
[
"Apache-2.0"
] | 45
|
2020-10-15T06:47:06.000Z
|
2022-03-31T06:26:16.000Z
|
tests/components/zha/test_cover.py
|
SicAriuSx83/core
|
162c39258e68ae42fe4e1560ae91ed54f5662409
|
[
"Apache-2.0"
] | 2
|
2020-05-11T00:38:26.000Z
|
2021-01-15T13:23:44.000Z
|
"""Test zha cover."""
import asyncio
import pytest
import zigpy.types
import zigpy.zcl.clusters.closures as closures
import zigpy.zcl.clusters.general as general
import zigpy.zcl.foundation as zcl_f
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION,
DOMAIN,
SERVICE_CLOSE_COVER,
SERVICE_OPEN_COVER,
SERVICE_SET_COVER_POSITION,
SERVICE_STOP_COVER,
)
from homeassistant.const import (
ATTR_COMMAND,
STATE_CLOSED,
STATE_OPEN,
STATE_UNAVAILABLE,
)
from homeassistant.core import CoreState, State
from .common import (
async_enable_traffic,
async_test_rejoin,
find_entity_id,
make_zcl_header,
send_attributes_report,
)
from tests.async_mock import AsyncMock, MagicMock, call, patch
from tests.common import async_capture_events, mock_coro, mock_restore_cache
@pytest.fixture
def zigpy_cover_device(zigpy_device_mock):
"""Zigpy cover device."""
endpoints = {
1: {
"device_type": 1026,
"in_clusters": [closures.WindowCovering.cluster_id],
"out_clusters": [],
}
}
return zigpy_device_mock(endpoints)
@pytest.fixture
def zigpy_cover_remote(zigpy_device_mock):
"""Zigpy cover remote device."""
endpoints = {
1: {
"device_type": 0x0203,
"in_clusters": [],
"out_clusters": [closures.WindowCovering.cluster_id],
}
}
return zigpy_device_mock(endpoints)
@pytest.fixture
def zigpy_shade_device(zigpy_device_mock):
"""Zigpy shade device."""
endpoints = {
1: {
"device_type": 512,
"in_clusters": [
closures.Shade.cluster_id,
general.LevelControl.cluster_id,
general.OnOff.cluster_id,
],
"out_clusters": [],
}
}
return zigpy_device_mock(endpoints)
@pytest.fixture
def zigpy_keen_vent(zigpy_device_mock):
"""Zigpy Keen Vent device."""
endpoints = {
1: {
"device_type": 3,
"in_clusters": [general.LevelControl.cluster_id, general.OnOff.cluster_id],
"out_clusters": [],
}
}
return zigpy_device_mock(
endpoints, manufacturer="Keen Home Inc", model="SV02-612-MP-1.3"
)
@patch(
"homeassistant.components.zha.core.channels.closures.WindowCovering.async_initialize"
)
async def test_cover(m1, hass, zha_device_joined_restored, zigpy_cover_device):
"""Test zha cover platform."""
async def get_chan_attr(*args, **kwargs):
return 100
with patch(
"homeassistant.components.zha.core.channels.base.ZigbeeChannel.get_attribute_value",
new=MagicMock(side_effect=get_chan_attr),
) as get_attr_mock:
# load up cover domain
zha_device = await zha_device_joined_restored(zigpy_cover_device)
assert get_attr_mock.call_count == 2
assert get_attr_mock.call_args[0][0] == "current_position_lift_percentage"
cluster = zigpy_cover_device.endpoints.get(1).window_covering
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
assert entity_id is not None
await async_enable_traffic(hass, [zha_device], enabled=False)
# test that the cover was created and that it is unavailable
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and device
await async_enable_traffic(hass, [zha_device])
await hass.async_block_till_done()
# test that the state has changed from unavailable to off
await send_attributes_report(hass, cluster, {0: 0, 8: 100, 1: 1})
assert hass.states.get(entity_id).state == STATE_CLOSED
# test to see if it opens
await send_attributes_report(hass, cluster, {0: 1, 8: 0, 1: 100})
assert hass.states.get(entity_id).state == STATE_OPEN
# close from UI
with patch(
"zigpy.zcl.Cluster.request", return_value=mock_coro([0x1, zcl_f.Status.SUCCESS])
):
await hass.services.async_call(
DOMAIN, SERVICE_CLOSE_COVER, {"entity_id": entity_id}, blocking=True
)
assert cluster.request.call_count == 1
assert cluster.request.call_args == call(
False, 0x1, (), expect_reply=True, manufacturer=None, tsn=None
)
# open from UI
with patch(
"zigpy.zcl.Cluster.request", return_value=mock_coro([0x0, zcl_f.Status.SUCCESS])
):
await hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER, {"entity_id": entity_id}, blocking=True
)
assert cluster.request.call_count == 1
assert cluster.request.call_args == call(
False, 0x0, (), expect_reply=True, manufacturer=None, tsn=None
)
# set position UI
with patch(
"zigpy.zcl.Cluster.request", return_value=mock_coro([0x5, zcl_f.Status.SUCCESS])
):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_COVER_POSITION,
{"entity_id": entity_id, "position": 47},
blocking=True,
)
assert cluster.request.call_count == 1
assert cluster.request.call_args == call(
False,
0x5,
(zigpy.types.uint8_t,),
53,
expect_reply=True,
manufacturer=None,
tsn=None,
)
# stop from UI
with patch(
"zigpy.zcl.Cluster.request", return_value=mock_coro([0x2, zcl_f.Status.SUCCESS])
):
await hass.services.async_call(
DOMAIN, SERVICE_STOP_COVER, {"entity_id": entity_id}, blocking=True
)
assert cluster.request.call_count == 1
assert cluster.request.call_args == call(
False, 0x2, (), expect_reply=True, manufacturer=None, tsn=None
)
# test rejoin
await async_test_rejoin(hass, zigpy_cover_device, [cluster], (1,))
assert hass.states.get(entity_id).state == STATE_OPEN
async def test_shade(hass, zha_device_joined_restored, zigpy_shade_device):
"""Test zha cover platform for shade device type."""
# load up cover domain
zha_device = await zha_device_joined_restored(zigpy_shade_device)
cluster_on_off = zigpy_shade_device.endpoints.get(1).on_off
cluster_level = zigpy_shade_device.endpoints.get(1).level
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
assert entity_id is not None
await async_enable_traffic(hass, [zha_device], enabled=False)
# test that the cover was created and that it is unavailable
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and device
await async_enable_traffic(hass, [zha_device])
await hass.async_block_till_done()
# test that the state has changed from unavailable to off
await send_attributes_report(hass, cluster_on_off, {8: 0, 0: False, 1: 1})
assert hass.states.get(entity_id).state == STATE_CLOSED
# test to see if it opens
await send_attributes_report(hass, cluster_on_off, {8: 0, 0: True, 1: 1})
assert hass.states.get(entity_id).state == STATE_OPEN
# close from UI command fails
with patch("zigpy.zcl.Cluster.request", side_effect=asyncio.TimeoutError):
await hass.services.async_call(
DOMAIN, SERVICE_CLOSE_COVER, {"entity_id": entity_id}, blocking=True
)
assert cluster_on_off.request.call_count == 1
assert cluster_on_off.request.call_args[0][0] is False
assert cluster_on_off.request.call_args[0][1] == 0x0000
assert hass.states.get(entity_id).state == STATE_OPEN
with patch(
"zigpy.zcl.Cluster.request", AsyncMock(return_value=[0x1, zcl_f.Status.SUCCESS])
):
await hass.services.async_call(
DOMAIN, SERVICE_CLOSE_COVER, {"entity_id": entity_id}, blocking=True
)
assert cluster_on_off.request.call_count == 1
assert cluster_on_off.request.call_args[0][0] is False
assert cluster_on_off.request.call_args[0][1] == 0x0000
assert hass.states.get(entity_id).state == STATE_CLOSED
# open from UI command fails
assert ATTR_CURRENT_POSITION not in hass.states.get(entity_id).attributes
await send_attributes_report(hass, cluster_level, {0: 0})
with patch("zigpy.zcl.Cluster.request", side_effect=asyncio.TimeoutError):
await hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER, {"entity_id": entity_id}, blocking=True
)
assert cluster_on_off.request.call_count == 1
assert cluster_on_off.request.call_args[0][0] is False
assert cluster_on_off.request.call_args[0][1] == 0x0001
assert hass.states.get(entity_id).state == STATE_CLOSED
# open from UI succeeds
with patch(
"zigpy.zcl.Cluster.request", AsyncMock(return_value=[0x0, zcl_f.Status.SUCCESS])
):
await hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER, {"entity_id": entity_id}, blocking=True
)
assert cluster_on_off.request.call_count == 1
assert cluster_on_off.request.call_args[0][0] is False
assert cluster_on_off.request.call_args[0][1] == 0x0001
assert hass.states.get(entity_id).state == STATE_OPEN
# set position UI command fails
with patch("zigpy.zcl.Cluster.request", side_effect=asyncio.TimeoutError):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_COVER_POSITION,
{"entity_id": entity_id, "position": 47},
blocking=True,
)
assert cluster_level.request.call_count == 1
assert cluster_level.request.call_args[0][0] is False
assert cluster_level.request.call_args[0][1] == 0x0004
assert int(cluster_level.request.call_args[0][3] * 100 / 255) == 47
assert hass.states.get(entity_id).attributes[ATTR_CURRENT_POSITION] == 0
# set position UI success
with patch(
"zigpy.zcl.Cluster.request", AsyncMock(return_value=[0x5, zcl_f.Status.SUCCESS])
):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_COVER_POSITION,
{"entity_id": entity_id, "position": 47},
blocking=True,
)
assert cluster_level.request.call_count == 1
assert cluster_level.request.call_args[0][0] is False
assert cluster_level.request.call_args[0][1] == 0x0004
assert int(cluster_level.request.call_args[0][3] * 100 / 255) == 47
assert hass.states.get(entity_id).attributes[ATTR_CURRENT_POSITION] == 47
# report position change
await send_attributes_report(hass, cluster_level, {8: 0, 0: 100, 1: 1})
assert hass.states.get(entity_id).attributes[ATTR_CURRENT_POSITION] == int(
100 * 100 / 255
)
# test rejoin
await async_test_rejoin(
hass, zigpy_shade_device, [cluster_level, cluster_on_off], (1,)
)
assert hass.states.get(entity_id).state == STATE_OPEN
# test cover stop
with patch("zigpy.zcl.Cluster.request", side_effect=asyncio.TimeoutError):
await hass.services.async_call(
DOMAIN,
SERVICE_STOP_COVER,
{"entity_id": entity_id},
blocking=True,
)
assert cluster_level.request.call_count == 1
assert cluster_level.request.call_args[0][0] is False
assert cluster_level.request.call_args[0][1] in (0x0003, 0x0007)
async def test_restore_state(hass, zha_device_restored, zigpy_shade_device):
"""Ensure states are restored on startup."""
mock_restore_cache(
hass,
(
State(
"cover.fakemanufacturer_fakemodel_e769900a_level_on_off_shade",
STATE_OPEN,
{ATTR_CURRENT_POSITION: 50},
),
),
)
hass.state = CoreState.starting
zha_device = await zha_device_restored(zigpy_shade_device)
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
assert entity_id is not None
    # test that the cover was created and that its state was restored
assert hass.states.get(entity_id).state == STATE_OPEN
assert hass.states.get(entity_id).attributes[ATTR_CURRENT_POSITION] == 50
async def test_keen_vent(hass, zha_device_joined_restored, zigpy_keen_vent):
"""Test keen vent."""
# load up cover domain
zha_device = await zha_device_joined_restored(zigpy_keen_vent)
cluster_on_off = zigpy_keen_vent.endpoints.get(1).on_off
cluster_level = zigpy_keen_vent.endpoints.get(1).level
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
assert entity_id is not None
await async_enable_traffic(hass, [zha_device], enabled=False)
# test that the cover was created and that it is unavailable
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and device
await async_enable_traffic(hass, [zha_device])
await hass.async_block_till_done()
# test that the state has changed from unavailable to off
await send_attributes_report(hass, cluster_on_off, {8: 0, 0: False, 1: 1})
assert hass.states.get(entity_id).state == STATE_CLOSED
# open from UI command fails
p1 = patch.object(cluster_on_off, "request", side_effect=asyncio.TimeoutError)
p2 = patch.object(cluster_level, "request", AsyncMock(return_value=[4, 0]))
with p1, p2:
await hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER, {"entity_id": entity_id}, blocking=True
)
assert cluster_on_off.request.call_count == 1
assert cluster_on_off.request.call_args[0][0] is False
assert cluster_on_off.request.call_args[0][1] == 0x0001
assert cluster_level.request.call_count == 1
assert hass.states.get(entity_id).state == STATE_CLOSED
# open from UI command success
p1 = patch.object(cluster_on_off, "request", AsyncMock(return_value=[1, 0]))
p2 = patch.object(cluster_level, "request", AsyncMock(return_value=[4, 0]))
with p1, p2:
await hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER, {"entity_id": entity_id}, blocking=True
)
await asyncio.sleep(0)
assert cluster_on_off.request.call_count == 1
assert cluster_on_off.request.call_args[0][0] is False
assert cluster_on_off.request.call_args[0][1] == 0x0001
assert cluster_level.request.call_count == 1
assert hass.states.get(entity_id).state == STATE_OPEN
assert hass.states.get(entity_id).attributes[ATTR_CURRENT_POSITION] == 100
async def test_cover_remote(hass, zha_device_joined_restored, zigpy_cover_remote):
"""Test zha cover remote."""
# load up cover domain
await zha_device_joined_restored(zigpy_cover_remote)
cluster = zigpy_cover_remote.endpoints[1].out_clusters[
closures.WindowCovering.cluster_id
]
zha_events = async_capture_events(hass, "zha_event")
# up command
hdr = make_zcl_header(0, global_command=False)
cluster.handle_message(hdr, [])
await hass.async_block_till_done()
assert len(zha_events) == 1
assert zha_events[0].data[ATTR_COMMAND] == "up_open"
# down command
hdr = make_zcl_header(1, global_command=False)
cluster.handle_message(hdr, [])
await hass.async_block_till_done()
assert len(zha_events) == 2
assert zha_events[1].data[ATTR_COMMAND] == "down_close"
| 35.597222
| 92
| 0.675575
|
3857956735279527f5c07243347fe853ed794a93
| 592
|
py
|
Python
|
plot_mesh.py
|
JoeyValentine/gen-centerlines
|
fff03ee762eb1a852185ceb507e3850e1a47778c
|
[
"MIT"
] | 1
|
2021-04-27T07:26:44.000Z
|
2021-04-27T07:26:44.000Z
|
plot_mesh.py
|
JoeyValentine/gen-centerlines
|
fff03ee762eb1a852185ceb507e3850e1a47778c
|
[
"MIT"
] | null | null | null |
plot_mesh.py
|
JoeyValentine/gen-centerlines
|
fff03ee762eb1a852185ceb507e3850e1a47778c
|
[
"MIT"
] | null | null | null |
import pyvista as pv
def confirm():
if len(plotter.picked_path.points) == 2:
plotter.close()
if __name__ == '__main__':
vti_file_name = 'level_sets.vti'
data = pv.read(vti_file_name)
vol = data.threshold_percent(30, invert=1)
surf = vol.extract_geometry()
smooth_surf = surf.smooth(n_iter=1000)
plotter = pv.Plotter()
plotter.add_mesh(smooth_surf, style='wireframe', color='black')
plotter.add_key_event('a', confirm)
plotter.enable_path_picking(color='red')
plotter.show()
points = plotter.picked_path.points
print(points)
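    # A possible follow-up, illustrative only: persist the two picked end points
    # so a downstream centerline computation can reuse them (the file name is a
    # placeholder).
    # import numpy as np
    # np.savetxt('picked_endpoints.txt', np.asarray(points))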
| 22.769231
| 67
| 0.680743
|
c781139ec27f68515871d3ac0d70dc8358389d47
| 1,254
|
py
|
Python
|
examples/flatten/test_flatten.py
|
mdawar/pretf
|
f11f8d646965e27dd2ade182fda3e8ce55d13804
|
[
"MIT"
] | null | null | null |
examples/flatten/test_flatten.py
|
mdawar/pretf
|
f11f8d646965e27dd2ade182fda3e8ce55d13804
|
[
"MIT"
] | null | null | null |
examples/flatten/test_flatten.py
|
mdawar/pretf
|
f11f8d646965e27dd2ade182fda3e8ce55d13804
|
[
"MIT"
] | null | null | null |
from unittest.mock import ANY
import pytest
from pretf import test
class TestFlatten(test.SimpleTest):
@pytest.mark.parametrize(
"stack,env", [
("iam", "dev"),
("iam", "prod"),
("vpc", "dev"),
("vpc", "prod"),
("vpc-peering", "prod"),
],
)
def test_init(self, stack, env):
self.pretf(f"stacks/{stack}/{env}").init()
@pytest.mark.parametrize(
"stack,env,expected", [
("iam", "dev", {"user_name": "pretf-flatten-dev"}),
("iam", "prod", {"user_name": "pretf-flatten-prod"}),
("vpc", "dev", {"vpc_id": ANY}),
("vpc", "prod", {"vpc_id": ANY}),
("vpc-peering", "prod", {"status": "active"}),
],
)
def test_apply(self, stack, env, expected):
outputs = self.pretf(f"stacks/{stack}/{env}").apply()
assert outputs == expected
@test.always
@pytest.mark.parametrize(
"stack,env", [
("vpc-peering", "prod"),
("vpc", "prod"),
("vpc", "dev"),
("iam", "prod"),
("iam", "dev"),
],
)
def test_destroy(self, stack, env):
self.pretf(f"stacks/{stack}/{env}").destroy()
| 27.26087
| 65
| 0.470494
|
6002911e9b792b6f6d1666dc593d83a7300ce995
| 799
|
py
|
Python
|
isiscb/openurl/migrations/0002_auto_20160323_1323.py
|
bgopalachary/IsisCB
|
c28e3f504eea60ebeff38318d8bb2071abb28ebb
|
[
"MIT"
] | 4
|
2016-01-25T20:35:33.000Z
|
2020-04-07T15:39:52.000Z
|
isiscb/openurl/migrations/0002_auto_20160323_1323.py
|
bgopalachary/IsisCB
|
c28e3f504eea60ebeff38318d8bb2071abb28ebb
|
[
"MIT"
] | 41
|
2015-08-19T17:34:41.000Z
|
2022-03-11T23:19:01.000Z
|
isiscb/openurl/migrations/0002_auto_20160323_1323.py
|
bgopalachary/IsisCB
|
c28e3f504eea60ebeff38318d8bb2071abb28ebb
|
[
"MIT"
] | 2
|
2020-11-25T20:18:18.000Z
|
2021-06-24T15:15:41.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('openurl', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='resolver',
name='link_icon',
field=models.URLField(help_text=b'Location of an image that will be rendered as a link next to search results.', max_length=1000, null=True, blank=True),
),
migrations.AddField(
model_name='resolver',
name='link_text',
field=models.CharField(help_text=b'Text that will be rendered as a link next to search results if ``link_icon`` is not available.', max_length=1000, null=True, blank=True),
),
]
| 31.96
| 184
| 0.634543
|
d26475bf2844c3a80e87566dd696ba220512d9a5
| 13,498
|
py
|
Python
|
sym_api_client_python/clients/admin_client.py
|
3tilley/symphony-api-client-python
|
8a743c27bcb2bba3fb22493c5494a19c78961ff4
|
[
"MIT"
] | null | null | null |
sym_api_client_python/clients/admin_client.py
|
3tilley/symphony-api-client-python
|
8a743c27bcb2bba3fb22493c5494a19c78961ff4
|
[
"MIT"
] | null | null | null |
sym_api_client_python/clients/admin_client.py
|
3tilley/symphony-api-client-python
|
8a743c27bcb2bba3fb22493c5494a19c78961ff4
|
[
"MIT"
] | null | null | null |
import requests
import json
import logging
import base64
from .api_client import APIClient
from ..exceptions.UnauthorizedException import UnauthorizedException
from requests_toolbelt.multipart.encoder import MultipartEncoder
# child class of APIClient --> Extends error handling functionality
# AdminClient class contains a series of functions corresponding to all
# pod admin endpoints on the REST API.
class AdminClient(APIClient):
def __init__(self, bot_client):
self.bot_client = bot_client
def admin_get_user(self, user_id):
"""
Returns details for a particular user.
Calling this endpoint requires the
ACCESS_USER_PROVISIONING_API and ACCESS_ADMIN_API privileges.
"""
logging.debug('AdminClient/admin_get_user()')
url = '/pod/v2/admin/user/{0}'.format(user_id)
return self.bot_client.execute_rest_call("GET", url)
def admin_list_users(self, skip=0, limit=50):
"""
Returns a list of users ID, including user metadata
Calling this endpoint requires the ACCESS_USER_PROVISIONING_API privilege.
"""
logging.debug('AdminClient/admin_list_users()')
url = '/pod/v2/admin/user/list'
params = {'skip': skip, 'limit': limit}
headers = {
'cache-control': 'no-cache'
}
return self.bot_client.execute_rest_call("GET", url, params=params, headers=headers)
def admin_create_user(self, user_attributes):
"""
Creates a new user, either End-User or Service User.
--End-User Accounts are assigned to employees.
To create an end user account, the accountType field must be NORMAL.
-- Service User Accounts are a type of account used for bots or applications,
rather than end-users. To create a service user account,
the accountType field must be SYSTEM.
Calling this endpoint requires the
ACCESS_USER_PROVISIONING_API and ACCESS_ADMIN_API privileges.
TODO: Add `Password` object generation
"""
logging.debug('AdminClient/admin_list_users()')
url = '/pod/v2/admin/user/create'
return self.bot_client.execute_rest_call("POST", url, json=user_attributes)
def admin_update_user(self, user_id, updated_user_attributes):
"""
Updates an existing user.
BODY PARAMS
emailAddress
The user's email address.
firstName
The user's first name.
lastName
The user's last name.
userName
The user's name. The username must be unique and must not match any existing usernames or app IDs.
displayName
The user's display name.
companyName
The name of the user's company.
department
The user's department.
division
The user's division.
title
The user's title.
twoFactorAuthPhone
The user's two factor authentication mobile phone number.
workPhoneNumber
The user's work phone number.
mobilePhoneNumber
The user's mobile number.
smsNumber
The user's SMS number.
accountType
The user's account type. Possible values: NORMAL, SYSTEM.
location
The user's location.
jobFunction
The user's job function. Possible values: Project Manager, Trader, Sales, Strategist, Business Development Executive, Corporate Access, Analyst, Other, Research Analyst, Developer, Economist, Portfolio Manager, Director.
assetClasses
The user's asset classes. Possible values: Fixed Income, Currencies, Commodities, Equities.
industries
The user's industries. Possible values: Conglomerates, Healthcare, Transportation, Services, Energy & Utilities, Real Estate, Consumer Cyclicals, Financials, Basic Materials, Consumer Non-Cyclicals, Technology.
currentKey
A UserKeyRequest Object object containing the current RSA key information to use for authenticating the user. When updating the user, this can be set (rotated) to a new key, in which case the existing key is stored in the previousKey and can be used for authentication during the current session. For more information see RSA Bot Authentication Workflow.
previousKey
A UserKeyRequest Object object containing the RSA key information from the previous key in a key rotation scenario. The previous key can be used when the currentKey has been updated during the current session. The previousKey is valid for 72 hours. For more information see RSA Bot Authentication Workflow.
Calling this endpoint requires the ACCESS_USER_PROVISIONING_API and ACCESS_ADMIN_API privileges.
"""
logging.debug('AdminClient/admin_update_user()')
url = '/pod/v2/admin/user/{0}/update'.format(user_id)
return self.bot_client.execute_rest_call("POST", url, json=updated_user_attributes)
def admin_get_user_avatar(self, user_id):
"""
Returns the URL of the avatar of a particular user.
Calling this endpoint requires the ACCESS_USER_PROVISIONING_API privilege.
"""
logging.debug('AdminClient/admin_get_user_avatar')
url = '/pod/v1/admin/user/{0}/avatar'.format(user_id)
return self.bot_client.execute_rest_call("GET", url)
def admin_update_avatar(self, user_id, image_encoded_string):
"""
Updates the avatar of a particular user. file_path to base64 encoded image
TODO: base64 encode the image in the file_path
Calling this endpoint requires the User Provisioning role with ACCESS_USER_PROVISIONING_API privilege.
"""
logging.debug('AdminClient/admin_update_avatar')
url = '/pod/v1/admin/user/{0}/avatar/update'.format(user_id)
#base64encode the image file
data = {'image': image_encoded_string}
return self.bot_client.execute_rest_call("POST", url, json=data)
def admin_get_user_status(self, user_id):
"""
Get the status, active or inactive, for a particular user.
Calling this endpoint requires the User Provisioning role with ACCESS_USER_PROVISIONING_API privilege.
"""
logging.debug('AdminClient/admin_get_user_status()')
url = '/pod/v1/admin/user/{0}/status'.format(user_id)
return self.bot_client.execute_rest_call("GET", url)
def admin_update_user_status(self, user_id, status):
"""
Update the status of a particular user.
`status` can be 'ENABLED' or 'DISABLED'
Calling this endpoint requires the User Provisioning role with ACCESS_USER_PROVISIONING_API privilege.
"""
logging.debug('AdminClient/admin_update_user_status()')
url = '/pod/v1/admin/user/{0}/status/update'.format(user_id)
data = {'status': status}
return self.bot_client.execute_rest_call("POST", url, json=data)
def admin_list_pod_features(self):
"""
Returns the full set of Symphony features available for this pod.
Features entitlements definition:
postReadEnabled: Allows the user to read wall posts.
postWriteEnabled: Allows the user to write wall posts.
delegatesEnabled: Allows the user to have delegates.
isExternalIMEnabled: Allows the user to chat in external IM/MIMs.
canShareFilesExternally: Allows the user to share files externally.
canCreatePublicRoom: Allows the user to create internal public rooms.
canUpdateAvatar: Allows the user to edit profile picture.
isExternalRoomEnabled: Allows the user to chat in private external rooms.
canCreatePushedSignals: Allows the user to create push signals.
canUseCompactMode: Enables Lite Mode.
mustBeRecorded: Must be recorded in meetings.
sendFilesEnabled: Allows the user to send files internally.
canUseInternalAudio: Allows the user to use audio in internal Meetings.
canUseInternalVideo: Allows the user to use video in internal Meetings.
canProjectInternalScreenShare: Allows the user to share screens in internal Meetings.
canViewInternalScreenShare: Allows the user to view shared screens in internal Meetings.
canCreateMultiLateralRoom: Allows the user to create multi-lateral room.
canJoinMultiLateralRoom: Allows the user to join multi-lateral room.
canUseFirehose: Allows the user to use Firehose.
canUseInternalAudioMobile: Allows the user to use audio in internal meetings on mobile.
canUseInternalVideoMobile: Allows the user to use video in internal meetings on mobile.
canProjectInternalScreenShareMobile: Allows the user to share screens in internal meetings on mobile.
canViewInternalScreenShareMobile: Allows the user to view shared screens in internal meetings on mobile.
canManageSignalSubscription: Allows the user to manage signal subscriptions.
Calling this endpoint requires the User Provisioning role with ACCESS_USER_PROVISIONING_API privilege.
"""
logging.debug('AdminClient/admin_list_pod_features()')
url = '/pod/v1/admin/system/features/list'
return self.bot_client.execute_rest_call("GET", url)
def admin_get_user_features(self, user_id):
"""
Returns the list of Symphony feature entitlements for a particular user.
Use the data returned from this endpoint with Find Users to filter users by a specific entitlement.
Calling this endpoint requires the User Provisioning role with ACCESS_USER_PROVISIONING_API privilege.
"""
logging.debug('AdminClient/admin_get_user_features()')
url = '/pod/v1/admin/user/{0}/features'.format(user_id)
return self.bot_client.execute_rest_call("GET", url)
def admin_update_user_features(self, user_id, feature_list):
"""
Updates the feature entitlements for a particular user.
featureList -the features to update. Specified by entitlement name and enabled (true or false).
Calling this endpoint requires the User Provisioning role with ACCESS_USER_PROVISIONING_API privilege.
"""
logging.debug('AdminClient/admin_update_user_features()')
url = '/pod/v1/admin/user/{0}/features/update'.format(user_id)
return self.bot_client.execute_rest_call("POST", url, json=feature_list)
def admin_find_users(self, filters, skip=0, limit=50):
"""
Finds a list of users based on a specified role or feature entitlement.
'filters' is a dictionary that can contain:
'role' -- int64: User role
'feature' -- string: Feature entitlement value
'status' -- string: 'ENABLED' or 'DISABLED'
Calling this endpoint requires the User Provisioning role
with ACCESS_USER_PROVISIONING_API privilege.
"""
logging.debug('AdminClient/admin_find_users()')
url = '/pod/v1/admin/user/find'
params = {'skip': skip, 'limit': limit}
return self.bot_client.execute_rest_call("POST", url, params=params, json=filters)
def admin_list_roles(self):
"""
Returns a list of all roles available in the company (pod).
Calling this endpoint requires the ACCESS_ADMIN_API privilege.
"""
logging.debug('AdminClient/admin_list_roles()')
url = '/pod/v1/admin/system/roles/list'
return self.bot_client.execute_rest_call("GET", url)
def admin_add_role(self, user_id, payload={}):
"""
Add a role or optional entitleable action to a user's account. For example: {"id":"COMPLIANCE_OFFICER.MONITOR_ROOMS"}
Calling this endpoint requires the User Provisioning role with ACCESS_USER_PROVISIONING_API privilege.
"""
logging.debug('AdminClient/admin_add_role()')
url = '/pod/v1/admin/user/{0}/roles/add'.format(user_id)
headers = {
'cache-control': 'no-cache'
}
return self.bot_client.execute_rest_call("POST", url, json=payload, headers=headers)
def admin_remove_role(self, user_id, payload={}):
"""
Remove a role or optional entitleable action to a user's account. For example: {"id":"L2_SUPPORT"}
Calling this endpoint requires the User Provisioning role with ACCESS_USER_PROVISIONING_API privilege.
"""
logging.debug('AdminClient/admin_remove_role()')
url = '/pod/v1/admin/user/{0}/roles/remove'.format(user_id)
headers = {
'cache-control': 'no-cache'
}
return self.bot_client.execute_rest_call("POST", url, json=payload, headers=headers)
def import_message(self, imported_message):
logging.debug('MessageClient/import_message()')
url = '/agent/v4/message/import'
return self.bot_client.execute_rest_call("POST", url, json=imported_message)
# go on admin clients
def suppress_message(self, message_id):
logging.debug('MessageClient/suppress_message()')
url = '/pod/v1/admin/messagesuppression/{0}/suppress'.format(message_id)
return self.bot_client.execute_rest_call("POST", url)
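# A minimal usage sketch; `bot_client` stands for an already-authenticated
# Symphony bot client from this SDK (obtaining one requires pod/agent/RSA
# configuration that is outside the scope of this module), and the user id
# below is a placeholder.
#
# admin_client = AdminClient(bot_client)
# print(admin_client.admin_list_pod_features())
# admin_client.admin_update_user_status(12345, 'DISABLED')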
| 43.401929
| 366
| 0.678989
|
f57aea54785b604ce74282fb60cf6943d6fb0028
| 4,894
|
py
|
Python
|
doc/conf.py
|
ClericPy/trequests
|
51dbf398da9383850bd0688fa3fb4edbe971597c
|
[
"MIT"
] | 27
|
2015-11-15T10:59:27.000Z
|
2021-12-30T14:12:06.000Z
|
doc/conf.py
|
ClericPy/trequests
|
51dbf398da9383850bd0688fa3fb4edbe971597c
|
[
"MIT"
] | 29
|
2015-11-15T12:47:22.000Z
|
2020-03-23T09:28:56.000Z
|
doc/conf.py
|
ClericPy/trequests
|
51dbf398da9383850bd0688fa3fb4edbe971597c
|
[
"MIT"
] | 5
|
2016-02-23T07:38:32.000Z
|
2021-02-25T14:58:00.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import torequests
# -- Project information -----------------------------------------------------
project = 'torequests'
copyright = '2018, Clericpy'
author = 'Clericpy'
# The short X.Y version
version = torequests.__version__
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'torequestsdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'torequests.tex', 'torequests Documentation', 'Clericpy',
'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'torequests', 'torequests Documentation', [author],
1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'torequests', 'torequests Documentation', author, 'torequests',
'One line description of project.', 'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
| 32.410596
| 80
| 0.653453
|
a63b9047e5d8c48354a889880618a61b2487442e
| 1,609
|
py
|
Python
|
aliyun-python-sdk-drds/aliyunsdkdrds/request/v20171016/DeleteDrdsDBRequest.py
|
liuzheng/aliyun-openapi-python-sdk
|
1ba6743f3d6f2cef57ec9e3be1754b04293c3150
|
[
"Apache-2.0"
] | 1
|
2021-03-08T02:59:17.000Z
|
2021-03-08T02:59:17.000Z
|
aliyun-python-sdk-drds/aliyunsdkdrds/request/v20171016/DeleteDrdsDBRequest.py
|
bricklayer-Liu/aliyun-openapi-python-sdk
|
20da2554de22679fc7c5462c483663e4d79512aa
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-drds/aliyunsdkdrds/request/v20171016/DeleteDrdsDBRequest.py
|
bricklayer-Liu/aliyun-openapi-python-sdk
|
20da2554de22679fc7c5462c483663e4d79512aa
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdrds.endpoint import endpoint_data
class DeleteDrdsDBRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Drds', '2017-10-16', 'DeleteDrdsDB','Drds')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_DrdsInstanceId(self):
return self.get_query_params().get('DrdsInstanceId')
def set_DrdsInstanceId(self,DrdsInstanceId):
self.add_query_param('DrdsInstanceId',DrdsInstanceId)
def get_DbName(self):
return self.get_query_params().get('DbName')
def set_DbName(self,DbName):
self.add_query_param('DbName',DbName)
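# A minimal usage sketch, assuming valid AccessKey credentials and an existing
# DRDS instance/database; every identifier below is a placeholder.
#
# from aliyunsdkcore.client import AcsClient
#
# client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
# request = DeleteDrdsDBRequest()
# request.set_DrdsInstanceId('drds-xxxxxxxx')
# request.set_DbName('example_db')
# print(client.do_action_with_exception(request))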
| 36.568182
| 74
| 0.766314
|
5b156aed61569eaff414d29fd7ad6eb71d793dba
| 42
|
py
|
Python
|
trolly/__init__.py
|
lhh/trolly
|
178bfe24871889b35202f2b7eeebef001c9b9c73
|
[
"BSD-2-Clause"
] | 1
|
2021-05-04T20:55:45.000Z
|
2021-05-04T20:55:45.000Z
|
trolly/__init__.py
|
lhh/trolly
|
178bfe24871889b35202f2b7eeebef001c9b9c73
|
[
"BSD-2-Clause"
] | null | null | null |
trolly/__init__.py
|
lhh/trolly
|
178bfe24871889b35202f2b7eeebef001c9b9c73
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python3
__version__ = '0.1.4'
| 10.5
| 21
| 0.642857
|
f254946aa94bd2f8972b9fa880c22c161aad48f5
| 4,035
|
py
|
Python
|
main.py
|
srlee-ai/KoBERT-nsmc
|
9e0b03c5a20d444eea31544724e898aec988cb0f
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
srlee-ai/KoBERT-nsmc
|
9e0b03c5a20d444eea31544724e898aec988cb0f
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
srlee-ai/KoBERT-nsmc
|
9e0b03c5a20d444eea31544724e898aec988cb0f
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import json
from trainer import Trainer
from predict import Predict
from utils import init_logger, load_tokenizer, MODEL_CLASSES, MODEL_PATH_MAP
from data_loader import load_and_cache_examples
def main(args):
init_logger()
tokenizer = load_tokenizer(args)
train_dataset = None if args.do_predict else load_and_cache_examples(args, tokenizer, mode="train")
dev_dataset = None
test_dataset = None if args.do_predict else load_and_cache_examples(args, tokenizer, mode="test")
if args.do_train:
trainer = Trainer(args, train_dataset, dev_dataset, test_dataset)
trainer.train()
if args.do_eval:
trainer = Trainer(args, train_dataset, dev_dataset, test_dataset)
trainer.load_model()
trainer.evaluate("test")
if args.do_predict:
predict = Predict(args, tokenizer)
predict.load_model()
sentences = [args.sentence]
result_json = dict()
result_json['result'] = int(predict.predict(sentences))
print(json.dumps(result_json, ensure_ascii=False))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--task", default="nsmc", type=str, help="The name of the task to train")
parser.add_argument("--model_dir", default="./model", type=str, help="Path to save, load model")
parser.add_argument("--data_dir", default="./data", type=str, help="The input data dir")
parser.add_argument("--train_file", default="ratings_train.txt", type=str, help="Train file")
parser.add_argument("--test_file", default="ratings_test.txt", type=str, help="Test file")
parser.add_argument("--sentence", default="연기는 별로지만 재미 하나는 끝내줌!", type=str, help="predict sentence")
parser.add_argument("--model_type", default="kobert", type=str, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
parser.add_argument("--batch_size", default=32, type=int, help="Batch size for training and evaluation.")
parser.add_argument("--max_seq_len", default=50, type=int, help="The maximum total input sequence length after tokenization.")
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs", default=5.0, type=float, help="Total number of training epochs to perform.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--dropout_rate", default=0.1, type=float, help="Dropout for fully-connected layers")
parser.add_argument('--logging_steps', type=int, default=2000, help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=2000, help="Save checkpoint every X updates steps.")
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the test set.")
parser.add_argument("--do_predict", action="store_true", help="Whether to run predict senctence.")
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
args = parser.parse_args()
args.model_name_or_path = MODEL_PATH_MAP[args.model_type]
main(args)
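# Usage sketch (not part of the original script): a minimal driver, meant to live
# in a separate file, that shells out to the CLI defined above. The flags are
# illustrative and assume the NSMC ratings_*.txt files are present under ./data.
import subprocess

subprocess.run(
    ["python", "main.py", "--model_type", "kobert", "--do_train", "--do_eval"],
    check=True,
)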
| 54.527027
| 150
| 0.717968
|
fbbb5a05155b2e4a9a5ee671d221b14a49af0d67
| 805
|
py
|
Python
|
documentation/scripts/supersampling_figure.py
|
diehlpk/muDIC
|
b5d90aa62267b4bd0b88ae0a989cf09a51990654
|
[
"MIT"
] | 70
|
2019-04-15T08:08:23.000Z
|
2022-03-23T08:24:25.000Z
|
documentation/scripts/supersampling_figure.py
|
diehlpk/muDIC
|
b5d90aa62267b4bd0b88ae0a989cf09a51990654
|
[
"MIT"
] | 34
|
2019-05-03T18:09:43.000Z
|
2022-02-10T11:36:29.000Z
|
documentation/scripts/supersampling_figure.py
|
diehlpk/muDIC
|
b5d90aa62267b4bd0b88ae0a989cf09a51990654
|
[
"MIT"
] | 37
|
2019-04-25T15:39:23.000Z
|
2022-03-28T21:40:24.000Z
|
import numpy as np
import matplotlib.pyplot as plt
from muDIC.phantoms.downsampler import coord_subpos,chess_board
N = 4
# make an empty data set
image = chess_board(1)[:4,:4]
center_coordinate = np.ones((1,1))*1.5
sub_coordinates_i = coord_subpos(center_coordinate,1.0,4,np.arange(4)[:,np.newaxis]*np.ones((4,4)),0)*4.
sub_coordinates_j = coord_subpos(center_coordinate,1.0,4,np.arange(4)[np.newaxis,:]*np.ones((4,4)),0)*4.
plt.plot(sub_coordinates_i.flatten(),sub_coordinates_j.flatten(),'o')
# make a figure + axes
for x in range(N + 1):
plt.axhline(x-0.5, lw=2, color='k', zorder=5)
plt.axvline(x-0.5, lw=2, color='k', zorder=5)
# draw the boxes
plt.imshow(image,alpha=0.7)#, interpolation='none', extent=[0, N, 0, N], zorder=0)
# turn off the axis labels
plt.axis('off')
plt.show()
| 30.961538
| 105
| 0.701863
|
52eacd2c45309a3d2fd5efe0160094d17c9931c1
| 1,114
|
py
|
Python
|
plos_bio_scripts/postprocess_all_midas_data_serial.py
|
zhiru-liu/microbiome_evolution
|
5a08fbf41357d845236e3ff46c31315929d2b649
|
[
"BSD-2-Clause"
] | 2
|
2020-08-09T06:19:11.000Z
|
2021-08-18T17:12:23.000Z
|
plos_bio_scripts/postprocess_all_midas_data_serial.py
|
zhiru-liu/microbiome_evolution
|
5a08fbf41357d845236e3ff46c31315929d2b649
|
[
"BSD-2-Clause"
] | null | null | null |
plos_bio_scripts/postprocess_all_midas_data_serial.py
|
zhiru-liu/microbiome_evolution
|
5a08fbf41357d845236e3ff46c31315929d2b649
|
[
"BSD-2-Clause"
] | 8
|
2019-02-20T22:21:55.000Z
|
2021-02-13T00:55:40.000Z
|
#!/usr/bin/env python
### This script runs the necessary post-processing of the MIDAS output
### across all species in serial.
import os
import sys
import config
if len(sys.argv) > 1:
argument=sys.argv[1]
else:
argument = 'all'
# First calculate core genes for each species
#os.system('python core_gene_utils.py')
# Call postprocess_midas_data.py for each species
os.system('python loop_over_species_wrapper.py %s python postprocess_midas_data.py' % argument)
# Calculate substitution rates for the most prevalent species
#os.system('python calculate_substitution_rates.py')
# Calculate temporal changes for the most prevalent species
#os.system('python calculate_private_snvs.py')
# Calculate temporal changes for the most prevalent species
#os.system('python calculate_temporal_changes.py')
# Calculate linkage disequilibria for the most prevalent species
#os.system('python calculate_singletons.py')
# This one has to go here because we have to estimate clades first...
# Calculate linkage disequilibria for the most prevalent species
#os.system('python calculate_linkage_disequilibria.py')
| 31.828571
| 95
| 0.789048
|
e246f0a79a66246066b8827acd4c3546c7fe1ada
| 1,783
|
py
|
Python
|
pontoon/translate/tests/test_views.py
|
anurzhanova/pontoon
|
b29f238e343ba81b6624d36d69fbb47c60bf4b8c
|
[
"BSD-3-Clause"
] | null | null | null |
pontoon/translate/tests/test_views.py
|
anurzhanova/pontoon
|
b29f238e343ba81b6624d36d69fbb47c60bf4b8c
|
[
"BSD-3-Clause"
] | 4
|
2018-05-25T13:38:07.000Z
|
2021-12-13T20:47:16.000Z
|
pontoon/translate/tests/test_views.py
|
anurzhanova/pontoon
|
b29f238e343ba81b6624d36d69fbb47c60bf4b8c
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import pytest
from django.urls import reverse
from pontoon.translate.views import get_preferred_locale
@pytest.fixture
def user_arabic(user_a):
user_a.profile.custom_homepage = 'ar'
user_a.save()
return user_a
@pytest.mark.django_db
def test_translate_template(client, project_locale_a, resource_a):
url = reverse(
'pontoon.translate',
kwargs={
'locale': project_locale_a.locale.code,
'project': project_locale_a.project.slug,
'resource': 'all-resources',
},
)
response = client.get(url)
assert b'Pontoon' in response.content
@pytest.mark.django_db
def test_translate_validate_parameters(client, project_locale_a, resource_a):
url_invalid = reverse(
'pontoon.translate',
kwargs={
'locale': 'locale',
'project': 'project',
'resource': 'resource',
}
)
url_valid = reverse(
'pontoon.translate',
kwargs={
'locale': project_locale_a.locale.code,
'project': project_locale_a.project.slug,
'resource': 'resource',
}
)
response = client.get(url_invalid)
assert response.status_code == 404
response = client.get(url_valid)
assert response.status_code == 200
@pytest.mark.django_db
def test_get_preferred_locale_from_user_prefs(rf, user_arabic):
# This user has 'ar' set as their favorite locale.
rf.user = user_arabic
locale = get_preferred_locale(rf)
assert locale == 'ar'
@pytest.mark.django_db
def test_get_preferred_locale_default(rf, user_a):
# This user has no preferred locale set.
rf.user = user_a
locale = get_preferred_locale(rf)
assert locale is None
| 23.773333
| 77
| 0.661806
|
d24520457aa51c2cf64b2b3eeb75d64b02d0f80e
| 142
|
py
|
Python
|
tests/test_objects.py
|
holachau23/backend
|
303d2044e117b7ed5a6932e9257212bc6a9f5e86
|
[
"MIT"
] | 1
|
2021-12-03T16:10:27.000Z
|
2021-12-03T16:10:27.000Z
|
tests/test_objects.py
|
holachau23/backend
|
303d2044e117b7ed5a6932e9257212bc6a9f5e86
|
[
"MIT"
] | null | null | null |
tests/test_objects.py
|
holachau23/backend
|
303d2044e117b7ed5a6932e9257212bc6a9f5e86
|
[
"MIT"
] | 5
|
2021-11-15T23:30:05.000Z
|
2021-11-30T13:10:59.000Z
|
import unittest
@unittest.skip("showing class skipping")
class MySkippedTestCase(unittest.TestCase):
def test_not_run(self):
pass
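# Companion sketch (illustrative, not part of the original test module): the other
# common skip/xfail decorators from the standard unittest library, shown alongside
# the class-level skip above. It reuses the unittest import at the top of the file;
# class and test names are made up.
import sys

class ConditionalSkips(unittest.TestCase):
    @unittest.skipIf(sys.platform.startswith("win"), "not supported on Windows")
    def test_posix_only(self):
        self.assertTrue(True)

    @unittest.skipUnless(hasattr(sys, "getrefcount"), "requires CPython")
    def test_cpython_only(self):
        self.assertGreater(sys.getrefcount(None), 0)

    @unittest.expectedFailure
    def test_known_bug(self):
        self.assertEqual(1, 2)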
| 23.666667
| 43
| 0.753521
|
903aa81576e3e4756439b02bf3dc3fac9580fec3
| 29,096
|
py
|
Python
|
api/ccxt-master/python/ccxt/async/kraken.py
|
EdgarSargsyan/post
|
da26b98f4e68df5510fa0603645b1c1c6633f058
|
[
"MIT"
] | 1
|
2021-10-14T09:59:54.000Z
|
2021-10-14T09:59:54.000Z
|
api/ccxt-master/python/ccxt/async/kraken.py
|
EdgarSargsyan/post
|
da26b98f4e68df5510fa0603645b1c1c6633f058
|
[
"MIT"
] | null | null | null |
api/ccxt-master/python/ccxt/async/kraken.py
|
EdgarSargsyan/post
|
da26b98f4e68df5510fa0603645b1c1c6633f058
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
import base64
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import InvalidNonce
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
class kraken (Exchange):
def describe(self):
return self.deep_extend(super(kraken, self).describe(), {
'id': 'kraken',
'name': 'Kraken',
'countries': 'US',
'version': '0',
'rateLimit': 3000,
'hasCORS': False,
# obsolete metainfo interface
'hasFetchTickers': True,
'hasFetchOHLCV': True,
'hasFetchOrder': True,
'hasFetchOpenOrders': True,
'hasFetchClosedOrders': True,
'hasFetchMyTrades': True,
'hasWithdraw': True,
'hasFetchCurrencies': True,
# new metainfo interface
'has': {
'fetchCurrencies': True,
'fetchTickers': True,
'fetchOHLCV': True,
'fetchOrder': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchMyTrades': True,
'withdraw': True,
},
'marketsByAltname': {},
'timeframes': {
'1m': '1',
'5m': '5',
'15m': '15',
'30m': '30',
'1h': '60',
'4h': '240',
'1d': '1440',
'1w': '10080',
'2w': '21600',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766599-22709304-5ede-11e7-9de1-9f33732e1509.jpg',
'api': 'https://api.kraken.com',
'www': 'https://www.kraken.com',
'doc': [
'https://www.kraken.com/en-us/help/api',
'https://github.com/nothingisdead/npm-kraken-api',
],
'fees': [
'https://www.kraken.com/en-us/help/fees',
'https://support.kraken.com/hc/en-us/articles/201396777-What-are-the-deposit-fees-',
'https://support.kraken.com/hc/en-us/articles/201893608-What-are-the-withdrawal-fees-',
],
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': 0.26 / 100,
'maker': 0.16 / 100,
'tiers': {
'taker': [
[0, 0.26 / 100],
[50000, 0.24 / 100],
[100000, 0.22 / 100],
[250000, 0.2 / 100],
[500000, 0.18 / 100],
[1000000, 0.16 / 100],
[2500000, 0.14 / 100],
[5000000, 0.12 / 100],
[10000000, 0.1 / 100],
],
'maker': [
[0, 0.16 / 100],
[50000, 0.14 / 100],
[100000, 0.12 / 100],
[250000, 0.10 / 100],
                            [500000, 0.08 / 100],
                            [1000000, 0.06 / 100],
                            [2500000, 0.04 / 100],
                            [5000000, 0.02 / 100],
[10000000, 0.0 / 100],
],
},
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {
'BTC': 0.001,
'ETH': 0.005,
'XRP': 0.02,
'XLM': 0.00002,
'LTC': 0.02,
'DOGE': 2,
'ZEC': 0.00010,
'ICN': 0.02,
'REP': 0.01,
'ETC': 0.005,
'MLN': 0.003,
'XMR': 0.05,
'DASH': 0.005,
'GNO': 0.01,
'EOS': 0.5,
'BCH': 0.001,
'USD': 5, # if domestic wire
'EUR': 5, # if domestic wire
'CAD': 10, # CAD EFT Withdrawal
'JPY': 300, # if domestic wire
},
'deposit': {
'BTC': 0,
'ETH': 0,
'XRP': 0,
'XLM': 0,
'LTC': 0,
'DOGE': 0,
'ZEC': 0,
'ICN': 0,
'REP': 0,
'ETC': 0,
'MLN': 0,
'XMR': 0,
'DASH': 0,
'GNO': 0,
'EOS': 0,
'BCH': 0,
'USD': 5, # if domestic wire
'EUR': 0, # free deposit if EUR SEPA Deposit
'CAD': 5, # if domestic wire
'JPY': 0, # Domestic Deposit(Free, ¥5,000 deposit minimum)
},
},
},
'api': {
'public': {
'get': [
'Assets',
'AssetPairs',
'Depth',
'OHLC',
'Spread',
'Ticker',
'Time',
'Trades',
],
},
'private': {
'post': [
'AddOrder',
'Balance',
'CancelOrder',
'ClosedOrders',
'DepositAddresses',
'DepositMethods',
'DepositStatus',
'Ledgers',
'OpenOrders',
'OpenPositions',
'QueryLedgers',
'QueryOrders',
'QueryTrades',
'TradeBalance',
'TradesHistory',
'TradeVolume',
'Withdraw',
'WithdrawCancel',
'WithdrawInfo',
'WithdrawStatus',
],
},
},
})
def cost_to_precision(self, symbol, cost):
return self.truncate(float(cost), self.markets[symbol]['precision']['price'])
def fee_to_precision(self, symbol, fee):
return self.truncate(float(fee), self.markets[symbol]['precision']['amount'])
def handle_errors(self, code, reason, url, method, headers, body):
if body.find('Invalid nonce') >= 0:
raise InvalidNonce(self.id + ' ' + body)
if body.find('Insufficient funds') >= 0:
raise InsufficientFunds(self.id + ' ' + body)
if body.find('Cancel pending') >= 0:
raise CancelPending(self.id + ' ' + body)
if body.find('Invalid arguments:volume') >= 0:
raise InvalidOrder(self.id + ' ' + body)
async def fetch_markets(self):
markets = await self.publicGetAssetPairs()
keys = list(markets['result'].keys())
result = []
for i in range(0, len(keys)):
id = keys[i]
market = markets['result'][id]
base = market['base']
quote = market['quote']
if (base[0] == 'X') or (base[0] == 'Z'):
base = base[1:]
if (quote[0] == 'X') or (quote[0] == 'Z'):
quote = quote[1:]
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
darkpool = id.find('.d') >= 0
symbol = market['altname'] if darkpool else(base + '/' + quote)
maker = None
if 'fees_maker' in market:
maker = float(market['fees_maker'][0][1]) / 100
precision = {
'amount': market['lot_decimals'],
'price': market['pair_decimals'],
}
lot = math.pow(10, -precision['amount'])
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'darkpool': darkpool,
'info': market,
'altname': market['altname'],
'maker': maker,
'taker': float(market['fees'][0][1]) / 100,
'lot': lot,
'active': True,
'precision': precision,
'limits': {
'amount': {
'min': lot,
'max': math.pow(10, precision['amount']),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
'cost': {
'min': 0,
'max': None,
},
},
})
result = self.append_inactive_markets(result)
self.marketsByAltname = self.index_by(result, 'altname')
return result
def append_inactive_markets(self, result=[]):
precision = {'amount': 8, 'price': 8}
costLimits = {'min': 0, 'max': None}
priceLimits = {'min': math.pow(10, -precision['price']), 'max': None}
amountLimits = {'min': math.pow(10, -precision['amount']), 'max': math.pow(10, precision['amount'])}
limits = {'amount': amountLimits, 'price': priceLimits, 'cost': costLimits}
defaults = {
'darkpool': False,
'info': None,
'maker': None,
'taker': None,
'lot': amountLimits['min'],
'active': False,
'precision': precision,
'limits': limits,
}
markets = [
{'id': 'XXLMZEUR', 'symbol': 'XLM/EUR', 'base': 'XLM', 'quote': 'EUR', 'altname': 'XLMEUR'},
]
for i in range(0, len(markets)):
result.append(self.extend(defaults, markets[i]))
return result
async def fetch_currencies(self, params={}):
response = await self.publicGetAssets(params)
currencies = response['result']
ids = list(currencies.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
currency = currencies[id]
# todo: will need to rethink the fees
# to add support for multiple withdrawal/deposit methods and
# differentiated fees for each particular method
code = self.common_currency_code(currency['altname'])
precision = currency['decimals']
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': code,
'active': True,
'status': 'ok',
'fee': None,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': None,
'max': math.pow(10, precision),
},
},
}
return result
async def fetch_order_book(self, symbol, params={}):
await self.load_markets()
darkpool = symbol.find('.d') >= 0
if darkpool:
raise ExchangeError(self.id + ' does not provide an order book for darkpool symbol ' + symbol)
market = self.market(symbol)
response = await self.publicGetDepth(self.extend({
'pair': market['id'],
}, params))
orderbook = response['result'][market['id']]
return self.parse_order_book(orderbook)
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
baseVolume = float(ticker['v'][1])
vwap = float(ticker['p'][1])
quoteVolume = baseVolume * vwap
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['h'][1]),
'low': float(ticker['l'][1]),
'bid': float(ticker['b'][0]),
'ask': float(ticker['a'][0]),
'vwap': vwap,
'open': float(ticker['o']),
'close': None,
'first': None,
'last': float(ticker['c'][0]),
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
pairs = []
for s in range(0, len(self.symbols)):
symbol = self.symbols[s]
market = self.markets[symbol]
if market['active']:
if not market['darkpool']:
pairs.append(market['id'])
filter = ','.join(pairs)
response = await self.publicGetTicker(self.extend({
'pair': filter,
}, params))
tickers = response['result']
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
darkpool = symbol.find('.d') >= 0
if darkpool:
raise ExchangeError(self.id + ' does not provide a ticker for darkpool symbol ' + symbol)
market = self.market(symbol)
response = await self.publicGetTicker(self.extend({
'pair': market['id'],
}, params))
ticker = response['result'][market['id']]
return self.parse_ticker(ticker, market)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
ohlcv[0] * 1000,
float(ohlcv[1]),
float(ohlcv[2]),
float(ohlcv[3]),
float(ohlcv[4]),
float(ohlcv[6]),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'interval': self.timeframes[timeframe],
}
if since:
request['since'] = int(since / 1000)
response = await self.publicGetOHLC(self.extend(request, params))
ohlcvs = response['result'][market['id']]
return self.parse_ohlcvs(ohlcvs, market, timeframe, since, limit)
def parse_trade(self, trade, market=None):
timestamp = None
side = None
type = None
price = None
amount = None
id = None
order = None
fee = None
if not market:
market = self.find_market_by_altname_or_id(trade['pair'])
if 'ordertxid' in trade:
order = trade['ordertxid']
id = trade['id']
timestamp = int(trade['time'] * 1000)
side = trade['type']
type = trade['ordertype']
price = float(trade['price'])
amount = float(trade['vol'])
if 'fee' in trade:
currency = None
if market:
currency = market['quote']
fee = {
'cost': float(trade['fee']),
'currency': currency,
}
else:
timestamp = int(trade[2] * 1000)
side = 'sell' if (trade[3] == 's') else 'buy'
type = 'limit' if (trade[4] == 'l') else 'market'
price = float(trade[0])
amount = float(trade[1])
symbol = market['symbol'] if (market) else None
return {
'id': id,
'order': order,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'fee': fee,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
id = market['id']
response = await self.publicGetTrades(self.extend({
'pair': id,
}, params))
trades = response['result'][id]
return self.parse_trades(trades, market, since, limit)
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privatePostBalance()
balances = response['result']
result = {'info': balances}
currencies = list(balances.keys())
for c in range(0, len(currencies)):
currency = currencies[c]
code = currency
# X-ISO4217-A3 standard currency codes
if code[0] == 'X':
code = code[1:]
elif code[0] == 'Z':
code = code[1:]
code = self.common_currency_code(code)
balance = float(balances[currency])
account = {
'free': balance,
'used': 0.0,
'total': balance,
}
result[code] = account
return self.parse_balance(result)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
order = {
'pair': market['id'],
'type': side,
'ordertype': type,
'volume': self.amount_to_precision(symbol, amount),
}
if type == 'limit':
order['price'] = self.price_to_precision(symbol, price)
response = await self.privatePostAddOrder(self.extend(order, params))
length = len(response['result']['txid'])
id = response['result']['txid'] if (length > 1) else response['result']['txid'][0]
return {
'info': response,
'id': id,
}
def find_market_by_altname_or_id(self, id):
result = None
if id in self.marketsByAltname:
result = self.marketsByAltname[id]
elif id in self.markets_by_id:
result = self.markets_by_id[id]
return result
def parse_order(self, order, market=None):
description = order['descr']
side = description['type']
type = description['ordertype']
symbol = None
if not market:
market = self.find_market_by_altname_or_id(description['pair'])
timestamp = int(order['opentm'] * 1000)
amount = float(order['vol'])
filled = float(order['vol_exec'])
remaining = amount - filled
fee = None
cost = self.safe_float(order, 'cost')
price = self.safe_float(description, 'price')
if not price:
price = self.safe_float(order, 'price')
if market:
symbol = market['symbol']
if 'fee' in order:
flags = order['oflags']
feeCost = self.safe_float(order, 'fee')
fee = {
'cost': feeCost,
'rate': None,
}
if flags.find('fciq') >= 0:
fee['currency'] = market['quote']
elif flags.find('fcib') >= 0:
fee['currency'] = market['base']
return {
'id': order['id'],
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'status': order['status'],
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'fee': fee,
# 'trades': self.parse_trades(order['trades'], market),
}
def parse_orders(self, orders, market=None, since=None, limit=None):
result = []
ids = list(orders.keys())
for i in range(0, len(ids)):
id = ids[i]
order = self.extend({'id': id}, orders[id])
result.append(self.parse_order(order, market))
return self.filter_by_since_limit(result, since, limit)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
response = await self.privatePostQueryOrders(self.extend({
'trades': True, # whether or not to include trades in output(optional, default False)
'txid': id, # comma delimited list of transaction ids to query info about(20 maximum)
# 'userref': 'optional', # restrict results to given user reference id(optional)
}, params))
orders = response['result']
order = self.parse_order(self.extend({'id': id}, orders[id]))
return self.extend({'info': response}, order)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'type': 'all', # any position, closed position, closing position, no position
# 'trades': False, # whether or not to include trades related to position in output
# 'start': 1234567890, # starting unix timestamp or trade tx id of results(exclusive)
# 'end': 1234567890, # ending unix timestamp or trade tx id of results(inclusive)
# 'ofs' = result offset
}
if since:
request['start'] = int(since / 1000)
response = await self.privatePostTradesHistory(self.extend(request, params))
trades = response['result']['trades']
ids = list(trades.keys())
for i in range(0, len(ids)):
trades[ids[i]]['id'] = ids[i]
return self.parse_trades(trades, None, since, limit)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
response = None
try:
response = await self.privatePostCancelOrder(self.extend({
'txid': id,
}, params))
except Exception as e:
if self.last_http_response:
if self.last_http_response.find('EOrder:Unknown order') >= 0:
raise OrderNotFound(self.id + ' cancelOrder() error ' + self.last_http_response)
raise e
return response
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
if since:
request['start'] = int(since / 1000)
response = await self.privatePostOpenOrders(self.extend(request, params))
orders = self.parse_orders(response['result']['open'], None, since, limit)
return self.filter_orders_by_symbol(orders, symbol)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
if since:
request['start'] = int(since / 1000)
response = await self.privatePostClosedOrders(self.extend(request, params))
orders = self.parse_orders(response['result']['closed'], None, since, limit)
return self.filter_orders_by_symbol(orders, symbol)
async def fetch_deposit_methods(self, code=None, params={}):
await self.load_markets()
request = {}
if code:
currency = self.currency(code)
request['asset'] = currency['id']
response = await self.privatePostDepositMethods(self.extend(request, params))
return response['result']
async def create_deposit_address(self, currency, params={}):
request = {
'new': 'true',
}
response = await self.fetch_deposit_address(currency, self.extend(request, params))
return {
'currency': currency,
'address': response['address'],
'status': 'ok',
'info': response,
}
async def fetch_deposit_address(self, code, params={}):
method = self.safe_value(params, 'method')
if not method:
raise ExchangeError(self.id + ' fetchDepositAddress() requires an extra `method` parameter')
await self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
'method': method,
'new': 'false',
}
response = await self.privatePostDepositAddresses(self.extend(request, params))
result = response['result']
numResults = len(result)
if numResults < 1:
raise ExchangeError(self.id + ' privatePostDepositAddresses() returned no addresses')
address = self.safe_string(result[0], 'address')
return {
'currency': code,
'address': address,
'status': 'ok',
'info': response,
}
async def withdraw(self, currency, amount, address, params={}):
if 'key' in params:
await self.load_markets()
response = await self.privatePostWithdraw(self.extend({
'asset': currency,
'amount': amount,
# 'address': address, # they don't allow withdrawals to direct addresses
}, params))
return {
'info': response,
'id': response['result'],
}
raise ExchangeError(self.id + " withdraw requires a 'key' parameter(withdrawal key name, as set up on your account)")
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/' + self.version + '/' + api + '/' + path
if api == 'public':
if params:
url += '?' + self.urlencode(params)
else:
self.check_required_credentials()
nonce = str(self.nonce())
body = self.urlencode(self.extend({'nonce': nonce}, params))
auth = self.encode(nonce + body)
hash = self.hash(auth, 'sha256', 'binary')
binary = self.encode(url)
binhash = self.binary_concat(binary, hash)
secret = base64.b64decode(self.secret)
signature = self.hmac(binhash, secret, hashlib.sha512, 'base64')
headers = {
'API-Key': self.apiKey,
'API-Sign': self.decode(signature),
'Content-Type': 'application/x-www-form-urlencoded',
}
url = self.urls['api'] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def nonce(self):
return self.milliseconds()
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
if 'error' in response:
numErrors = len(response['error'])
if numErrors:
for i in range(0, len(response['error'])):
if response['error'][i] == 'EService:Unavailable':
raise ExchangeNotAvailable(self.id + ' ' + self.json(response))
if response['error'][i] == 'EService:Busy':
raise DDoSProtection(self.id + ' ' + self.json(response))
raise ExchangeError(self.id + ' ' + self.json(response))
return response
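# Illustrative helper (a sketch, not ccxt's own fee logic): how a
# [volume_threshold, rate] tier table like the 'tiers' block in describe()
# above is typically applied to a 30-day rolling trade volume.
def tiered_rate(rolling_volume, tiers):
    # Pick the rate of the highest threshold not exceeding the volume.
    rate = tiers[0][1]
    for threshold, tier_rate in tiers:
        if rolling_volume >= threshold:
            rate = tier_rate
    return rate

# Example: 75,000 of 30-day volume falls into the 50,000 taker tier (0.24%).
assert tiered_rate(75000, [[0, 0.26 / 100], [50000, 0.24 / 100], [100000, 0.22 / 100]]) == 0.24 / 100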
| 38.537748
| 126
| 0.466387
|
f6e756de2e8e8d62613dbeef2e8437d02cf2891b
| 124,000
|
py
|
Python
|
flink-ai-flow/ai_flow/protobuf/message_pb2.py
|
LJMichale/flink-ai-extended
|
efda4ad801571a155970e3a9f42797fc0ee90c84
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-08-06T04:24:36.000Z
|
2021-08-06T04:24:36.000Z
|
flink-ai-flow/ai_flow/protobuf/message_pb2.py
|
LJMichale/flink-ai-extended
|
efda4ad801571a155970e3a9f42797fc0ee90c84
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
flink-ai-flow/ai_flow/protobuf/message_pb2.py
|
LJMichale/flink-ai-extended
|
efda4ad801571a155970e3a9f42797fc0ee90c84
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-05-20T02:17:11.000Z
|
2021-05-20T02:17:11.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: message.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='message.proto',
package='ai_flow',
syntax='proto3',
serialized_options=b'\n\020com.aiflow.protoZ\010/ai_flow\210\001\001\220\001\001',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\rmessage.proto\x12\x07\x61i_flow\x1a\x1egoogle/protobuf/wrappers.proto\"K\n\x0bSchemaProto\x12\x11\n\tname_list\x18\x01 \x03(\t\x12)\n\ttype_list\x18\x02 \x03(\x0e\x32\x16.ai_flow.DataTypeProto\"\xc6\x05\n\x0c\x44\x61tasetProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x39\n\nproperties\x18\x03 \x03(\x0b\x32%.ai_flow.DatasetProto.PropertiesEntry\x12$\n\x06schema\x18\x04 \x01(\x0b\x32\x14.ai_flow.SchemaProto\x12\x31\n\x0b\x64\x61ta_format\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x03uri\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x0b\x64\x65scription\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\x0b\x63reate_time\x18\x08 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x30\n\x0bupdate_time\x18\t \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x32\n\x0c\x63\x61talog_name\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63\x61talog_type\x18\x0b \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10\x63\x61talog_database\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12<\n\x16\x63\x61talog_connection_uri\x18\r \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\rcatalog_table\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"a\n\x12ModelRelationProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12/\n\nproject_id\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\"\x8b\x01\n\nModelProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12/\n\nproject_id\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x30\n\nmodel_desc\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb3\x01\n\x19ModelVersionRelationProto\x12-\n\x07version\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x08model_id\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x38\n\x13project_snapshot_id\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\"\xf6\x02\n\x11ModelVersionProto\x12-\n\x07version\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x08model_id\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x38\n\x13project_snapshot_id\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x30\n\nmodel_path\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nmodel_type\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0cversion_desc\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rcurrent_stage\x18\x08 \x01(\x0e\x32\x1a.ai_flow.ModelVersionStage\"\x9f\x04\n\x08JobProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12:\n\x15workflow_execution_id\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12&\n\tjob_state\x18\x04 \x01(\x0e\x32\x13.ai_flow.StateProto\x12\x35\n\nproperties\x18\x05 \x03(\x0b\x32!.ai_flow.JobProto.PropertiesEntry\x12,\n\x06job_id\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\nstart_time\x18\x07 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12-\n\x08\x65nd_time\x18\x08 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12-\n\x07log_uri\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\tsignature\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x12workflow_execution\x18\x0b \x01(\x0b\x32\x1f.ai_flow.WorkflowExecutionProto\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\"\xe2\x01\n\rWorkflowProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x33\n\rworkflow_json\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12:\n\nproperties\x18\x04 \x03(\x0b\x32&.ai_flow.WorkflowProto.PropertiesEntry\x12\x11\n\tnamespace\x18\x05 \x01(\t\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc0\x04\n\x16WorkflowExecutionProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12/\n\nproject_id\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12,\n\x0f\x65xecution_state\x18\x04 \x01(\x0e\x32\x13.ai_flow.StateProto\x12\x43\n\nproperties\x18\x05 \x03(\x0b\x32/.ai_flow.WorkflowExecutionProto.PropertiesEntry\x12/\n\nstart_time\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12-\n\x08\x65nd_time\x18\x07 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12-\n\x07log_uri\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\rworkflow_json\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\tsignature\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x14\n\x0c\x65xecution_id\x18\x0b \x01(\t\x12(\n\x08workflow\x18\x0c \x01(\x0b\x32\x16.ai_flow.WorkflowProto\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc3\x01\n\x0cProjectProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x39\n\nproperties\x18\x03 \x03(\x0b\x32%.ai_flow.ProjectProto.PropertiesEntry\x12)\n\x03uri\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xb7\x02\n\x11WorkflowMetaProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12/\n\nproject_id\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12>\n\nproperties\x18\x04 \x03(\x0b\x32*.ai_flow.WorkflowMetaProto.PropertiesEntry\x12\x30\n\x0b\x63reate_time\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x30\n\x0bupdate_time\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x91\x03\n\rArtifactProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12:\n\nproperties\x18\x03 \x03(\x0b\x32&.ai_flow.ArtifactProto.PropertiesEntry\x12\x33\n\rartifact_type\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x03uri\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x0b\x64\x65scription\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\x0b\x63reate_time\x18\x07 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x30\n\x0bupdate_time\x18\x08 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"z\n\x14RegisteredModelParam\x12\x30\n\nmodel_name\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nmodel_desc\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xde\x01\n\x11ModelVersionParam\x12\x30\n\nmodel_path\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nmodel_type\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0cversion_desc\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rcurrent_stage\x18\x04 \x01(\x0e\x32\x1a.ai_flow.ModelVersionStage\"w\n\x0eModelMetaParam\x12\x30\n\nmodel_name\x18\x01 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\rmodel_version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"A\n\x08Response\x12\x13\n\x0breturn_code\x18\x01 \x01(\t\x12\x12\n\nreturn_msg\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\t\"[\n\x13RegisteredModelMeta\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12\x30\n\nmodel_desc\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xbd\x02\n\x10ModelVersionMeta\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12\x15\n\rmodel_version\x18\x02 \x01(\t\x12\x30\n\nmodel_path\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nmodel_type\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0cversion_desc\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\x0eversion_status\x18\x07 \x01(\x0e\x32\x1b.ai_flow.ModelVersionStatus\x12\x31\n\rcurrent_stage\x18\x08 \x01(\x0e\x32\x1a.ai_flow.ModelVersionStage\"\x88\x01\n\x15RegisteredModelDetail\x12\x36\n\x10registered_model\x18\x01 \x01(\x0b\x32\x1c.ai_flow.RegisteredModelMeta\x12\x37\n\x14latest_model_version\x18\x02 \x01(\x0b\x32\x19.ai_flow.ModelVersionMeta\"O\n\x14RegisteredModelMetas\x12\x37\n\x11registered_models\x18\x01 \x03(\x0b\x32\x1c.ai_flow.RegisteredModelMeta\"J\n\x0bResultProto\x12$\n\x06status\x18\x01 \x01(\x0e\x32\x14.ai_flow.StatusProto\x12\x15\n\rerror_message\x18\x02 \x01(\t\"\x98\x05\n\x0fMetricMetaProto\x12\x31\n\x0bmetric_name\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x0bmetric_type\x18\x02 \x01(\x0e\x32\x18.ai_flow.MetricTypeProto\x12\x31\n\x0bmetric_desc\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0cproject_name\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x64\x61taset_name\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nmodel_name\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08job_name\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\nstart_time\x18\x08 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12-\n\x08\x65nd_time\x18\t \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12)\n\x03uri\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12*\n\x04tags\x18\x0b \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12<\n\nproperties\x18\x0c \x03(\x0b\x32(.ai_flow.MetricMetaProto.PropertiesEntry\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xdf\x02\n\x12MetricSummaryProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x31\n\x0bmetric_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nmetric_key\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0cmetric_value\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x10metric_timestamp\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x33\n\rmodel_version\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10job_execution_id\x18\x07 
\x01(\x0b\x32\x1c.google.protobuf.StringValue*\xc0\x03\n\nReturnCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\x12\n\x0eINTERNAL_ERROR\x10\x01\x12\x1b\n\x17TEMPORARILY_UNAVAILABLE\x10\x02\x12\x0c\n\x08IO_ERROR\x10\x03\x12\x0f\n\x0b\x42\x41\x44_REQUEST\x10\x04\x12\x1c\n\x17INVALID_PARAMETER_VALUE\x10\xe8\x07\x12\x17\n\x12\x45NDPOINT_NOT_FOUND\x10\xe9\x07\x12\x16\n\x11MALFORMED_REQUEST\x10\xea\x07\x12\x12\n\rINVALID_STATE\x10\xeb\x07\x12\x16\n\x11PERMISSION_DENIED\x10\xec\x07\x12\x15\n\x10\x46\x45\x41TURE_DISABLED\x10\xed\x07\x12\x1a\n\x15\x43USTOMER_UNAUTHORIZED\x10\xee\x07\x12\x1b\n\x16REQUEST_LIMIT_EXCEEDED\x10\xef\x07\x12\x1c\n\x17RESOURCE_ALREADY_EXISTS\x10\xd1\x0f\x12\x1c\n\x17RESOURCE_DOES_NOT_EXIST\x10\xd2\x0f\x12\x13\n\x0eQUOTA_EXCEEDED\x10\xb9\x17\x12\x1c\n\x17MAX_BLOCK_SIZE_EXCEEDED\x10\xba\x17\x12\x1b\n\x16MAX_READ_SIZE_EXCEEDED\x10\xbb\x17* \n\x0bStatusProto\x12\x06\n\x02OK\x10\x00\x12\t\n\x05\x45RROR\x10\x01*\xd6\x01\n\rDataTypeProto\x12\x19\n\x15\x44\x41TA_TYPE_UNSPECIFIED\x10\x00\x12\t\n\x05INT32\x10\x01\x12\t\n\x05INT64\x10\x02\x12\x0b\n\x07\x46LOAT32\x10\x03\x12\x0b\n\x07\x46LOAT64\x10\x04\x12\n\n\x06STRING\x10\x05\x12\x0e\n\nINT32ARRAY\x10\x06\x12\x0e\n\nINT64ARRAY\x10\x07\x12\x10\n\x0c\x46lOAT32ARRAY\x10\x08\x12\x10\n\x0c\x46LOAT64ARRAY\x10\t\x12\x0f\n\x0bSTRINGARRAY\x10\n\x12\t\n\x05\x42YTES\x10\x0b\x12\x0e\n\nBYTESARRAY\x10\x0c*{\n\nStateProto\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x08\n\x04INIT\x10\x01\x12\x0c\n\x08STARTING\x10\x02\x12\x0b\n\x07RUNNING\x10\x03\x12\x0c\n\x08\x46INISHED\x10\x04\x12\n\n\x06\x46\x41ILED\x10\x05\x12\x0b\n\x07KILLING\x10\x06\x12\n\n\x06KILLED\x10\x07*F\n\rExecutionMode\x12\x1e\n\x1a\x45XECUTION_MODE_UNSPECIFIED\x10\x00\x12\t\n\x05\x42\x41TCH\x10\x01\x12\n\n\x06STREAM\x10\x02*}\n\x12ModelVersionStatus\x12\x18\n\x14PENDING_REGISTRATION\x10\x00\x12\x17\n\x13\x46\x41ILED_REGISTRATION\x10\x01\x12\t\n\x05READY\x10\x03\x12\x14\n\x10PENDING_DELETION\x10\x04\x12\x13\n\x0f\x46\x41ILED_DELETION\x10\x05*\\\n\x11ModelVersionStage\x12\r\n\tGENERATED\x10\x00\x12\r\n\tVALIDATED\x10\x01\x12\x0c\n\x08\x44\x45PLOYED\x10\x02\x12\x0e\n\nDEPRECATED\x10\x03\x12\x0b\n\x07\x44\x45LETED\x10\x04*)\n\x0fMetricTypeProto\x12\x0b\n\x07\x44\x41TASET\x10\x00\x12\t\n\x05MODEL\x10\x01\x42\"\n\x10\x63om.aiflow.protoZ\x08/ai_flow\x88\x01\x01\x90\x01\x01\x62\x06proto3'
,
dependencies=[google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,])
_RETURNCODE = _descriptor.EnumDescriptor(
name='ReturnCode',
full_name='ai_flow.ReturnCode',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='SUCCESS', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INTERNAL_ERROR', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TEMPORARILY_UNAVAILABLE', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='IO_ERROR', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='BAD_REQUEST', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INVALID_PARAMETER_VALUE', index=5, number=1000,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENDPOINT_NOT_FOUND', index=6, number=1001,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MALFORMED_REQUEST', index=7, number=1002,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INVALID_STATE', index=8, number=1003,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PERMISSION_DENIED', index=9, number=1004,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FEATURE_DISABLED', index=10, number=1005,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CUSTOMER_UNAUTHORIZED', index=11, number=1006,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REQUEST_LIMIT_EXCEEDED', index=12, number=1007,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RESOURCE_ALREADY_EXISTS', index=13, number=2001,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RESOURCE_DOES_NOT_EXIST', index=14, number=2002,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='QUOTA_EXCEEDED', index=15, number=3001,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MAX_BLOCK_SIZE_EXCEEDED', index=16, number=3002,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MAX_READ_SIZE_EXCEEDED', index=17, number=3003,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=6186,
serialized_end=6634,
)
_sym_db.RegisterEnumDescriptor(_RETURNCODE)
ReturnCode = enum_type_wrapper.EnumTypeWrapper(_RETURNCODE)
_STATUSPROTO = _descriptor.EnumDescriptor(
name='StatusProto',
full_name='ai_flow.StatusProto',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ERROR', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=6636,
serialized_end=6668,
)
_sym_db.RegisterEnumDescriptor(_STATUSPROTO)
StatusProto = enum_type_wrapper.EnumTypeWrapper(_STATUSPROTO)
_DATATYPEPROTO = _descriptor.EnumDescriptor(
name='DataTypeProto',
full_name='ai_flow.DataTypeProto',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='DATA_TYPE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INT32', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INT64', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FLOAT32', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FLOAT64', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='STRING', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INT32ARRAY', index=6, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INT64ARRAY', index=7, number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FlOAT32ARRAY', index=8, number=8,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FLOAT64ARRAY', index=9, number=9,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='STRINGARRAY', index=10, number=10,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='BYTES', index=11, number=11,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='BYTESARRAY', index=12, number=12,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=6671,
serialized_end=6885,
)
_sym_db.RegisterEnumDescriptor(_DATATYPEPROTO)
DataTypeProto = enum_type_wrapper.EnumTypeWrapper(_DATATYPEPROTO)
_STATEPROTO = _descriptor.EnumDescriptor(
name='StateProto',
full_name='ai_flow.StateProto',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='STATE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INIT', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='STARTING', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RUNNING', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FINISHED', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FAILED', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='KILLING', index=6, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='KILLED', index=7, number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=6887,
serialized_end=7010,
)
_sym_db.RegisterEnumDescriptor(_STATEPROTO)
StateProto = enum_type_wrapper.EnumTypeWrapper(_STATEPROTO)
_EXECUTIONMODE = _descriptor.EnumDescriptor(
name='ExecutionMode',
full_name='ai_flow.ExecutionMode',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='EXECUTION_MODE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='BATCH', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='STREAM', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=7012,
serialized_end=7082,
)
_sym_db.RegisterEnumDescriptor(_EXECUTIONMODE)
ExecutionMode = enum_type_wrapper.EnumTypeWrapper(_EXECUTIONMODE)
_MODELVERSIONSTATUS = _descriptor.EnumDescriptor(
name='ModelVersionStatus',
full_name='ai_flow.ModelVersionStatus',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='PENDING_REGISTRATION', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FAILED_REGISTRATION', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='READY', index=2, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PENDING_DELETION', index=3, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FAILED_DELETION', index=4, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=7084,
serialized_end=7209,
)
_sym_db.RegisterEnumDescriptor(_MODELVERSIONSTATUS)
ModelVersionStatus = enum_type_wrapper.EnumTypeWrapper(_MODELVERSIONSTATUS)
_MODELVERSIONSTAGE = _descriptor.EnumDescriptor(
name='ModelVersionStage',
full_name='ai_flow.ModelVersionStage',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='GENERATED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VALIDATED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DEPLOYED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DEPRECATED', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DELETED', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=7211,
serialized_end=7303,
)
_sym_db.RegisterEnumDescriptor(_MODELVERSIONSTAGE)
ModelVersionStage = enum_type_wrapper.EnumTypeWrapper(_MODELVERSIONSTAGE)
_METRICTYPEPROTO = _descriptor.EnumDescriptor(
name='MetricTypeProto',
full_name='ai_flow.MetricTypeProto',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='DATASET', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MODEL', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=7305,
serialized_end=7346,
)
_sym_db.RegisterEnumDescriptor(_METRICTYPEPROTO)
MetricTypeProto = enum_type_wrapper.EnumTypeWrapper(_METRICTYPEPROTO)
SUCCESS = 0
INTERNAL_ERROR = 1
TEMPORARILY_UNAVAILABLE = 2
IO_ERROR = 3
BAD_REQUEST = 4
INVALID_PARAMETER_VALUE = 1000
ENDPOINT_NOT_FOUND = 1001
MALFORMED_REQUEST = 1002
INVALID_STATE = 1003
PERMISSION_DENIED = 1004
FEATURE_DISABLED = 1005
CUSTOMER_UNAUTHORIZED = 1006
REQUEST_LIMIT_EXCEEDED = 1007
RESOURCE_ALREADY_EXISTS = 2001
RESOURCE_DOES_NOT_EXIST = 2002
QUOTA_EXCEEDED = 3001
MAX_BLOCK_SIZE_EXCEEDED = 3002
MAX_READ_SIZE_EXCEEDED = 3003
OK = 0
ERROR = 1
DATA_TYPE_UNSPECIFIED = 0
INT32 = 1
INT64 = 2
FLOAT32 = 3
FLOAT64 = 4
STRING = 5
INT32ARRAY = 6
INT64ARRAY = 7
FlOAT32ARRAY = 8
FLOAT64ARRAY = 9
STRINGARRAY = 10
BYTES = 11
BYTESARRAY = 12
STATE_UNSPECIFIED = 0
INIT = 1
STARTING = 2
RUNNING = 3
FINISHED = 4
FAILED = 5
KILLING = 6
KILLED = 7
EXECUTION_MODE_UNSPECIFIED = 0
BATCH = 1
STREAM = 2
PENDING_REGISTRATION = 0
FAILED_REGISTRATION = 1
READY = 3
PENDING_DELETION = 4
FAILED_DELETION = 5
GENERATED = 0
VALIDATED = 1
DEPLOYED = 2
DEPRECATED = 3
DELETED = 4
DATASET = 0
MODEL = 1
_SCHEMAPROTO = _descriptor.Descriptor(
name='SchemaProto',
full_name='ai_flow.SchemaProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name_list', full_name='ai_flow.SchemaProto.name_list', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type_list', full_name='ai_flow.SchemaProto.type_list', index=1,
number=2, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=58,
serialized_end=133,
)
_DATASETPROTO_PROPERTIESENTRY = _descriptor.Descriptor(
name='PropertiesEntry',
full_name='ai_flow.DatasetProto.PropertiesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ai_flow.DatasetProto.PropertiesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='ai_flow.DatasetProto.PropertiesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=797,
serialized_end=846,
)
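
# Note (added comment, not protoc output): descriptors named *_PROPERTIESENTRY
# with serialized_options=b'8\001' (map_entry=true) are the synthesized
# key/value entry types backing map<string, string> fields; on the generated
# message classes those fields behave like Python dicts, e.g.
# `dataset.properties["owner"] = "alice"` (key and value here are illustrative).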
_DATASETPROTO = _descriptor.Descriptor(
name='DatasetProto',
full_name='ai_flow.DatasetProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.DatasetProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='ai_flow.DatasetProto.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='ai_flow.DatasetProto.properties', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='schema', full_name='ai_flow.DatasetProto.schema', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='data_format', full_name='ai_flow.DatasetProto.data_format', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='uri', full_name='ai_flow.DatasetProto.uri', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='ai_flow.DatasetProto.description', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='create_time', full_name='ai_flow.DatasetProto.create_time', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='update_time', full_name='ai_flow.DatasetProto.update_time', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='catalog_name', full_name='ai_flow.DatasetProto.catalog_name', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='catalog_type', full_name='ai_flow.DatasetProto.catalog_type', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='catalog_database', full_name='ai_flow.DatasetProto.catalog_database', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='catalog_connection_uri', full_name='ai_flow.DatasetProto.catalog_connection_uri', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='catalog_table', full_name='ai_flow.DatasetProto.catalog_table', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_DATASETPROTO_PROPERTIESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=136,
serialized_end=846,
)
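
# Illustrative helper, not protoc output. Sketches construction of the
# DatasetProto message described by the descriptor above, assuming the concrete
# DatasetProto class is generated further down in this module (standard for
# protoc output). `name` is a plain string field and `properties` a
# map<string, string> per the field descriptors; any sample values a caller
# passes in are their own.
def _build_dataset_proto(name, properties=None):
    """Create a DatasetProto with a name and optional string properties."""
    dataset = DatasetProto()
    dataset.name = name
    for key, value in (properties or {}).items():
        dataset.properties[key] = value  # map entries typed by PropertiesEntry
    return dataset
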
_MODELRELATIONPROTO = _descriptor.Descriptor(
name='ModelRelationProto',
full_name='ai_flow.ModelRelationProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.ModelRelationProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='ai_flow.ModelRelationProto.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='ai_flow.ModelRelationProto.project_id', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=848,
serialized_end=945,
)
_MODELPROTO = _descriptor.Descriptor(
name='ModelProto',
full_name='ai_flow.ModelProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.ModelProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='ai_flow.ModelProto.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='ai_flow.ModelProto.project_id', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_desc', full_name='ai_flow.ModelProto.model_desc', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=948,
serialized_end=1087,
)
_MODELVERSIONRELATIONPROTO = _descriptor.Descriptor(
name='ModelVersionRelationProto',
full_name='ai_flow.ModelVersionRelationProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='version', full_name='ai_flow.ModelVersionRelationProto.version', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_id', full_name='ai_flow.ModelVersionRelationProto.model_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_snapshot_id', full_name='ai_flow.ModelVersionRelationProto.project_snapshot_id', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1090,
serialized_end=1269,
)
_MODELVERSIONPROTO = _descriptor.Descriptor(
name='ModelVersionProto',
full_name='ai_flow.ModelVersionProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='version', full_name='ai_flow.ModelVersionProto.version', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_id', full_name='ai_flow.ModelVersionProto.model_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_snapshot_id', full_name='ai_flow.ModelVersionProto.project_snapshot_id', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_path', full_name='ai_flow.ModelVersionProto.model_path', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_type', full_name='ai_flow.ModelVersionProto.model_type', index=4,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version_desc', full_name='ai_flow.ModelVersionProto.version_desc', index=5,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='current_stage', full_name='ai_flow.ModelVersionProto.current_stage', index=6,
number=8, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1272,
serialized_end=1646,
)
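
# Illustrative helper, not protoc output. Shows one way to read the optional,
# message-typed fields of a ModelVersionProto. Assumptions: the concrete class
# is generated further down in this module, and string-like fields such as
# model_path are google.protobuf wrapper messages exposing a `.value`
# attribute (suggested, but not proven, by the message-typed field descriptors
# above). The helper name is hypothetical.
def _model_path_or_default(model_version, default=""):
    """Return model_path.value when the field is set, else `default`."""
    if model_version.HasField('model_path'):
        return model_version.model_path.value
    return default
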
_JOBPROTO_PROPERTIESENTRY = _descriptor.Descriptor(
name='PropertiesEntry',
full_name='ai_flow.JobProto.PropertiesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ai_flow.JobProto.PropertiesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='ai_flow.JobProto.PropertiesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=797,
serialized_end=846,
)
_JOBPROTO = _descriptor.Descriptor(
name='JobProto',
full_name='ai_flow.JobProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.JobProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='ai_flow.JobProto.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='workflow_execution_id', full_name='ai_flow.JobProto.workflow_execution_id', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='job_state', full_name='ai_flow.JobProto.job_state', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='ai_flow.JobProto.properties', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='job_id', full_name='ai_flow.JobProto.job_id', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='start_time', full_name='ai_flow.JobProto.start_time', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='end_time', full_name='ai_flow.JobProto.end_time', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_uri', full_name='ai_flow.JobProto.log_uri', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='signature', full_name='ai_flow.JobProto.signature', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='workflow_execution', full_name='ai_flow.JobProto.workflow_execution', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_JOBPROTO_PROPERTIESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1649,
serialized_end=2192,
)
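
# Illustrative helper, not protoc output. Checks whether a JobProto is still
# active using the module-level state constants defined above. Assumption: the
# enum behind `job_state` uses those same numeric values (STARTING, RUNNING,
# ...); the helper name is hypothetical.
def _job_is_active(job):
    """Return True while the job is starting or running."""
    return job.job_state in (STARTING, RUNNING)
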
_WORKFLOWPROTO_PROPERTIESENTRY = _descriptor.Descriptor(
name='PropertiesEntry',
full_name='ai_flow.WorkflowProto.PropertiesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ai_flow.WorkflowProto.PropertiesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='ai_flow.WorkflowProto.PropertiesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=797,
serialized_end=846,
)
_WORKFLOWPROTO = _descriptor.Descriptor(
name='WorkflowProto',
full_name='ai_flow.WorkflowProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.WorkflowProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='ai_flow.WorkflowProto.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='workflow_json', full_name='ai_flow.WorkflowProto.workflow_json', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='ai_flow.WorkflowProto.properties', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='ai_flow.WorkflowProto.namespace', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_WORKFLOWPROTO_PROPERTIESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2195,
serialized_end=2421,
)
_WORKFLOWEXECUTIONPROTO_PROPERTIESENTRY = _descriptor.Descriptor(
name='PropertiesEntry',
full_name='ai_flow.WorkflowExecutionProto.PropertiesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ai_flow.WorkflowExecutionProto.PropertiesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='ai_flow.WorkflowExecutionProto.PropertiesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=797,
serialized_end=846,
)
_WORKFLOWEXECUTIONPROTO = _descriptor.Descriptor(
name='WorkflowExecutionProto',
full_name='ai_flow.WorkflowExecutionProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.WorkflowExecutionProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='ai_flow.WorkflowExecutionProto.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='ai_flow.WorkflowExecutionProto.project_id', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='execution_state', full_name='ai_flow.WorkflowExecutionProto.execution_state', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='ai_flow.WorkflowExecutionProto.properties', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='start_time', full_name='ai_flow.WorkflowExecutionProto.start_time', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='end_time', full_name='ai_flow.WorkflowExecutionProto.end_time', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_uri', full_name='ai_flow.WorkflowExecutionProto.log_uri', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='workflow_json', full_name='ai_flow.WorkflowExecutionProto.workflow_json', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='signature', full_name='ai_flow.WorkflowExecutionProto.signature', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='execution_id', full_name='ai_flow.WorkflowExecutionProto.execution_id', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='workflow', full_name='ai_flow.WorkflowExecutionProto.workflow', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_WORKFLOWEXECUTIONPROTO_PROPERTIESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2424,
serialized_end=3000,
)
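
# Illustrative helper, not protoc output. Every generated message, including
# WorkflowExecutionProto (assumed to be generated further down in this module),
# supports the standard protobuf wire round trip used here.
def _copy_workflow_execution(execution):
    """Clone a WorkflowExecutionProto by serializing and re-parsing it."""
    return WorkflowExecutionProto.FromString(execution.SerializeToString())
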
_PROJECTPROTO_PROPERTIESENTRY = _descriptor.Descriptor(
name='PropertiesEntry',
full_name='ai_flow.ProjectProto.PropertiesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ai_flow.ProjectProto.PropertiesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='ai_flow.ProjectProto.PropertiesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=797,
serialized_end=846,
)
_PROJECTPROTO = _descriptor.Descriptor(
name='ProjectProto',
full_name='ai_flow.ProjectProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.ProjectProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='ai_flow.ProjectProto.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='ai_flow.ProjectProto.properties', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='uri', full_name='ai_flow.ProjectProto.uri', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_PROJECTPROTO_PROPERTIESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3003,
serialized_end=3198,
)
_WORKFLOWMETAPROTO_PROPERTIESENTRY = _descriptor.Descriptor(
name='PropertiesEntry',
full_name='ai_flow.WorkflowMetaProto.PropertiesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ai_flow.WorkflowMetaProto.PropertiesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='ai_flow.WorkflowMetaProto.PropertiesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=797,
serialized_end=846,
)
_WORKFLOWMETAPROTO = _descriptor.Descriptor(
name='WorkflowMetaProto',
full_name='ai_flow.WorkflowMetaProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.WorkflowMetaProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='ai_flow.WorkflowMetaProto.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='ai_flow.WorkflowMetaProto.project_id', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='ai_flow.WorkflowMetaProto.properties', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='create_time', full_name='ai_flow.WorkflowMetaProto.create_time', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='update_time', full_name='ai_flow.WorkflowMetaProto.update_time', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_WORKFLOWMETAPROTO_PROPERTIESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3201,
serialized_end=3512,
)
_ARTIFACTPROTO_PROPERTIESENTRY = _descriptor.Descriptor(
name='PropertiesEntry',
full_name='ai_flow.ArtifactProto.PropertiesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ai_flow.ArtifactProto.PropertiesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='ai_flow.ArtifactProto.PropertiesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=797,
serialized_end=846,
)
_ARTIFACTPROTO = _descriptor.Descriptor(
name='ArtifactProto',
full_name='ai_flow.ArtifactProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.ArtifactProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='ai_flow.ArtifactProto.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='ai_flow.ArtifactProto.properties', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='artifact_type', full_name='ai_flow.ArtifactProto.artifact_type', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='uri', full_name='ai_flow.ArtifactProto.uri', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='ai_flow.ArtifactProto.description', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='create_time', full_name='ai_flow.ArtifactProto.create_time', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='update_time', full_name='ai_flow.ArtifactProto.update_time', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_ARTIFACTPROTO_PROPERTIESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3515,
serialized_end=3916,
)
_REGISTEREDMODELPARAM = _descriptor.Descriptor(
name='RegisteredModelParam',
full_name='ai_flow.RegisteredModelParam',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='model_name', full_name='ai_flow.RegisteredModelParam.model_name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_desc', full_name='ai_flow.RegisteredModelParam.model_desc', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3918,
serialized_end=4040,
)
_MODELVERSIONPARAM = _descriptor.Descriptor(
name='ModelVersionParam',
full_name='ai_flow.ModelVersionParam',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='model_path', full_name='ai_flow.ModelVersionParam.model_path', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_type', full_name='ai_flow.ModelVersionParam.model_type', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version_desc', full_name='ai_flow.ModelVersionParam.version_desc', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='current_stage', full_name='ai_flow.ModelVersionParam.current_stage', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4043,
serialized_end=4265,
)
_MODELMETAPARAM = _descriptor.Descriptor(
name='ModelMetaParam',
full_name='ai_flow.ModelMetaParam',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='model_name', full_name='ai_flow.ModelMetaParam.model_name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_version', full_name='ai_flow.ModelMetaParam.model_version', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4267,
serialized_end=4386,
)
_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai_flow.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='return_code', full_name='ai_flow.Response.return_code', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='return_msg', full_name='ai_flow.Response.return_msg', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='data', full_name='ai_flow.Response.data', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4388,
serialized_end=4453,
)
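
# Illustrative helper, not protoc output. Sketches how a caller might test a
# Response for success, under the assumption that the service fills
# `return_code` with the decimal form of the status constants above (e.g. "0"
# for SUCCESS); verify against the actual ai_flow service before relying on it.
def _response_indicates_success(response):
    """Return True when the Response carries the SUCCESS status code."""
    try:
        return int(response.return_code) == SUCCESS
    except ValueError:
        # Non-numeric return codes are treated as failures in this sketch.
        return False
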
_REGISTEREDMODELMETA = _descriptor.Descriptor(
name='RegisteredModelMeta',
full_name='ai_flow.RegisteredModelMeta',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='model_name', full_name='ai_flow.RegisteredModelMeta.model_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_desc', full_name='ai_flow.RegisteredModelMeta.model_desc', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4455,
serialized_end=4546,
)
_MODELVERSIONMETA = _descriptor.Descriptor(
name='ModelVersionMeta',
full_name='ai_flow.ModelVersionMeta',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='model_name', full_name='ai_flow.ModelVersionMeta.model_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_version', full_name='ai_flow.ModelVersionMeta.model_version', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_path', full_name='ai_flow.ModelVersionMeta.model_path', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_type', full_name='ai_flow.ModelVersionMeta.model_type', index=3,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version_desc', full_name='ai_flow.ModelVersionMeta.version_desc', index=4,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version_status', full_name='ai_flow.ModelVersionMeta.version_status', index=5,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='current_stage', full_name='ai_flow.ModelVersionMeta.current_stage', index=6,
number=8, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4549,
serialized_end=4866,
)
_REGISTEREDMODELDETAIL = _descriptor.Descriptor(
name='RegisteredModelDetail',
full_name='ai_flow.RegisteredModelDetail',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='registered_model', full_name='ai_flow.RegisteredModelDetail.registered_model', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='latest_model_version', full_name='ai_flow.RegisteredModelDetail.latest_model_version', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4869,
serialized_end=5005,
)
_REGISTEREDMODELMETAS = _descriptor.Descriptor(
name='RegisteredModelMetas',
full_name='ai_flow.RegisteredModelMetas',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='registered_models', full_name='ai_flow.RegisteredModelMetas.registered_models', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5007,
serialized_end=5086,
)
_RESULTPROTO = _descriptor.Descriptor(
name='ResultProto',
full_name='ai_flow.ResultProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='ai_flow.ResultProto.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='error_message', full_name='ai_flow.ResultProto.error_message', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5088,
serialized_end=5162,
)
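
# Illustrative helper, not protoc output. ResultProto.status is an enum field;
# this sketch compares it with the module-level ERROR constant defined above,
# assuming the status enum uses OK == 0 / ERROR == 1. The helper name is
# hypothetical.
def _result_error_message(result):
    """Return the error message when the ResultProto reports failure, else None."""
    return result.error_message if result.status == ERROR else None
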
_METRICMETAPROTO_PROPERTIESENTRY = _descriptor.Descriptor(
name='PropertiesEntry',
full_name='ai_flow.MetricMetaProto.PropertiesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ai_flow.MetricMetaProto.PropertiesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='ai_flow.MetricMetaProto.PropertiesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=797,
serialized_end=846,
)
_METRICMETAPROTO = _descriptor.Descriptor(
name='MetricMetaProto',
full_name='ai_flow.MetricMetaProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='metric_name', full_name='ai_flow.MetricMetaProto.metric_name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metric_type', full_name='ai_flow.MetricMetaProto.metric_type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metric_desc', full_name='ai_flow.MetricMetaProto.metric_desc', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_name', full_name='ai_flow.MetricMetaProto.project_name', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dataset_name', full_name='ai_flow.MetricMetaProto.dataset_name', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_name', full_name='ai_flow.MetricMetaProto.model_name', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='job_name', full_name='ai_flow.MetricMetaProto.job_name', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='start_time', full_name='ai_flow.MetricMetaProto.start_time', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='end_time', full_name='ai_flow.MetricMetaProto.end_time', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='uri', full_name='ai_flow.MetricMetaProto.uri', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='ai_flow.MetricMetaProto.tags', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='ai_flow.MetricMetaProto.properties', index=11,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_METRICMETAPROTO_PROPERTIESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5165,
serialized_end=5829,
)
_METRICSUMMARYPROTO = _descriptor.Descriptor(
name='MetricSummaryProto',
full_name='ai_flow.MetricSummaryProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.MetricSummaryProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metric_name', full_name='ai_flow.MetricSummaryProto.metric_name', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metric_key', full_name='ai_flow.MetricSummaryProto.metric_key', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metric_value', full_name='ai_flow.MetricSummaryProto.metric_value', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metric_timestamp', full_name='ai_flow.MetricSummaryProto.metric_timestamp', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_version', full_name='ai_flow.MetricSummaryProto.model_version', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='job_execution_id', full_name='ai_flow.MetricSummaryProto.job_execution_id', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5832,
serialized_end=6183,
)
_SCHEMAPROTO.fields_by_name['type_list'].enum_type = _DATATYPEPROTO
_DATASETPROTO_PROPERTIESENTRY.containing_type = _DATASETPROTO
_DATASETPROTO.fields_by_name['properties'].message_type = _DATASETPROTO_PROPERTIESENTRY
_DATASETPROTO.fields_by_name['schema'].message_type = _SCHEMAPROTO
_DATASETPROTO.fields_by_name['data_format'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DATASETPROTO.fields_by_name['uri'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DATASETPROTO.fields_by_name['description'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DATASETPROTO.fields_by_name['create_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_DATASETPROTO.fields_by_name['update_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_DATASETPROTO.fields_by_name['catalog_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DATASETPROTO.fields_by_name['catalog_type'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DATASETPROTO.fields_by_name['catalog_database'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DATASETPROTO.fields_by_name['catalog_connection_uri'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DATASETPROTO.fields_by_name['catalog_table'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELRELATIONPROTO.fields_by_name['project_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_MODELPROTO.fields_by_name['project_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_MODELPROTO.fields_by_name['model_desc'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONRELATIONPROTO.fields_by_name['version'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONRELATIONPROTO.fields_by_name['model_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_MODELVERSIONRELATIONPROTO.fields_by_name['project_snapshot_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_MODELVERSIONPROTO.fields_by_name['version'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONPROTO.fields_by_name['model_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_MODELVERSIONPROTO.fields_by_name['project_snapshot_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_MODELVERSIONPROTO.fields_by_name['model_path'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONPROTO.fields_by_name['model_type'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONPROTO.fields_by_name['version_desc'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONPROTO.fields_by_name['current_stage'].enum_type = _MODELVERSIONSTAGE
_JOBPROTO_PROPERTIESENTRY.containing_type = _JOBPROTO
_JOBPROTO.fields_by_name['workflow_execution_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_JOBPROTO.fields_by_name['job_state'].enum_type = _STATEPROTO
_JOBPROTO.fields_by_name['properties'].message_type = _JOBPROTO_PROPERTIESENTRY
_JOBPROTO.fields_by_name['job_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_JOBPROTO.fields_by_name['start_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_JOBPROTO.fields_by_name['end_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_JOBPROTO.fields_by_name['log_uri'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_JOBPROTO.fields_by_name['signature'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_JOBPROTO.fields_by_name['workflow_execution'].message_type = _WORKFLOWEXECUTIONPROTO
_WORKFLOWPROTO_PROPERTIESENTRY.containing_type = _WORKFLOWPROTO
_WORKFLOWPROTO.fields_by_name['workflow_json'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_WORKFLOWPROTO.fields_by_name['properties'].message_type = _WORKFLOWPROTO_PROPERTIESENTRY
_WORKFLOWEXECUTIONPROTO_PROPERTIESENTRY.containing_type = _WORKFLOWEXECUTIONPROTO
_WORKFLOWEXECUTIONPROTO.fields_by_name['project_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_WORKFLOWEXECUTIONPROTO.fields_by_name['execution_state'].enum_type = _STATEPROTO
_WORKFLOWEXECUTIONPROTO.fields_by_name['properties'].message_type = _WORKFLOWEXECUTIONPROTO_PROPERTIESENTRY
_WORKFLOWEXECUTIONPROTO.fields_by_name['start_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_WORKFLOWEXECUTIONPROTO.fields_by_name['end_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_WORKFLOWEXECUTIONPROTO.fields_by_name['log_uri'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_WORKFLOWEXECUTIONPROTO.fields_by_name['workflow_json'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_WORKFLOWEXECUTIONPROTO.fields_by_name['signature'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_WORKFLOWEXECUTIONPROTO.fields_by_name['workflow'].message_type = _WORKFLOWPROTO
_PROJECTPROTO_PROPERTIESENTRY.containing_type = _PROJECTPROTO
_PROJECTPROTO.fields_by_name['properties'].message_type = _PROJECTPROTO_PROPERTIESENTRY
_PROJECTPROTO.fields_by_name['uri'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_WORKFLOWMETAPROTO_PROPERTIESENTRY.containing_type = _WORKFLOWMETAPROTO
_WORKFLOWMETAPROTO.fields_by_name['project_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_WORKFLOWMETAPROTO.fields_by_name['properties'].message_type = _WORKFLOWMETAPROTO_PROPERTIESENTRY
_WORKFLOWMETAPROTO.fields_by_name['create_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_WORKFLOWMETAPROTO.fields_by_name['update_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_ARTIFACTPROTO_PROPERTIESENTRY.containing_type = _ARTIFACTPROTO
_ARTIFACTPROTO.fields_by_name['properties'].message_type = _ARTIFACTPROTO_PROPERTIESENTRY
_ARTIFACTPROTO.fields_by_name['artifact_type'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ARTIFACTPROTO.fields_by_name['uri'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ARTIFACTPROTO.fields_by_name['description'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ARTIFACTPROTO.fields_by_name['create_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_ARTIFACTPROTO.fields_by_name['update_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_REGISTEREDMODELPARAM.fields_by_name['model_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_REGISTEREDMODELPARAM.fields_by_name['model_desc'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONPARAM.fields_by_name['model_path'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONPARAM.fields_by_name['model_type'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONPARAM.fields_by_name['version_desc'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONPARAM.fields_by_name['current_stage'].enum_type = _MODELVERSIONSTAGE
_MODELMETAPARAM.fields_by_name['model_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELMETAPARAM.fields_by_name['model_version'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_REGISTEREDMODELMETA.fields_by_name['model_desc'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONMETA.fields_by_name['model_path'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONMETA.fields_by_name['model_type'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONMETA.fields_by_name['version_desc'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONMETA.fields_by_name['version_status'].enum_type = _MODELVERSIONSTATUS
_MODELVERSIONMETA.fields_by_name['current_stage'].enum_type = _MODELVERSIONSTAGE
_REGISTEREDMODELDETAIL.fields_by_name['registered_model'].message_type = _REGISTEREDMODELMETA
_REGISTEREDMODELDETAIL.fields_by_name['latest_model_version'].message_type = _MODELVERSIONMETA
_REGISTEREDMODELMETAS.fields_by_name['registered_models'].message_type = _REGISTEREDMODELMETA
_RESULTPROTO.fields_by_name['status'].enum_type = _STATUSPROTO
_METRICMETAPROTO_PROPERTIESENTRY.containing_type = _METRICMETAPROTO
_METRICMETAPROTO.fields_by_name['metric_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICMETAPROTO.fields_by_name['metric_type'].enum_type = _METRICTYPEPROTO
_METRICMETAPROTO.fields_by_name['metric_desc'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICMETAPROTO.fields_by_name['project_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICMETAPROTO.fields_by_name['dataset_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICMETAPROTO.fields_by_name['model_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICMETAPROTO.fields_by_name['job_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICMETAPROTO.fields_by_name['start_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_METRICMETAPROTO.fields_by_name['end_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_METRICMETAPROTO.fields_by_name['uri'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICMETAPROTO.fields_by_name['tags'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICMETAPROTO.fields_by_name['properties'].message_type = _METRICMETAPROTO_PROPERTIESENTRY
_METRICSUMMARYPROTO.fields_by_name['metric_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICSUMMARYPROTO.fields_by_name['metric_key'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICSUMMARYPROTO.fields_by_name['metric_value'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICSUMMARYPROTO.fields_by_name['metric_timestamp'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_METRICSUMMARYPROTO.fields_by_name['model_version'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICSUMMARYPROTO.fields_by_name['job_execution_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
DESCRIPTOR.message_types_by_name['SchemaProto'] = _SCHEMAPROTO
DESCRIPTOR.message_types_by_name['DatasetProto'] = _DATASETPROTO
DESCRIPTOR.message_types_by_name['ModelRelationProto'] = _MODELRELATIONPROTO
DESCRIPTOR.message_types_by_name['ModelProto'] = _MODELPROTO
DESCRIPTOR.message_types_by_name['ModelVersionRelationProto'] = _MODELVERSIONRELATIONPROTO
DESCRIPTOR.message_types_by_name['ModelVersionProto'] = _MODELVERSIONPROTO
DESCRIPTOR.message_types_by_name['JobProto'] = _JOBPROTO
DESCRIPTOR.message_types_by_name['WorkflowProto'] = _WORKFLOWPROTO
DESCRIPTOR.message_types_by_name['WorkflowExecutionProto'] = _WORKFLOWEXECUTIONPROTO
DESCRIPTOR.message_types_by_name['ProjectProto'] = _PROJECTPROTO
DESCRIPTOR.message_types_by_name['WorkflowMetaProto'] = _WORKFLOWMETAPROTO
DESCRIPTOR.message_types_by_name['ArtifactProto'] = _ARTIFACTPROTO
DESCRIPTOR.message_types_by_name['RegisteredModelParam'] = _REGISTEREDMODELPARAM
DESCRIPTOR.message_types_by_name['ModelVersionParam'] = _MODELVERSIONPARAM
DESCRIPTOR.message_types_by_name['ModelMetaParam'] = _MODELMETAPARAM
DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
DESCRIPTOR.message_types_by_name['RegisteredModelMeta'] = _REGISTEREDMODELMETA
DESCRIPTOR.message_types_by_name['ModelVersionMeta'] = _MODELVERSIONMETA
DESCRIPTOR.message_types_by_name['RegisteredModelDetail'] = _REGISTEREDMODELDETAIL
DESCRIPTOR.message_types_by_name['RegisteredModelMetas'] = _REGISTEREDMODELMETAS
DESCRIPTOR.message_types_by_name['ResultProto'] = _RESULTPROTO
DESCRIPTOR.message_types_by_name['MetricMetaProto'] = _METRICMETAPROTO
DESCRIPTOR.message_types_by_name['MetricSummaryProto'] = _METRICSUMMARYPROTO
DESCRIPTOR.enum_types_by_name['ReturnCode'] = _RETURNCODE
DESCRIPTOR.enum_types_by_name['StatusProto'] = _STATUSPROTO
DESCRIPTOR.enum_types_by_name['DataTypeProto'] = _DATATYPEPROTO
DESCRIPTOR.enum_types_by_name['StateProto'] = _STATEPROTO
DESCRIPTOR.enum_types_by_name['ExecutionMode'] = _EXECUTIONMODE
DESCRIPTOR.enum_types_by_name['ModelVersionStatus'] = _MODELVERSIONSTATUS
DESCRIPTOR.enum_types_by_name['ModelVersionStage'] = _MODELVERSIONSTAGE
DESCRIPTOR.enum_types_by_name['MetricTypeProto'] = _METRICTYPEPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SchemaProto = _reflection.GeneratedProtocolMessageType('SchemaProto', (_message.Message,), {
'DESCRIPTOR' : _SCHEMAPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.SchemaProto)
})
_sym_db.RegisterMessage(SchemaProto)
DatasetProto = _reflection.GeneratedProtocolMessageType('DatasetProto', (_message.Message,), {
'PropertiesEntry' : _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), {
'DESCRIPTOR' : _DATASETPROTO_PROPERTIESENTRY,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.DatasetProto.PropertiesEntry)
})
,
'DESCRIPTOR' : _DATASETPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.DatasetProto)
})
_sym_db.RegisterMessage(DatasetProto)
_sym_db.RegisterMessage(DatasetProto.PropertiesEntry)
ModelRelationProto = _reflection.GeneratedProtocolMessageType('ModelRelationProto', (_message.Message,), {
'DESCRIPTOR' : _MODELRELATIONPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ModelRelationProto)
})
_sym_db.RegisterMessage(ModelRelationProto)
ModelProto = _reflection.GeneratedProtocolMessageType('ModelProto', (_message.Message,), {
'DESCRIPTOR' : _MODELPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ModelProto)
})
_sym_db.RegisterMessage(ModelProto)
ModelVersionRelationProto = _reflection.GeneratedProtocolMessageType('ModelVersionRelationProto', (_message.Message,), {
'DESCRIPTOR' : _MODELVERSIONRELATIONPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ModelVersionRelationProto)
})
_sym_db.RegisterMessage(ModelVersionRelationProto)
ModelVersionProto = _reflection.GeneratedProtocolMessageType('ModelVersionProto', (_message.Message,), {
'DESCRIPTOR' : _MODELVERSIONPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ModelVersionProto)
})
_sym_db.RegisterMessage(ModelVersionProto)
JobProto = _reflection.GeneratedProtocolMessageType('JobProto', (_message.Message,), {
'PropertiesEntry' : _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), {
'DESCRIPTOR' : _JOBPROTO_PROPERTIESENTRY,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.JobProto.PropertiesEntry)
})
,
'DESCRIPTOR' : _JOBPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.JobProto)
})
_sym_db.RegisterMessage(JobProto)
_sym_db.RegisterMessage(JobProto.PropertiesEntry)
WorkflowProto = _reflection.GeneratedProtocolMessageType('WorkflowProto', (_message.Message,), {
'PropertiesEntry' : _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), {
'DESCRIPTOR' : _WORKFLOWPROTO_PROPERTIESENTRY,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.WorkflowProto.PropertiesEntry)
})
,
'DESCRIPTOR' : _WORKFLOWPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.WorkflowProto)
})
_sym_db.RegisterMessage(WorkflowProto)
_sym_db.RegisterMessage(WorkflowProto.PropertiesEntry)
WorkflowExecutionProto = _reflection.GeneratedProtocolMessageType('WorkflowExecutionProto', (_message.Message,), {
'PropertiesEntry' : _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), {
'DESCRIPTOR' : _WORKFLOWEXECUTIONPROTO_PROPERTIESENTRY,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.WorkflowExecutionProto.PropertiesEntry)
})
,
'DESCRIPTOR' : _WORKFLOWEXECUTIONPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.WorkflowExecutionProto)
})
_sym_db.RegisterMessage(WorkflowExecutionProto)
_sym_db.RegisterMessage(WorkflowExecutionProto.PropertiesEntry)
ProjectProto = _reflection.GeneratedProtocolMessageType('ProjectProto', (_message.Message,), {
'PropertiesEntry' : _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), {
'DESCRIPTOR' : _PROJECTPROTO_PROPERTIESENTRY,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ProjectProto.PropertiesEntry)
})
,
'DESCRIPTOR' : _PROJECTPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ProjectProto)
})
_sym_db.RegisterMessage(ProjectProto)
_sym_db.RegisterMessage(ProjectProto.PropertiesEntry)
WorkflowMetaProto = _reflection.GeneratedProtocolMessageType('WorkflowMetaProto', (_message.Message,), {
'PropertiesEntry' : _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), {
'DESCRIPTOR' : _WORKFLOWMETAPROTO_PROPERTIESENTRY,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.WorkflowMetaProto.PropertiesEntry)
})
,
'DESCRIPTOR' : _WORKFLOWMETAPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.WorkflowMetaProto)
})
_sym_db.RegisterMessage(WorkflowMetaProto)
_sym_db.RegisterMessage(WorkflowMetaProto.PropertiesEntry)
ArtifactProto = _reflection.GeneratedProtocolMessageType('ArtifactProto', (_message.Message,), {
'PropertiesEntry' : _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), {
'DESCRIPTOR' : _ARTIFACTPROTO_PROPERTIESENTRY,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ArtifactProto.PropertiesEntry)
})
,
'DESCRIPTOR' : _ARTIFACTPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ArtifactProto)
})
_sym_db.RegisterMessage(ArtifactProto)
_sym_db.RegisterMessage(ArtifactProto.PropertiesEntry)
RegisteredModelParam = _reflection.GeneratedProtocolMessageType('RegisteredModelParam', (_message.Message,), {
'DESCRIPTOR' : _REGISTEREDMODELPARAM,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.RegisteredModelParam)
})
_sym_db.RegisterMessage(RegisteredModelParam)
ModelVersionParam = _reflection.GeneratedProtocolMessageType('ModelVersionParam', (_message.Message,), {
'DESCRIPTOR' : _MODELVERSIONPARAM,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ModelVersionParam)
})
_sym_db.RegisterMessage(ModelVersionParam)
ModelMetaParam = _reflection.GeneratedProtocolMessageType('ModelMetaParam', (_message.Message,), {
'DESCRIPTOR' : _MODELMETAPARAM,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ModelMetaParam)
})
_sym_db.RegisterMessage(ModelMetaParam)
Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _RESPONSE,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.Response)
})
_sym_db.RegisterMessage(Response)
RegisteredModelMeta = _reflection.GeneratedProtocolMessageType('RegisteredModelMeta', (_message.Message,), {
'DESCRIPTOR' : _REGISTEREDMODELMETA,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.RegisteredModelMeta)
})
_sym_db.RegisterMessage(RegisteredModelMeta)
ModelVersionMeta = _reflection.GeneratedProtocolMessageType('ModelVersionMeta', (_message.Message,), {
'DESCRIPTOR' : _MODELVERSIONMETA,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ModelVersionMeta)
})
_sym_db.RegisterMessage(ModelVersionMeta)
RegisteredModelDetail = _reflection.GeneratedProtocolMessageType('RegisteredModelDetail', (_message.Message,), {
'DESCRIPTOR' : _REGISTEREDMODELDETAIL,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.RegisteredModelDetail)
})
_sym_db.RegisterMessage(RegisteredModelDetail)
RegisteredModelMetas = _reflection.GeneratedProtocolMessageType('RegisteredModelMetas', (_message.Message,), {
'DESCRIPTOR' : _REGISTEREDMODELMETAS,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.RegisteredModelMetas)
})
_sym_db.RegisterMessage(RegisteredModelMetas)
ResultProto = _reflection.GeneratedProtocolMessageType('ResultProto', (_message.Message,), {
'DESCRIPTOR' : _RESULTPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ResultProto)
})
_sym_db.RegisterMessage(ResultProto)
MetricMetaProto = _reflection.GeneratedProtocolMessageType('MetricMetaProto', (_message.Message,), {
'PropertiesEntry' : _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), {
'DESCRIPTOR' : _METRICMETAPROTO_PROPERTIESENTRY,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.MetricMetaProto.PropertiesEntry)
})
,
'DESCRIPTOR' : _METRICMETAPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.MetricMetaProto)
})
_sym_db.RegisterMessage(MetricMetaProto)
_sym_db.RegisterMessage(MetricMetaProto.PropertiesEntry)
MetricSummaryProto = _reflection.GeneratedProtocolMessageType('MetricSummaryProto', (_message.Message,), {
'DESCRIPTOR' : _METRICSUMMARYPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.MetricSummaryProto)
})
_sym_db.RegisterMessage(MetricSummaryProto)
DESCRIPTOR._options = None
_DATASETPROTO_PROPERTIESENTRY._options = None
_JOBPROTO_PROPERTIESENTRY._options = None
_WORKFLOWPROTO_PROPERTIESENTRY._options = None
_WORKFLOWEXECUTIONPROTO_PROPERTIESENTRY._options = None
_PROJECTPROTO_PROPERTIESENTRY._options = None
_WORKFLOWMETAPROTO_PROPERTIESENTRY._options = None
_ARTIFACTPROTO_PROPERTIESENTRY._options = None
_METRICMETAPROTO_PROPERTIESENTRY._options = None
# @@protoc_insertion_point(module_scope)
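
# Hedged usage sketch (illustration only; not emitted by protoc): one of the
# generated classes above can be built and round-tripped like this. The field
# values are assumptions; `wrappers_pb2` is google.protobuf.wrappers_pb2,
# already imported by this module as google_dot_protobuf_dot_wrappers__pb2.
#
#   from google.protobuf import wrappers_pb2
#   param = RegisteredModelParam(
#       model_name=wrappers_pb2.StringValue(value='my_model'),
#       model_desc=wrappers_pb2.StringValue(value='demo'))
#   payload = param.SerializeToString()
#   print(RegisteredModelParam.FromString(payload).model_name.value)  # 'my_model'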
| 47.256098
| 12,204
| 0.769274
|
98516f43efe574b98d1316ad642c574bba5ae7c2
| 3,654
|
py
|
Python
|
.history/DEBER_20210831114511.py
|
Alopezm5/PROYECTO-PARTE-1
|
bd7a8594edf08d41c6ca544cf6bac01ea4fcb684
|
[
"MIT"
] | null | null | null |
.history/DEBER_20210831114511.py
|
Alopezm5/PROYECTO-PARTE-1
|
bd7a8594edf08d41c6ca544cf6bac01ea4fcb684
|
[
"MIT"
] | null | null | null |
.history/DEBER_20210831114511.py
|
Alopezm5/PROYECTO-PARTE-1
|
bd7a8594edf08d41c6ca544cf6bac01ea4fcb684
|
[
"MIT"
] | null | null | null |
class Nomina:
def __init__(self,nom="",ruc=0,dire="",tele=0,ciud="",tipEmpr="",email="",estado="",profe="",dep=""):#3
self.nombre=nom
self.ruc=ruc
self.direccion=dire
self.telefono=tele
self.ciudad=ciud
self.tipoEmpresa=tipEmpr
self.correo=email
self.estadocivil=estado
self.profesion=profe
self.departamento=dep
class Empresa(Nomina):
def datosEmpresa(self):#3
self.nombre=input("Ingresar nombre de la empresa: ")
self.ruc=int(input("Ingresar ruc de la empresa: "))
self.direccion=input("Ingresar la direccion de la empresa: ")
self.telefono=int(input("Ingresar el numero de telefono de la empresa: "))
self.ciudad=input("Ingresar ciudad donde esta la empresa: ")
self.tipoEmpresa=input("Ingresar tipo de empresa publica o privada: ")
def mostrarEmpresa(self):
print("Datos de la Empresa")
        print('La empresa "{}"\n De RUC #{} \n Está ubicada en {}\n Se puede comunicar al #{}\n Está empresa esta en la ciudad de "{}"\n Es una entidad "{}"'.format(self.nombre,self.ruc,self.direccion, self.telefono,self.ciudad, self.tipoEmpresa))
class Empleado(Nomina):
    def empleado(self):
        self.nombre=input("Ingresar nombre del empleado: ")
        self.cedula=int(input("Ingresar numero de cedula: "))
        self.direccion=input("Ingresar la direccion del empleado: ")
        self.telefono=int(input("Ingresar numero de contacto del empleado: "))
        self.correo=input("Ingresar correo personal del empleado: ")
    def empleadoObrero(self):
        self.estadocivil=input("Ingresar estado civil del empleado: ")
    def empleadoOficina(self):#falta dos atributo como definicion de oficina
        self.profesion=input("Ingresar profesion del empleado: ")
    def mostrarempleado(self):
        print("El empleado: {} con # de C.I. {} \n Con direccion {}, y numero de contacto{}\n Y correo {} \n".format(self.nombre,self.cedula,self.direccion,self.telefono,self.correo))
        if eleccion==1:
            print(self.estadocivil)
        elif eleccion==2:
            print(self.profesion)
class Departamento(Empleado):
    def departa(self):
        self.departamento=input("Ingresar el departamento al que pertenece el empleado: ")
    def mostrarDeparta(self):
        print('El empleado pertenece al departamento de: "{}"'.format(self.departamento))
# class Pagos():
# def __init__(self):
# pass
# def pagoNormal(self, valhora,hoesti,hotraba, desc, desper):
# self.valorhora=valhora
# self.horaestimada=hoesti
# self.horastrabajadas=hotraba
# self.descuentos=desc
# self.permisos=desper
# def pagoExtra(self, valhora,hoesti,hotraba,incentivos):
# self.valorhora=valhora
# self.horaestimada=hoesti
# self.horastrabajadas=hotraba
# self.bono=incentivos
# def Nomina(self, nom, valhora,hoesti,hotraba, desc, desper,incentivos):#faltan 8 atributos incluir cosas del empleado y sobretiempo
# self.nombre= nom
# self.valorhora=valhora
# self.horaestimada=hoesti
# self.horastrabajadas=hotraba
# self.descuentos=desc
# self.permisos=desper
# self.bono=incentivos
nom=Nomina()
emp=Empresa()
emp.datosEmpresa()
emp.mostrarEmpresa()
# emple=Empleado()
# emple.empleado()
# eleccion=int(input("Va a ingresar un empleado tipo 1. Obreo o 2.Oficina: "))
# if eleccion==1:
# emple.empleadoObrero()
# elif eleccion==2:
# emple.empleadoOficina()
# else:
# print("No selecciono el tipo de empleado")
# emple.mostrarempleado()
| 37.285714
| 247
| 0.655993
|
84db18c5836a151e089149994b7c314943b1d878
| 3,717
|
py
|
Python
|
gallery/oauth2backend.py
|
cccs-is/callisto
|
7e1da3d12a8da0d2b49d79c02399cc4c0accccce
|
[
"MIT"
] | null | null | null |
gallery/oauth2backend.py
|
cccs-is/callisto
|
7e1da3d12a8da0d2b49d79c02399cc4c0accccce
|
[
"MIT"
] | null | null | null |
gallery/oauth2backend.py
|
cccs-is/callisto
|
7e1da3d12a8da0d2b49d79c02399cc4c0accccce
|
[
"MIT"
] | null | null | null |
import jwt
import requests
from cryptography.x509 import load_pem_x509_certificate
from cryptography.hazmat.backends import default_backend
from django.core.exceptions import PermissionDenied
from django.contrib.auth import get_user_model
from django.conf import settings
class OAuth2Authentication:
"""
Token-based authentication backend.
The backend assumes that the actual authentication and user management is done by some other actor (such as
    a reverse proxy) which passes in an OAuth2 token in the 'Authorization' header, using type AUTHORIZATION_TYPE.
    The flow is tested with Azure Active Directory as the OAuth2 provider. Other OAuth providers might need code
    adjustment if they supply their public signing keys using a different paradigm.
"""
AUTHORIZATION_TYPE = 'Bearer '
def oauth_provider_public_certificate(self, user_token):
"""
The method expects to get list of keys used by matching 'kid' and 'x5t' token claims
from a well-known URL supplied by the OAuth provider.
The keys are then wrapped to form standard public certificate.
"""
token_header = jwt.get_unverified_header(user_token)
token_kid = token_header.get('kid')
token_x5t = token_header.get('x5t')
url = settings.OAUTH_PUBLIC_KEYS_URL
reply = requests.get(url=url, timeout=10.0)
reply_data = reply.json()
for key in reply_data.get('keys'):
if key.get('kid') == token_kid and key.get('x5t') == token_x5t:
cert_body = key.get('x5c')[0]
return '-----BEGIN CERTIFICATE-----\n' + cert_body + '\n-----END CERTIFICATE-----\n'
return None
def verify_and_decode(self, user_token):
cert_str = self.oauth_provider_public_certificate(user_token)
cert_obj = load_pem_x509_certificate(cert_str.encode('utf-8'), default_backend())
public_key = cert_obj.public_key()
audience = settings.OAUTH_TOKEN_AUDIENCE
# Ignore expiration date for now until we figure out how to either get refresh tokens
# or make environment update them for us:
verify_options = {'verify_exp': False}
try:
return jwt.decode(user_token, public_key, algorithms=['RS256'], audience=audience, options=verify_options)
except jwt.exceptions.InvalidTokenError as e:
print('Exception: ' + repr(e))
return None
def authenticate(self, request):
access_token = request.headers.get('Authorization')
if not access_token:
return None
if access_token.startswith(self.AUTHORIZATION_TYPE):
access_token = access_token[len(self.AUTHORIZATION_TYPE):]
decoded = self.verify_and_decode(access_token)
if not decoded:
return None
user_id = decoded.get('oid')
user_model = get_user_model()
try:
user = user_model.objects.get(username=user_id)
if not user.is_active:
raise PermissionDenied()
except user_model.DoesNotExist:
user = user_model(username=user_id)
user.name = decoded.get('name', '')
user.first_name = decoded.get('given_name', '')
user.last_name = decoded.get('family_name', '')
user.email = decoded.get('unique_name') # TODO check if with proper scope we can get e-mail claim
print('Creating user: ', user.first_name, ' ', user.last_name )
user.save()
return user
def get_user(self, user_id):
user_model = get_user_model()
try:
return user_model.objects.get(pk=user_id)
except user_model.DoesNotExist:
return None
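
# Hedged usage sketch (illustration only, not part of the original module).
# The backend reads settings.OAUTH_PUBLIC_KEYS_URL and settings.OAUTH_TOKEN_AUDIENCE,
# and its authenticate(request) signature also lets django.contrib.auth.authenticate
# dispatch to it when the class is listed in AUTHENTICATION_BACKENDS; the dotted
# path and the values below are assumptions, not this project's actual settings.
#
#   AUTHENTICATION_BACKENDS = ['gallery.oauth2backend.OAuth2Authentication']
#   OAUTH_PUBLIC_KEYS_URL = 'https://login.microsoftonline.com/<tenant>/discovery/keys'
#   OAUTH_TOKEN_AUDIENCE = '<audience-expected-in-the-token>'
def example_resolve_user(request):
    """Illustrative only: resolve the caller straight from the Authorization header."""
    return OAuth2Authentication().authenticate(request)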
| 42.238636
| 118
| 0.663438
|
90dc85de5cda62816f4c75fb4c0d66d248bd9cfa
| 898
|
py
|
Python
|
templates/proj_template.py
|
eric373/ml-py
|
23d5e5685c0e5a09fc9fc1dbe50572562ee4cb32
|
[
"MIT"
] | null | null | null |
templates/proj_template.py
|
eric373/ml-py
|
23d5e5685c0e5a09fc9fc1dbe50572562ee4cb32
|
[
"MIT"
] | null | null | null |
templates/proj_template.py
|
eric373/ml-py
|
23d5e5685c0e5a09fc9fc1dbe50572562ee4cb32
|
[
"MIT"
] | null | null | null |
# Python Project Template
# 1. Prepare Problem
# a) Load libraries
# b) Load dataset
# 2. Summarize Data
# a) Descriptive statistics
# - Note the scale of the attributes for normalizing
# - Note features/variables that are correlated to be removed
# b) Data visualizations
# - Consider standardizing
# - Note exponential/bi-modal distributions
# - Note skewed Gaussian distributions
# 3. Prepare Data
# a) Data Cleaning
# b) Feature Selection
# c) Data Transforms
# - Does transforming affect the algorithm's performance?
# 4. Evaluate Algorithms
# a) Split-out validation dataset
# b) Test options and evaluation metric
# c) Spot Check Algorithms
# d) Compare Algorithms
# 5. Improve Accuracy
# a) Algorithm Tuning
# b) Ensembles
# 6. Finalize Model
# a) Predictions on validation dataset
# b) Create standalone model on entire training dataset
# c) Save model for later use
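
# Minimal sketch of the template above (illustration only). Assumes pandas and
# scikit-learn are available and that a hypothetical "iris.csv" file with a
# "species" label column exists; every name below is an assumption rather than
# part of the original template.
import pickle

import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

# 1-2. Prepare problem and summarize data
df = pd.read_csv("iris.csv")          # hypothetical dataset file
print(df.describe())                  # descriptive statistics

# 3-4. Prepare data, split out a validation set, spot-check algorithms
X = df.drop(columns=["species"])      # "species" is the assumed label column
y = df["species"]
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=7)
for name, model in [("LR", LogisticRegression(max_iter=1000)), ("KNN", KNeighborsClassifier())]:
    pipe = Pipeline([("scale", StandardScaler()), ("model", model)])
    scores = cross_val_score(pipe, X_train, y_train, cv=10, scoring="accuracy")
    print(name, scores.mean())

# 5-6. Finalize: fit the chosen model on all training data, check it, save it
final = Pipeline([("scale", StandardScaler()), ("model", LogisticRegression(max_iter=1000))])
final.fit(X_train, y_train)
print("validation accuracy:", final.score(X_val, y_val))
with open("final_model.pkl", "wb") as f:
    pickle.dump(final, f)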
| 26.411765
| 64
| 0.739421
|
f863801d5bda8210b36af1f2685ad117a828af39
| 1,660
|
py
|
Python
|
wagtail_sb_material/models/branding.py
|
softbutterfly/softbutterfly-wagtail-materialize
|
8619166b5688d4a51b1a6e39a03e22661bc1a7ea
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail_sb_material/models/branding.py
|
softbutterfly/softbutterfly-wagtail-materialize
|
8619166b5688d4a51b1a6e39a03e22661bc1a7ea
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail_sb_material/models/branding.py
|
softbutterfly/softbutterfly-wagtail-materialize
|
8619166b5688d4a51b1a6e39a03e22661bc1a7ea
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- encoding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from wagtail.core.fields import StreamField
from wagtail.admin.edit_handlers import FieldPanel
from wagtail.admin.edit_handlers import MultiFieldPanel
from wagtail.admin.edit_handlers import StreamFieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.snippets.models import register_snippet
from taggit.managers import TaggableManager
from .base import HTMLAttributes
@register_snippet
class BrandLogo(models.Model):
name = models.CharField(
_("name"),
max_length=32,
)
attributes = StreamField(
HTMLAttributes(),
verbose_name=_("Attributes"),
blank=True,
)
image = models.ForeignKey(
'wagtailimages.Image',
verbose_name=_("image"),
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
text = models.CharField(
_("Text"),
max_length=255,
null=True,
blank=True,
)
tags = TaggableManager(blank=True)
panels = [
FieldPanel('name'),
MultiFieldPanel(
[
StreamFieldPanel('attributes'),
ImageChooserPanel('image'),
FieldPanel('text'),
],
_("Brand")
),
FieldPanel('tags'),
]
class Meta:
verbose_name = _("Brand logo")
verbose_name_plural = _("Brand logos")
def __str__(self):
return self.name or "Brand logo"
"""
<a id="logo-container" href="#" class="brand-logo">
Logo
</a>
"""
| 21.558442
| 58
| 0.618072
|
8497902a6caa723701ee40d225db35a8da987ff4
| 324
|
py
|
Python
|
array/643_maximum_average_subarray_i.py
|
shawlu95/Algorithm-Toolbox
|
b6c7b2228d8e70e0842e0bad607533a2c8322cf0
|
[
"MIT"
] | null | null | null |
array/643_maximum_average_subarray_i.py
|
shawlu95/Algorithm-Toolbox
|
b6c7b2228d8e70e0842e0bad607533a2c8322cf0
|
[
"MIT"
] | null | null | null |
array/643_maximum_average_subarray_i.py
|
shawlu95/Algorithm-Toolbox
|
b6c7b2228d8e70e0842e0bad607533a2c8322cf0
|
[
"MIT"
] | 2
|
2020-02-07T20:49:02.000Z
|
2020-02-11T06:01:55.000Z
|
from typing import List


class Solution:
def findMaxAverage(self, nums: List[int], k: int) -> float:
# sliding window, O(N) time, O(1) space
globMax = tempMax = sum(nums[:k])
for i in range(k, len(nums)):
tempMax += (nums[i] - nums[i-k])
globMax = max(tempMax, globMax)
return globMax / k
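
# Worked example (values from the classic problem statement; illustrative only):
# nums = [1, 12, -5, -6, 50, 3], k = 4 -> window sums 2, 51, 42, so the best
# average is 51 / 4 = 12.75.
#   print(Solution().findMaxAverage([1, 12, -5, -6, 50, 3], 4))  # 12.75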
| 36
| 63
| 0.546296
|
51dedae21277398cd8496854435176e789a6ca34
| 8,326
|
py
|
Python
|
HttpTriggerHumanPose/postprocessor.py
|
haru315/ovaas-backend-template
|
9672b30db537a1ab472ad8673e5f81f311f47376
|
[
"MIT"
] | 1
|
2021-05-12T06:56:47.000Z
|
2021-05-12T06:56:47.000Z
|
HttpTriggerHumanPose/postprocessor.py
|
haru315/ovaas-backend-template
|
9672b30db537a1ab472ad8673e5f81f311f47376
|
[
"MIT"
] | null | null | null |
HttpTriggerHumanPose/postprocessor.py
|
haru315/ovaas-backend-template
|
9672b30db537a1ab472ad8673e5f81f311f47376
|
[
"MIT"
] | 1
|
2022-02-08T02:25:40.000Z
|
2022-02-08T02:25:40.000Z
|
import cv2
import numpy as np
import math
from collections import defaultdict
from scipy.ndimage.filters import maximum_filter
import itertools
CocoPairs = [
(1, 2), (1, 5), (2, 3), (3, 4), (5, 6), (6, 7), (1, 8), (8, 9),
(9, 10), (1, 11), (11, 12), (12, 13), (1, 0), (0, 14), (14, 16),
(0, 15), (15, 17), (2, 16), (5, 17)] # len = 19
CocoPairsRender = CocoPairs[:-2]
CocoPairsNetwork = [
(12, 13), (20, 21), (14, 15), (16, 17), (22, 23), (24, 25), (0, 1),
(2, 3), (4, 5), (6, 7), (8, 9), (10, 11), (28, 29), (30, 31), (34, 35),
(32, 33), (36, 37), (18, 19), (26, 27)] # len = 19
CocoColors = [
[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0],
[85, 255, 0], [0, 255, 0], [0, 255, 85], [0, 255, 170], [0, 255, 255],
[0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], [170, 0, 255],
[255, 0, 255], [255, 0, 170], [255, 0, 85]]
NMS_Threshold = 0.1
InterMinAbove_Threshold = 6
Inter_Threashold = 0.1
Min_Subset_Cnt = 4
Min_Subset_Score = 0.5
Max_Human = 96
def non_max_suppression(heatmap, window_size=3, th=0.3):
heatmap[heatmap < th] = 0
part_th = heatmap*(heatmap == maximum_filter(heatmap,
footprint=np.ones((window_size, window_size))))
return part_th
def estimate_pose(heatMat, pafMat):
if heatMat.shape[2] == 19:
# transform from [height, width, n_parts] to [n_parts, height, width]
heatMat = np.rollaxis(heatMat, 2, 0)
if pafMat.shape[2] == 38:
# transform from [height, width, 2*n_pairs] to [2*n_pairs, height, width]
pafMat = np.rollaxis(pafMat, 2, 0)
_NMS_Threshold = max(np.average(heatMat) * 4.0, NMS_Threshold)
_NMS_Threshold = min(_NMS_Threshold, 0.3)
# _NMS_Threshold = 0.3
coords = [] # for each part index, it stores coordinates of candidates
for heatmap in heatMat[:-1]: # remove background
part_candidates = non_max_suppression(heatmap, 3, _NMS_Threshold)
coords.append(np.where(part_candidates >= _NMS_Threshold))
# all connections detected. no information about what humans they belong to
connection_all = []
for (idx1, idx2), (paf_x_idx, paf_y_idx) in zip(CocoPairs, CocoPairsNetwork):
connection = estimate_pose_pair(
coords, idx1, idx2, pafMat[paf_x_idx], pafMat[paf_y_idx])
connection_all.extend(connection)
conns_by_human = dict()
for idx, c in enumerate(connection_all):
# at first, all connections belong to different humans
conns_by_human['human_%d' % idx] = [c]
no_merge_cache = defaultdict(list)
empty_set = set()
while True:
is_merged = False
for h1, h2 in itertools.combinations(conns_by_human.keys(), 2):
if h1 == h2:
continue
if h2 in no_merge_cache[h1]:
continue
for c1, c2 in itertools.product(conns_by_human[h1], conns_by_human[h2]):
# if two humans share a part (same part idx and coordinates), merge those humans
if set(c1['uPartIdx']) & set(c2['uPartIdx']) != empty_set:
is_merged = True
                    # extend human1 connections with human2 connections
conns_by_human[h1].extend(conns_by_human[h2])
conns_by_human.pop(h2) # delete human2
break
if is_merged:
no_merge_cache.pop(h1, None)
break
else:
no_merge_cache[h1].append(h2)
if not is_merged: # if no more mergings are possible, then break
break
# reject by subset count
conns_by_human = {h: conns for (
h, conns) in conns_by_human.items() if len(conns) >= Min_Subset_Cnt}
# reject by subset max score
conns_by_human = {h: conns for (h, conns) in conns_by_human.items() if max(
[conn['score'] for conn in conns]) >= Min_Subset_Score}
# list of humans
humans = [human_conns_to_human_parts(
human_conns, heatMat) for human_conns in conns_by_human.values()]
return humans
def estimate_pose_pair(coords, partIdx1, partIdx2, pafMatX, pafMatY):
connection_temp = [] # all possible connections
peak_coord1, peak_coord2 = coords[partIdx1], coords[partIdx2]
for idx1, (y1, x1) in enumerate(zip(peak_coord1[0], peak_coord1[1])):
for idx2, (y2, x2) in enumerate(zip(peak_coord2[0], peak_coord2[1])):
score, count = get_score(x1, y1, x2, y2, pafMatX, pafMatY)
if (partIdx1, partIdx2) in [(2, 3), (3, 4), (5, 6), (6, 7)]: # arms
                if count < InterMinAbove_Threshold // 2 or score <= 0.0:  # arms are accepted with half the usual PAF support
continue
elif count < InterMinAbove_Threshold or score <= 0.0:
continue
connection_temp.append({
'score': score,
'coord_p1': (x1, y1),
'coord_p2': (x2, y2),
'idx': (idx1, idx2), # connection candidate identifier
'partIdx': (partIdx1, partIdx2),
'uPartIdx': ('{}-{}-{}'.format(x1, y1, partIdx1), '{}-{}-{}'.format(x2, y2, partIdx2))
})
connection = []
used_idx1, used_idx2 = [], []
# sort possible connections by score, from maximum to minimum
for conn_candidate in sorted(connection_temp, key=lambda x: x['score'], reverse=True):
# check not connected
if conn_candidate['idx'][0] in used_idx1 or conn_candidate['idx'][1] in used_idx2:
continue
connection.append(conn_candidate)
used_idx1.append(conn_candidate['idx'][0])
used_idx2.append(conn_candidate['idx'][1])
return connection
def get_score(x1, y1, x2, y2, pafMatX, pafMatY):
num_inter = 10
dx, dy = x2 - x1, y2 - y1
normVec = math.sqrt(dx ** 2 + dy ** 2)
if normVec < 1e-4:
return 0.0, 0
vx, vy = dx / normVec, dy / normVec
xs = np.arange(
x1, x2, dx / num_inter) if x1 != x2 else np.full((num_inter, ), x1)
ys = np.arange(
y1, y2, dy / num_inter) if y1 != y2 else np.full((num_inter, ), y1)
xs = (xs + 0.5).astype(np.int8)
ys = (ys + 0.5).astype(np.int8)
# without vectorization
pafXs = np.zeros(num_inter)
pafYs = np.zeros(num_inter)
for idx, (mx, my) in enumerate(zip(xs, ys)):
pafXs[idx] = pafMatX[my][mx]
pafYs[idx] = pafMatY[my][mx]
local_scores = pafXs * vx + pafYs * vy
thidxs = local_scores > Inter_Threashold
score = sum(local_scores * thidxs)
count = sum(thidxs)
return score, count
def human_conns_to_human_parts(human_conns, heatMat):
human_parts = defaultdict(lambda: None)
for conn in human_conns:
human_parts[conn['partIdx'][0]] = (
conn['partIdx'][0], # part index
(conn['coord_p1'][0] / heatMat.shape[2], conn['coord_p1']
[1] / heatMat.shape[1]), # relative coordinates
heatMat[conn['partIdx'][0], conn['coord_p1']
[1], conn['coord_p1'][0]] # score
)
human_parts[conn['partIdx'][1]] = (
conn['partIdx'][1],
(conn['coord_p2'][0] / heatMat.shape[2],
conn['coord_p2'][1] / heatMat.shape[1]),
heatMat[conn['partIdx'][1], conn['coord_p2']
[1], conn['coord_p2'][0]]
)
return human_parts
def draw_to_image(img, human_list):
img_copied = np.copy(img)
image_h, image_w = img_copied.shape[:2]
centers = {}
for human in human_list:
part_idxs = human.keys()
# draw point
for i in range(18):
if i not in part_idxs:
continue
part_coord = human[i][1]
center = (int(part_coord[0] * image_w + 0.5),
int(part_coord[1] * image_h + 0.5))
centers[i] = center
cv2.circle(img_copied, center, 2,
CocoColors[i], thickness=2, lineType=8, shift=0)
# draw line
for pair_order, pair in enumerate(CocoPairsRender):
if pair[0] not in part_idxs or pair[1] not in part_idxs:
continue
img_copied = cv2.line(
img_copied, centers[pair[0]], centers[pair[1]], CocoColors[pair_order], 2)
return img_copied
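
# Convenience wrapper added as a hedged usage sketch of the two entry points above.
def run_pose_postprocessing(heatmaps, pafs, frame):
    """Hedged usage sketch (not part of the original file): `heatmaps` and `pafs`
    are assumed to be the raw network outputs with shapes (H, W, 19) and
    (H, W, 38), as checked in estimate_pose; `frame` is the original image."""
    humans = estimate_pose(heatmaps, pafs)
    return draw_to_image(frame, humans)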
| 38.192661
| 102
| 0.573745
|
cefdc69e66931d94cf30af7343ce37366fe6f018
| 510
|
py
|
Python
|
examples/probabilistic_keypoint_estimation/demo_image.py
|
niqbal996/paz
|
f27205907367415d5b21f90e1a1d1d1ce598e889
|
[
"MIT"
] | 300
|
2020-10-29T08:02:05.000Z
|
2022-03-30T21:47:32.000Z
|
examples/probabilistic_keypoint_estimation/demo_image.py
|
albertofernandezvillan/paz
|
9fbd50b993f37e1e807297a29c6044c09967c9cc
|
[
"MIT"
] | 30
|
2020-10-29T12:40:32.000Z
|
2022-03-31T14:06:35.000Z
|
examples/probabilistic_keypoint_estimation/demo_image.py
|
albertofernandezvillan/paz
|
9fbd50b993f37e1e807297a29c6044c09967c9cc
|
[
"MIT"
] | 62
|
2020-10-29T12:34:13.000Z
|
2022-03-29T05:21:45.000Z
|
import argparse
from pipelines import DetectGMMKeypointNet2D
from paz.backend.image import show_image, load_image
description = 'Demo for visualizing uncertainty in probabilistic keypoints'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-p', '--path', type=str, help='Path to image')
args = parser.parse_args()
pipeline = DetectGMMKeypointNet2D()
image = load_image(args.path)
inferences = pipeline(image)
show_image(inferences['image'])
show_image(inferences['contours'][0])
| 34
| 75
| 0.801961
|
f96d1935f23c99f93af94b86210f1387289e0827
| 2,254
|
py
|
Python
|
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Aerodynamics/Common/Fidelity_Zero/Helper_Functions/wave_drag_lift.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Aerodynamics/Common/Fidelity_Zero/Helper_Functions/wave_drag_lift.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Aerodynamics/Common/Fidelity_Zero/Helper_Functions/wave_drag_lift.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
## @ingroup Methods-Aerodynamics-Common-Fidelity_Zero-Helper_Functions
# wave_drag_lift.py
#
# Created: Jun 2014, T. Macdonald
# Modified: Jul 2014, T. Macdonald
# Jan 2016, E. Botero
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import numpy as np
from SUAVE.Core import Data
# ----------------------------------------------------------------------
# Wave Drag Lift
# ----------------------------------------------------------------------
## @ingroup Methods-Aerodynamics-Common-Fidelity_Zero-Helper_Functions
def wave_drag_lift(conditions,configuration,wing):
"""Computes wave drag due to lift
Assumptions:
Simplified equations
Source:
http://adg.stanford.edu/aa241/drag/ssdragcalc.html
Inputs:
conditions.freestream.mach_number [Unitless]
conditions.aerodynamics.lift_coefficient [Unitless]
wing.total_length [m]
wing.areas.reference [m^2]
Outputs:
wave_drag_lift [Unitless]
Properties Used:
N/A
"""
# Unpack
freestream = conditions.freestream
total_length = wing.total_length
Sref = wing.areas.reference
# Conditions
Mc = freestream.mach_number * 1.0
# Length-wise aspect ratio
ARL = total_length**2/Sref
# Lift coefficient
if wing.vertical:
CL = np.zeros_like(conditions.aerodynamics.lift_coefficient)
else:
# get wing specific CL
CL = conditions.aerodynamics.lift_breakdown.inviscid_wings_lift[wing.tag]
# Computations
x = np.pi*ARL/4
beta = np.array([[0.0]] * len(Mc))
beta[Mc >= 1.05] = np.sqrt(Mc[Mc >= 1.05]**2-1)
wave_drag_lift = np.array([[0.0]] * len(Mc))
wave_drag_lift[Mc >= 1.05] = CL[Mc >= 1.05]**2*x/4*(np.sqrt(1+(beta[Mc >= 1.05]/x)**2)-1)
wave_drag_lift[0:len(Mc[Mc >= 1.05]),0] = wave_drag_lift[Mc >= 1.05]
# Dump data to conditions
wave_lift_result = Data(
reference_area = Sref ,
wave_drag_lift_coefficient = wave_drag_lift ,
length_AR = ARL,
)
return wave_drag_lift
| 29.657895
| 93
| 0.540816
|
aa0a19d906f14be18562487c1d4b256f795846a1
| 778
|
py
|
Python
|
katana/pedals/effects/rotary_1.py
|
leon3110l/katana_tsl_patch
|
a88cb35f524ba1941ccb4f94e0dcaa02df11fd18
|
[
"MIT"
] | null | null | null |
katana/pedals/effects/rotary_1.py
|
leon3110l/katana_tsl_patch
|
a88cb35f524ba1941ccb4f94e0dcaa02df11fd18
|
[
"MIT"
] | null | null | null |
katana/pedals/effects/rotary_1.py
|
leon3110l/katana_tsl_patch
|
a88cb35f524ba1941ccb4f94e0dcaa02df11fd18
|
[
"MIT"
] | null | null | null |
from enum import IntEnum
from .. import FXPedal, FXType
class RotaryType(IntEnum):
DEFAULT = 1
SLOW = 0
FAST = 1
class Rotary(FXPedal):
FX_TYPE = FXType.ROTARY_1
def __init__(self,
_type : RotaryType = RotaryType.DEFAULT,
depth: int = 60,
fall_time: int = 0,
level: int = 50,
rate_fast: int = 85,
rate_slow: int = 30,
rise_time: int = 0,
**kwargs
):
super().__init__('rotary')
self.speed_select = _type
self.depth = depth
self.fall_time = fall_time
self.level = level
self.rate_fast = rate_fast
self.rate_slow = rate_slow
self.rise_time = rise_time
| 24.3125
| 56
| 0.521851
|
0fddc094b067ef3ca62411b6ffe98700e8f9016b
| 440
|
py
|
Python
|
manual_publish.py
|
roschaefer/sMirror
|
5e0215dcf4fe9d1f55d821dcfe917602873f4f9e
|
[
"MIT"
] | 1
|
2018-02-12T17:39:07.000Z
|
2018-02-12T17:39:07.000Z
|
manual_publish.py
|
roschaefer/sMirror
|
5e0215dcf4fe9d1f55d821dcfe917602873f4f9e
|
[
"MIT"
] | 2
|
2018-01-05T23:47:20.000Z
|
2018-11-18T11:17:45.000Z
|
manual_publish.py
|
roschaefer/sMirror
|
5e0215dcf4fe9d1f55d821dcfe917602873f4f9e
|
[
"MIT"
] | 1
|
2018-01-05T23:36:47.000Z
|
2018-01-05T23:36:47.000Z
|
#!/usr/bin/python
import paho.mqtt.publish as publish
import argparse
parser = argparse.ArgumentParser(description='Publish a url to a display via mqtt')
parser.add_argument('url', help='the url to publish')
parser.add_argument('--host', default='smirrormaster.local')
parser.add_argument('--topic', default='slave')
args = parser.parse_args()
publish.single(args.topic, args.url, hostname=args.host, port=9001, transport="websockets")
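
# Example invocation (hypothetical URL; host/topic shown are the script's defaults):
#   python manual_publish.py http://example.com/dashboard --host smirrormaster.local --topic slave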
| 31.428571
| 91
| 0.765909
|
d93545966091953319a0dae6049d2b41467865c1
| 1,851
|
py
|
Python
|
upload_data.py
|
crazyhubox/RoomUse
|
a0b3b13d3f67ee9ec407d00e8224e6988a3cf5c7
|
[
"MIT"
] | 1
|
2021-05-10T11:55:43.000Z
|
2021-05-10T11:55:43.000Z
|
upload_data.py
|
crazyhubox/RoomUse
|
a0b3b13d3f67ee9ec407d00e8224e6988a3cf5c7
|
[
"MIT"
] | null | null | null |
upload_data.py
|
crazyhubox/RoomUse
|
a0b3b13d3f67ee9ec407d00e8224e6988a3cf5c7
|
[
"MIT"
] | null | null | null |
from redis import Redis
from json import loads
import os
# weekdays [1, 2, 3, 4, 5]
PRO_PATH = os.path.dirname(os.path.abspath(__file__))
def read_datas(file_path:str) -> dict:
with open(file_path,'r',encoding='utf-8') as f:
strings = f.read()
return loads(strings)
def generateor_data():
"""
    Tidies the scraped data and yields it for external callers to use.
    Yields:
        tuple -- the room info for every building, plus the corresponding weekday
"""
weekdays = ("1","2","3","4","5")
file_name = 'g.json'
for each_day in weekdays:
file_path = "{}/{}/{}".format(PRO_PATH,each_day,file_name)
room_info_each_day = read_datas(file_path)
yield room_info_each_day,each_day
def clean_list(data:list):
"""
    Tags each room's info list: "Red" (occupied) becomes 0 and "Green" (free) becomes 1,
    producing output such as A101/1100110000000
    Arguments:
        data {list} -- the per-period info of a single room
    Returns:
        str -- the formatted info of the room
"""
c_lsit = list(map(clean,data))
room = c_lsit[0]
room_info = '{}/{}'.format(room,''.join(c_lsit[1:]))
return room_info
def clean(x):
if x != "Red" and x != "Green":
return x
if x == "Red":
return "0"
return "1"
def upload_datas():
"""
Uploads the locally scraped data to the Redis database on the server.
Local data is read via generateor_data(), which is a generator.
"""
# rdb = Redis('127.0.0.1',db=1) # local
rdb = Redis('your host of redis-server',db=1,password='your redis password') # server
for datas,each_day in generateor_data():
data_info:list = datas['rooms_info']
for each_room in data_info:
build_num = each_room[0][0]
# print(build_num)
key_name = '{}{}'.format(each_day,build_num)
each_room_data = clean_list(each_room)
res = rdb.rpush(key_name,each_room_data)
print(res)
def main():
upload_datas()
if __name__ == '__main__':
main()
| 22.301205
| 90
| 0.596975
|
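For context, a hypothetical read-back of what upload_datas() stores: keys are the weekday digit followed by the building letter, and each list entry has the "room/flags" shape produced by clean_list(). The key '1A' and the localhost connection are assumptions for illustration only.

from redis import Redis

rdb = Redis('127.0.0.1', db=1)                 # assumed local instance
for entry in rdb.lrange('1A', 0, -1):          # hypothetical key: weekday 1, building A
    room, slots = entry.decode('utf-8').split('/')
    free_periods = [i for i, flag in enumerate(slots) if flag == '1']
    print(room, 'free periods:', free_periods)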
491a799a21412a2ff7722fab3d94ff4c00bf91a5
| 2,091
|
py
|
Python
|
hknweb/urls.py
|
NauqGnesh/hknweb
|
b4a358c1bff09ec92ba5b903ff7332db826271d4
|
[
"MIT"
] | null | null | null |
hknweb/urls.py
|
NauqGnesh/hknweb
|
b4a358c1bff09ec92ba5b903ff7332db826271d4
|
[
"MIT"
] | null | null | null |
hknweb/urls.py
|
NauqGnesh/hknweb
|
b4a358c1bff09ec92ba5b903ff7332db826271d4
|
[
"MIT"
] | null | null | null |
"""hknweb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from .shortlinks import views as viewsShortlink
from .views import landing
from .views import users
urlpatterns = [
path("admin/", admin.site.urls),
path("polls/", include("hknweb.polls.urls")),
path("accounts/", include("django.contrib.auth.urls")),
path("accounts/create/", users.account_create, name="account-create"),
path("accounts/settings/", users.account_settings, name="account-settings"),
path("accounts/activate/", users.activate),
path("about/", landing.about, name="about"),
path("events/", include("hknweb.events.urls")),
path("reviewsessions/", include("hknweb.reviewsessions.urls")),
path("exams/", include("hknweb.exams.urls")),
path("alumni/", include("hknweb.alumni.urls")),
path("tutoring/", include("hknweb.tutoring.urls")),
path("cand/", include("hknweb.candidate.urls")),
path("pages/", include("hknweb.markdown_pages.urls")),
path("markdownx/", include("markdownx.urls")),
path("elections/", include("hknweb.elections.urls")),
path("auth/", include("social_django.urls", namespace="social")),
path("", landing.home, name="home"),
path("<slug:temp>/", viewsShortlink.openLink),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 41.82
| 80
| 0.702056
|
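Following the recipe in the module docstring, a hypothetical extra route could be wired the same way; the path name and the reuse of landing.about are purely illustrative.

from django.urls import path
from hknweb.views import landing  # assumed absolute form of the relative import above

extra_patterns = [
    path("faq/", landing.about, name="faq"),  # illustrative only
]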
01d09f05f3f860448abe6581b9be3468f055d500
| 3,680
|
py
|
Python
|
schema/transform.py
|
basis-technology-corp/annotated-data-model
|
14d201a78f0e2bca2f08015216ead94f389acb14
|
[
"Apache-2.0"
] | 6
|
2016-03-08T15:26:15.000Z
|
2018-03-21T17:58:40.000Z
|
schema/transform.py
|
basis-technology-corp/annotated-data-model
|
14d201a78f0e2bca2f08015216ead94f389acb14
|
[
"Apache-2.0"
] | 19
|
2016-02-23T21:50:24.000Z
|
2020-10-02T20:16:59.000Z
|
schema/transform.py
|
basis-technology-corp/annotated-data-model
|
14d201a78f0e2bca2f08015216ead94f389acb14
|
[
"Apache-2.0"
] | 9
|
2016-02-05T14:55:12.000Z
|
2018-09-27T15:47:41.000Z
|
import json
# load language codes (languages.groovy)
with open("language_codes.json") as f:
language_codes = json.load(f)
with open("adm-schema-generated.json") as f:
schema = json.load(f)
transformed = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Annotated Text",
"type": "object",
"additionalProperties": False
}
props = {}
# data and documentMetadata are OK
props["data"] = schema["properties"]["data"]
props["documentMetadata"] = schema["properties"]["documentMetadata"]
# properties of "attributes"
ap = {}
for k, v in schema["properties"].items():
# ADM nodes are inside attributes; fix attributes later
if k not in ["data", "documentMetadata", "attributes"]:
# some attributes are singular names... please use field names..
if k in ["tokens", "sentences", "layoutRegions", "scriptRegions"]:
k = k[0:-1]
if k == "concepts":
itemType = "concept"
elif k == "events":
itemType = "event"
elif k == "keyphrases":
itemType = "keyphrase"
elif k == "sentimentResults":
itemType = "categorizerResults"
else:
itemType = k
# ListAttribute may have any k/v as metadata ("token" uses it)
ap[k] = {"type": "object", "additionalProperties": True,
"properties": {
"type": { "type": "string", "enum": ["list"] },
"itemType": { "type": "string", "enum": [itemType]},
"items": v}}
# "languageDetection" doesn't return ListAttribute, but just plain attribute.
ap["languageDetection"] = { "$ref": "#/definitions/LanguageDetection" }
# languageDetectionRegions returns ListAttribute of languageDetection
ap["languageDetectionRegions"]["properties"]["itemType"]["enum"] = ["languageDetection"]
ap["transliteration"] = {"type": "object", "additionalProperties": True,
"properties": {
"type": { "type": "string", "enum": ["transliteration"] },
"results": { "$ref": "#/definitions/Transliteration"}}}
# fix-up "attributes"
props["attributes"] = {"type": "object", "additionalProperties": False,
"properties": dict(sorted(ap.items())) }
# version is missing
props["version"] = {"type": "string"}
transformed["properties"] = props
# definitions are all good except language codes
definitions = schema["definitions"]
definitions["LanguageCode"] = {
"type": "string",
"enum": language_codes
}
# languages are in DetectionResult and TextDomain
langref = {"$ref": "#/definitions/LanguageCode"}
definitions["DetectionResult"]["properties"]["language"] = langref
definitions["TextDomain"]["properties"]["language"] = langref
# script
definitions["ISO15924"] = {
"type": "string",
"enum": definitions["DetectionResult"]["properties"]["script"]["enum"]
}
scriptref = {"$ref": "#/definitions/ISO15924"}
definitions["DetectionResult"]["properties"]["script"] = scriptref
definitions["TextDomain"]["properties"]["script"] = scriptref
definitions["ScriptRegion"]["properties"]["script"] = scriptref
# LanguageDetection is missing "type"
definitions["LanguageDetection"]["properties"]["type"] = {"type": "string"}
transformed["definitions"] = dict(sorted(definitions.items()))
with open("adm-schema.json", mode="w") as f1:
json.dump(transformed, f1, indent=2)
| 33.761468
| 92
| 0.584511
|
6b70c2883a7e6a683fa0978c7eb10aea569ed1bd
| 402
|
py
|
Python
|
parkinglot/migrations/0012_auto_20210328_1056.py
|
Amankori2307/Park-Here
|
bf30e721577ff6df3b761f8bbb1bfe085ffac14c
|
[
"MIT"
] | null | null | null |
parkinglot/migrations/0012_auto_20210328_1056.py
|
Amankori2307/Park-Here
|
bf30e721577ff6df3b761f8bbb1bfe085ffac14c
|
[
"MIT"
] | null | null | null |
parkinglot/migrations/0012_auto_20210328_1056.py
|
Amankori2307/Park-Here
|
bf30e721577ff6df3b761f8bbb1bfe085ffac14c
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-03-28 10:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('parkinglot', '0011_auto_20210327_2330'),
]
operations = [
migrations.AlterField(
model_name='parking',
name='entry_time',
field=models.DateTimeField(auto_now_add=True),
),
]
| 21.157895
| 58
| 0.614428
|
e7a38a1ff1b1bbcdf9a41e408d552e5dbd584d94
| 793
|
py
|
Python
|
motion_imitation/envs/env_wrappers/boundary_terminal_conditions.py
|
lauramsmith/fine-tuning-locomotion
|
583f1de43e91cdd24d632d783872528eb1337480
|
[
"Apache-2.0"
] | 41
|
2021-10-11T19:52:13.000Z
|
2022-03-21T12:42:31.000Z
|
motion_imitation/envs/env_wrappers/boundary_terminal_conditions.py
|
ywkim0606/fine-tuning-locomotion
|
96d7c81458511c0a7a11b59cf8c2c3fb8df8a64b
|
[
"Apache-2.0"
] | 6
|
2021-12-09T14:03:48.000Z
|
2022-03-28T11:11:01.000Z
|
motion_imitation/envs/env_wrappers/boundary_terminal_conditions.py
|
ywkim0606/fine-tuning-locomotion
|
96d7c81458511c0a7a11b59cf8c2c3fb8df8a64b
|
[
"Apache-2.0"
] | 8
|
2021-10-12T06:17:28.000Z
|
2022-03-10T21:32:05.000Z
|
"""Ends episode if robot is outside workspace bounds."""
class BoundaryTerminalCondition(object):
"""Ends episode if robot is outside workspace bounds."""
def __init__(self, x_space_m=5, y_space_m=5):
"""Constructor.
:param x_space_m: Length of workspace in meters.
:param y_space_m: Width of workspace in meters.
"""
self._x_bound = x_space_m / 2.0
self._y_bound = y_space_m / 2.0
def __call__(self, env):
x, y, _ = env.robot.GetBasePosition()
return abs(x) > self._x_bound or abs(y) > self._y_bound
class CircularBoundaryTerminalCondition(object):
def __init__(self, radius_m=2.5):
self._radius_squared = radius_m ** 2
def __call__(self, env):
x, y, _ = env.robot.GetBasePosition()
return x ** 2 + y ** 2 > self._radius_squared
| 27.344828
| 59
| 0.684741
|
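A small sketch exercising BoundaryTerminalCondition without the full robot stack; the fake env/robot classes are stand-ins and the import path is assumed from the file location shown above.

from motion_imitation.envs.env_wrappers.boundary_terminal_conditions import BoundaryTerminalCondition  # assumed path

class _FakeRobot:
    def GetBasePosition(self):
        return (3.1, 0.0, 0.3)       # x, y, z in metres

class _FakeEnv:
    robot = _FakeRobot()

terminal = BoundaryTerminalCondition(x_space_m=5, y_space_m=5)
print(terminal(_FakeEnv()))          # True: |x| = 3.1 exceeds the 2.5 m half-width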
525f66b785193a893f1326c55ced72440b3b9365
| 446
|
py
|
Python
|
insurance_backend/risk_types/views.py
|
tocilla/Insurance
|
f1f43101412e8a56a7615f961984d507ab0d0e06
|
[
"MIT"
] | null | null | null |
insurance_backend/risk_types/views.py
|
tocilla/Insurance
|
f1f43101412e8a56a7615f961984d507ab0d0e06
|
[
"MIT"
] | null | null | null |
insurance_backend/risk_types/views.py
|
tocilla/Insurance
|
f1f43101412e8a56a7615f961984d507ab0d0e06
|
[
"MIT"
] | null | null | null |
# Create your views here.
from rest_framework import mixins
from rest_framework.viewsets import GenericViewSet
from .models import RiskType
from .serializers import RiskTypeSerializer
class RiskTypeViewSet(mixins.ListModelMixin,
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
GenericViewSet):
queryset = RiskType.objects.all()
serializer_class = RiskTypeSerializer
| 31.857143
| 50
| 0.713004
|
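A conventional way to expose the viewset above is through a DRF router; this is a sketch, and the URL prefix and module path are assumptions.

from rest_framework.routers import DefaultRouter
from insurance_backend.risk_types.views import RiskTypeViewSet  # assumed module path

router = DefaultRouter()
router.register(r'risk-types', RiskTypeViewSet)  # prefix chosen for illustration
urlpatterns = router.urls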
d3f7ae25ac7229fa5eaf2c8e282a1799afcb97a8
| 27,748
|
py
|
Python
|
clients/python-legacy/generated/openapi_client/api_client.py
|
cliffano/jenkins-api-clients-generator
|
522d02b3a130a29471df5ec1d3d22c822b3d0813
|
[
"MIT"
] | null | null | null |
clients/python-legacy/generated/openapi_client/api_client.py
|
cliffano/jenkins-api-clients-generator
|
522d02b3a130a29471df5ec1d3d22c822b3d0813
|
[
"MIT"
] | null | null | null |
clients/python-legacy/generated/openapi_client/api_client.py
|
cliffano/jenkins-api-clients-generator
|
522d02b3a130a29471df5ec1d3d22c822b3d0813
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import atexit
import datetime
from dateutil.parser import parse
import json
import mimetypes
from multiprocessing.pool import ThreadPool
import os
import re
import tempfile
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import quote
from openapi_client.configuration import Configuration
import openapi_client.models
from openapi_client import rest
from openapi_client.exceptions import ApiValueError, ApiException
class ApiClient(object):
"""Generic API client for OpenAPI client library builds.
OpenAPI generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the OpenAPI
templates.
NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param configuration: .Configuration object for this client
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to
the API.
:param cookie: a cookie to include in the header when making calls
to the API
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long, # noqa: F821
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
_pool = None
def __init__(self, configuration=None, header_name=None, header_value=None,
cookie=None, pool_threads=1):
if configuration is None:
configuration = Configuration.get_default_copy()
self.configuration = configuration
self.pool_threads = pool_threads
self.rest_client = rest.RESTClientObject(configuration)
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'OpenAPI-Generator/1.0.0/python'
self.client_side_validation = configuration.client_side_validation
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
if self._pool:
self._pool.close()
self._pool.join()
self._pool = None
if hasattr(atexit, 'unregister'):
atexit.unregister(self.close)
@property
def pool(self):
"""Create thread pool on first request
avoids instantiating unused threadpool for blocking clients.
"""
if self._pool is None:
atexit.register(self.close)
self._pool = ThreadPool(self.pool_threads)
return self._pool
@property
def user_agent(self):
"""User agent for this API client"""
return self.default_headers['User-Agent']
@user_agent.setter
def user_agent(self, value):
self.default_headers['User-Agent'] = value
def set_default_header(self, header_name, header_value):
self.default_headers[header_name] = header_value
def __call_api(
self, resource_path, method, path_params=None,
query_params=None, header_params=None, body=None, post_params=None,
files=None, response_types_map=None, auth_settings=None,
_return_http_data_only=None, collection_formats=None,
_preload_content=True, _request_timeout=None, _host=None,
_request_auth=None):
config = self.configuration
# header parameters
header_params = header_params or {}
header_params.update(self.default_headers)
if self.cookie:
header_params['Cookie'] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
header_params = dict(self.parameters_to_tuples(header_params,
collection_formats))
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
path_params = self.parameters_to_tuples(path_params,
collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
resource_path = resource_path.replace(
'{%s}' % k,
quote(str(v), safe=config.safe_chars_for_path_param)
)
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = self.parameters_to_tuples(query_params,
collection_formats)
# post parameters
if post_params or files:
post_params = post_params if post_params else []
post_params = self.sanitize_for_serialization(post_params)
post_params = self.parameters_to_tuples(post_params,
collection_formats)
post_params.extend(self.files_parameters(files))
# auth setting
self.update_params_for_auth(
header_params, query_params, auth_settings,
request_auth=_request_auth)
# body
if body:
body = self.sanitize_for_serialization(body)
# request url
if _host is None:
url = self.configuration.host + resource_path
else:
# use server/host defined in path or operation instead
url = _host + resource_path
try:
# perform request and return response
response_data = self.request(
method, url, query_params=query_params, headers=header_params,
post_params=post_params, body=body,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
except ApiException as e:
e.body = e.body.decode('utf-8') if six.PY3 else e.body
raise e
self.last_response = response_data
return_data = response_data
if not _preload_content:
return return_data
response_type = response_types_map.get(response_data.status, None)
if six.PY3 and response_type not in ["file", "bytes"]:
match = None
content_type = response_data.getheader('content-type')
if content_type is not None:
match = re.search(r"charset=([a-zA-Z\-\d]+)[\s\;]?", content_type)
encoding = match.group(1) if match else "utf-8"
response_data.data = response_data.data.decode(encoding)
# deserialize response data
if response_type:
return_data = self.deserialize(response_data, response_type)
else:
return_data = None
if _return_http_data_only:
return (return_data)
else:
return (return_data, response_data.status,
response_data.getheaders())
def sanitize_for_serialization(self, obj):
"""Builds a JSON POST object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is OpenAPI model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
"""
if obj is None:
return None
elif isinstance(obj, self.PRIMITIVE_TYPES):
return obj
elif isinstance(obj, list):
return [self.sanitize_for_serialization(sub_obj)
for sub_obj in obj]
elif isinstance(obj, tuple):
return tuple(self.sanitize_for_serialization(sub_obj)
for sub_obj in obj)
elif isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
if isinstance(obj, dict):
obj_dict = obj
else:
# Convert model obj to dict except
# attributes `openapi_types`, `attribute_map`
# and attributes which value is not None.
# Convert attribute name to json key in
# model definition for request.
obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
for attr, _ in six.iteritems(obj.openapi_types)
if getattr(obj, attr) is not None}
return {key: self.sanitize_for_serialization(val)
for key, val in six.iteritems(obj_dict)}
def deserialize(self, response, response_type):
"""Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: class literal for
deserialized object, or string of class name.
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if response_type == "file":
return self.__deserialize_file(response)
# fetch data from response object
try:
data = json.loads(response.data)
except ValueError:
data = response.data
return self.__deserialize(data, response_type)
def __deserialize(self, data, klass):
"""Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if data is None:
return None
if type(klass) == str:
if klass.startswith('list['):
sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
return [self.__deserialize(sub_data, sub_kls)
for sub_data in data]
if klass.startswith('dict('):
sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
return {k: self.__deserialize(v, sub_kls)
for k, v in six.iteritems(data)}
# convert str to class
if klass in self.NATIVE_TYPES_MAPPING:
klass = self.NATIVE_TYPES_MAPPING[klass]
else:
klass = getattr(openapi_client.models, klass)
if klass in self.PRIMITIVE_TYPES:
return self.__deserialize_primitive(data, klass)
elif klass == object:
return self.__deserialize_object(data)
elif klass == datetime.date:
return self.__deserialize_date(data)
elif klass == datetime.datetime:
return self.__deserialize_datetime(data)
else:
return self.__deserialize_model(data, klass)
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_types_map=None, auth_settings=None,
async_req=None, _return_http_data_only=None,
collection_formats=None,_preload_content=True,
_request_timeout=None, _host=None, _request_auth=None):
"""Makes the HTTP request (synchronous) and returns deserialized data.
To make an async_req request, set the async_req parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_token: dict, optional
:return:
If async_req parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter async_req is False or missing,
then the method will return the response directly.
"""
if not async_req:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_types_map, auth_settings,
_return_http_data_only, collection_formats,
_preload_content, _request_timeout, _host,
_request_auth)
return self.pool.apply_async(self.__call_api, (resource_path,
method, path_params,
query_params,
header_params, body,
post_params, files,
response_types_map,
auth_settings,
_return_http_data_only,
collection_formats,
_preload_content,
_request_timeout,
_host, _request_auth))
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None):
"""Makes the HTTP request using RESTClient."""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
else:
raise ApiValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def parameters_to_tuples(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in six.iteritems(params) if isinstance(params, dict) else params: # noqa: E501
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params
def files_parameters(self, files=None):
"""Builds form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = []
if files:
for k, v in six.iteritems(files):
if not v:
continue
file_names = v if type(v) is list else [v]
for n in file_names:
with open(n, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = (mimetypes.guess_type(filename)[0] or
'application/octet-stream')
params.append(
tuple([k, tuple([filename, filedata, mimetype])]))
return params
def select_header_accept(self, accepts):
"""Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = [x.lower() for x in accepts]
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
def select_header_content_type(self, content_types, method=None, body=None):
"""Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:param method: http method (e.g. POST, PATCH).
:param body: http body to send.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return 'application/json'
content_types = [x.lower() for x in content_types]
if (method == 'PATCH' and
'application/json-patch+json' in content_types and
isinstance(body, list)):
return 'application/json-patch+json'
if 'application/json' in content_types or '*/*' in content_types:
return 'application/json'
else:
return content_types[0]
def update_params_for_auth(self, headers, queries, auth_settings,
request_auth=None):
"""Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param queries: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
:param request_auth: if set, the provided settings will
override the token in the configuration.
"""
if not auth_settings:
return
if request_auth:
self._apply_auth_params(headers, queries, request_auth)
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
self._apply_auth_params(headers, queries, auth_setting)
def _apply_auth_params(self, headers, queries, auth_setting):
"""Updates the request parameters based on a single auth_setting
:param headers: Header parameters dict to be updated.
:param queries: Query parameters tuple list to be updated.
:param auth_setting: auth settings for the endpoint
"""
if auth_setting['in'] == 'cookie':
headers['Cookie'] = auth_setting['value']
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
queries.append((auth_setting['key'], auth_setting['value']))
else:
raise ApiValueError(
'Authentication token must be in `query` or `header`'
)
def __deserialize_file(self, response):
"""Deserializes body to file
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
content_disposition).group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "wb") as f:
f.write(response.data)
return path
def __deserialize_primitive(self, data, klass):
"""Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool.
"""
try:
return klass(data)
except UnicodeEncodeError:
return six.text_type(data)
except TypeError:
return data
def __deserialize_object(self, value):
"""Return an original value.
:return: object.
"""
return value
def __deserialize_date(self, string):
"""Deserializes string to date.
:param string: str.
:return: date.
"""
try:
return parse(string).date()
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason="Failed to parse `{0}` as date object".format(string)
)
def __deserialize_datetime(self, string):
"""Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:return: datetime.
"""
try:
return parse(string)
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason=(
"Failed to parse `{0}` as datetime object"
.format(string)
)
)
def __deserialize_model(self, data, klass):
"""Deserializes list or dict to model.
:param data: dict, list.
:param klass: class literal.
:return: model object.
"""
has_discriminator = False
if (hasattr(klass, 'get_real_child_model')
and klass.discriminator_value_class_map):
has_discriminator = True
if not klass.openapi_types and has_discriminator is False:
return data
kwargs = {}
if (data is not None and
klass.openapi_types is not None and
isinstance(data, (list, dict))):
for attr, attr_type in six.iteritems(klass.openapi_types):
if klass.attribute_map[attr] in data:
value = data[klass.attribute_map[attr]]
kwargs[attr] = self.__deserialize(value, attr_type)
instance = klass(**kwargs)
if has_discriminator:
klass_name = instance.get_real_child_model(data)
if klass_name:
instance = self.__deserialize(data, klass_name)
return instance
| 39.527066
| 96
| 0.556833
|
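A minimal sketch of constructing the generated client directly, assuming the package exposes a Configuration class with a no-argument constructor and a .host attribute, as OpenAPI-Generator clients usually do; the base URL is a placeholder.

from openapi_client.configuration import Configuration
from openapi_client.api_client import ApiClient

config = Configuration()
config.host = "http://localhost:8080"        # placeholder Jenkins base URL
with ApiClient(configuration=config) as client:
    client.set_default_header("Accept", "application/json")
    print(client.user_agent)                 # 'OpenAPI-Generator/1.0.0/python'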