hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5d426a552dd2b03b06476c8e56b7d8789bd45763 | 4,252 | py | Python | tests/fhir_tests/test_picklejar.py | dnstone/fhirtordf | 2ec2ac2ea66798a39fa0c3f27c9803f69e7710d1 | [
"Apache-2.0"
] | null | null | null | tests/fhir_tests/test_picklejar.py | dnstone/fhirtordf | 2ec2ac2ea66798a39fa0c3f27c9803f69e7710d1 | [
"Apache-2.0"
] | null | null | null | tests/fhir_tests/test_picklejar.py | dnstone/fhirtordf | 2ec2ac2ea66798a39fa0c3f27c9803f69e7710d1 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the Mayo Clinic nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import os
from fhirtordf.fhir.picklejar import picklejar, picklejarfactory
from tests.utils.base_test_case import make_and_clear_directory
if __name__ == '__main__':
unittest.main()
| 37.964286 | 99 | 0.676152 | # Copyright (c) 2017, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the Mayo Clinic nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import os
from fhirtordf.fhir.picklejar import picklejar, picklejarfactory
from tests.utils.base_test_case import make_and_clear_directory
class TestObj:
pass
class PickleJarTestCase(unittest.TestCase):
current_cache_directory = None
@classmethod
def setUpClass(cls):
picklejar().clear()
cls.current_cache_directory = picklejarfactory.cache_directory
o = TestObj()
o.cls = "CLASS"
picklejar().add("cls", (1,), o)
@classmethod
def tearDownClass(cls):
picklejar().clear()
def setUp(self):
# Recover if there is an error when we're not working with the default
picklejarfactory.cache_directory = self.current_cache_directory
def test_pickled_file(self):
o = TestObj()
o.foo = 42
picklejar().add('o1', (1,), o)
o2 = picklejar().get('o1', (1,))
self.assertIsNotNone(o2)
self.assertEqual(42, o2.foo)
def test_singleton(self):
o = picklejar().get('cls', (1,))
self.assertIsNotNone(o)
self.assertEqual("CLASS", o.cls)
def test_cache_loc(self):
current_cache_directory = picklejarfactory.cache_directory
test_directory = os.path.join(os.path.split(os.path.abspath(__file__))[0], 'data')
picklejarfactory.cache_directory = os.path.abspath(os.path.join(test_directory, 'pjcache'))
make_and_clear_directory(picklejarfactory.cache_directory)
o1 = TestObj()
o1.foo = "bagels"
picklejar().add('o1', (2, 3), o1)
o2 = TestObj()
o2.foo = "cheese"
picklejar().add('o2', (2, 5), o2)
ot1 = picklejar().get('o1', (2, 3))
ot2 = picklejar().get('o2', (2, 5))
self.assertEqual("bagels", ot1.foo)
self.assertEqual("cheese", ot2.foo)
picklejar().clear()
picklejarfactory.cache_directory = current_cache_directory
ot1 = picklejar().get('o1', (2, 3))
ot2 = picklejar().get('o2', (2, 5))
self.assertIsNone(ot1)
self.assertIsNone(ot2)
self.assertIsNotNone(picklejar().get('cls', (1,)))
def test_no_cache(self):
current_cache_directory = picklejarfactory.cache_directory
picklejarfactory.cache_directory = None
o = picklejar().get('cls', (1,))
self.assertIsNone(o)
o1 = TestObj()
o1.foo = "bagels"
picklejar().add('o1', (2, 3), o1)
ot1 = picklejar().get('o1', (2, 3))
self.assertIsNone(ot1)
picklejarfactory.cache_directory = current_cache_directory
o = picklejar().get('cls', (1,))
self.assertEqual("CLASS", o.cls)
if __name__ == '__main__':
unittest.main()
| 2,170 | 282 | 46 |
37ba785d1dcf0efb21e6755298fd6ec36a75c92c | 1,614 | py | Python | src/image_attacks/im_main.py | tahleen-rahman/all2friends | 156ba257677def409661e8b68ccdfb1e896ba721 | [
"Apache-2.0"
] | null | null | null | src/image_attacks/im_main.py | tahleen-rahman/all2friends | 156ba257677def409661e8b68ccdfb1e896ba721 | [
"Apache-2.0"
] | 4 | 2021-06-08T21:47:39.000Z | 2022-03-12T00:35:39.000Z | src/image_attacks/im_main.py | tahleen-rahman/all2friends | 156ba257677def409661e8b68ccdfb1e896ba721 | [
"Apache-2.0"
] | null | null | null | # Created by rahman at 15:20 2020-03-05 using PyCharm
import subprocess
from image_attacks.im_utils import slice_files, combine_files, clean_trim, count_cats, make_features_counts, score
from shared_tools.utils import make_allPairs, classifiers, DATAPATH, city
def attack_images(cores, prob_cutoff):
"""
:param cores: how many cores to use for multiprocessing
:param prob_cutoff: user's image belongs to a certain category if the output of the last FC layer of the resnet model for the category > prob_cutoff
:return:
"""
mediaFile = "target_media"
slice_files(mediaFile, DATAPATH, cores)
subprocess.call(['./parallelize_im2proba.sh', cores,
city]) # downloads images and converts to embeddings, shell script calls im2proba.py
prob_file = combine_files(DATAPATH, cores)
clean_file = clean_trim(prob_cutoff, DATAPATH, prob_file)
counts_file = count_cats(DATAPATH, clean_file, countsFile="proba_cut_01_counts.csv" )
allPairs = make_allPairs("avg_pairs.csv", u_list_file=counts_file, DATAPATH=DATAPATH,
friendFile=city + ".target_friends", makeStrangers=True)
data_file = DATAPATH + "im_dataset.csv"
dataset = make_features_counts(DATAPATH, clean_file, data_file, counts_file,
allPairs)
score(dataset, name="mini-counts, cosine, entropy of max cat", classifiers=classifiers)
print ("Created image dataset at", data_file)
return data_file
if __name__ == '__main__':
data_file = attack_images(cores = 120, prob_cutoff = 0.05)
| 25.619048 | 153 | 0.7057 | # Created by rahman at 15:20 2020-03-05 using PyCharm
import subprocess
from image_attacks.im_utils import slice_files, combine_files, clean_trim, count_cats, make_features_counts, score
from shared_tools.utils import make_allPairs, classifiers, DATAPATH, city
def attack_images(cores, prob_cutoff):
"""
:param cores: how many cores to use for multiprocessing
:param prob_cutoff: user's image belongs to a certain category if the output of the last FC layer of the resnet model for the category > prob_cutoff
:return:
"""
mediaFile = "target_media"
slice_files(mediaFile, DATAPATH, cores)
subprocess.call(['./parallelize_im2proba.sh', cores,
city]) # downloads images and converts to embeddings, shell script calls im2proba.py
prob_file = combine_files(DATAPATH, cores)
clean_file = clean_trim(prob_cutoff, DATAPATH, prob_file)
counts_file = count_cats(DATAPATH, clean_file, countsFile="proba_cut_01_counts.csv" )
allPairs = make_allPairs("avg_pairs.csv", u_list_file=counts_file, DATAPATH=DATAPATH,
friendFile=city + ".target_friends", makeStrangers=True)
data_file = DATAPATH + "im_dataset.csv"
dataset = make_features_counts(DATAPATH, clean_file, data_file, counts_file,
allPairs)
score(dataset, name="mini-counts, cosine, entropy of max cat", classifiers=classifiers)
print ("Created image dataset at", data_file)
return data_file
if __name__ == '__main__':
data_file = attack_images(cores = 120, prob_cutoff = 0.05)
| 0 | 0 | 0 |
223a931e7e8f65f54729a2a3fd28e7671deed893 | 1,070 | py | Python | dr/backend/mujoco.py | jigangkim/domain_randomization | 07a309a9e824b5332219871abe8f0f657694b292 | [
"MIT"
] | null | null | null | dr/backend/mujoco.py | jigangkim/domain_randomization | 07a309a9e824b5332219871abe8f0f657694b292 | [
"MIT"
] | null | null | null | dr/backend/mujoco.py | jigangkim/domain_randomization | 07a309a9e824b5332219871abe8f0f657694b292 | [
"MIT"
] | null | null | null | import numpy as np
from dr.backend.base import Backend
| 26.097561 | 66 | 0.653271 | import numpy as np
from dr.backend.base import Backend
class MujocoBackend(Backend):
ENV_MAP = {
'Hopper': 'Hopper-v2',
'Cheetah': 'HalfCheetah-v2',
'Walker': 'Walker2d-v2',
}
def get_world(self, env):
return env.env.model
def get_masses(self, env):
return np.array(self.get_world(env).body_mass[1:])
def set_masses(self, env, masses):
self.get_world(env).body_mass[1:] = masses
def get_gravity(self, env):
return -self.get_world(env).opt.gravity[-1]
def set_gravity(self, env, g):
self.get_world(env).opt.gravity[2] = -g
def get_damping_coefficients(self, env):
return np.array(self.get_world(env).dof_damping[3:])
# raise NotImplementedError
def set_damping_coefficients(self, env, damping_coefficients):
self.get_world(env).dof_damping[3:] = damping_coefficients
# raise NotImplementedError
def get_collision_detector(self, env):
pass
def set_collision_detector(self, env, collision_detector):
pass
| 616 | 374 | 23 |
0ffc5834a2b713e166cfe5b56e3fdb43410997d9 | 570 | py | Python | Exercicios Python/ex028.py | ClaudioSiqueira/Exercicios-Python | 128387769b34b7d42aee5c1effda16de21216e10 | [
"MIT"
] | null | null | null | Exercicios Python/ex028.py | ClaudioSiqueira/Exercicios-Python | 128387769b34b7d42aee5c1effda16de21216e10 | [
"MIT"
] | null | null | null | Exercicios Python/ex028.py | ClaudioSiqueira/Exercicios-Python | 128387769b34b7d42aee5c1effda16de21216e10 | [
"MIT"
] | null | null | null | from random import randint
from time import sleep
computador = randint(1, 5)
print('\033[34m-==-\033[m'*17)
print('Vamos jogar um jogo, tente adivinhar o número que estou pensando !')
sleep(1)
jogador = int(input('Chute um valor de 1 até 5: '))
print('PROCESSANDO...')
sleep(1)
if jogador == computador:
print('\033[1;32mPARABÉNS, VOCÊ ACERTOU!!\033[m Eu realmente esta pensando no número {} !'.format(computador))
else:
print('\033[1;31mERROUUUU !!\033[m Eu estava pensando no número {} e não no número {}'.format(computador,jogador))
print('\033[34m-==-'*17) | 35.625 | 118 | 0.7 | from random import randint
from time import sleep
computador = randint(1, 5)
print('\033[34m-==-\033[m'*17)
print('Vamos jogar um jogo, tente adivinhar o número que estou pensando !')
sleep(1)
jogador = int(input('Chute um valor de 1 até 5: '))
print('PROCESSANDO...')
sleep(1)
if jogador == computador:
print('\033[1;32mPARABÉNS, VOCÊ ACERTOU!!\033[m Eu realmente esta pensando no número {} !'.format(computador))
else:
print('\033[1;31mERROUUUU !!\033[m Eu estava pensando no número {} e não no número {}'.format(computador,jogador))
print('\033[34m-==-'*17) | 0 | 0 | 0 |
baeefdfe853ae0bd15d1e1c0199aae78ab93acc6 | 6,372 | py | Python | go/billing/tasks.py | lynnUg/vumi-go | 852f906c46d5d26940bd6699f11488b73bbc3742 | [
"BSD-3-Clause"
] | null | null | null | go/billing/tasks.py | lynnUg/vumi-go | 852f906c46d5d26940bd6699f11488b73bbc3742 | [
"BSD-3-Clause"
] | null | null | null | go/billing/tasks.py | lynnUg/vumi-go | 852f906c46d5d26940bd6699f11488b73bbc3742 | [
"BSD-3-Clause"
] | null | null | null | from datetime import date
from dateutil.relativedelta import relativedelta
from celery.task import task, group
from django.db.models import Sum, Count
from go.billing import settings
from go.billing.models import (
Account, Transaction, MessageCost, Statement, LineItem)
from go.base.utils import vumi_api
@task()
def generate_monthly_statement(account_id, from_date, to_date):
"""Generate a new *Monthly* ``Statement`` for the given ``account``
between the given ``from_date`` and ``to_date``.
"""
account = Account.objects.get(id=account_id)
tagpools = get_tagpools(account)
statement = Statement(
account=account,
title=settings.MONTHLY_STATEMENT_TITLE,
type=Statement.TYPE_MONTHLY,
from_date=from_date,
to_date=to_date)
statement.save()
items = []
items.extend(make_message_items(account, statement, tagpools))
items.extend(make_session_items(account, statement, tagpools))
statement.lineitem_set.bulk_create(items)
return statement
@task()
def generate_monthly_account_statements():
"""Spawn sub-tasks to generate a *Monthly* ``Statement`` for accounts
without a *Monthly* statement.
"""
today = date.today()
last_month = today - relativedelta(months=1)
from_date = date(last_month.year, last_month.month, 1)
to_date = date(today.year, today.month, 1) - relativedelta(days=1)
account_list = Account.objects.exclude(
statement__type=Statement.TYPE_MONTHLY,
statement__from_date=from_date,
statement__to_date=to_date)
task_list = []
for account in account_list:
task_list.append(
generate_monthly_statement.s(account.id, from_date, to_date))
return group(task_list)()
| 29.775701 | 74 | 0.724419 | from datetime import date
from dateutil.relativedelta import relativedelta
from celery.task import task, group
from django.db.models import Sum, Count
from go.billing import settings
from go.billing.models import (
Account, Transaction, MessageCost, Statement, LineItem)
from go.base.utils import vumi_api
def get_message_transactions(account, from_date, to_date):
transactions = Transaction.objects.filter(
account_number=account.account_number,
created__gte=from_date,
created__lt=(to_date + relativedelta(days=1)))
transactions = transactions.values(
'tag_pool_name',
'tag_name',
'message_direction',
'message_cost',
'markup_percent')
transactions = transactions.annotate(
count=Count('id'), total_message_cost=Sum('message_cost'))
return transactions
def get_session_transactions(account, from_date, to_date):
transactions = Transaction.objects.filter(
account_number=account.account_number,
created__gte=from_date,
created__lt=(to_date + relativedelta(days=1)))
transactions = transactions.filter(session_created=True)
transactions = transactions.values(
'tag_pool_name',
'tag_name',
'session_cost',
'markup_percent')
transactions = transactions.annotate(
count=Count('id'), total_session_cost=Sum('session_cost'))
return transactions
def get_tagpools(account):
return vumi_api().get_user_api(account.account_number).tagpools()
def get_provider_name(transaction, tagpools):
return tagpools.display_name(transaction['tag_pool_name'])
def get_channel_name(transaction, tagpools):
return transaction['tag_name']
def get_message_cost(transaction):
return transaction['total_message_cost']
def get_session_cost(transaction):
return transaction['total_session_cost']
def get_count(transaction):
return transaction['count']
def get_message_unit_cost(transaction):
count = get_count(transaction)
# count should never be 0 since we count by id
return get_message_cost(transaction) / count
def get_session_unit_cost(transaction):
count = get_count(transaction)
# count should never be 0 since we count by id
return get_session_cost(transaction) / count
def get_message_credits(transaction):
cost = get_message_cost(transaction)
markup = transaction['markup_percent']
return MessageCost.calculate_message_credit_cost(cost, markup)
def get_session_credits(transaction):
cost = get_session_cost(transaction)
markup = transaction['markup_percent']
return MessageCost.calculate_session_credit_cost(cost, markup)
def get_channel_type(transaction, tagpools):
delivery_class = tagpools.delivery_class(transaction['tag_pool_name'])
return tagpools.delivery_class_name(delivery_class)
def get_message_description(transaction):
if transaction['message_direction'] == MessageCost.DIRECTION_INBOUND:
return 'Messages received'
else:
return 'Messages sent'
def make_message_item(statement, transaction, tagpools):
return LineItem(
units=get_count(transaction),
statement=statement,
cost=get_message_cost(transaction),
credits=get_message_credits(transaction),
channel=get_channel_name(transaction, tagpools),
billed_by=get_provider_name(transaction, tagpools),
unit_cost=get_message_unit_cost(transaction),
channel_type=get_channel_type(transaction, tagpools),
description=get_message_description(transaction))
def make_session_item(statement, transaction, tagpools):
return LineItem(
units=get_count(transaction),
statement=statement,
cost=get_session_cost(transaction),
credits=get_session_credits(transaction),
channel=get_channel_name(transaction, tagpools),
billed_by=get_provider_name(transaction, tagpools),
unit_cost=get_session_unit_cost(transaction),
channel_type=get_channel_type(transaction, tagpools),
description='Sessions')
def make_message_items(account, statement, tagpools):
transactions = get_message_transactions(
account, statement.from_date, statement.to_date)
return [
make_message_item(statement, transaction, tagpools)
for transaction in transactions]
def make_session_items(account, statement, tagpools):
transactions = get_session_transactions(
account, statement.from_date, statement.to_date)
return [
make_session_item(statement, transaction, tagpools)
for transaction in transactions]
def make_account_fee_item(account, statement):
return LineItem(
units=1,
statement=statement,
credits=None,
cost=settings.ACCOUNT_FEE,
billed_by='Vumi',
unit_cost=settings.ACCOUNT_FEE,
description='Account Fee')
@task()
def generate_monthly_statement(account_id, from_date, to_date):
"""Generate a new *Monthly* ``Statement`` for the given ``account``
between the given ``from_date`` and ``to_date``.
"""
account = Account.objects.get(id=account_id)
tagpools = get_tagpools(account)
statement = Statement(
account=account,
title=settings.MONTHLY_STATEMENT_TITLE,
type=Statement.TYPE_MONTHLY,
from_date=from_date,
to_date=to_date)
statement.save()
items = []
items.extend(make_message_items(account, statement, tagpools))
items.extend(make_session_items(account, statement, tagpools))
statement.lineitem_set.bulk_create(items)
return statement
@task()
def generate_monthly_account_statements():
"""Spawn sub-tasks to generate a *Monthly* ``Statement`` for accounts
without a *Monthly* statement.
"""
today = date.today()
last_month = today - relativedelta(months=1)
from_date = date(last_month.year, last_month.month, 1)
to_date = date(today.year, today.month, 1) - relativedelta(days=1)
account_list = Account.objects.exclude(
statement__type=Statement.TYPE_MONTHLY,
statement__from_date=from_date,
statement__to_date=to_date)
task_list = []
for account in account_list:
task_list.append(
generate_monthly_statement.s(account.id, from_date, to_date))
return group(task_list)()
| 4,145 | 0 | 437 |
9fb59721e031ca2d2ad94260365575fbf0c263fc | 69 | py | Python | tests/data/config/i_child.py | jinliwei1997/mmcv | f8d46df4a9fa32fb44d2e92a4ca5e7b26ee9cb79 | [
"Apache-2.0"
] | 3,748 | 2018-10-12T08:39:46.000Z | 2022-03-31T17:22:55.000Z | tests/data/config/i_child.py | jinliwei1997/mmcv | f8d46df4a9fa32fb44d2e92a4ca5e7b26ee9cb79 | [
"Apache-2.0"
] | 1,637 | 2018-10-12T06:06:18.000Z | 2022-03-31T02:20:53.000Z | tests/data/config/i_child.py | jinliwei1997/mmcv | f8d46df4a9fa32fb44d2e92a4ca5e7b26ee9cb79 | [
"Apache-2.0"
] | 1,234 | 2018-10-12T09:28:20.000Z | 2022-03-31T15:56:24.000Z | _base_ = './i_base.py'
item_cfg = {'b': 2}
item6 = {'cfg': item_cfg}
| 17.25 | 25 | 0.57971 | _base_ = './i_base.py'
item_cfg = {'b': 2}
item6 = {'cfg': item_cfg}
| 0 | 0 | 0 |
c5740ab7ab678719ba6d2355a1a59e6f3c6dc3bc | 17,820 | py | Python | python/oneflow/nn/graph/graph.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | null | null | null | python/oneflow/nn/graph/graph.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | null | null | null | python/oneflow/nn/graph/graph.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
from functools import partial
from typing import Dict
import oneflow._oneflow_internal
import oneflow.framework.c_api_util as c_api_util
import oneflow.framework.graph_build_util as graph_build_util
import oneflow.framework.session_context as session_ctx
from oneflow.framework.distribute import get_rank
from oneflow.framework.tensor import Tensor, TensorTuple
from oneflow.framework.multi_client_session import MultiClientSession
from oneflow.framework.tensor_tuple_util import convert_to_tensor_tuple
from oneflow.nn.graph.block import Block, BlockType
from oneflow.nn.graph.config import GraphConfig
from oneflow.nn.graph.optimizer import OptDict, VariableConfig
from oneflow.amp import GradScaler
from oneflow.nn.graph.util import add_indent, sys_exc_error_msg, list_to_func_return
from oneflow.nn.module import Module
from oneflow.nn.optimizer.optimizer import Optimizer
from oneflow.nn.optimizer.lr_scheduler import LrScheduler
| 36.367347 | 107 | 0.565825 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
from functools import partial
from typing import Dict
import oneflow._oneflow_internal
import oneflow.framework.c_api_util as c_api_util
import oneflow.framework.graph_build_util as graph_build_util
import oneflow.framework.session_context as session_ctx
from oneflow.framework.distribute import get_rank
from oneflow.framework.tensor import Tensor, TensorTuple
from oneflow.framework.multi_client_session import MultiClientSession
from oneflow.framework.tensor_tuple_util import convert_to_tensor_tuple
from oneflow.nn.graph.block import Block, BlockType
from oneflow.nn.graph.config import GraphConfig
from oneflow.nn.graph.optimizer import OptDict, VariableConfig
from oneflow.amp import GradScaler
from oneflow.nn.graph.util import add_indent, sys_exc_error_msg, list_to_func_return
from oneflow.nn.module import Module
from oneflow.nn.optimizer.optimizer import Optimizer
from oneflow.nn.optimizer.lr_scheduler import LrScheduler
class Graph(object):
_child_init_cnt = dict()
def __init__(self):
self._generate_name()
self.config = GraphConfig()
self._blocks = OrderedDict()
self._opts = []
self._grad_scaler = None
self._variables_conf = OrderedDict()
self._is_compiled = False
self._job_proto = None
self._args_repr = []
self._outs_repr = []
self._debug = False
self._c_nn_graph = oneflow._oneflow_internal.nn.graph.CNNGraph(self._name)
session = session_ctx.GetDefaultSession()
assert type(session) is MultiClientSession
session.TryInit()
session.AddCGraph(self._c_nn_graph)
@property
def name(self):
return self._name
@property
def training(self):
return self.config.training
@property
def _config_proto(self):
return self.config.proto
@property
def _optimization_conf_proto(self):
session = session_ctx.GetDefaultSession()
assert type(session) is MultiClientSession
return session.resource
@property
def _graph_proto(self):
return self._job_proto
def debug(self, mode: bool = True) -> None:
if get_rank() != 0:
return
else:
print("Note that nn.Graph.debug() only print debug info on rank 0.")
self._debug = mode
for name, block in self._blocks.items():
assert block.type == BlockType.MODULE
block.debug(mode)
def build(self, *args):
raise NotImplementedError()
def add_optimizer(
self, optim: Optimizer, *, lr_sch: LrScheduler = None,
):
opt_dict = dict()
assert optim is not None, "optimizer cannot be None"
assert isinstance(
optim, Optimizer
), "optimizer must be an instance of Optimizer"
opt_dict["optim"] = optim
if lr_sch is not None:
assert isinstance(lr_sch, LrScheduler)
assert (
lr_sch._optimizer is optim
), "lr_scheduler's optimizer must be the same optimizer in add_optimizer."
opt_dict["lr_sch"] = lr_sch
self._opts.append(opt_dict)
def set_grad_scaler(self, grad_scaler: GradScaler = None):
assert isinstance(grad_scaler, GradScaler)
self._grad_scaler = grad_scaler
def _generate_name(self):
child_name = self.__class__.__name__
if Graph._child_init_cnt.get(child_name) is None:
Graph._child_init_cnt[child_name] = 0
self._name = child_name + "_" + str(Graph._child_init_cnt[child_name])
Graph._child_init_cnt[child_name] += 1
def _state(self):
for _, b in self._blocks.items():
pa_gen = b.parameters(recurse=True)
for pa in pa_gen:
yield pa
bu_gen = b.buffers(recurse=True)
for bu in bu_gen:
yield bu
def _generate_config_proto(self):
self.config.proto.set_job_name(self._name)
if self._grad_scaler is not None:
self._grad_scaler.generate_conf_for_graph(
self.config.proto.mutable_train_conf()
)
if len(self._opts) > 0:
self.config._train(True)
for state_block in self._state():
if state_block.type == BlockType.PARAMETER:
self._variables_conf[state_block.origin] = VariableConfig(
state_block.name_prefix + state_block.name
)
for opt in self._opts:
opt_dict = OptDict(opt)
self.config._generate_optimizer_and_variable_configs(
opt_dict, self._variables_conf
)
def _compile(self, *args):
# Build forward graph
try:
if self._debug:
print(self._shallow_repr() + " start building forward graph.")
assert not self._is_compiled, (
"nn.Graph " + self._name + " has already been compiled."
)
eager_outputs = self._build_forward_graph(*args)
if self._debug:
print(self._shallow_repr() + " end building forward graph.")
except:
print(
"[ERROR]"
+ self._shallow_repr()
+ " build forward graph got error: "
+ sys_exc_error_msg()
)
raise
# Complie and init Runtime
try:
if self._debug:
print(self._shallow_repr() + " start compiling and init graph runtime.")
self._c_nn_graph.complie_and_init_runtime()
if self._debug:
print(self._shallow_repr() + " end compiling and init graph rumtime.")
except:
print(
"[ERROR]"
+ self._shallow_repr()
+ " compiling and initialing graph runtime got error : ",
sys_exc_error_msg(),
)
raise
self._is_compiled = True
return eager_outputs
def _build_forward_graph(self, *args):
session = session_ctx.GetDefaultSession()
assert type(session) is MultiClientSession
self._generate_config_proto()
with graph_build_util.graph_build_context(self.config.proto, session):
# Deal with inputs
arg_op_names, lazy_args, self._args_repr = self._build_io(
"input", graph_build_util.build_graph_input_arg, *args
)
# Deal with parameter and buffer
state_op_names, self._states_tensor_tuple = self._build_states()
# Deal with module in self.build(*args)
outputs = self.build(*lazy_args)
# Deal with outputs
if not (type(outputs) is tuple or type(outputs) is list):
if outputs is None:
outputs = ()
else:
outputs = (outputs,)
output_op_names, self._eager_outputs, self._outs_repr = self._build_io(
"output", graph_build_util.build_graph_output, *outputs
)
self._outputs_tensor_tuple = convert_to_tensor_tuple(
self._flatten_io("output", *self._eager_outputs)
)
self._eager_outputs = list_to_func_return(self._eager_outputs)
# Register input/output/variable to _c_nn_graph
self._c_nn_graph.register_input_op_names(arg_op_names)
self._c_nn_graph.register_output_op_names(output_op_names)
self._c_nn_graph.register_variable_op_names_and_tensors(
state_op_names, self._states_tensor_tuple
)
# Save job proto for debug
self._job_proto = c_api_util.GetCurrentJob()
return self._eager_outputs
def _run(self, *args):
try:
flattened_eager_args = self._flatten_io("input", *args)
# oneflow._oneflow_internal.eager.multi_client.Sync() NOTE(chengcheng): Need Sync?
oneflow._oneflow_internal.nn.graph.RunLazyNNGraph(
convert_to_tensor_tuple(flattened_eager_args),
self._outputs_tensor_tuple,
self._states_tensor_tuple,
self._c_nn_graph,
)
except:
print(
"[ERROR]"
+ self._shallow_repr()
+ " run got error : "
+ sys_exc_error_msg()
)
raise
return self._eager_outputs
def __call__(self, *args):
if not self._is_compiled:
self._compile(*args)
return self._run(*args)
def _build_io(self, io_type, build_func, *args):
    """Build lazy IO ops for ``args`` and collect their metadata.

    Args:
        io_type: ``"input"`` or ``"output"``.
        build_func: callback ``(name, tensor)`` that creates the lazy op
            and returns the lazy stand-in for the eager tensor.
        *args: eager Tensors, TensorTuples/lists of Tensors, or None.

    Returns:
        ``(op_names, build_args, args_repr)``: names of the created ops,
        lazy stand-ins mirroring the structure of ``args``, and repr
        strings used for logging/``__repr__``.
    """
    assert io_type in ("input", "output")
    io_type_upper = io_type.upper()  # NOTE(review): currently unused
    build_args = []
    op_names = []
    args_repr = []

    # Closure: appends into op_names/args_repr above as a side effect.
    def build_tensor_or_none(tensor, name, repr_str):
        assert tensor is None or (isinstance(tensor, Tensor))
        if isinstance(tensor, Tensor):
            build_arg = build_func(name, tensor)
            op_names.append(name)
        else:
            build_arg = None
        args_repr.append(repr_str)
        if self._debug:
            print(repr_str)
        return build_arg

    for idx, arg in enumerate(args):
        if isinstance(arg, Tensor) or arg is None:
            if arg is None:
                name, repr_str = self._io_item_check_and_gen(
                    arg, None, io_type, idx
                )
            else:
                name, repr_str = self._io_item_check_and_gen(
                    arg, Tensor, io_type, idx
                )
            build_args.append(build_tensor_or_none(arg, name, repr_str))
        elif isinstance(arg, (TensorTuple, list)):
            # Preserve the container type of the argument in the output.
            if isinstance(arg, TensorTuple):
                seq_args = TensorTuple()
            else:
                seq_args = list()
            for i in range(len(arg)):
                name, repr_str = self._io_item_check_and_gen(
                    arg[i], Tensor, io_type, idx, i
                )
                seq_args.append(build_tensor_or_none(arg[i], name, repr_str))
            build_args.append(seq_args)
        else:
            # Unsupported type: this call raises NotImplementedError.
            self._io_item_check_and_gen(arg, Tensor, io_type, idx)
    return op_names, build_args, args_repr
def _flatten_io(self, io_type, *args):
    """Flatten possibly nested IO arguments into a flat list of tensors."""
    assert isinstance(args, tuple)
    flat = []
    for pos, item in enumerate(args):
        if isinstance(item, Tensor):
            flat.append(item)
        elif isinstance(item, (TensorTuple, list)):
            for sub_pos in range(len(item)):
                sub_item = item[sub_pos]
                self._io_item_check(sub_item, Tensor, io_type, pos, sub_pos)
                flat.append(sub_item)
        else:
            # Anything else (e.g. a non-None non-tensor) fails the check.
            self._io_item_check(item, None, io_type, pos)
    return flat
def _io_item_check(self, item, expect_type, io_type, idx, second_idx=None):
    """Validate one IO item; raise NotImplementedError on a type mismatch."""
    ok_as_none = expect_type is None and item is None
    ok_as_typed = expect_type is not None and isinstance(item, expect_type)
    if ok_as_none or ok_as_typed:
        return
    assert io_type in ("input", "output")
    suffix = "" if second_idx is None else "_" + str(second_idx)
    name = "_" + self.name + "-" + io_type + "_" + str(idx) + suffix
    repr_str = (
        "[ERROR](" + io_type.upper() + ":" + name + ":" + str(type(item)) + ")"
    )
    print(repr_str)
    raise NotImplementedError(
        "nn.Graph.build()'s input/output only support types: Tensor/TensorTuple/list(Tensor)/None."
    )
def _io_item_check_and_gen(self, item, expect_type, io_type, idx, second_idx=None):
    """Validate one IO item and generate its ``(op_name, repr_str)`` pair.

    A ``None`` placeholder and non-Tensor containers are allowed but get a
    ``[WARNING]`` repr; unsupported types raise NotImplementedError.
    """
    assert io_type in ("input", "output")
    suffix = "" if second_idx is None else "_" + str(second_idx)
    name = "_" + self.name + "-" + io_type + "_" + str(idx) + suffix
    tag = io_type.upper()
    type_str = str(type(item))
    if expect_type is None and item is None:
        return name, "[WARNING](" + tag + ":" + name + ":" + type_str + ")"
    if expect_type is not None and isinstance(item, expect_type):
        if isinstance(item, Tensor):
            repr_str = "(" + tag + ":" + name + ":" + item._meta_repr() + ")"
        else:
            repr_str = "[WARNING](" + tag + ":" + name + ":" + type_str + ")"
        return name, repr_str
    repr_str = "[ERROR](" + tag + ":" + name + ":" + type_str + ")"
    print(repr_str)
    raise NotImplementedError(
        "nn.Graph.build()'s input/output only support types: Tensor/TensorTuple/list(Tensor)/None."
    )
def _build_states(self):
    """Collect graph state (parameters/buffers) and defer lazy-op creation.

    Walks ``self._state()`` blocks, records each state's op name and eager
    tensor, and installs a lazy-origin builder so the actual lazy variable
    op is created later during graph build.

    Returns:
        ``(state_op_names, state_tensor_tuple)``.
    """
    state_op_names = []
    state_tensors = []
    for state_block in self._state():
        op_name = state_block.name_prefix + state_block.name
        state_tensor = state_block.origin
        state_op_names.append(op_name)
        state_tensors.append(state_tensor)
        # Only parameters carry a per-variable config; buffers get None.
        if state_block.type == BlockType.PARAMETER:
            state_config = self._variables_conf[state_block.origin]
        else:
            state_config = None
        # Deferred: build_graph_state runs later with these bound args.
        state_block.set_lazy_origin_builder(
            partial(
                graph_build_util.build_graph_state,
                op_name,
                state_tensor,
                state_config,
            )
        )
    state_tensor_tuple = convert_to_tensor_tuple(state_tensors)
    return state_op_names, state_tensor_tuple
def _add_block(self, name: str, module: Module = None) -> None:
    r"""Adds a module to the current graph as a block.

    The block can be accessed as an attribute using the given name.

    Args:
        name (string): name of the child block. The child block can be
            accessed from this graph using the given name
        module (Module): child module to be added to the graph.

    Raises:
        TypeError: if ``module`` is neither a Module nor None, or ``name``
            is not a string.
        KeyError: if ``name`` clashes with an existing non-block attribute,
            contains a dot, or is empty.
    """
    if not isinstance(module, Module) and module is not None:
        raise TypeError("{} is not a Module subclass".format(type(module)))
    elif not isinstance(name, str):
        raise TypeError("module name should be a string. Got {}".format(type(name)))
    elif hasattr(self, name) and name not in self._blocks:
        raise KeyError("attribute '{}' already exists".format(name))
    elif "." in name:
        raise KeyError('module name can\'t contain ".", got: {}'.format(name))
    elif name == "":
        raise KeyError('module name can\'t be empty string ""')
    self._blocks[name] = Block("", name, module)
def __setattr__(self, name: str, value=None):
    """Intercept attribute assignment.

    Modules are registered as child Blocks; assigning an Optimizer
    directly is rejected (``add_optimizer`` must be used instead);
    everything else is stored as a plain attribute.
    """
    if isinstance(value, Module):
        self._add_block(name, value)
    elif isinstance(value, Optimizer):
        raise AttributeError(
            "'{}' object are not allowed to set Optimizer attribute named '{}', "
            "please use add_optimizer(...) instead.".format(
                type(self).__name__, name
            )
        )
    else:
        object.__setattr__(self, name, value)
def __getattr__(self, name: str):
    """Fallback attribute lookup: registered child blocks win.

    Only invoked when normal lookup fails; checks the block registry
    first, then the instance ``__dict__``.
    """
    if "_blocks" in self.__dict__:
        if name in self._blocks:
            return self._blocks[name]
    if name in self.__dict__:
        return self.__dict__[name]
    raise AttributeError(
        "'{}' object has no attribute '{}'".format(type(self).__name__, name)
    )
def __repr__(self):
    """Return a multi-line repr: inputs, child blocks, then outputs."""
    child_lines = []
    child_lines.extend(add_indent(in_str, 2) for in_str in self._args_repr)
    child_lines.extend(add_indent(repr(m), 2) for m in self._blocks.values())
    child_lines.extend(add_indent(out_str, 2) for out_str in self._outs_repr)
    main_str = self._shallow_repr() + ": ("
    if child_lines:
        main_str += "\n " + "\n ".join(child_lines) + "\n"
    main_str += ")"
    return main_str
def _shallow_repr(self):
    """Return the one-line summary ``(GRAPH:<name>:<class name>)``."""
    return "(GRAPH:{}:{})".format(self._name, type(self).__name__)
| 14,335 | 1,899 | 23 |
8a7c3ef46588513a00763c8fbd4bb9d87d5bd15a | 848 | py | Python | src/decoding/bin/decodePixelBased.py | WoutDavid/ST-nextflow-pipeline | 8de3da218ec4f10f183e1163fe782c19fd8dd841 | [
"MIT"
] | null | null | null | src/decoding/bin/decodePixelBased.py | WoutDavid/ST-nextflow-pipeline | 8de3da218ec4f10f183e1163fe782c19fd8dd841 | [
"MIT"
] | null | null | null | src/decoding/bin/decodePixelBased.py | WoutDavid/ST-nextflow-pipeline | 8de3da218ec4f10f183e1163fe782c19fd8dd841 | [
"MIT"
] | null | null | null | import sys
import csv  # NOTE(review): appears unused in this script
import re
from modules.pixelBasedDecoding import decodePixelBased
# Input parsing:
# needs to know up front how large the image is going to be
x_dim = int(sys.argv[1])
y_dim = int(sys.argv[2])
# Extract tile nr: first run of digits in the argument (e.g. "tile_3" -> 3)
tile_nr = sys.argv[3]
tile_nr_int = int(re.findall(r"\d+", tile_nr)[0])
codebook = sys.argv[4]
bit_len = int(sys.argv[5])
threshold = float(sys.argv[6])
# Prefix to be able to sort the images in the correct order
image_prefix= sys.argv[7]
# All remaining CLI arguments are image paths.
image_path_list = [sys.argv[i] for i in range(8, len(sys.argv))]
# Decode pixel-based; result is a DataFrame-like object (presumably pandas --
# confirm against modules.pixelBasedDecoding.decodePixelBased).
decoded_df = decodePixelBased(x_dim,y_dim, codebook, bit_len, image_path_list,image_prefix,threshold)
# Add an extra column with the tile number to the dataframe
decoded_df['Tile'] = [tile_nr_int for i in range(0,len(decoded_df))]
decoded_df.to_csv(f"decoded_{tile_nr}.csv", index=False)
| 27.354839 | 101 | 0.746462 | import sys
import csv  # NOTE(review): appears unused in this script
import re
from modules.pixelBasedDecoding import decodePixelBased
# Input parsing:
# needs to know up front how large the image is going to be
x_dim = int(sys.argv[1])
y_dim = int(sys.argv[2])
# Extract tile nr: first run of digits in the argument (e.g. "tile_3" -> 3)
tile_nr = sys.argv[3]
tile_nr_int = int(re.findall(r"\d+", tile_nr)[0])
codebook = sys.argv[4]
bit_len = int(sys.argv[5])
threshold = float(sys.argv[6])
# Prefix to be able to sort the images in the correct order
image_prefix= sys.argv[7]
# All remaining CLI arguments are image paths.
image_path_list = [sys.argv[i] for i in range(8, len(sys.argv))]
# Decode pixel-based; result is a DataFrame-like object (presumably pandas --
# confirm against modules.pixelBasedDecoding.decodePixelBased).
decoded_df = decodePixelBased(x_dim,y_dim, codebook, bit_len, image_path_list,image_prefix,threshold)
# Add an extra column with the tile number to the dataframe
decoded_df['Tile'] = [tile_nr_int for i in range(0,len(decoded_df))]
decoded_df.to_csv(f"decoded_{tile_nr}.csv", index=False)
| 0 | 0 | 0 |
cdbef3b5da47b0cf0e8e70e5d63b840baa5fdd6c | 1,112 | py | Python | book_club/book_club/book/migrations/0001_initial.py | Beshkov/Python-web-fundamentals | 6b0e9cc9725ea80a33c2ebde6e29f2ab585ab8d9 | [
"MIT"
] | null | null | null | book_club/book_club/book/migrations/0001_initial.py | Beshkov/Python-web-fundamentals | 6b0e9cc9725ea80a33c2ebde6e29f2ab585ab8d9 | [
"MIT"
] | null | null | null | book_club/book_club/book/migrations/0001_initial.py | Beshkov/Python-web-fundamentals | 6b0e9cc9725ea80a33c2ebde6e29f2ab585ab8d9 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.6 on 2021-08-08 21:52
from django.db import migrations, models
| 38.344828 | 161 | 0.560252 | # Generated by Django 3.2.6 on 2021-08-08 21:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the ``book`` app: creates the ``Book`` model."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=35)),
                ('author', models.CharField(max_length=30)),
                ('genre', models.CharField(choices=[('fiction', 'Fiction'), ('non-fiction', 'Non-fiction')], default='None', max_length=11)),
                ('length', models.CharField(max_length=15)),
                ('emotional_value', models.TextField(max_length=255)),
                ('review', models.TextField(max_length=1500)),
                ('mark', models.CharField(choices=[('1', 'dislike'), ('2', 'did not like'), ('3', 'neutral'), ('4', 'liked'), ('5', 'loved it')], max_length=1)),
                ('private', models.BooleanField(default=False)),
            ],
        ),
    ]
| 0 | 998 | 23 |
1bb4c5017f9be7bdefa32170d540c123ac7888b3 | 1,209 | py | Python | neural_heap/dataset/io_synthesis/io_synthesis.py | brandontrabucco/neural-heap | 57a4823370c8e6b6c38562ebf3ca11489441a94b | [
"MIT"
] | null | null | null | neural_heap/dataset/io_synthesis/io_synthesis.py | brandontrabucco/neural-heap | 57a4823370c8e6b6c38562ebf3ca11489441a94b | [
"MIT"
] | null | null | null | neural_heap/dataset/io_synthesis/io_synthesis.py | brandontrabucco/neural-heap | 57a4823370c8e6b6c38562ebf3ca11489441a94b | [
"MIT"
] | null | null | null | import os, sys
os.chdir("G:\\My Drive\\Academic\\Research\\Neural Heap")
from neural_heap.dataset.io_synthesis.io_synthesis_args import IOSynthesisArgs
from neural_heap.dataset.io_synthesis.io_synthesis_utils import IOSynthesisUtils
| 30.225 | 81 | 0.594706 | import os, sys
os.chdir("G:\\My Drive\\Academic\\Research\\Neural Heap")
from neural_heap.dataset.io_synthesis.io_synthesis_args import IOSynthesisArgs
from neural_heap.dataset.io_synthesis.io_synthesis_utils import IOSynthesisUtils
class IOSynthesis(object):
    """Facade that builds train/validation IO-synthesis datasets from args."""

    def __init__(
            self,
            parser=None):
        # Argument handling and dataset construction are delegated to helpers.
        self.io_synthesis_args = IOSynthesisArgs(parser=parser)
        self.io_synthesis_utils = IOSynthesisUtils()

    def get_train_dataset(
            self,
            args):
        """Build the training dataset from parsed ``args``."""
        return self.io_synthesis_utils.get_dataset(
            args.range,
            args.length,
            args.train_instances)

    def get_val_dataset(
            self,
            args):
        """Build the validation dataset from parsed ``args``."""
        return self.io_synthesis_utils.get_dataset(
            args.range,
            args.length,
            args.val_instances)

    def __call__(
            self,
            args=None):
        """Return ``(train_dataset, val_dataset)``.

        When ``args`` is None the configured parser parses ``sys.argv``.
        """
        if args is None:
            args = self.io_synthesis_args.get_parser().parse_args()
        train_dataset = self.get_train_dataset(
            args)
        val_dataset = self.get_val_dataset(
            args)
        return train_dataset, val_dataset
| 818 | 5 | 141 |
4f28caf061d0d3e8e16222acca82b34d0343937c | 2,589 | py | Python | SilverBash/Adam_Teal_Bash/algod_app.py | preciousM494/TEAL | 7c4b6ee8b50842c568db5c984b7896219304a71a | [
"Apache-2.0"
] | 3 | 2022-01-20T14:08:29.000Z | 2022-01-30T11:08:09.000Z | SilverBash/Adam_Teal_Bash/algod_app.py | preciousM494/TEAL | 7c4b6ee8b50842c568db5c984b7896219304a71a | [
"Apache-2.0"
] | 7 | 2022-01-09T03:14:46.000Z | 2022-01-27T22:51:51.000Z | SilverBash/Adam_Teal_Bash/algod_app.py | preciousM494/TEAL | 7c4b6ee8b50842c568db5c984b7896219304a71a | [
"Apache-2.0"
] | 26 | 2022-01-09T02:48:42.000Z | 2022-01-26T15:02:51.000Z | from pyteal import *
from algosdk.v2client import algod
from algosdk import account, mnemonic
import pyteal
# from deploy import PytealDeploy
# PureStake API credentials.
# NOTE(review): committing an API token to source control is a security
# risk -- move it to an environment variable and rotate the key.
algod_token = 'B3SU4KcVKi94Jap2VXkK83xx38bsv95K5UZm2lab'
algod_addres = "http://testnet-algorand.api.purestake.io/ps2"
purestack_token = {
    "X-Api-Key": algod_token
}
# Default template parameters for the periodic-payment escrow contract
# (see periodic_payment below for how each is used).
tmpl_fee = Int(1000)       # upper bound for Txn.fee()
tmpl_period = Int(50)      # payments only valid on rounds divisible by this
tmpl_dur = Int(5000)       # validity window: last_valid - first_valid
tmpl_lease = Bytes("base64", "023sdDE2")
tmpl_amt = Int(2000)       # amount paid per period
tmpl_rcv = Addr("6ZHGHH5Z5CTPCF5WCESXMGRSVK7QJETR63M3NY5FJCUYDHO57VTCMJOBGY")
tmpl_timeout = Int(30000)  # round at which the close-out branch is allowed
| 30.821429 | 110 | 0.65392 | from pyteal import *
from algosdk.v2client import algod
from algosdk import account, mnemonic
import pyteal
# from deploy import PytealDeploy
# PureStake API credentials.
# NOTE(review): committing an API token to source control is a security
# risk -- move it to an environment variable and rotate the key.
algod_token = 'B3SU4KcVKi94Jap2VXkK83xx38bsv95K5UZm2lab'
algod_addres = "http://testnet-algorand.api.purestake.io/ps2"
purestack_token = {
    "X-Api-Key": algod_token
}
# Default template parameters for the periodic-payment escrow contract
# (see periodic_payment below for how each is used).
tmpl_fee = Int(1000)       # upper bound for Txn.fee()
tmpl_period = Int(50)      # payments only valid on rounds divisible by this
tmpl_dur = Int(5000)       # validity window: last_valid - first_valid
tmpl_lease = Bytes("base64", "023sdDE2")
tmpl_amt = Int(2000)       # amount paid per period
tmpl_rcv = Addr("6ZHGHH5Z5CTPCF5WCESXMGRSVK7QJETR63M3NY5FJCUYDHO57VTCMJOBGY")
tmpl_timeout = Int(30000)  # round at which the close-out branch is allowed
def periodic_payment(
    tmpl_fee=tmpl_fee,
    tmpl_period=tmpl_period,
    tmpl_dur=tmpl_dur,
    tmpl_lease=tmpl_lease,
    tmpl_amt=tmpl_amt,
    tmpl_rcv=tmpl_rcv,
    tmpl_timeout=tmpl_timeout,
):
    """Build a periodic-payment escrow and return its compiled TEAL source.

    The approval logic allows either a recurring payment of ``tmpl_amt``
    to ``tmpl_rcv`` (only in windows starting on rounds divisible by
    ``tmpl_period``) or, at round ``tmpl_timeout``, closing the remaining
    balance out to the receiver.  Compiled in Signature mode, TEAL v2.
    """
    # Conditions common to every transaction signed by this escrow.
    periodic_pay_core = And(
        Txn.type_enum() == TxnType.Payment,
        Txn.fee() < tmpl_fee,
        Txn.first_valid() % tmpl_period == Int(0),
        Txn.last_valid() == tmpl_dur + Txn.first_valid(),
        Txn.lease() == tmpl_lease,
    )
    # Branch 1: the regular periodic transfer (no close-out, no rekey).
    periodic_pay_transfer = And(
        Txn.close_remainder_to() == Global.zero_address(),
        Txn.rekey_to() == Global.zero_address(),
        Txn.receiver() == tmpl_rcv,
        Txn.amount() == tmpl_amt,
    )
    # Branch 2: after the timeout, close the whole balance to the receiver.
    periodic_pay_close = And(
        Txn.close_remainder_to() == tmpl_rcv,
        Txn.rekey_to() == Global.zero_address(),
        Txn.receiver() == Global.zero_address(),
        Txn.first_valid() == tmpl_timeout,
        Txn.amount() == Int(0),
    )
    periodic_pay_escrow = periodic_pay_core.And(
        periodic_pay_transfer.Or(periodic_pay_close)
    )
    return compileTeal(periodic_pay_escrow, mode=Mode.Signature, version=2)
def run_algod(amount, rounds, time, account):
    """Render the periodic-payment contract to ``periodic.teal``.

    Args:
        amount: payment amount per period (int-like).
        rounds: period length, in rounds (int-like).
        time: validity duration; multiplied by 1000 below (the original
            comment says the input is in milliseconds -- TODO confirm the
            intended unit, since TEAL durations are measured in rounds).
        account: mapping with an ``'address'`` key for the receiver.
    """
    # NOTE(review): the client is constructed but only used by the
    # commented-out deployment code at the bottom of this function.
    algod_client = algod.AlgodClient(algod_token, algod_addres, headers=purestack_token)
    print('Writing contract to periodic.teal...')
    # Wrap the raw CLI values into PyTeal expression types.
    amount = Int(int(amount))
    time = Int(int(time) * 1000) # Multiply by 1000 because it is in milliseconds
    rounds = Int(int(rounds))
    address = Addr(account['address'])
    with open('periodic.teal','w') as f:
        program_teal = periodic_payment(tmpl_amt=amount, tmpl_dur=time, tmpl_period=rounds, tmpl_rcv=address)
        f.write(program_teal)
    print('Finished writing!')
    # deploy = PytealDeploy()
    # # compile program to binary
    # deploy.compile_program(algod_client, program_teal)
    # print("--------------------------")
    # print("Depoying application......")
    # app_id = deploy.create_app(algod_client)
a8494f2b5e36e61c41ba5193984ac5e32316d1a8 | 8,056 | py | Python | murano_tempest_tests/tests/api/application_catalog/test_repository.py | zhur0ng/murano-tempest-plugin | c70cda4dc7b8208252e9741a96acba9fb6a5c6e9 | [
"Apache-2.0"
] | 6 | 2017-10-31T10:37:17.000Z | 2019-01-28T22:05:05.000Z | murano_tempest_tests/tests/api/application_catalog/test_repository.py | zhur0ng/murano-tempest-plugin | c70cda4dc7b8208252e9741a96acba9fb6a5c6e9 | [
"Apache-2.0"
] | 1 | 2018-08-20T07:39:23.000Z | 2018-08-20T07:39:23.000Z | murano_tempest_tests/tests/api/application_catalog/test_repository.py | zhur0ng/murano-tempest-plugin | c70cda4dc7b8208252e9741a96acba9fb6a5c6e9 | [
"Apache-2.0"
] | 2 | 2018-01-11T05:08:35.000Z | 2018-08-20T07:32:33.000Z | # Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from tempest import config
from tempest.lib import decorators
from murano_tempest_tests.tests.api.application_catalog import base
from murano_tempest_tests import utils
CONF = config.CONF
| 36.288288 | 78 | 0.617925 | # Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from tempest import config
from tempest.lib import decorators
from murano_tempest_tests.tests.api.application_catalog import base
from murano_tempest_tests import utils
CONF = config.CONF
class TestRepositorySanity(base.BaseApplicationCatalogTest):
    """Smoke tests for the murano package repository (non-GLARE backend)."""

    @classmethod
    def resource_setup(cls):
        # These tests exercise the native repository backend only.
        if CONF.application_catalog.glare_backend:
            msg = ("Murano using GLARE backend. "
                   "Repository tests will be skipped.")
            raise cls.skipException(msg)
        super(TestRepositorySanity, cls).resource_setup()

    @decorators.attr(type='smoke')
    @decorators.idempotent_id('d0f3ad6c-70b4-4ce0-90c5-e7afb20ace80')
    def test_get_list_packages(self):
        package_list = self.application_catalog_client.get_list_packages()
        self.assertIsInstance(package_list, list)

    @decorators.attr(type='smoke')
    @decorators.idempotent_id('53f679d9-955f-4dc1-8cdc-1fcdcfbb07a5')
    def test_upload_and_delete_package(self):
        # Upload a freshly generated package, verify it is listed, delete
        # it, and verify it is gone.
        application_name = utils.generate_name('package_test_upload')
        abs_archive_path, dir_with_archive, archive_name = \
            utils.prepare_package(application_name)
        self.addCleanup(os.remove, abs_archive_path)
        package = self.application_catalog_client.upload_package(
            application_name, archive_name, dir_with_archive,
            {"categories": [], "tags": [], 'is_public': False})
        package_list = self.application_catalog_client.get_list_packages()
        self.assertIn(package['id'], {pkg['id'] for pkg in package_list})
        self.application_catalog_client.delete_package(package['id'])
        package_list = self.application_catalog_client.get_list_packages()
        self.assertNotIn(package['id'], {pkg['id'] for pkg in package_list})
class TestRepository(base.BaseApplicationCatalogIsolatedAdminTest):
    """Repository API tests sharing one package uploaded per test class."""

    @classmethod
    def resource_setup(cls):
        # These tests exercise the native repository backend only.
        if CONF.application_catalog.glare_backend:
            msg = ("Murano using GLARE backend. "
                   "Repository tests will be skipped.")
            raise cls.skipException(msg)
        super(TestRepository, cls).resource_setup()
        # Upload one shared package that the test methods operate on.
        application_name = utils.generate_name('test_repository_class')
        cls.abs_archive_path, dir_with_archive, archive_name = \
            utils.prepare_package(application_name)
        cls.package = cls.application_catalog_client.upload_package(
            application_name, archive_name, dir_with_archive,
            {"categories": [], "tags": [], 'is_public': False})

    @classmethod
    def resource_cleanup(cls):
        os.remove(cls.abs_archive_path)
        cls.application_catalog_client.delete_package(cls.package['id'])
        super(TestRepository, cls).resource_cleanup()

    @decorators.idempotent_id('5ea58ef1-1a63-403d-a57a-ef4423202993')
    def test_get_package(self):
        package = self.application_catalog_client.get_package(
            self.package['id'])
        self.assertEqual(self.package['tags'], package['tags'])

    @decorators.attr(type='smoke')
    @decorators.idempotent_id('daf5694d-abbf-4ab1-a6df-99540d0efc70')
    def test_update_package(self):
        # Exercise JSON-patch style updates: add/replace/remove tags, then
        # replace is_public, enabled, description and name.
        post_body = [
            {
                "op": "add",
                "path": "/tags",
                "value": ["im a test"]
            }
        ]
        result = self.application_catalog_client.update_package(
            self.package['id'], post_body)
        self.assertIn("im a test", result['tags'])
        post_body = [
            {
                "op": "replace",
                "path": "/tags",
                "value": ["im bad:D"]
            }
        ]
        result = self.application_catalog_client.update_package(
            self.package['id'], post_body)
        self.assertNotIn("im a test", result['tags'])
        self.assertIn("im bad:D", result['tags'])
        post_body = [
            {
                "op": "remove",
                "path": "/tags",
                "value": ["im bad:D"]
            }
        ]
        result = self.application_catalog_client.update_package(
            self.package['id'], post_body)
        self.assertNotIn("im bad:D", result['tags'])
        post_body = [
            {
                "op": "replace",
                "path": "/is_public",
                "value": True
            }
        ]
        result = self.application_catalog_client.update_package(
            self.package['id'], post_body)
        self.assertTrue(result['is_public'])
        post_body = [
            {
                "op": "replace",
                "path": "/enabled",
                "value": True
            }
        ]
        result = self.application_catalog_client.update_package(
            self.package['id'], post_body)
        self.assertTrue(result['enabled'])
        post_body = [
            {
                "op": "replace",
                "path": "/description",
                "value": "New description"
            }
        ]
        result = self.application_catalog_client.update_package(
            self.package['id'], post_body)
        self.assertEqual("New description", result['description'])
        post_body = [
            {
                "op": "replace",
                "path": "/name",
                "value": "New name"
            }
        ]
        result = self.application_catalog_client.update_package(
            self.package['id'], post_body)
        self.assertEqual("New name", result['name'])

    @decorators.idempotent_id('fe4711ba-d1ee-4291-8a48-f8efcbd480ab')
    def test_download_package(self):
        self.application_catalog_client.download_package(self.package['id'])

    @decorators.attr(type='smoke')
    @decorators.idempotent_id('9e55ae34-dea4-4db5-be4a-b3b793c9c4a7')
    def test_publicize_package(self):
        # Given a package that isn't public
        application_name = utils.generate_name('test_publicize_package')
        abs_archive_path, dir_with_archive, archive_name = \
            utils.prepare_package(application_name)
        self.addCleanup(os.remove, abs_archive_path)
        package = self.application_catalog_client.upload_package(
            application_name, archive_name, dir_with_archive,
            {"categories": [], "tags": [], 'is_public': False})
        self.addCleanup(self.application_catalog_client.delete_package,
                        package['id'])
        fetched_package = self.application_catalog_client.get_package(
            package['id'])
        self.assertFalse(fetched_package['is_public'])
        # When package is publicized
        post_body = [
            {
                "op": "replace",
                "path": "/is_public",
                "value": True
            }
        ]
        self.application_catalog_client.update_package(package['id'],
                                                       post_body)
        # Then package becomes public
        fetched_package = self.application_catalog_client.get_package(
            package['id'])
        self.assertTrue(fetched_package['is_public'])

    @decorators.idempotent_id('1c017c1b-9efc-4498-95ff-833a9ce565a0')
    def test_get_ui_definitions(self):
        self.application_catalog_client.get_ui_definition(self.package['id'])

    @decorators.idempotent_id('9f5ee28a-cec7-4d8b-a0fd-affbfceb0fc2')
    def test_get_logo(self):
        self.application_catalog_client.get_logo(self.package['id'])
| 6,060 | 1,133 | 46 |
ad1df4e9b490e6ef1278f8134c76f77a5ef9f80f | 1,210 | py | Python | setup.py | chaselgrove/hypothesis | 07a9c436208ec62b6d8b33c1cb4305a67baff8c6 | [
"BSD-2-Clause"
] | 6 | 2018-11-11T02:10:56.000Z | 2022-02-07T10:21:25.000Z | setup.py | chaselgrove/hypothesis | 07a9c436208ec62b6d8b33c1cb4305a67baff8c6 | [
"BSD-2-Clause"
] | 2 | 2018-08-10T02:32:29.000Z | 2018-08-21T15:34:13.000Z | setup.py | chaselgrove/hypothesis | 07a9c436208ec62b6d8b33c1cb4305a67baff8c6 | [
"BSD-2-Clause"
] | 2 | 2018-07-24T03:31:32.000Z | 2018-08-10T02:59:09.000Z | #!/usr/bin/python
# See file COPYING distributed with python-hypothesis for copyright and
# license.
from setuptools import setup
long_description = open('README.rst').read()
setup(name='python-hypothesis',
version='0.4.2',
description='Python library for the Hypothes.is API',
author='Christian Haselgrove',
author_email='christian.haselgrove@umassmed.edu',
url='https://github.com/chaselgrove/python-hypothesis',
packages=['h_annot'],
scripts=[],
install_requires=['requests',
'python-dateutil',
'six'],
classifiers=['Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries'],
license='BSD license',
long_description=long_description
)
# eof
| 34.571429 | 72 | 0.563636 | #!/usr/bin/python
# See file COPYING distributed with python-hypothesis for copyright and
# license.

from setuptools import setup

# Read the long description from the README.  A context manager closes the
# file handle deterministically (the bare open(...).read() leaked it), and
# an explicit encoding avoids locale-dependent decoding.
with open('README.rst', encoding='utf-8') as readme_file:
    long_description = readme_file.read()

setup(name='python-hypothesis',
      version='0.4.2',
      description='Python library for the Hypothes.is API',
      author='Christian Haselgrove',
      author_email='christian.haselgrove@umassmed.edu',
      url='https://github.com/chaselgrove/python-hypothesis',
      packages=['h_annot'],
      scripts=[],
      install_requires=['requests',
                        'python-dateutil',
                        'six'],
      classifiers=['Development Status :: 3 - Alpha',
                   'Environment :: Web Environment',
                   'Intended Audience :: Developers',
                   'License :: OSI Approved :: BSD License',
                   'Natural Language :: English',
                   'Operating System :: OS Independent',
                   'Programming Language :: Python',
                   'Topic :: Internet :: WWW/HTTP',
                   'Topic :: Software Development :: Libraries'],
      license='BSD license',
      long_description=long_description
      )

# eof
| 0 | 0 | 0 |
513206d0eb2312c92c22e659d7d284bcc14bff04 | 481 | py | Python | showminder/frontend/migrations/0008_auto_20180422_1431.py | chassing/showminder | 136ef3f1a41fa3aa45574770dd5751b0b7b9edd3 | [
"MIT"
] | 1 | 2017-01-26T09:12:55.000Z | 2017-01-26T09:12:55.000Z | showminder/frontend/migrations/0008_auto_20180422_1431.py | chassing/showminder | 136ef3f1a41fa3aa45574770dd5751b0b7b9edd3 | [
"MIT"
] | null | null | null | showminder/frontend/migrations/0008_auto_20180422_1431.py | chassing/showminder | 136ef3f1a41fa3aa45574770dd5751b0b7b9edd3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2018-04-22 14:31
from __future__ import unicode_literals
from django.db import migrations, models
| 22.904762 | 74 | 0.627859 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2018-04-22 14:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make ``TVShow.trailer_url`` optional and widen it to 2023 chars."""

    dependencies = [
        ('frontend', '0007_auto_20170505_1441'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tvshow',
            name='trailer_url',
            field=models.URLField(blank=True, max_length=2023, null=True),
        ),
    ]
| 0 | 302 | 23 |
4c494c9bae4f69ee732530ae8c366dc42ff79e88 | 2,028 | py | Python | python/oneflow/framework/docstr/index_select.py | felixhao28/oneflow | e558af6ef6c4ed90e4abc7bc1ba895f55795626d | [
"Apache-2.0"
] | null | null | null | python/oneflow/framework/docstr/index_select.py | felixhao28/oneflow | e558af6ef6c4ed90e4abc7bc1ba895f55795626d | [
"Apache-2.0"
] | null | null | null | python/oneflow/framework/docstr/index_select.py | felixhao28/oneflow | e558af6ef6c4ed90e4abc7bc1ba895f55795626d | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
# Attach the documentation to the C-implemented functional API
# ``oneflow.index_select`` (there is no Python wrapper to hold a docstring).
# The string literal below is runtime data and is kept verbatim.
add_docstr(
oneflow.index_select,
"""
input.index_select(dim, index) -> Tensor
The interface is consistent with PyTorch.
The documentation is referenced from: https://pytorch-cn.readthedocs.io/zh/latest/package_references/torch/#torchindex_select
Select values along an axis specified by `dim`.
:attr:`index` must be an Int32 Tensor with 1-D.
:attr:`dim` must be in the range of input Dimensions.
value of :attr:`index` must be in the range of the dim-th of input.
Note that ``input`` and ``index`` do not broadcast against each other.
Args:
input (Tensor): the source tensor
dim (int): the axis along which to index
index (Tensor): the 1-D tensor containing the indices to index
For example:
.. code-block:: python
>>> import oneflow as flow
>>> input = flow.tensor([[1,2,3],[4,5,6]], dtype=flow.int32)
>>> input
tensor([[1, 2, 3],
[4, 5, 6]], dtype=oneflow.int32)
>>> index = flow.tensor([0,1], dtype=flow.int32)
>>> output = flow.index_select(input, 1, index)
>>> output
tensor([[1, 2],
[4, 5]], dtype=oneflow.int32)
>>> output = input.index_select(1, index)
>>> output
tensor([[1, 2],
[4, 5]], dtype=oneflow.int32)
""",
)
| 34.372881 | 129 | 0.649408 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
# Attach the documentation to the C-implemented functional API
# ``oneflow.index_select`` (there is no Python wrapper to hold a docstring).
# The string literal below is runtime data and is kept verbatim.
add_docstr(
oneflow.index_select,
"""
input.index_select(dim, index) -> Tensor
The interface is consistent with PyTorch.
The documentation is referenced from: https://pytorch-cn.readthedocs.io/zh/latest/package_references/torch/#torchindex_select
Select values along an axis specified by `dim`.
:attr:`index` must be an Int32 Tensor with 1-D.
:attr:`dim` must be in the range of input Dimensions.
value of :attr:`index` must be in the range of the dim-th of input.
Note that ``input`` and ``index`` do not broadcast against each other.
Args:
input (Tensor): the source tensor
dim (int): the axis along which to index
index (Tensor): the 1-D tensor containing the indices to index
For example:
.. code-block:: python
>>> import oneflow as flow
>>> input = flow.tensor([[1,2,3],[4,5,6]], dtype=flow.int32)
>>> input
tensor([[1, 2, 3],
[4, 5, 6]], dtype=oneflow.int32)
>>> index = flow.tensor([0,1], dtype=flow.int32)
>>> output = flow.index_select(input, 1, index)
>>> output
tensor([[1, 2],
[4, 5]], dtype=oneflow.int32)
>>> output = input.index_select(1, index)
>>> output
tensor([[1, 2],
[4, 5]], dtype=oneflow.int32)
""",
)
| 0 | 0 | 0 |
66a99117c2241962a9cb4fee444ebd3e46796813 | 186 | py | Python | HomeWorks/Rostik/string.py | PhantomMind/FamousTeamDream | 1268950a2f18236f0fda364a5484b478280f39f7 | [
"Apache-2.0"
] | null | null | null | HomeWorks/Rostik/string.py | PhantomMind/FamousTeamDream | 1268950a2f18236f0fda364a5484b478280f39f7 | [
"Apache-2.0"
] | null | null | null | HomeWorks/Rostik/string.py | PhantomMind/FamousTeamDream | 1268950a2f18236f0fda364a5484b478280f39f7 | [
"Apache-2.0"
] | null | null | null | someList = ['470 градусов по фаренгейту', 'Сталкер', 'Наруто', 'Форсаж', 'Оно', 'Оно2', 'Смешарики', 'Лунтик']
print(someList)
someList.append('frdfvty')
del someList[8]
print(someList)
| 31 | 110 | 0.704301 | someList = ['470 градусов по фаренгейту', 'Сталкер', 'Наруто', 'Форсаж', 'Оно', 'Оно2', 'Смешарики', 'Лунтик']
print(someList)
someList.append('frdfvty')
# pop() removes the just-appended last element regardless of list length;
# the original hard-coded ``del someList[8]`` only worked because the list
# happened to contain exactly 8 items before the append.
someList.pop()
print(someList)
| 0 | 0 | 0 |
50fda266a3d1133cdac7672b09442bedfca50aed | 3,136 | py | Python | scraper/modules/pgte_patreon.py | iburinoc/kindle_scraper | 0929b0aaa0ce47cbdeb845bb7724fb5390ca2e3e | [
"MIT"
] | null | null | null | scraper/modules/pgte_patreon.py | iburinoc/kindle_scraper | 0929b0aaa0ce47cbdeb845bb7724fb5390ca2e3e | [
"MIT"
] | 3 | 2020-06-06T16:16:30.000Z | 2021-04-30T20:47:07.000Z | scraper/modules/pgte_patreon.py | iburinoc/kindle_scraper | 0929b0aaa0ce47cbdeb845bb7724fb5390ca2e3e | [
"MIT"
] | 2 | 2019-06-22T22:40:04.000Z | 2020-06-04T22:28:32.000Z | import asyncio
from aiogoogle import Aiogoogle, GoogleAPI
from aiogoogle.auth.creds import ClientCreds, UserCreds
import base64
import bs4
import json
import scraper.util
import scraper.flags
AUTHOR = "Erratic Errata"
| 29.308411 | 99 | 0.639031 | import asyncio
from aiogoogle import Aiogoogle, GoogleAPI
from aiogoogle.auth.creds import ClientCreds, UserCreds
import base64
import bs4
import json
import scraper.util
import scraper.flags
AUTHOR = "Erratic Errata"
def _get_user_creds(creds):
    """Translate google-auth user credentials into an aiogoogle UserCreds."""
    fields = {
        "access_token": creds.token,
        "refresh_token": creds.refresh_token,
        "expires_at": creds.expiry.isoformat(),
        "scopes": creds.scopes,
    }
    return UserCreds(**fields)
def _get_client_creds(creds):
    """Translate google-auth client credentials into an aiogoogle ClientCreds."""
    return ClientCreds(
        client_id=creds.client_id,
        client_secret=creds.client_secret,
        scopes=creds.scopes,
    )
async def _get_email_ids(session, gmail):
    """Return the Gmail message ids of recent chapter-share notification emails.

    Pages through users.messages.list until no nextPageToken remains.
    """
    def _get_query():
        # Gmail search syntax: subject filter plus a two-week recency window.
        queries = [
            'subject:"ErraticErrata just shared"',
            "newer_than:14d",
        ]
        return " ".join(queries)
    messages = []
    pageToken = None
    while True:
        response = await session.as_user(
            gmail.users.messages.list(userId="me", q=_get_query(), pageToken=pageToken)
        )
        # A page with no "messages" key is an empty result set.
        if "messages" not in response:
            break
        messages.extend(response["messages"])
        # Absence of nextPageToken signals the final page.
        if "nextPageToken" not in response:
            break
        pageToken = response["nextPageToken"]
    return [message["id"] for message in messages]
async def _get_email(session, gmail, key):
    """Fetch one Gmail message and extract a (title, article_html, timestamp) tuple.

    NOTE(review): the scraping below depends on the sender's email layout
    (title = sole single-child 'dark' span, article = second top-level div
    three parents up) — fragile if the email template changes.
    """
    response = await session.as_user(gmail.users.messages.get(userId="me", id=key))
    ts = int(response["internalDate"])  # only compared/ordered downstream, so units don't matter here
    subject = [
        hdr["value"]
        for hdr in response["payload"]["headers"]
        if hdr["name"] == "Subject"
    ][0]
    # part 1 is the html one
    b64 = response["payload"]["parts"][1]["body"]["data"]
    html = base64.urlsafe_b64decode(b64).decode("utf-8")
    soup = bs4.BeautifulSoup(html, features="lxml")
    title_node = [ x for x in soup.find_all('span', color='dark') if len(list(x.children)) == 1][0]
    title = title_node.contents[0]
    article = list(title_node.parent.parent.parent.find_all('div', recursive=False))[1]
    article = str(article.decode_contents())
    return (title, article, ts)
async def _get_after(creds, gmail, timestamp):
    """Return (subject, html, ts) tuples for emails strictly newer than `timestamp`."""
    async with Aiogoogle(
        user_creds=_get_user_creds(creds), client_creds=_get_client_creds(creds)
    ) as session:
        print(f"Getting emails")
        ids = await _get_email_ids(session, gmail)
        print(f"Found {len(ids)} ids")
        # Fetch every message concurrently.
        emails = await asyncio.gather(*(_get_email(session, gmail, key) for key in ids))
        print(f"Scraped {len(emails)} emails")
        # Keep only messages newer than the caller's watermark (tuple slot 2 = ts).
        filtered = [email for email in emails if email[2] > timestamp]
        print(f"Found {len(filtered)} new emails")
        return filtered
def scrape(state, creds):
    """Entry point: fetch chapter emails newer than state['timestamp'].

    Returns a (chapters, new_state) tuple where chapters is a list of
    (subject, formatted_html) pairs and new_state carries the newest
    internalDate seen, so the next run only picks up later emails.
    """
    timestamp = state.get("timestamp", 0)
    # Fix: close the discovery-document handle deterministically (the
    # original `open(...).read()` leaked it until garbage collection).
    with open(scraper.flags.get_flags().gmail_discovery, 'rb') as handle:
        discovery = json.loads(handle.read())
    gmail = GoogleAPI(discovery)
    emails = asyncio.run(_get_after(creds, gmail, timestamp))
    return (
        [
            (subject, scraper.util.format_chapter(subject, html, AUTHOR))
            for (subject, html, _) in emails
        ],
        # The 0 and prior-timestamp arguments guard max() against an empty
        # result set.
        {"timestamp": max(0, timestamp, *[ts for (_, _, ts) in emails])},
    )
| 2,778 | 0 | 138 |
d223692d0a1aa94547a58cfe2d937308586051ab | 97 | py | Python | invokers/python/tests/functions/http/default/func.py | andrew-su/function-buildpacks-for-knative | dcb9a8c1e07a6288dbc096e2f5270eb5e16a625a | [
"BSD-2-Clause"
] | 19 | 2021-11-03T15:02:24.000Z | 2022-03-23T04:33:56.000Z | invokers/python/tests/functions/http/default/func.py | andrew-su/function-buildpacks-for-knative | dcb9a8c1e07a6288dbc096e2f5270eb5e16a625a | [
"BSD-2-Clause"
] | 36 | 2021-11-05T14:33:37.000Z | 2022-03-24T20:13:40.000Z | invokers/python/tests/functions/http/default/func.py | andrew-su/function-buildpacks-for-knative | dcb9a8c1e07a6288dbc096e2f5270eb5e16a625a | [
"BSD-2-Clause"
] | 4 | 2021-11-16T08:27:58.000Z | 2022-02-03T02:58:24.000Z | # Copyright 2021-2022 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause
| 16.166667 | 39 | 0.701031 | # Copyright 2021-2022 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause
def main():
    """No-op function body; returns None, exactly like the original `pass`."""
    return None
| -1 | 0 | 23 |
48e14c33ef3c486ff575304d354ffdceab4264fd | 5,193 | py | Python | birds/test_storage.py | pitkali/birds-api | 2c37995898abe1bbe0dab3307af11bb508ec4cca | [
"Apache-2.0"
] | null | null | null | birds/test_storage.py | pitkali/birds-api | 2c37995898abe1bbe0dab3307af11bb508ec4cca | [
"Apache-2.0"
] | null | null | null | birds/test_storage.py | pitkali/birds-api | 2c37995898abe1bbe0dab3307af11bb508ec4cca | [
"Apache-2.0"
] | null | null | null | """Storage engine sanity tests"""
from pymongo import MongoClient
import copy
import unittest
import time
from . import storage
ITEM_VISIBLE = {"key" : "valueA", storage.VISIBLE_KEY : True}
ITEM_HIDDEN = {"key" : "valueB"}
def is_same_dictionary(a, b):
    """Shallow dictionary comparison"""
    # Equal-sized key sets whose intersection covers all of b's keys
    # means the two dictionaries have exactly the same keys.
    keysA = set(a.keys())
    keysB = set(b.keys())
    sharedKeys = keysA & keysB
    if len(keysA) != len(keysB) or len(sharedKeys) != len(keysB):
        return False
    # Keys match; compare values pairwise (shallow: nested values via !=).
    for k, v in a.items():
        if b[k] != v:
            return False
    return True
class StorageTest(unittest.TestCase):
"""Tests the storage engine implementation for sanity."""
def list(self):
"""Wraps storage listing returning list."""
return list(self.storage.list())
MONGO_TEST_COLLECTION = "storage_test"
# Override test loading to skip test from storage test base class
# without skipping them in the subclasses.
| 31.472727 | 79 | 0.651261 | """Storage engine sanity tests"""
from pymongo import MongoClient
import copy
import unittest
import time
from . import storage
ITEM_VISIBLE = {"key" : "valueA", storage.VISIBLE_KEY : True}
ITEM_HIDDEN = {"key" : "valueB"}
def is_same_dictionary(a, b):
    """Shallow dictionary comparison: same key set, equal value per key."""
    # Key sets must match exactly before any values are compared.
    if set(a.keys()) != set(b.keys()):
        return False
    # Every value in `a` must compare equal to its counterpart in `b`.
    return all(b[key] == value for key, value in a.items())
class ComparisonTest(unittest.TestCase):
    """Checks is_same_dictionary against equal, look-alike, and differing dicts."""
    def test_same(self):
        # Identity: a dictionary always matches itself.
        self.assertTrue(is_same_dictionary(ITEM_VISIBLE, ITEM_VISIBLE))
        self.assertTrue(is_same_dictionary(ITEM_HIDDEN, ITEM_HIDDEN))
    def test_looks_same(self):
        # Distinct objects with identical contents still compare equal.
        self.assertTrue(is_same_dictionary({"key": "valueA"},
                                           {"key": "valueA"}))
    def test_different_key_count(self):
        # ITEM_VISIBLE carries an extra key, so comparison fails both ways.
        self.assertFalse(is_same_dictionary(ITEM_VISIBLE, ITEM_HIDDEN))
        self.assertFalse(is_same_dictionary(ITEM_HIDDEN, ITEM_VISIBLE))
    def test_different_keys(self):
        # Same size but disjoint key names.
        self.assertFalse(is_same_dictionary({"keyA" : "value"},
                                            {"keyB" : "value"}))
    def test_different_values(self):
        # Same keys, differing values.
        self.assertFalse(is_same_dictionary({"key" : "valueA"},
                                            {"key" : "valueB"}))
def visible_item():
    """Return a fresh deep copy of ITEM_VISIBLE that tests may mutate freely."""
    fresh = copy.deepcopy(ITEM_VISIBLE)
    return fresh
def hidden_item():
    """Return a fresh deep copy of ITEM_HIDDEN that tests may mutate freely."""
    fresh = copy.deepcopy(ITEM_HIDDEN)
    return fresh
class AddFieldsTest(unittest.TestCase):
    """Checks that storage.add_default_fields fills in visibility and date."""
    def test_missing_both(self):
        # An item with neither field gains both; visibility defaults to falsy.
        item = hidden_item()
        storage.add_default_fields(item)
        self.assertIn(storage.VISIBLE_KEY, item)
        self.assertIn(storage.ADDED_KEY, item)
        self.assertFalse(item[storage.VISIBLE_KEY])
    def test_missing_date(self):
        # An explicitly visible item keeps its flag and gains a date.
        item = visible_item()
        storage.add_default_fields(item)
        self.assertIn(storage.VISIBLE_KEY, item)
        self.assertIn(storage.ADDED_KEY, item)
        self.assertTrue(item[storage.VISIBLE_KEY])
    def test_date_format(self):
        item = visible_item()
        storage.add_default_fields(item)
        # strptime raises ValueError if the stored date is not YYYY-MM-DD.
        time.strptime(item[storage.ADDED_KEY],
                      "%Y-%m-%d")
class StorageTest(unittest.TestCase):
    """Tests the storage engine implementation for sanity."""
    # NOTE(review): abstract base — subclasses must assign self.storage in
    # setUp(); load_tests() below keeps this class out of direct collection.
    def list(self):
        """Wraps storage listing returning list."""
        # (the method name shadows the builtin `list` only inside this class)
        return list(self.storage.list())
    def test_store_retrieve(self):
        # A stored item is fetchable via the id that store() returns.
        item = visible_item()
        self.assertTrue(is_same_dictionary(
            self.storage.retrieve(self.storage.store(item)),
            item))
    def test_stringified_id(self):
        # Ids remain usable after round-tripping through str().
        item = visible_item()
        item_id = str(self.storage.store(item))
        self.assertTrue(is_same_dictionary(
            self.storage.retrieve(item_id), item))
        self.assertTrue(self.storage.remove(item_id))
    def test_store_remove(self):
        self.assertEqual(len(self.list()), 0)
        item_id = self.storage.store(visible_item())
        self.assertEqual(len(self.list()), 1)
        self.assertTrue(self.storage.remove(item_id))
        self.assertIsNone(self.storage.retrieve(item_id))
        self.assertEqual(len(self.list()), 0)
    def test_store_hidden(self):
        # Hidden items are retrievable by id but never appear in listings.
        item = hidden_item()
        self.assertEqual(len(self.list()), 0)
        item_id = self.storage.store(item)
        self.assertEqual(len(self.list()), 0)
        self.assertTrue(
            is_same_dictionary(self.storage.retrieve(item_id), item))
    def test_list(self):
        # Listing yields the ids of visible items only.
        self.storage.store(hidden_item())
        visible_item_id = self.storage.store(visible_item())
        l = self.list()
        self.assertEqual(visible_item_id, l[0])
    def test_retrieve_missing(self):
        self.assertIsNone(self.storage.retrieve("I'm so random"))
    def test_remove_missing(self):
        # remove() reports False rather than raising for unknown ids.
        self.assertFalse(
            self.storage.remove("These are not the droids you're looking for"))
class MemoryStorageTest(StorageTest):
    """Runs the shared StorageTest suite against the in-memory backend."""
    def setUp(self):
        self.storage = storage.MemoryStorage()
MONGO_TEST_COLLECTION = "storage_test"
class MongoStorageTest(StorageTest):
    """Runs the shared StorageTest suite against the MongoDB backend."""
    @classmethod
    def setUpClass(cls):
        # Wow, such default instance, much evil.
        # NOTE(review): connects to the default (localhost) MongoDB instance.
        cls.client = MongoClient()
        cls.db = cls.client.test_db
        # Make sure the first test starts with no collection
        cls.db.drop_collection(MONGO_TEST_COLLECTION)
    @classmethod
    def tearDownClass(cls):
        cls.client.close()
    def setUp(self):
        self.storage = storage.MongoStorage(
            MongoStorageTest.db[MONGO_TEST_COLLECTION])
    def tearDown(self):
        # Drop the collection after every test so cases stay independent.
        MongoStorageTest.db.drop_collection(MONGO_TEST_COLLECTION)
# Override test loading to skip test from storage test base class
# without skipping them in the subclasses.
def load_tests(loader, tests, pattern):
    """unittest load_tests hook: collect only the concrete test classes,
    so the abstract StorageTest base is never run directly."""
    case_classes = (AddFieldsTest, ComparisonTest,
                    MemoryStorageTest, MongoStorageTest)
    suite = unittest.TestSuite()
    for case_class in case_classes:
        suite.addTests(loader.loadTestsFromTestCase(case_class))
    return suite
| 3,464 | 209 | 589 |
503cae65e11fcbedcf30da20c844d8092c3b4bda | 3,225 | py | Python | models/utils/gcn.py | mhnaufal/parking-space-occupancy | ecc17478838506601b526b7488b343f14e2637e5 | [
"MIT"
] | 6 | 2021-07-29T04:15:15.000Z | 2022-01-12T07:18:14.000Z | models/utils/gcn.py | mhnaufal/parking-space-occupancy | ecc17478838506601b526b7488b343f14e2637e5 | [
"MIT"
] | null | null | null | models/utils/gcn.py | mhnaufal/parking-space-occupancy | ecc17478838506601b526b7488b343f14e2637e5 | [
"MIT"
] | 4 | 2021-07-27T10:04:33.000Z | 2021-11-27T20:28:35.000Z | import torch
import copy
from torch import nn
from torch.nn import functional as F
from torch.nn.modules.container import ModuleList
class NeighborAttention(nn.Module):
"""
A graph-based attention replacement.
"""
class NeighborEncoderLayer(nn.Module):
"""
Copy-paste of torch.nn.TransformerEncoderLayer but
uses 'NeighborAttention' instead of the regural
torch.nn.MultiheadAttention.
"""
class SimpleNeighborEncoderLayer(nn.Module):
"""
Copy-paste of torch.nn.TransformerEncoderLayer but
uses 'NeighborAttention' instead of the regural
torch.nn.MultiheadAttention.
"""
class GCN(nn.Module):
"""
NeighborEncoder is a stack of NeighborEncoderLayers.
"""
| 33.947368 | 100 | 0.622326 | import torch
import copy
from torch import nn
from torch.nn import functional as F
from torch.nn.modules.container import ModuleList
class NeighborAttention(nn.Module):
    """
    A graph-based attention replacement.

    For each ROI, concatenates its own feature vector with those of its
    `n` spatially-nearest ROIs and mixes them through a single linear layer.
    """
    def __init__(self, n, d_model):
        # n: number of neighbours attended to (in addition to self)
        # d_model: per-ROI feature width C
        super().__init__()
        self.n = n
        self.linear = nn.Linear((n+1)*d_model, d_model) # [(n+1)*L -> L]
    def forward(self, features, rois):
        # features: [L, C]; rois: [L, P, 2] — P 2-D points per ROI (the mean
        # over dim 1 below yields one (x, y) centre per ROI; P semantics
        # presumably corner points — confirm with callers).
        # compute the center of each roi
        roi_centers = rois.mean(1)
        # for each roi, find the n nearest rois (+ self)
        dx = roi_centers[None, :, 0] - roi_centers[:, 0, None] # [L, L]
        dy = roi_centers[None, :, 1] - roi_centers[:, 1, None] # [L, L]
        d = torch.sqrt(dx**2 + dy**2) # [L, L]
        idx = torch.argsort(d, 1)[:, :(self.n+1)] # [L, n+1] (self is distance 0)
        # for each roi, 'attend' to self and the n nearest rois
        features = features[idx, :] # [L, n+1, C]
        features = features.permute(0, 2, 1) # [L, C, n+1]
        features = features.flatten(1) # [L, C*(n+1)]
        features = self.linear(features) # [L, c]
        features = F.relu(features) # [L, c]
        return features
class NeighborEncoderLayer(nn.Module):
    """
    Copy-paste of torch.nn.TransformerEncoderLayer but
    uses 'NeighborAttention' instead of the regural
    torch.nn.MultiheadAttention.
    """
    def __init__(self, n=2, d_model=256, dim_feedforward=2048, dropout=1e-3):
        super().__init__()
        self.attn = NeighborAttention(n, d_model)
        # position-wise feed-forward sublayer
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        # separate norms/dropouts for the attention and feed-forward residuals
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
    def forward(self, features, rois):
        # post-norm residual block around the neighbour attention
        features2 = self.attn(features, rois)
        features = features + self.dropout1(features2)
        features = self.norm1(features)
        # post-norm residual block around the feed-forward network
        features2 = self.linear2(self.dropout(F.relu(self.linear1(features))))
        features = features + self.dropout2(features2)
        features = self.norm2(features)
        return features
class SimpleNeighborEncoderLayer(nn.Module):
    """
    Copy-paste of torch.nn.TransformerEncoderLayer but
    uses 'NeighborAttention' instead of the regural
    torch.nn.MultiheadAttention.
    """
    def __init__(self, n=2, d_model=256, dim_feedforward=2048):
        # NOTE(review): dim_feedforward is accepted but unused — this
        # simplified variant has no feed-forward sublayer.
        super().__init__()
        self.attn = NeighborAttention(n, d_model)
        self.norm = nn.LayerNorm(d_model)
    def forward(self, features, rois):
        # single residual attention block: add, normalise, then ReLU
        features = features + self.attn(features, rois)
        features = F.relu(self.norm(features))
        return features
class GCN(nn.Module):
    """
    NeighborEncoder is a stack of NeighborEncoderLayers.

    Args:
        num_layers: number of NeighborEncoderLayer blocks to stack.
        *args, **kwargs: forwarded verbatim to every NeighborEncoderLayer.
    """
    def __init__(self, num_layers=2, *args, **kwargs):
        super().__init__()
        # `_` replaces the previously-unused loop variable; each layer is
        # constructed separately so it gets its own parameters.
        self.layers = ModuleList([NeighborEncoderLayer(*args, **kwargs) for _ in range(num_layers)])
    def forward(self, features, rois):
        """Thread (features, rois) through the stacked layers sequentially."""
        for layer in self.layers:
            features = layer(features, rois)
        return features
| 2,279 | 0 | 220 |
5542929b78e5e07d174778ee5e31247b762fa0bf | 45 | py | Python | EmployeeSQL/Config.py | XxTopShottaxX/SQL-challenge | a560b8fb28f55396913b95b44c8bf5ffb95267a3 | [
"ADSL"
] | null | null | null | EmployeeSQL/Config.py | XxTopShottaxX/SQL-challenge | a560b8fb28f55396913b95b44c8bf5ffb95267a3 | [
"ADSL"
] | null | null | null | EmployeeSQL/Config.py | XxTopShottaxX/SQL-challenge | a560b8fb28f55396913b95b44c8bf5ffb95267a3 | [
"ADSL"
] | null | null | null | username="postgres"
password="dr@g0ngThcetaG" | 22.5 | 25 | 0.822222 | username="postgres"
password="dr@g0ngThcetaG" | 0 | 0 | 0 |
a3de5b6bc51701b123efaaa48340189702b53808 | 2,805 | py | Python | src/testoob/reporting/test_info.py | sshyran/testoob | 729fa6a17660d0bd8c75907a89ed6998180b5765 | [
"Apache-2.0"
] | null | null | null | src/testoob/reporting/test_info.py | sshyran/testoob | 729fa6a17660d0bd8c75907a89ed6998180b5765 | [
"Apache-2.0"
] | null | null | null | src/testoob/reporting/test_info.py | sshyran/testoob | 729fa6a17660d0bd8c75907a89ed6998180b5765 | [
"Apache-2.0"
] | null | null | null | # Testoob, Python Testing Out Of (The) Box
# Copyright (C) 2005-2006 The Testoob Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"getting information about tests"
def create_test_info(arg):
"""
Factory method for creating TestInfo instances.
"""
if isinstance(arg, TestInfo):
return arg
return TestInfo(arg)
class TestInfo:
"""
An interface for getting information about tests.
Reporters receive instances of this class.
"""
# should be usable as dictionary keys, so define __hash__ and __cmp__
from testoob.utils import add_fields_pickling
add_fields_pickling(TestInfo)
| 30.16129 | 81 | 0.670945 | # Testoob, Python Testing Out Of (The) Box
# Copyright (C) 2005-2006 The Testoob Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"getting information about tests"
def create_test_info(arg):
    """
    Factory method for creating TestInfo instances.
    """
    # Pass existing TestInfo objects through untouched; wrap anything else.
    return arg if isinstance(arg, TestInfo) else TestInfo(arg)
class TestInfo:
    """
    An interface for getting information about tests.
    Reporters receive instances of this class.
    """
    def __init__(self, fixture):
        # fixture: the unittest-style test case instance being reported on
        self.fixture = fixture
    def module(self):
        """Return the dotted module name the fixture's class lives in."""
        return self.fixture.__module__
    def filename(self):
        """Return the source file of the fixture's module, if importable."""
        import sys
        try:
            return sys.modules[self.module()].__file__
        except KeyError:
            return "unknown file"
    def classname(self):
        return self.fixture.__class__.__name__
    def funcname(self):
        # parsing id() because the function name is a private fixture field
        return self.fixture.id().split(".")[-1]
    def extrainfo(self):
        # extra description attached to the fixture by the testoob framework
        return self.fixture._testoob_extra_description
    def extrafuncname(self):
        return "%s%s" % (self.funcname(), self.extrainfo())
    def docstring(self):
        """Return the first line of the test method's docstring, or ""."""
        if getattr(self.fixture, self.funcname()).__doc__:
            return getattr(self.fixture, self.funcname()).__doc__.splitlines()[0]
        return ""
    def funcinfo(self):
        return (self.funcname(), self.docstring(), self.extrainfo())
    def failure_exception_type(self): # TODO: do we need this?
        return self.fixture.failureException
    def id(self): # TODO: do we need this?
        return self.fixture.id()
    def short_description(self): # TODO: do we need this?
        return self.fixture.shortDescription()
    def __str__(self):
        return str(self.fixture)
    # should be usable as dictionary keys, so define __hash__ and __cmp__
    # NOTE(review): __cmp__ and the cmp() builtin are Python 2 only; under
    # Python 3 instances would hash but not order.
    def __unique_string_repr(self):
        return "%s - %s" % (hash(self), str(self))
    def __cmp__(self, other):
        try:
            return cmp(self.fixture, other.fixture)
        except AttributeError:
            return cmp(self.__unique_string_repr(), other.__unique_string_repr())
    def __hash__(self):
        return hash(self.fixture)
from testoob.utils import add_fields_pickling
add_fields_pickling(TestInfo)
| 1,243 | 0 | 434 |
bf515a4686057296a620835ea20d27f153c36106 | 315 | py | Python | cloudmesh_cmd/cygwin-install.py | cloudmesh/windows | f089e9ef83ae1cae9b29c877c462130a7b93b5e8 | [
"Apache-2.0"
] | null | null | null | cloudmesh_cmd/cygwin-install.py | cloudmesh/windows | f089e9ef83ae1cae9b29c877c462130a7b93b5e8 | [
"Apache-2.0"
] | 1 | 2015-06-08T23:15:25.000Z | 2015-06-08T23:15:25.000Z | cloudmesh_cmd/cygwin-install.py | cloudmesh/windows | f089e9ef83ae1cae9b29c877c462130a7b93b5e8 | [
"Apache-2.0"
] | null | null | null | import urllib
import os
from subprocess import Popen
# Download the 32-bit Cygwin installer and hand off to the batch script.
# NOTE(review): bare urllib.urlretrieve is the Python 2 API (Python 3 moved
# it to urllib.request.urlretrieve) — confirm the intended interpreter.
directory = 'C:\\Temp\\cygwindownload'
if not os.path.isdir(directory):
    os.makedirs(directory)
urllib.urlretrieve("https://cygwin.com/setup-x86.exe", directory + '\\setup.exe')
p = Popen("cygwin-install.bat")
# Wait for the batch script to finish; without pipes, both values are None.
stdout, stderr = p.communicate()
| 22.5 | 82 | 0.704762 | import urllib
import os
from subprocess import Popen
directory = 'C:\\Temp\\cygwindownload'
if not os.path.isdir(directory):
os.makedirs(directory)
urllib.urlretrieve("https://cygwin.com/setup-x86.exe", directory + '\\setup.exe')
p = Popen("cygwin-install.bat")
stdout, stderr = p.communicate()
| 0 | 0 | 0 |
77596c5e8f345c28d69336c3470785f0b1d58164 | 7,531 | py | Python | availability_app/views.py | Brown-University-Library/OLD-ARCHIVED_availability_api | 77ce8a557f46e195a55da93eb8985337ab0448ac | [
"MIT"
] | 1 | 2017-06-13T16:57:48.000Z | 2017-06-13T16:57:48.000Z | availability_app/views.py | Brown-University-Library/OLD-ARCHIVED_availability_api | 77ce8a557f46e195a55da93eb8985337ab0448ac | [
"MIT"
] | 9 | 2019-09-03T13:39:57.000Z | 2020-09-14T20:39:44.000Z | availability_app/views.py | Brown-University-Library/availability_api | 77ce8a557f46e195a55da93eb8985337ab0448ac | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime, json, logging, os, pprint
from availability_app import settings_app
from availability_app.lib import view_info_helper
from availability_app.lib.concurrency import AsyncHelper # temporary demo helper
from availability_app.lib.ezb_v1_handler import EzbV1Helper
from availability_app.lib.bib_items_v2 import BibItemsInfo
from availability_app.lib.bib_items_async_v2 import BibItemsInfoAsync # not yet in production
from availability_app.lib.stats_v1_handler import StatsValidator, StatsBuilder
from django.conf import settings as project_settings
from django.contrib.auth import logout
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseBadRequest, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
log = logging.getLogger( __name__ )
slog = logging.getLogger( 'stats_logger' )  # separate logger for usage-stats entries
# module-level helper singletons, shared across all requests
ezb1_helper = EzbV1Helper()
stats_builder = StatsBuilder()
stats_validator = StatsValidator()
bib_items = BibItemsInfo()
# ===========================
# demo handlers
# ===========================
def concurrency_test( request ):
    """ Tests concurrency, via trio, with django. """
    if project_settings.DEBUG == False: # only active on dev-server
        return HttpResponseNotFound( '<div>404 / Not Found</div>' )
    async_hlpr = AsyncHelper()
    # httpbin endpoints with staggered artificial delays
    url_dct = {
        'shortest': 'https://httpbin.org/delay/.6',
        'shorter': 'https://httpbin.org/delay/.8',
        'standard': 'https://httpbin.org/delay/1',
        'longer': 'https://httpbin.org/delay/1.2',
        'longest': 'https://httpbin.org/delay/1.4' }
    if request.GET.get( 'outlier', '' ) == 'yes':
        # optional very-slow endpoint to show the effect of one laggard
        url_dct['outlier'] = 'https://httpbin.org/delay/10'
    async_hlpr.process_urls( url_dct )
    # NOTE(review): the 'results:' key carries a stray trailing colon.
    response_dct = { 'results:': async_hlpr.results_dct, 'total_time_taken': async_hlpr.total_time_taken }
    output = json.dumps( response_dct, sort_keys=True, indent=2 )
    return HttpResponse( output, content_type='application/json; charset=utf-8' )
def v2_bib_items_async( request, bib_value ):
    """ Not currently used; non-async version in production is used by easyrequest_hay.
        Returns bib/item availability data as JSON, gathering backend data
        concurrently via BibItemsInfoAsync. """
    # if project_settings.DEBUG == False: # only active on dev-server
    #     return HttpResponseNotFound( '<div>404 / Not Found</div>' )
    bitems_async = BibItemsInfoAsync()  # fix: dropped the unused duplicate binding `bib_items_async`
    log.debug( f'starting... request.__dict__, ```{request.__dict__}```' )
    start_stamp = datetime.datetime.now()  # captured once so elapsed-time reporting is consistent
    query_dct = bitems_async.build_query_dct( request, start_stamp )
    raw_data_dct = bitems_async.manage_data_calls( bib_value )
    host = request.META.get( 'HTTP_HOST', '127.0.0.1' )  # presumably used for absolute links — confirm in prep_data
    data_dct = bitems_async.prep_data( raw_data_dct, host )
    response_dct = bitems_async.build_response_dct( data_dct, start_stamp )
    jsn = json.dumps( { 'query': query_dct, 'response': response_dct }, sort_keys=True, indent=2 )
    return HttpResponse( jsn, content_type='application/javascript; charset=utf-8' )
# ===========================
# primary app handlers
# ===========================
def ezb_v1( request, id_type, id_value ):
    """ Handles existing easyborrow-api call. """
    params = request.GET  # NOTE(review): unused local
    log.debug( 'starting; id_type, `%s`; id_value, `%s`' % (id_type, id_value) )
    validity_dct = ezb1_helper.validate( id_type, id_value )
    if validity_dct['validity'] is not True:
        # invalid id: echo the query back with the validator's error as HTTP 400
        data_dct = { 'query': ezb1_helper.build_query_dct( request, datetime.datetime.now() ), u'response': {u'error': validity_dct['error']} }
        jsn = json.dumps( data_dct, sort_keys=True, indent=2 )
        return HttpResponseBadRequest( jsn, content_type=u'application/javascript; charset=utf-8' )
    else:
        # valid id: build the full availability payload (optionally with marc)
        data_dct = ezb1_helper.build_data_dct( id_type, validity_dct['value'], request.GET.get('show_marc', ''), request )
        jsn = json.dumps( data_dct, sort_keys=True, indent=2 )
        return HttpResponse( jsn, content_type='application/javascript; charset=utf-8' )
def v2_bib_items( request, bib_value ):
    """ Handles easy_request_hay call. """
    # log.debug( f'starting... request.__dict__, ```{pprint.pformat(request.__dict__)}```' )
    log.debug( f'starting... request.__dict__, ```{request.__dict__}```' )
    start_stamp = datetime.datetime.now()
    query_dct = bib_items.build_query_dct( request, start_stamp )
    host = request.META.get( 'HTTP_HOST', '127.0.0.1' )
    data_dct = bib_items.prep_data( bib_value, host )
    ## TODO- refactor this quick-handling of a bad sierra response
    response_dct = {}
    if 'httpStatus' in data_dct.keys():
        # NOTE(review): if httpStatus is present AND equals 200, neither
        # branch populates response_dct (it stays {}) — confirm that case
        # cannot occur in practice.
        if data_dct['httpStatus'] != 200:
            response_dct = { 'problem_sierra_response': data_dct }
            jsn = json.dumps( { 'query': query_dct, 'response': response_dct }, sort_keys=True, indent=2 )
            return HttpResponseNotFound( jsn, content_type='application/javascript; charset=utf-8' )
    else:
        response_dct = bib_items.build_response_dct( data_dct, start_stamp )
    jsn = json.dumps( { 'query': query_dct, 'response': response_dct }, sort_keys=True, indent=2 )
    return HttpResponse( jsn, content_type='application/javascript; charset=utf-8' )
def ezb_v1_stats( request ):
    """ Returns basic stats on v1-api usage. """
    # NOTE(review): stats_validator / stats_builder are module-level
    # singletons whose .output is mutated per request — confirm behaviour
    # under concurrent requests.
    log.debug( 'starting ezb_v1_stats()' )
    slog.info( 'new entry!' )
    ## grab & validate params
    rq_now = datetime.datetime.now()
    if stats_validator.check_params( request.GET, request.scheme, request.META['HTTP_HOST'], rq_now ) == False:
        return HttpResponseBadRequest( stats_validator.output, content_type=u'application/javascript; charset=utf-8' )
    ## run-query
    results = stats_builder.run_query( request.GET )
    ## build response
    stats_builder.build_response( results, request.GET, request.scheme, request.META['HTTP_HOST'], rq_now )
    return HttpResponse( stats_builder.output, content_type=u'application/javascript; charset=utf-8' )
def locations_and_statuses( request ):
    """ Shows values being used. """
    rq_now = datetime.datetime.now()
    data_dct = {
        'query': ezb1_helper.build_query_dct( request, rq_now ),
        'response': {
            # values come from JSON-encoded env vars, re-read on every request;
            # NOTE(review): os.environ[...] raises KeyError (a 500) if unset.
            'ezb_available_locations': json.loads( os.environ['AVL_API__EZB_AVAILABLE_LOCATIONS'] ),
            'ezb_available_statuses': json.loads( os.environ['AVL_API__EZB_AVAILABLE_STATUSES'] ),
            'time_taken': str( datetime.datetime.now() - rq_now ) }
    }
    output = json.dumps( data_dct, sort_keys=True, indent=2 )
    return HttpResponse( output, content_type='application/json; charset=utf-8' )
# ===========================
# for development convenience
# ===========================
def version( request ):
    """ Returns basic data including branch & commit. """
    # log.debug( 'request.__dict__, ```%s```' % pprint.pformat(request.__dict__) )
    rq_now = datetime.datetime.now()
    commit = view_info_helper.get_commit()
    branch = view_info_helper.get_branch()
    # swap the literal word 'commit' in the git output for the branch name
    info_txt = commit.replace( 'commit', branch )
    resp_now = datetime.datetime.now()
    taken = resp_now - rq_now
    context_dct = view_info_helper.make_context( request, rq_now, info_txt, taken )
    output = json.dumps( context_dct, sort_keys=True, indent=2 )
    return HttpResponse( output, content_type='application/json; charset=utf-8' )
def error_check( request ):
    """ For checking that admins receive error-emails. """
    # guard clause: only the dev server (DEBUG) may trigger the error
    if project_settings.DEBUG != True:
        return HttpResponseNotFound( '<div>404 / Not Found</div>' )
    # deliberately raise ZeroDivisionError so the error-email machinery fires
    1/0
| 46.487654 | 143 | 0.688089 | # -*- coding: utf-8 -*-
import datetime, json, logging, os, pprint
from availability_app import settings_app
from availability_app.lib import view_info_helper
from availability_app.lib.concurrency import AsyncHelper # temporary demo helper
from availability_app.lib.ezb_v1_handler import EzbV1Helper
from availability_app.lib.bib_items_v2 import BibItemsInfo
from availability_app.lib.bib_items_async_v2 import BibItemsInfoAsync # not yet in production
from availability_app.lib.stats_v1_handler import StatsValidator, StatsBuilder
from django.conf import settings as project_settings
from django.contrib.auth import logout
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseBadRequest, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
log = logging.getLogger( __name__ )
slog = logging.getLogger( 'stats_logger' )
ezb1_helper = EzbV1Helper()
stats_builder = StatsBuilder()
stats_validator = StatsValidator()
bib_items = BibItemsInfo()
# ===========================
# demo handlers
# ===========================
def concurrency_test( request ):
""" Tests concurrency, via trio, with django. """
if project_settings.DEBUG == False: # only active on dev-server
return HttpResponseNotFound( '<div>404 / Not Found</div>' )
async_hlpr = AsyncHelper()
url_dct = {
'shortest': 'https://httpbin.org/delay/.6',
'shorter': 'https://httpbin.org/delay/.8',
'standard': 'https://httpbin.org/delay/1',
'longer': 'https://httpbin.org/delay/1.2',
'longest': 'https://httpbin.org/delay/1.4' }
if request.GET.get( 'outlier', '' ) == 'yes':
url_dct['outlier'] = 'https://httpbin.org/delay/10'
async_hlpr.process_urls( url_dct )
response_dct = { 'results:': async_hlpr.results_dct, 'total_time_taken': async_hlpr.total_time_taken }
output = json.dumps( response_dct, sort_keys=True, indent=2 )
return HttpResponse( output, content_type='application/json; charset=utf-8' )
def v2_bib_items_async( request, bib_value ):
""" Not currently used; non-async version in production is used by easyrequest_hay. """
# if project_settings.DEBUG == False: # only active on dev-server
# return HttpResponseNotFound( '<div>404 / Not Found</div>' )
bib_items_async = bitems_async = BibItemsInfoAsync()
log.debug( f'starting... request.__dict__, ```{request.__dict__}```' )
start_stamp = datetime.datetime.now()
query_dct = bitems_async.build_query_dct( request, start_stamp )
raw_data_dct = bitems_async.manage_data_calls( bib_value )
host = request.META.get( 'HTTP_HOST', '127.0.0.1' )
data_dct = bitems_async.prep_data( raw_data_dct, host )
response_dct = bitems_async.build_response_dct( data_dct, start_stamp )
jsn = json.dumps( { 'query': query_dct, 'response': response_dct }, sort_keys=True, indent=2 )
return HttpResponse( jsn, content_type='application/javascript; charset=utf-8' )
# ===========================
# primary app handlers
# ===========================
def ezb_v1( request, id_type, id_value ):
""" Handles existing easyborrow-api call. """
params = request.GET
log.debug( 'starting; id_type, `%s`; id_value, `%s`' % (id_type, id_value) )
validity_dct = ezb1_helper.validate( id_type, id_value )
if validity_dct['validity'] is not True:
data_dct = { 'query': ezb1_helper.build_query_dct( request, datetime.datetime.now() ), u'response': {u'error': validity_dct['error']} }
jsn = json.dumps( data_dct, sort_keys=True, indent=2 )
return HttpResponseBadRequest( jsn, content_type=u'application/javascript; charset=utf-8' )
else:
data_dct = ezb1_helper.build_data_dct( id_type, validity_dct['value'], request.GET.get('show_marc', ''), request )
jsn = json.dumps( data_dct, sort_keys=True, indent=2 )
return HttpResponse( jsn, content_type='application/javascript; charset=utf-8' )
def v2_bib_items( request, bib_value ):
""" Handles easy_request_hay call. """
# log.debug( f'starting... request.__dict__, ```{pprint.pformat(request.__dict__)}```' )
log.debug( f'starting... request.__dict__, ```{request.__dict__}```' )
start_stamp = datetime.datetime.now()
query_dct = bib_items.build_query_dct( request, start_stamp )
host = request.META.get( 'HTTP_HOST', '127.0.0.1' )
data_dct = bib_items.prep_data( bib_value, host )
## TODO- refactor this quick-handling of a bad sierra response
response_dct = {}
if 'httpStatus' in data_dct.keys():
if data_dct['httpStatus'] != 200:
response_dct = { 'problem_sierra_response': data_dct }
jsn = json.dumps( { 'query': query_dct, 'response': response_dct }, sort_keys=True, indent=2 )
return HttpResponseNotFound( jsn, content_type='application/javascript; charset=utf-8' )
else:
response_dct = bib_items.build_response_dct( data_dct, start_stamp )
jsn = json.dumps( { 'query': query_dct, 'response': response_dct }, sort_keys=True, indent=2 )
return HttpResponse( jsn, content_type='application/javascript; charset=utf-8' )
def ezb_v1_stats( request ):
""" Returns basic stats on v1-api usage. """
log.debug( 'starting ezb_v1_stats()' )
slog.info( 'new entry!' )
## grab & validate params
rq_now = datetime.datetime.now()
if stats_validator.check_params( request.GET, request.scheme, request.META['HTTP_HOST'], rq_now ) == False:
return HttpResponseBadRequest( stats_validator.output, content_type=u'application/javascript; charset=utf-8' )
## run-query
results = stats_builder.run_query( request.GET )
## build response
stats_builder.build_response( results, request.GET, request.scheme, request.META['HTTP_HOST'], rq_now )
return HttpResponse( stats_builder.output, content_type=u'application/javascript; charset=utf-8' )
def locations_and_statuses( request ):
    """ Shows values being used.
        Reads the configured EZB location/status lists from the environment
        and returns them alongside the standard query metadata. """
    start_time = datetime.datetime.now()
    query_dct = ezb1_helper.build_query_dct( request, start_time )
    response_dct = {
        'ezb_available_locations': json.loads( os.environ['AVL_API__EZB_AVAILABLE_LOCATIONS'] ),
        'ezb_available_statuses': json.loads( os.environ['AVL_API__EZB_AVAILABLE_STATUSES'] ),
        'time_taken': str( datetime.datetime.now() - start_time ),
    }
    payload = json.dumps( { 'query': query_dct, 'response': response_dct }, sort_keys=True, indent=2 )
    return HttpResponse( payload, content_type='application/json; charset=utf-8' )
# ===========================
# for development convenience
# ===========================
def version( request ):
    """ Returns basic data including branch & commit.
        Substitutes the branch name into the commit string and serializes
        the helper-built context as JSON. """
    request_time = datetime.datetime.now()
    commit = view_info_helper.get_commit()
    branch = view_info_helper.get_branch()
    info_txt = commit.replace( 'commit', branch )
    elapsed = datetime.datetime.now() - request_time
    context_dct = view_info_helper.make_context( request, request_time, info_txt, elapsed )
    payload = json.dumps( context_dct, sort_keys=True, indent=2 )
    return HttpResponse( payload, content_type='application/json; charset=utf-8' )
def error_check( request ):
    """ For checking that admins receive error-emails.
        In DEBUG mode, deliberately raises ZeroDivisionError so the
        error-email pipeline can be exercised end-to-end; otherwise 404s. """
    if project_settings.DEBUG:  # truthiness instead of unidiomatic `== True`
        1/0  # intentional ZeroDivisionError
    else:
        return HttpResponseNotFound( '<div>404 / Not Found</div>' )
| 0 | 0 | 0 |
a8a0cc141e0d4ab8242d6a31e939794ff8719dbd | 750 | py | Python | selenium_basics.py | kethan1/Interacting_With_Websites | fa30aa5aa7a3c1bab7b552b1af217eff722815a9 | [
"MIT"
] | null | null | null | selenium_basics.py | kethan1/Interacting_With_Websites | fa30aa5aa7a3c1bab7b552b1af217eff722815a9 | [
"MIT"
] | null | null | null | selenium_basics.py | kethan1/Interacting_With_Websites | fa30aa5aa7a3c1bab7b552b1af217eff722815a9 | [
"MIT"
] | null | null | null | from selenium import webdriver
# Demo script: Google-search "python" in Firefox and click the first
# clickable organic result. Requires a local Firefox + geckodriver.
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
import time
wd = webdriver.Firefox()
wd.get("https://www.google.com/search?q=python")
# wd.find_element_by_css_selector("a").click() # This doesn't work, same error
# The css selector is needed, because not all a tags are clickable
# Wait up to 20s for the first http(s) link inside the results div, then click.
WebDriverWait(wd, 20).until(
    expected_conditions.element_to_be_clickable((By.CSS_SELECTOR, 'div[id=search] a[href^="http"]'))
).click()
# WebDriverWait(wd, 20).until(
#     expected_conditions.element_to_be_clickable((By.XPATH, "//div[@id='search']//a[contains(@href,'http')]"))
# ).click()
# Give the target page a moment to load before closing the browser.
time.sleep(5)
wd.close()
| 30 | 111 | 0.752 | from selenium import webdriver
# Demo script: Google-search "python" in Firefox and click the first
# clickable organic result. Requires a local Firefox + geckodriver.
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
import time
wd = webdriver.Firefox()
wd.get("https://www.google.com/search?q=python")
# wd.find_element_by_css_selector("a").click() # This doesn't work, same error
# The css selector is needed, because not all a tags are clickable
# Wait up to 20s for the first http(s) link inside the results div, then click.
WebDriverWait(wd, 20).until(
    expected_conditions.element_to_be_clickable((By.CSS_SELECTOR, 'div[id=search] a[href^="http"]'))
).click()
# WebDriverWait(wd, 20).until(
#     expected_conditions.element_to_be_clickable((By.XPATH, "//div[@id='search']//a[contains(@href,'http')]"))
# ).click()
# Give the target page a moment to load before closing the browser.
time.sleep(5)
wd.close()
| 0 | 0 | 0 |
88b84edd1b1f9a8746061b89dcbe123a92aecaa2 | 255 | py | Python | src/estructura-flask/apiflaskdemo/project/auth/__init__.py | PythonistaMX/py231 | 734f5972948f7d23d581ec54ee7af581eca651ce | [
"MIT"
] | 3 | 2020-05-05T23:38:45.000Z | 2021-11-05T01:03:38.000Z | apiflaskdemo/project/auth/__init__.py | PythonistaMX/api-testing-demo | 80443851e921506e0b259960867a5c1fa6c15402 | [
"MIT"
] | null | null | null | apiflaskdemo/project/auth/__init__.py | PythonistaMX/api-testing-demo | 80443851e921506e0b259960867a5c1fa6c15402 | [
"MIT"
] | 4 | 2020-10-20T01:15:23.000Z | 2022-02-08T02:27:35.000Z | from functools import wraps
from flask import g, abort | 25.5 | 38 | 0.647059 | from functools import wraps
from flask import g, abort
def login_required(view):
    """Wrap *view* so the request is rejected with HTTP 403 whenever no
    user is attached to the request context (``g.user`` is ``None``)."""
    @wraps(view)
    def guarded(*args, **kwargs):
        if g.user is not None:
            return view(*args, **kwargs)
        return abort(403)
    return guarded
8912e1dc471c0edad5e00a6d7f9813c76ea52508 | 3,569 | py | Python | elsa.py | Frostiee11/elsa | 86f4c086b38ddd00ff8851e349c32dacf9df39fa | [
"MIT"
] | 7 | 2018-11-29T13:55:43.000Z | 2020-08-26T10:06:53.000Z | elsa.py | Frostiee11/elsa | 86f4c086b38ddd00ff8851e349c32dacf9df39fa | [
"MIT"
] | null | null | null | elsa.py | Frostiee11/elsa | 86f4c086b38ddd00ff8851e349c32dacf9df39fa | [
"MIT"
] | null | null | null | #Elsa by Frostmeister
import discord
import math
import time
import datetime
import googlesearch as gs
import urbandictionary as ud
import random
import asyncio
import os
from discord.ext import commands
bot = commands.Bot(description=" The Snow Queen ❄️" , command_prefix=("e!","E!"))
################ EVENTS ###################
@bot.event
@bot.event
@bot.event
@bot.event
@bot.event
@bot.event
@bot.event
############ BOT RUN ################
bot.run(os.getenv("TOKEN"))
| 24.958042 | 135 | 0.585318 | #Elsa by Frostmeister
import discord
import math
import time
import datetime
import googlesearch as gs
import urbandictionary as ud
import random
import asyncio
import os
from discord.ext import commands
bot = commands.Bot(description=" The Snow Queen ❄️" , command_prefix=("e!","E!"))
################ EVENTS ###################
@bot.event
async def on_ready():
    """Runs once the bot connects: load the cog extensions, set the
    'playing' presence, and log identity to stdout."""
    files = ['repl','Moderation','Fun','Math','General','API']
    for i in files:
        bot.load_extension(i)
    await bot.change_presence(game= discord.Game(name="with Snow | e!help or E!help",type=0))
    print("---------------------------------")
    print("Logged in as " + bot.user.name)
    print("My id is " + bot.user.id)
    print("---------------------------------")
# NOTE(review): `on_message` is (re)registered several times below; each
# `@bot.event` decoration appears to replace the previous handler, so
# presumably only the LAST definition runs -- confirm this is intentional.
@bot.event
async def on_message(msg : str):
    """Skip messages from bots; forward everything else to command processing."""
    if msg.author.bot:
        return
    # elif "elsa" in msg.content.lower():
    #     await bot.send_message(msg.channel , "**Nikhil is Baka**")
    #     await bot.process_commands(msg)
    else:
        await bot.process_commands(msg)
@bot.event
async def on_message(message):
    """In one hard-coded channel, react to 'sorry'/'welcome' messages with
    emoji; always forward to command processing afterwards.
    (Superseded by later on_message definitions -- see note above the first.)"""
    if message.channel.id == '396519929974751236':
        if 'sorry' in str(message.content.lower()):
            await bot.add_reaction(message, '😭')
        elif 'welcome' in str(message.content.lower()):
            await bot.add_reaction(message, '👋')
        else:
            pass
    await bot.process_commands(message)
@bot.event
async def on_member_join(member):
    """Greet members who join one hard-coded server in its welcome channel."""
    if member.server.id == '396519929974751233':
        welcome_channel = bot.get_channel('396519929974751236')
        msg = await bot.send_message(welcome_channel, 'Welcome to the server {} ... Have a great time here..!!! 🎉'.format(member.name))
        await bot.add_reaction(msg, '👋')
@bot.event
async def on_member_remove(member):
    """Post a farewell message (same server/channel as the join greeting)."""
    if member.server.id == '396519929974751233':
        remove_channel = bot.get_channel('396519929974751236')
        msg = await bot.send_message(remove_channel, 'Farewell {} ... We will miss you..!!! 👋'.format(member.name))
        await bot.add_reaction(msg, '😭')
@bot.event
async def on_message(ctx):
    """Auto-delete messages in one hard-coded channel after 5 seconds.
    (Superseded by the following on_message definition -- see earlier note.)"""
    if ctx.channel.id == '459725199252783115':
        await asyncio.sleep(5)
        await bot.delete_message(ctx)
    await bot.process_commands(ctx)
@bot.event
async def on_message(ctx) :
    """Last-registered on_message handler (the one that actually runs):
    delete text messages in two 'reaction-only' channels, add vote/poll
    reactions in those channels, and add up/down arrows to questions in a
    third channel; always ends by processing commands."""
    flag = 1
    # NOTE(review): `list` shadows the builtin -- rename when next touched.
    list = [ '281823482562478080', '443658561038319616' ]
    if str(ctx.channel.id) in list :
        # Any alphanumeric/punctuation character marks the message as text,
        # which gets deleted (flag=0 suppresses the reaction step below).
        for a in "abcdefghijklmnopqrstuvwxyz,@$_&-+()/*:;!?.1234567890#" :
            if a in str(ctx.content.lower()) :
                await asyncio.sleep(0.5)
                await bot.delete_message(ctx)
                flag = 0
    if flag != 0 :
#        pass
        if str(ctx.channel.id) == '281823482562478080':
            await bot.add_reaction(message = ctx,emoji = "👍")
        elif str(ctx.channel.id) == '443658561038319616':
            # custom server emojis for yes/no voting
            emo_y = discord.utils.get(ctx.server.emojis , name = "yes")
            emo_n = discord.utils.get(ctx.server.emojis , name = "no")
            await bot.add_reaction(message = ctx,emoji = emo_y)
            await bot.add_reaction(message = ctx ,emoji = emo_n)
    # Questions (trailing '?') in the Q&A channel get up/down vote arrows.
    if str(ctx.channel.id) == "314799585761427457" and ctx.content[-1] == "?":
        await bot.add_reaction(message = ctx,emoji = "🔼")
        await bot.add_reaction(message = ctx,emoji = "🔽")
    await bot.process_commands(ctx)
############ BOT RUN ################
# Blocks until disconnect; the bot token is read from the TOKEN env var.
bot.run(os.getenv("TOKEN"))
| 2,895 | 0 | 159 |
0d032fd16a7d176ee318c77e6007e9eb73906098 | 4,021 | py | Python | Week 1/Introduction to Programming/Getting Computers to work for you/topic.py | chanchanchong/Crash-Course-On-Python | ce98bdd5c5355a582c76b864dae1c5eecdf6ea44 | [
"MIT"
] | null | null | null | Week 1/Introduction to Programming/Getting Computers to work for you/topic.py | chanchanchong/Crash-Course-On-Python | ce98bdd5c5355a582c76b864dae1c5eecdf6ea44 | [
"MIT"
] | null | null | null | Week 1/Introduction to Programming/Getting Computers to work for you/topic.py | chanchanchong/Crash-Course-On-Python | ce98bdd5c5355a582c76b864dae1c5eecdf6ea44 | [
"MIT"
] | null | null | null | # Working in IT, a lot of what we do boils down to using a computer to perform a
# certain task. In your job you might create user accounts, configure the network,
# install software, backup existing data, or execute a whole range of other
# computer based tasks from day to day. Back in my first IT job, I realized that
# every day I came into work I typed the same three commands to authenticate
# into systems. Those credentials timed out everyday by design, for security
# reasons, so I created a script that would automatically run these commands for
# me every morning to avoid having to type them myself. Funny enough, the team
# that monitors anomalous activity discovered my little invention and contacted
# me to remove it, oops. Tasks performed by a computer that need to be done
# multiple times with little variation are really well suited for automation, because
# when you automate a task you avoid the possibility of human errors, and reduce
# the time it takes to do it. Imagine this scenario, your company had a booth at a
# recent conference and has gathered a huge list of emails from people interested
# in learning more about your products. You want to send these people your
# monthly email newsletter, but some of the people on the list are already
# subscribed to receive it. So how do you make sure everyone receives your
# newsletter, without accidentally sending it to the same person twice? Well, you
# could manually check each email address one by one to make sure you only add
# new ones to the list, sounds boring and inefficient, right? It could be, and it's also
# more error prone, you might accidentally miss new emails, or add emails that
# were already there, or it might get so boring you fall asleep at your desk, Even
# your automated coffee machine won't help you out there. So what could you do
# instead? You could get the computer to do the work for you. You could write a
# program that checks for duplicates, and then adds each new email to the list.
# Your computer will do exactly as its told no matter how many emails there are in
# the list, so it won't get tired or make any mistakes. Even better, once you've
# written the program you can use the same code in future situations, saving
# you even more time, pretty cool, right? It gets better, think about when you're
# going to send these emails out, if you send them out manually you'll have to send
# the same email to everybody, personalizing the emails would be way too much
# manual work. If instead you use automation to send them, you could have the
# name and company of each person added to the email automatically. The result?
# More effective emails, without you spending hours inserting names into the text.
# Automating tasks allows you to focus on projects that are a better use of your
# time, letting computer do the boring stuff for you. Learning how to program is
# the first step to being able to do this, if you want to get computers to do the
# work for you, you're in the right place. Earlier in this video I told you about the
# first task I ever automated, now I want to tell you about coolest thing I ever
# automated. It was a script that changed a bunch of access permissions for a
# whole lot of Google Internal Services. The script reversed a large directory tree
# with tons of different files, checked the file contents, and then updated the
# permissions to the services based on the conditions that I laid out in the script.
# Okay, I admit I'm total nerd, but I still think it's really cool. Next up, it's time to
# share your ideas. What things would you like to automate using programming?
# While these discussion prompts are optional, they're really fun. Seriously, they let
# you get to know your fellow learners a bit, and collaborate on ideas and insights.
# Make sure you read what others are saying, they may give you ideas that you
# haven't even thought of. After that, you're ready to take your very first quiz of the
# course. Don't worry, it's just for practice.
| 75.867925 | 89 | 0.77518 | # Working in IT, a lot of what we do boils down to using a computer to perform a
# certain task. In your job you might create user accounts, configure the network,
# install software, backup existing data, or execute a whole range of other
# computer based tasks from day to day. Back in my first IT job, I realized that
# every day I came into work I typed the same three commands to authenticate
# into systems. Those credentials timed out everyday by design, for security
# reasons, so I created a script that would automatically run these commands for
# me every morning to avoid having to type them myself. Funny enough, the team
# that monitors anomalous activity discovered my little invention and contacted
# me to remove it, oops. Tasks performed by a computer that need to be done
# multiple times with little variation are really well suited for automation, because
# when you automate a task you avoid the possibility of human errors, and reduce
# the time it takes to do it. Imagine this scenario, your company had a booth at a
# recent conference and has gathered a huge list of emails from people interested
# in learning more about your products. You want to send these people your
# monthly email newsletter, but some of the people on the list are already
# subscribed to receive it. So how do you make sure everyone receives your
# newsletter, without accidentally sending it to the same person twice? Well, you
# could manually check each email address one by one to make sure you only add
# new ones to the list, sounds boring and inefficient, right? It could be, and it's also
# more error prone, you might accidentally miss new emails, or add emails that
# were already there, or it might get so boring you fall asleep at your desk, Even
# your automated coffee machine won't help you out there. So what could you do
# instead? You could get the computer to do the work for you. You could write a
# program that checks for duplicates, and the adds each new email to the list.
# Your computer will do exactly as its told no matter how many emails there are in
# the list, so it won't get tired or make any mistakes. Even better, once you've
# written the program you can use the same code in the future situations, saving
# you even more time, pretty cool, right? It gets better, think about when you're
# going to send these emails out, if you send them out manually you'll have to send
# the same email to everybody, personalizing the emails would be way too much
# manual work. If instead you use automation to send them, you could have the
# name and company of each person added to the email automatically. The result?
# More effective emails, without you spending hours inserting names into the text.
# Automating tasks allows you to focus on projects that are a better use of your
# time, letting computer do the boring stuff for you. Learning how to program is
# the first step to being able to do this, if you want to get computers to do the
# work for you, you're in the right place. Earlier in this video I told you about the
# first task I ever automated, now I want to tell you about coolest thing I ever
# automated. It was a script that changed a bunch of access permissions for a
# whole lot of Google Internal Services. The script reversed a large directory tree
# with tons of different files, checked the file contents, and then updated the
# permissions to the services based on the conditions that I laid out in the script.
# Okay, I admit I'm total nerd, but I still think it's really cool. Next up, it's time to
# share your ideas. What things would you like to automate using programming?
# While these discussion prompts are optional, they're really fun. Seriously, they let
# you get to know your fellow learners a bit, and collaborate on ideas and insights.
# Make sure you read what others are saying, they may give you ideas that you
# haven't even though of. After that, you're ready to take your very first quiz of the
# course. Don't worry, it's just for practice.
| 0 | 0 | 0 |
86598004b30724c1f0cdf45f58b67daa2649e2fa | 11,200 | py | Python | bfieldtools/integrals.py | SAFedorov/bfieldtools | 7e64bc2033670f01d2b90df2210b60743731a948 | [
"BSD-3-Clause"
] | 17 | 2020-05-22T19:39:39.000Z | 2022-03-15T19:03:09.000Z | bfieldtools/integrals.py | SAFedorov/bfieldtools | 7e64bc2033670f01d2b90df2210b60743731a948 | [
"BSD-3-Clause"
] | 27 | 2020-05-20T14:22:41.000Z | 2022-01-10T18:30:12.000Z | bfieldtools/integrals.py | SAFedorov/bfieldtools | 7e64bc2033670f01d2b90df2210b60743731a948 | [
"BSD-3-Clause"
] | 8 | 2020-08-12T10:30:50.000Z | 2022-03-22T12:21:33.000Z | """
Analytic integral for vectorized field / potential computation
"""
__all__ = [
"c_coeffs",
"d_distance",
"gamma0",
"omega",
"potential_dipoles",
"potential_vertex_dipoles",
"triangle_potential_approx",
"triangle_potential_dipole_linear",
"triangle_potential_uniform",
"x_distance",
"x_distance2",
]
import numpy as np
def determinant(a):
    """Determinant over the trailing (3, 3) dimensions of ``a``.

    Cofactor expansion along the first row; avoids the overhead of
    ``np.linalg.det`` for this fixed 3x3 case.
    """
    cof0 = a[..., 1, 1] * a[..., 2, 2] - a[..., 2, 1] * a[..., 1, 2]
    cof1 = a[..., 1, 2] * a[..., 2, 0] - a[..., 2, 2] * a[..., 1, 0]
    cof2 = a[..., 1, 0] * a[..., 2, 1] - a[..., 2, 0] * a[..., 1, 1]
    return a[..., 0, 0] * cof0 + a[..., 0, 1] * cof1 + a[..., 0, 2] * cof2
def norm(vecs):
    """Euclidean length over the last axis of ``vecs`` (einsum avoids the
    temporary that ``np.linalg.norm`` would allocate)."""
    squared = np.einsum("...i,...i", vecs, vecs)
    return np.sqrt(squared)
def cross(r1, r2):
    """Cross product over the last axis of ``r1`` and ``r2`` (cheaper than
    ``np.cross`` for this fixed 3-component case)."""
    result = np.zeros(r1.shape)
    for k in range(3):
        i, j = (k + 1) % 3, (k + 2) % 3
        result[..., k] = r1[..., i] * r2[..., j] - r1[..., j] * r2[..., i]
    return result
def gamma0(R, reg=1e-13, symmetrize=True):
    """1/r integrals over the edges of a triangle called gamma_0
    (line charge potentials).

    **NOTE: MAY NOT BE VERY PRECISE FOR POINTS DIRECTLY AT TRIANGLE
    EDGES.**

    Parameters
    ----------
    R : ndarray (..., N_triverts, xyz)
        displacement vectors (r-r') between Neval evaluation points (r)
        and the 3 vertices of the Ntri triangles/triangle.
    reg: float, a small value added to the arguments of the logarithm,
        regularizes the values very close to the line segments
    symmetrize: recalculates the result by mirroring
        the evaluation points with respect to the line segment
        mid point to get rid of the badly behaving points on the
        negative extension of the line segment

    Returns
    -------
    res: array (Neval, Nverts)
        The analytic integrals for each vertex/edge
    """
    # Edge vectors are taken from R[0]: they depend only on the triangle
    # geometry, which is identical for every evaluation point.
    edges = np.roll(R[0], 2, -2) - np.roll(R[0], 1, -2)
    # Projections of the two edge-endpoint displacement vectors onto the edge.
    dotprods1 = np.einsum("...i,...i", np.roll(R, 1, -2), edges)
    dotprods2 = np.einsum("...i,...i", np.roll(R, 2, -2), edges)
    en = norm(edges)
    del edges
    n = norm(R)
    # Regularize s.t. neither the denominator or the numerator can be zero
    # Avoid numerical issues directly at the edge
    nn1 = np.roll(n, 1, -1) * en
    nn2 = np.roll(n, 2, -1) * en
    # Closed-form line-charge potential: log of the (distance + projection)
    # ratio at the two edge endpoints.
    res = np.log((nn1 + dotprods1 + reg) / (nn2 + dotprods2 + reg))
    # Symmetrize the result since on the negative extension of the edge
    # there's division of two small values resulting numerical instabilities
    # (also incompatible with adding the reg value)
    if symmetrize:
        mask = ((np.abs(dotprods1 + nn1)) < 1e-12) * (dotprods1 + dotprods2 < 0)
        res[mask] = -np.log(
            (nn1[mask] - dotprods1[mask] + reg) / (nn2[mask] - dotprods2[mask] + reg)
        )
    res /= en
    return -res
def omega(R):
    """Solid angles subtended by triangles at the evaluation points.

    Analytic formula of Van Oosterom & Strackee, IEEE Trans. Biomed. Eng.
    BME-30(2), 1983: Omega = -2*atan2(triple product, denominator), where
    the denominator mixes the vertex distances and pairwise dot products.

    Parameters
    ----------
    R : ndarray (Neval, (Ntri), N_triverts, xyz)
        Displacement vectors (r-r'); last axis is xyz, second-to-last the
        three triangle vertices. Any leading shape is allowed.

    Returns
    -------
    ndarray (Neval, (Ntri))
        Solid angle subtended by each triangle at each evaluation point.
    """
    dists = norm(R)
    # Numerator: scalar triple product r1 . (r2 x r3).
    triple = determinant(R)
    # Denominator: |r1||r2||r3| + sum over cyclic pairs of (ri . rj)|rk|.
    denom = np.prod(dists, axis=-1)
    for i in range(3):
        j, k = (i + 1) % 3, (i + 2) % 3
        denom += np.einsum("...i,...i,...", R[..., i, :], R[..., j, :], dists[..., k])
    return -2 * np.arctan2(triple, denom)
def x_distance(R, tn, ta=None):
    """Signed in-plane distances from each triangle edge towards the
    opposite node, for all evaluation points in R.

    With ``ta`` given, the distances are normalized to one at the node;
    with ``ta=None`` they are scaled by the edge length instead.

    Parameters
    ----------
    R : ndarray (..., Ntri, Nverts, xyz)
        Displacement vectors (coordinates); only R[0] is used for the
        edge geometry, which is the same for every evaluation point.
    tn : ndarray (Ntri, 3)
        Triangle normals.
    ta : ndarray (Ntri,) or None
        Triangle areas; when None the 1/(2*area) normalization is skipped.

    Returns
    -------
    ndarray (..., Ntri, 3)
        Signed distance in the triangle plane, per vertex shape function.
    """
    edges = np.roll(R[0], 2, -2) - np.roll(R[0], 1, -2)
    if ta is not None:
        edges = edges / (2 * ta[:, None, None])
    # In-plane direction perpendicular to each edge: -(edge x normal).
    perp = -cross(edges, tn[:, None, :])
    return np.einsum("...k,...k->...", np.roll(R, 1, -2), perp)
def x_distance2(mesh):
    """Signed distances in the triangle planes from the opposite
    edge towards the node for all evaluation points in R.

    Unimplemented placeholder: intended to compute the same quantity as
    x_distance() from mesh gradient information instead of raw coordinates.
    """
    # TODO: with gradient, needs mesh info
    pass
def d_distance(R, tn):
    """Signed distance of each evaluation point from each triangle's plane.

    Parameters
    ----------
    R : ndarray (..., Ntri, Nverts, xyz)
        Displacement vectors (coordinates).
    tn : ndarray (Ntri, 3)
        Triangle normals.

    Returns
    -------
    ndarray (..., Ntri)
        Projection of the first-vertex displacement onto each normal.
    """
    first_vert = R[..., 0, :]
    return np.einsum("...ki,ki->...k", first_vert, tn)
def c_coeffs(R, ta):
    """Cotan coefficients: Gram matrix of the triangle edge vectors,
    scaled by twice the triangle area.

    Parameters
    ----------
    R : ndarray (..., Ntri, Nverts, xyz)
        Displacement vectors; only R[0] is used (the edges are the same
        for every evaluation point).
    ta : ndarray (Ntri,)
        Triangle areas.

    Returns
    -------
    ndarray (Ntri, 3, 3)
        Pairwise edge dot products divided by 2*area, per triangle.
    """
    edges = np.roll(R[0], 2, -2) - np.roll(R[0], 1, -2)
    scaled = edges / (2 * ta[:, None, None])
    return np.einsum("...ik,...jk->...ij", edges, scaled)
def triangle_potential_uniform(R, tn, planar=False):
    """1/r potential of a uniformly charged triangle.

    Analytic solution after Ferguson, Zhang & Stroink, IEEE Trans. Biomed.
    Eng. 41(5):455-460, 1994 (doi:10.1109/10.293220).

    Parameters
    ----------
    R : (Neval, (Ntri), 3, 3) array
        Displacement vectors (Neval, (Ntri), Ntri_verts, xyz).
    tn : ((Ntri), 3) array
        Triangle normals.
    planar : bool
        When True, all triangles and evaluation points are assumed to lie
        in the same plane and the solid-angle term is skipped (faster).

    Returns
    -------
    ndarray (Neval, (Ntri))
        The 1/r potential of each triangle at each evaluation point.
    """
    edge_dists = x_distance(R, tn, None)
    pot = np.einsum("...i,...i", gamma0(R), edge_dists)
    if planar:
        return pot
    # Out-of-plane contribution: signed plane distance times solid angle.
    return pot + d_distance(R, tn) * omega(R)
def triangle_potential_approx(Rcenters, ta, reg=1e-12):
    """Centroid approximation of the uniform-triangle 1/r potential.

    Each triangle is collapsed to a point source of strength ``ta`` at its
    centroid; ``reg`` keeps the expression finite at the centroid itself
    (values very close to the centroid are inexact by construction).

    Parameters
    ----------
    Rcenters : (N, (Ntri), 3) array
        Displacement vectors (Neval, Ntri, xyz) from the triangle centers.
    ta : (Ntri,) array
        Triangle areas.
    reg : float
        Regularization value added to the distance.

    Returns
    -------
    ndarray (N, (Ntri))
        Approximate potential, area / |Rcenters|, per triangle.
    """
    return ta / (norm(Rcenters) + reg)
def potential_dipoles(R, face_normals, face_areas):
    """Approximate the potential of a linearly varying dipole density with
    one point dipole per quadrature point on each face.

    Parameters
    ----------
    R : ndarray (Neval, Ntri, Ntri_verts, N_xyz)
        Displacement vectors.
    face_normals : ndarray (Ntri, 3)
        Normal (dipole direction) of each face.
    face_areas : ndarray (Ntri,)
        Area of each face.

    Returns
    -------
    pot : ndarray (Neval, Ntri, Ntriverts)
        Potential contribution of each vertex shape function on each face.
    """
    # Quadrature points for the linear (hat) shape functions: each row
    # mixes the triangle vertices with weights (1/2, 1/4, 1/4) permuted.
    quad_weights = np.array(
        [[0.5, 0.25, 0.25], [0.25, 0.5, 0.25], [0.25, 0.25, 0.5]]
    )
    Rquad = np.einsum("...ij,ik->...kj", R, quad_weights)
    # Point-dipole potential n.R / |R|^3 at each quadrature point.
    pot = np.einsum("ik, ...ijk->...ij", face_normals, Rquad) / (norm(Rquad) ** 3)
    return pot * (face_areas[:, None] / 3)
def potential_vertex_dipoles(R, vertex_normals, vertex_areas):
    """Approximate the potential of a linearly varying dipole density with
    one point dipole per mesh vertex.

    Parameters
    ----------
    R : ndarray (Neval, Nvertex, N_xyz)
        Displacement vectors.
    vertex_normals : ndarray (Nvertex, 3)
        Vertex normals (dipole directions).
    vertex_areas : ndarray (Nvertex,)
        Vertex areas (dipole strengths).

    Returns
    -------
    pot : ndarray (Neval, Nvertex)
        n.R / |R|^3 per vertex, scaled by the vertex area.
    """
    pot = np.einsum("ik, lik->li", vertex_normals, R) / (norm(R) ** 3)
    return pot * vertex_areas
def triangle_potential_dipole_linear(R, tn, ta):
    """Potential of a dipolar density whose magnitude is a linear shape
    function on each triangle ("omega_i" in de Munck's paper).

    Reference: J. C. de Munck, IEEE Trans. Biomed. Eng. 39(9):986-990,
    1992 (doi:10.1109/10.256433).

    Parameters
    ----------
    R : (..., Ntri, 3, 3) array
        Displacement vectors (..., Ntri, Ntri_verts, xyz).
    tn : ((Ntri), 3) array
        Triangle normals.
    ta : (Ntri,) array
        Triangle areas.

    Returns
    -------
    ndarray (..., Ntri, Ntri_verts)
        Dipolar potential of each linear shape function (Ntri_verts) on
        each triangle (Ntri), at the points given by R.
    """
    # Edge (line-charge) contribution, weighted by the cotan coefficients
    # and the signed plane distance.
    line_term = np.einsum(
        "...i,...ij,...->...j",
        gamma0(R),
        c_coeffs(R, ta),
        d_distance(R, tn),
        optimize=True,
    )
    # Solid-angle contribution of each vertex shape function.
    solid_angle_term = x_distance(R, tn, ta) * omega(R)[..., :, None]
    return line_term - solid_angle_term
| 30.352304 | 85 | 0.589107 | """
Analytic integral for vectorized field / potential computation
"""
__all__ = [
"c_coeffs",
"d_distance",
"gamma0",
"omega",
"potential_dipoles",
"potential_vertex_dipoles",
"triangle_potential_approx",
"triangle_potential_dipole_linear",
"triangle_potential_uniform",
"x_distance",
"x_distance2",
]
import numpy as np
def determinant(a):
"""Faster determinant for the two last dimensions of 'a'"""
det = a[..., 0, 0] * (a[..., 1, 1] * a[..., 2, 2] - a[..., 2, 1] * a[..., 1, 2])
det += a[..., 0, 1] * (a[..., 1, 2] * a[..., 2, 0] - a[..., 2, 2] * a[..., 1, 0])
det += a[..., 0, 2] * (a[..., 1, 0] * a[..., 2, 1] - a[..., 2, 0] * a[..., 1, 1])
return det
def norm(vecs):
"""Faster vector norm for the last dimension of 'vecs'"""
return np.sqrt(np.einsum("...i,...i", vecs, vecs))
def cross(r1, r2):
"""Cross product without overhead for the last dimensions of 'r1' and 'r2'"""
result = np.zeros(r1.shape)
result[..., 0] = r1[..., 1] * r2[..., 2] - r1[..., 2] * r2[..., 1]
result[..., 1] = r1[..., 2] * r2[..., 0] - r1[..., 0] * r2[..., 2]
result[..., 2] = r1[..., 0] * r2[..., 1] - r1[..., 1] * r2[..., 0]
return result
def gamma0(R, reg=1e-13, symmetrize=True):
"""1/r integrals over the edges of a triangle called gamma_0
(line charge potentials).
**NOTE: MAY NOT BE VERY PRECISE FOR POINTS DIRECTLY AT TRIANGLE
EDGES.**
Parameters
----------
R : ndarray (..., N_triverts, xyz)
displacement vectors (r-r') between Neval evaluation points (r)
and the 3 vertices of the Ntri triangles/triangle.
reg: float, a small value added to the arguments of the logarithm,
regularizes the values very close to the line segments
symmetrize: recalculates the result for by mirroring
the evaluation points with respect the line segment
mid point to get rid off the badly behaving points on the
negative extension of the line segment
Returns
-------
res: array (Neval, Nverts)
The analytic integrals for each vertex/edge
"""
edges = np.roll(R[0], 2, -2) - np.roll(R[0], 1, -2)
dotprods1 = np.einsum("...i,...i", np.roll(R, 1, -2), edges)
dotprods2 = np.einsum("...i,...i", np.roll(R, 2, -2), edges)
en = norm(edges)
del edges
n = norm(R)
# Regularize s.t. neither the denominator or the numerator can be zero
# Avoid numerical issues directly at the edge
nn1 = np.roll(n, 1, -1) * en
nn2 = np.roll(n, 2, -1) * en
res = np.log((nn1 + dotprods1 + reg) / (nn2 + dotprods2 + reg))
# Symmetrize the result since on the negative extension of the edge
# there's division of two small values resulting numerical instabilities
# (also incompatible with adding the reg value)
if symmetrize:
mask = ((np.abs(dotprods1 + nn1)) < 1e-12) * (dotprods1 + dotprods2 < 0)
res[mask] = -np.log(
(nn1[mask] - dotprods1[mask] + reg) / (nn2[mask] - dotprods2[mask] + reg)
)
res /= en
return -res
def omega(R):
"""Calculate the solid angle of a triangles
see
A. Van Oosterom and J. Strackee
IEEE TRANSACTIONS ON BIOMEDICAL ENGINEERING,
VOL. BME-30, NO. 2, 1983
Parameters
----------
R : ndarray (Neval, (Ntri), N_triverts, xyz)
displacement vectors (r-r') of Ntri triangles
and Neval evaluation points for the 3 vertices
of the triangles/triangle.
The shape of R can any with the constraint that
the last dimenion corrsponds to coordinates (x, y, z) and the
second last dimension to triangle vertices (vert1, vert2, vert3)
Returns
-------
sa: (Neval, (Ntri))
Solid angles of subtened by triangles at evaluation points
"""
# Distances
d = norm(R)
# Scalar triple products
stp = determinant(R)
# Denominator
denom = np.prod(d, axis=-1)
for i in range(3):
j = (i + 1) % 3
k = (i + 2) % 3
# denom += np.sum(R[..., i, :]*R[..., j, :], axis=-1)*d[..., k]
denom += np.einsum("...i,...i,...", R[..., i, :], R[..., j, :], d[..., k])
# Solid angles
sa = -2 * np.arctan2(stp, denom)
return sa
def x_distance(R, tn, ta=None):
"""Signed distances in the triangle planes from the opposite
edge towards the node for all evaluation points in R
The distances are normalized to one at the node if areas are given
The distances are multiplied by the edge lenght if areass are None
Parameters:
R: ndarray (... Ntri, Nverts, xyz)
displacement vectors (coordinates)
tn: ndarray (Ntri, 3)
triangle normals
ta: ndarray (Ntri)
triangle areas
if None, normalizization with double area is not carried out
returns:
ndaarray (..., Ntri, N_triverts (3)), distance in the triangle plane
"""
edges = np.roll(R[0], 2, -2) - np.roll(R[0], 1, -2)
if ta is not None:
edges /= 2 * ta[:, None, None]
edges = -cross(edges, tn[:, None, :])
return np.einsum("...k,...k->...", np.roll(R, 1, -2), edges)
def x_distance2(mesh):
"""Signed distances in the triangle planes from the opposite
edge towards the node for all evalution points in R
"""
# TODO: with gradient, needs mesh info
pass
def d_distance(R, tn):
"""Signed distance from the triangle plane for each triangle
Parameters:
R: ndarray (... Ntri, Nverts, xyz)
displacement vectors (coordinates)
tn: ndarray (Ntri, 3)
triangle normals
Returns:
ndarray (..., Ntri, N_triverts (3)) of signed distances
"""
return np.einsum("...ki,ki->...k", np.take(R, 0, -2), tn)
def c_coeffs(R, ta):
"""Cotan-coeffs
Parameters:
R: ndarray (... Ntri, Nverts, xyz)
displacement vectors (coordinates)
ta: ndarray (Ntri)
triangle areas
Returns:
ndarray (..., Ntri, N_triverts (3))
"""
edges = np.roll(R[0], 2, -2) - np.roll(R[0], 1, -2)
return np.einsum("...ik,...jk->...ij", edges, edges / (2 * ta[:, None, None]))
def triangle_potential_uniform(R, tn, planar=False):
    """1/r potential of a uniform triangle.

    For the original derivation see:
    A. S. Ferguson, Xu Zhang and G. Stroink,
    "A complete linear discretization for calculating the magnetic field
    using the boundary element method,"
    IEEE Transactions on Biomedical Engineering,
    vol. 41, no. 5, pp. 455-460, May 1994. doi: 10.1109/10.293220

    Parameters
    ----------
    R : (Neval, (Ntri), 3, 3) array
        Displacement vectors (Neval, (Ntri), Ntri_verts, xyz)
    tn : ((Ntri), 3) array
        Triangle normals (Ntri, dir)
    planar : boolean
        If True, assume all the triangles and evaluation points lie in
        the same plane (for speed); the omega (solid-angle) term is
        skipped.

    Returns
    -------
    result : ndarray (Neval, (Ntri))
        1/r potential of each triangle at each evaluation point
    """
    # In-plane part: gamma0 edge terms weighted by signed edge distances.
    potential = np.einsum("...i,...i", gamma0(R), x_distance(R, tn, None))
    if planar:
        return potential
    # Out-of-plane part: plane distance times the subtended solid angle.
    return potential + d_distance(R, tn) * omega(R)
def triangle_potential_approx(Rcenters, ta, reg=1e-12):
    """1/r potential of a uniform triangle using the centroid approximation.

    Calculates area / distance for the triangle centroids. The
    singularity at a centroid is handled by the very small `reg` value,
    so values close to a centroid remain inexact.

    Parameters
    ----------
    Rcenters : (N, (Ntri), 3) array
        Displacement vectors (Neval, Ntri, xyz) from triangle centers
    ta : (Ntri) array
        Triangle areas
    reg : float
        Regularization value added to the centroid distances

    Returns
    -------
    result : ndarray (N, (Ntri))
        Approximate 1/r potential of each triangle at each evaluation point
    """
    distances = norm(Rcenters) + reg
    return ta / distances
def potential_dipoles(R, face_normals, face_areas):
    """Approximate the potential of a linearly varying dipole density
    by placing dipoles at quadrature points on each face.

    Parameters
    ----------
    R : ndarray (Neval, Ntri, Ntri_verts, N_xyz)
        Displacement vectors
    face_normals : ndarray (Ntri, 3)
        normal of each triangle
    face_areas : ndarray (Ntri,)
        area of each triangle

    Returns
    -------
    pot : ndarray (Neval, Ntri, Ntriverts)
        Potential approximation for each vertex in each face
    """
    # Quadrature weights corresponding to the linear shape functions
    # (each row mixes one vertex with the other two).
    quad_weights = np.array(
        [[0.5, 0.25, 0.25], [0.25, 0.5, 0.25], [0.25, 0.25, 0.5]]
    )
    # Displacement vectors to the quadrature points.
    Rquad = np.einsum("...ij,ik->...kj", R, quad_weights)
    # Dipole potential n.r / |r|^3 at each quadrature point,
    # weighted by one third of the face area.
    pot = np.einsum("ik, ...ijk->...ij", face_normals, Rquad) / (norm(Rquad) ** 3)
    return pot * (face_areas[:, None] / 3)
def potential_vertex_dipoles(R, vertex_normals, vertex_areas):
    """Approximate the potential of a linearly varying dipole density
    by placing one dipole at each vertex.

    Parameters
    ----------
    R : ndarray (Neval, Nvertex, N_xyz)
        Displacement vectors
    vertex_normals : ndarray (Nvertex, 3)
        normal at each vertex
    vertex_areas : ndarray (Nvertex,)
        area associated with each vertex

    Returns
    -------
    pot : ndarray (Neval, Nvertex)
        Potential approximation at each evaluation point
    """
    # Dipole potential n.r / |r|^3, weighted by the vertex area.
    projection = np.einsum("ik, lik->li", vertex_normals, R)
    return projection / (norm(R) ** 3) * vertex_areas
def triangle_potential_dipole_linear(R, tn, ta):
    """Potential of a dipolar density whose magnitude is a linear shape
    function on a triangle; "omega_i" in de Munck's paper.

    For the original derivation, see:
    J. C. de Munck, "A linear discretization of the volume mesh_conductor
    boundary integral equation using analytically integrated elements
    (electrophysiology application),"
    IEEE Transactions on Biomedical Engineering,
    vol. 39, no. 9, pp. 986-990, Sept. 1992. doi: 10.1109/10.256433

    Parameters
    ----------
    R : (..., Ntri, 3, 3) array
        Displacement vectors (..., Ntri, Ntri_verts, xyz)
    tn : ((Ntri), 3) array
        Triangle normals (Ntri, dir)
    ta : (Ntri) array
        Triangle areas

    Returns
    -------
    result : ndarray (..., Ntri, Ntri_verts)
        Dipolar potential for each shape function (Ntri_verts) in each
        triangle (Ntri) at the points given by R
    """
    gamma = gamma0(R)
    coeffs = c_coeffs(R, ta)
    plane_dist = d_distance(R, tn)
    # Cotan-weighted edge terms scaled by the signed plane distance.
    result = np.einsum(
        "...i,...ij,...->...j", gamma, coeffs, plane_dist, optimize=True
    )
    # Subtract the solid-angle term weighted by the in-plane distances.
    solid_angles = omega(R)[..., :, None]
    return result - x_distance(R, tn, ta) * solid_angles
| 0 | 0 | 0 |
5cad79a23d827aef5df6641cba6427bf158d210c | 1,029 | py | Python | src/python/T0/WMBS/Oracle/Tier0Feeder/GetNotClosedOutWorkflows.py | silviodonato/T0 | a093729d08b31175ed35cd20e889bd7094ce152a | [
"Apache-2.0"
] | 6 | 2016-03-09T14:36:19.000Z | 2021-07-27T01:28:00.000Z | src/python/T0/WMBS/Oracle/Tier0Feeder/GetNotClosedOutWorkflows.py | silviodonato/T0 | a093729d08b31175ed35cd20e889bd7094ce152a | [
"Apache-2.0"
] | 193 | 2015-01-07T21:03:43.000Z | 2022-03-31T12:22:18.000Z | src/python/T0/WMBS/Oracle/Tier0Feeder/GetNotClosedOutWorkflows.py | silviodonato/T0 | a093729d08b31175ed35cd20e889bd7094ce152a | [
"Apache-2.0"
] | 36 | 2015-01-28T19:01:54.000Z | 2021-12-15T17:18:20.000Z | """
_GetNotClosedOutWorkflows_
Oracle implementation of GetNotClosedOutWorkflows
Lists top level filesets not injected to monitoring
"""
from WMCore.Database.DBFormatter import DBFormatter
| 35.482759 | 117 | 0.669582 | """
_GetNotClosedOutWorkflows_
Oracle implementation of GetNotClosedOutWorkflows
Lists top level filesets not injected to monitoring
"""
from WMCore.Database.DBFormatter import DBFormatter
class GetNotClosedOutWorkflows(DBFormatter):
    """Oracle query listing workflows that have not been closed out yet
    (closeout state is zero in workflow_monitoring)."""
    def execute(self, conn = None, transaction = False):
        """Run the query and return all matching rows.

        Each row is (workflow id, fileset id, fileset open flag,
        workflow name).

        :param conn: optional existing database connection to reuse
        :param transaction: whether to run inside an existing transaction
        """
        sql = """SELECT wmbs_subscription.workflow, wmbs_subscription.fileset, wmbs_fileset.open, wmbs_workflow.name
                 FROM workflow_monitoring
                 INNER JOIN wmbs_subscription ON
                   workflow_monitoring.workflow = wmbs_subscription.workflow
                 INNER JOIN wmbs_workflow ON
                   wmbs_subscription.workflow = wmbs_workflow.id
                 INNER JOIN wmbs_fileset ON
                   wmbs_subscription.fileset = wmbs_fileset.id
                 WHERE checkForZeroState(workflow_monitoring.closeout) = 0"""
        # processData returns a list of result proxies; the single query
        # yields one, whose rows are fetched in full.
        results = self.dbi.processData(sql, [], conn = conn,
                                       transaction = transaction)
        return results[0].fetchall()
| 765 | 23 | 50 |
9bd44b4ee14e40bcdf488c3f5373a25ced1d890a | 1,071 | py | Python | setup.py | CyberSolutionsTech/cst-micro-chassis | 236931f91b6622702615a06810871b3106c05fcc | [
"MIT"
] | null | null | null | setup.py | CyberSolutionsTech/cst-micro-chassis | 236931f91b6622702615a06810871b3106c05fcc | [
"MIT"
] | null | null | null | setup.py | CyberSolutionsTech/cst-micro-chassis | 236931f91b6622702615a06810871b3106c05fcc | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
__version__ = '0.1.0'
setup(
name='cst-micro-chassis',
version=__version__,
author='CyberSolutionsTech',
license='MIT',
author_email='nicolae.natrapeiu@cyber-solutions.com',
description='Microservices chassis pattern library',
long_description=read('README.md'),
long_description_content_type='text/markdown',
packages=find_packages(where='src'),
package_dir={'': 'src'},
url='https://pypi.org/project/cst-micro-chassis/',
project_urls={
'Source': 'https://github.com/CyberSolutionsTech/cst-micro-chassis'
},
install_requires=[
'flask==1.1.*',
'flask-restful==0.3.*',
],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'License :: OSI Approved :: MIT License',
],
python_requires='>=3.7',
)
| 28.184211 | 75 | 0.637722 | from setuptools import setup, find_packages
__version__ = '0.1.0'
def read(filename):
    """Return the full text of *filename*, decoded as UTF-8."""
    with open(filename, "r", encoding="utf-8") as handle:
        contents = handle.read()
    return contents
# Distribution metadata for packaging/publishing this library.
setup(
    name='cst-micro-chassis',
    version=__version__,
    author='CyberSolutionsTech',
    license='MIT',
    author_email='nicolae.natrapeiu@cyber-solutions.com',
    description='Microservices chassis pattern library',
    # The README (read via the helper above) is shown on the PyPI page.
    long_description=read('README.md'),
    long_description_content_type='text/markdown',
    # src/ layout: importable packages live under src/.
    packages=find_packages(where='src'),
    package_dir={'': 'src'},
    url='https://pypi.org/project/cst-micro-chassis/',
    project_urls={
        'Source': 'https://github.com/CyberSolutionsTech/cst-micro-chassis'
    },
    # Runtime dependencies, pinned to minor versions.
    install_requires=[
        'flask==1.1.*',
        'flask-restful==0.3.*',
    ],
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: Implementation :: CPython',
        'License :: OSI Approved :: MIT License',
    ],
    python_requires='>=3.7',
)
| 77 | 0 | 23 |
389ce309472ac29c718061120357cbdee9284804 | 1,314 | py | Python | src/cli/command/status.py | oVirt/ovirt-engine-cli | 422d70e1dc422f0ca248abea47a472e3605caa4b | [
"Apache-2.0"
] | 4 | 2015-11-29T08:53:03.000Z | 2022-02-05T14:10:24.000Z | src/cli/command/status.py | oVirt/ovirt-engine-cli | 422d70e1dc422f0ca248abea47a472e3605caa4b | [
"Apache-2.0"
] | null | null | null | src/cli/command/status.py | oVirt/ovirt-engine-cli | 422d70e1dc422f0ca248abea47a472e3605caa4b | [
"Apache-2.0"
] | 4 | 2015-02-06T02:06:53.000Z | 2020-03-24T07:13:05.000Z | #
# Copyright (c) 2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cli.command.command import Command
| 27.957447 | 74 | 0.62481 | #
# Copyright (c) 2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cli.command.command import Command
class StatusCommand(Command):
    """CLI command that prints the exit status of the previous command."""
    name = 'status'
    description = 'show status'
    helptext = """\
== Usage ==
status
== Description ==
Show the exit status of the last command.
"""
    def execute(self):
        """Write the last command's status, annotated with any matching
        symbolic constant from the context, to the terminal."""
        ctx = self.context
        out = ctx.terminal.stdout
        last_status = ctx.status
        if last_status is None:
            label = 'N/A'
        else:
            label = str(last_status)
            # Append every upper-case context constant equal to the
            # status, e.g. "0 (OK)".
            for attr in dir(ctx):
                if attr[0].isupper() and getattr(ctx, attr) == last_status:
                    label += ' (%s)' % attr
        out.write('last command status: %s\n' % label)
| 423 | 234 | 23 |
5f7466b0ae98c93b9b60cf0a0fffea4cf7387dd8 | 383 | py | Python | basicfunc.py | krshubham/ecc-elgamal | 9a4e14b59580a6b71d33919e5dd9adc3867bf30f | [
"MIT"
] | 1 | 2017-10-17T07:03:51.000Z | 2017-10-17T07:03:51.000Z | basicfunc.py | krshubham/ecc-elgamal | 9a4e14b59580a6b71d33919e5dd9adc3867bf30f | [
"MIT"
] | null | null | null | basicfunc.py | krshubham/ecc-elgamal | 9a4e14b59580a6b71d33919e5dd9adc3867bf30f | [
"MIT"
] | null | null | null | import collections
import os,sys
Coord = collections.namedtuple("Coord", ["x", "y"]) | 18.238095 | 51 | 0.488251 | import collections
import os,sys
def inv(n, q):
    """Return the multiplicative inverse of n modulo q.

    Brute-force O(q) search; suitable only for the small moduli used in
    this toy ECC implementation.

    Raises:
        AssertionError: if n has no inverse modulo q (gcd(n, q) != 1).
    """
    for i in range(q):
        if (n * i) % q == 1:
            return i
    # Bug fix: the original used `assert False, "unreached"`, which is
    # silently stripped under `python -O` (the function would then return
    # None). An explicit raise keeps the same exception type and always
    # fires. Dead trailing `pass` statements removed.
    raise AssertionError("unreached")
def sqrt(n, q):
    """Return both modular square roots of n mod q as (r, q - r),
    or (False, False) when n is not a quadratic residue mod q.

    Requires n < q. Brute-force search, intended for small q only.
    """
    assert n < q
    for candidate in range(1, q):
        if candidate * candidate % q == n:
            return (candidate, q - candidate)
    return (False, False)
Coord = collections.namedtuple("Coord", ["x", "y"]) | 251 | 0 | 46 |
f7bd2300804ef9ced2e61c57d150286efb4ecfff | 6,089 | py | Python | fero/datasource.py | FeroLabs/fero_client | 57a8a6d321c6199f723ae123d554217b9a27a971 | [
"MIT"
] | 1 | 2021-11-24T16:59:21.000Z | 2021-11-24T16:59:21.000Z | fero/datasource.py | FeroLabs/fero_client | 57a8a6d321c6199f723ae123d554217b9a27a971 | [
"MIT"
] | 3 | 2020-11-17T19:45:05.000Z | 2021-01-05T23:54:38.000Z | fero/datasource.py | FeroLabs/fero_client | 57a8a6d321c6199f723ae123d554217b9a27a971 | [
"MIT"
] | null | null | null | import os
import time
import fero
from fero import FeroError
from typing import Optional, Union
from marshmallow import (
Schema,
fields,
validate,
EXCLUDE,
)
from .common import FeroObject
| 29.995074 | 107 | 0.626868 | import os
import time
import fero
from fero import FeroError
from typing import Optional, Union
from marshmallow import (
Schema,
fields,
validate,
EXCLUDE,
)
from .common import FeroObject
class DataSourceSchema(Schema):
    """Marshmallow schema for the data-source payload returned by the
    Fero API."""
    class Meta:
        # Ignore unknown keys instead of raising when the API adds fields.
        unknown = EXCLUDE
    uuid = fields.UUID(required=True)
    primary_key_column = fields.String(required=True, allow_none=True)
    primary_datetime_column = fields.String(required=True, allow_none=True)
    schema = fields.Dict(required=True, allow_none=True)
    name = fields.String(required=True)
    description = fields.String(required=True)
    created = fields.DateTime(required=True)
    modified = fields.DateTime(required=True)
    ac_name = fields.String(required=True)
    username = fields.String(required=True)
    # Single-letter processing-state codes used in `status` below.
    INITIALIZED = "I"
    PROCESSING = "P"
    LOADING_FILE = "L"
    ANALYZING_FILE = "A"
    WRITING_FILE = "W"
    COMPRESSING_TABLE = "C"
    READY = "R"
    ERROR = "E"
    status = fields.String(
        validate=validate.OneOf(["I", "P", "L", "A", "W", "C", "R", "E"]), required=True
    )
    error_notices = fields.Dict(required=True, default=lambda: {"errors": []})
    progress = fields.Integer(required=True, default=0)
    overwrites = fields.Dict(required=True, allow_none=True)
    transformed_source = fields.Bool(required=True, default=False)
    live_source = fields.Bool(required=True, default=False)
    default_upload_config = fields.Dict(required=False)
class DataSource(FeroObject):
    """A Fero data source.

    Attribute access falls through to the deserialized API payload, and
    CSV files can be appended to or can replace the source's data.
    """

    schema_class = DataSourceSchema

    def __getattr__(self, name: str):
        # Expose payload fields as attributes; unknown keys yield None
        # (dict.get semantics).
        return self._data.get(name)

    def __repr__(self):
        return f"<Data Source name={self.name}>"

    __str__ = __repr__

    def _upload_csv(self, file_path: str, action: str, wait_until_complete: bool):
        """Shared implementation for :meth:`append_csv`/:meth:`replace_csv`.

        The two public methods previously duplicated this body verbatim
        except for the action code.

        :param action: API action code, "A" to append or "R" to replace
        :raises FeroError: Raised if the file name does not end in ".csv"
        """
        if not file_path.endswith(".csv"):
            raise FeroError("Fero only supports csv appends")
        file_name = os.path.basename(file_path)
        inbox_response = self._client.post(
            f"/api/v2/data_source/{self.uuid}/inbox_url/",
            {"file_name": file_name, "action": action},
        )
        with open(file_path) as fp:
            self._client.upload_file(inbox_response, file_name, fp)
        upload_status = UploadedFileStatus(self._client, inbox_response["upload_uuid"])
        return (
            upload_status.wait_until_complete()
            if wait_until_complete
            else upload_status
        )

    def append_csv(self, file_path: str, wait_until_complete: bool = False):
        """Appends a specified csv file to the data source.

        :param file_path: Location of the csv file to append
        :type file_path: str
        :raises FeroError: Raised if the file does not match a naive csv check
        """
        return self._upload_csv(file_path, "A", wait_until_complete)

    def replace_csv(self, file_path: str, wait_until_complete: bool = False):
        """Replaces the data source contents with the specified csv file.

        (Docstring fixed: it previously claimed to append.)

        :param file_path: Location of the csv file to upload
        :type file_path: str
        :raises FeroError: Raised if the file does not match a naive csv check
        """
        return self._upload_csv(file_path, "R", wait_until_complete)
class UploadedFilesSchema(Schema):
    """Marshmallow schema for the uploaded-file status payload returned
    by the Fero API."""
    class Meta:
        # Ignore unknown keys instead of raising when the API adds fields.
        unknown = EXCLUDE
    uuid = fields.UUID(required=True)
    # Single-letter processing-state codes used in `status` below.
    INITIALIZED = "I"
    PARSING = "P"
    ANALYZING = "A"
    CREATING = "C"
    ERROR = "E"
    DONE = "D"
    DELETING = "R"  # R for removing
    USER_CONFIRMATION = "U"
    status = fields.String(
        validate=validate.OneOf(["I", "P", "A", "C", "R", "D", "E", "U"]), required=True
    )
    error_notices = fields.Dict(
        required=True, default=lambda: {"global_notices": [], "parsing_notices": []}
    )
class UploadedFileStatus:
    """Tracks the processing status of a file uploaded to Fero,
    identified by its upload uuid."""
    def __init__(self, client: "fero.Fero", id: str):
        self._id = id
        self._client = client
        # NOTE(review): _status_data appears unused in this class —
        # confirm before removing.
        self._status_data = None
        self._schema = UploadedFilesSchema()
    @staticmethod
    def _check_status_complete(status: Optional[dict]) -> bool:
        """Checks the status of the latest uploaded-file response.

        Returns True if processing is complete (DONE), False if it is
        still in progress (or `status` is None).

        :raises FeroError: if the status is ERROR; the message lists all
            global and parsing notices.
        """
        if status is None or status["status"] not in [
            UploadedFilesSchema.ERROR,
            UploadedFilesSchema.DONE,
        ]:
            return False
        if status["status"] == UploadedFilesSchema.ERROR:
            errors = [
                f'"{str(e)}"'
                for e in status["error_notices"]["global_notices"]
                + status["error_notices"]["parsing_notices"]
            ]
            error_message = f"Unable to upload file. The following error(s) occurred: {', '.join(errors)}"
            raise FeroError(error_message)
        return True
    def get_upload_status(self) -> Union[dict, None]:
        """Gets the current status of the uploaded-files object.

        :return: the deserialized status dict, or None if the upload
            record does not exist (404)
        :rtype: Union[dict, None]
        """
        raw_data = self._client.get(
            f"/api/v2/uploaded_files/{self._id}/", allow_404=True
        )
        data = None
        if raw_data is not None:
            data = self._schema.load(raw_data)
        return data
    def wait_until_complete(self) -> "UploadedFileStatus":
        """Block, polling every 0.5 s, until processing finishes.

        :return: self, for chaining
        :raises FeroError: if the upload ends in the ERROR state
        """
        status = self.get_upload_status()
        while not self._check_status_complete(status):
            time.sleep(0.5)
            status = self.get_upload_status()
        return self
| 479 | 5,307 | 92 |
746bc1433c13361daa899e88d819fb353b116316 | 3,290 | py | Python | site_viewer_script.py | EmilFerrariHerzum/icons_substituter | 07a7cdcb06400c25090378d38ae48f46a674bf40 | [
"MIT"
] | null | null | null | site_viewer_script.py | EmilFerrariHerzum/icons_substituter | 07a7cdcb06400c25090378d38ae48f46a674bf40 | [
"MIT"
] | null | null | null | site_viewer_script.py | EmilFerrariHerzum/icons_substituter | 07a7cdcb06400c25090378d38ae48f46a674bf40 | [
"MIT"
] | null | null | null | from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium import webdriver
from decouple import config
import time
import re
import os
url = "https://busyliving.co.uk/"
username = config("login_username")
password = config("password")
# Log in to the site and get the source code
# Define a class to store the results needed
# Finds the elements needed and put them in an array
# Define the path to the files
directory = "s3://busyliving"
local = "C:\\Users\\emilf\\Downloads\\Ringley\\Images\\"
files_names = os.listdir("C:\\Users\\emilf\\Downloads\\Ringley\\Images\\")
grant = " --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers"
# Change the file
# Function that does it all
# Runs the program
replace(finder(site_login(url))) | 40.617284 | 151 | 0.661702 | from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium import webdriver
from decouple import config
import time
import re
import os
url = "https://busyliving.co.uk/"
username = config("login_username")
password = config("password")
# Log in to the site and get the source code
def site_login(URL):
    """Open the site in Chrome, log in with the configured credentials,
    and return the page source of the landing page.

    NOTE(review): uses the selenium 3 `find_element_by_*` API and a
    hard-coded local chromedriver path.
    """
    driver = webdriver.Chrome("C:/Users/emilf/Downloads/chromedriver.exe")
    driver.get (URL)
    # Fixed sleeps to let the page and cookie banner render.
    time.sleep(5)
    driver.find_element_by_class_name('cc-dismiss').click()
    time.sleep(2)
    # Open the sidebar login form and submit the credentials.
    driver.find_element_by_class_name('sidebar-menu-toggle').click()
    driver.find_element_by_id('user_email').send_keys(username)
    driver.find_element_by_id ('user_password').send_keys(password)
    driver.find_element_by_class_name("btn").click()
    # Wait (up to 10 s) for the post-login page title before scraping.
    WebDriverWait(driver, 10).until(EC.title_is("Busy Living"))
    html_source = driver.page_source
    driver.close()
    return html_source
# Define a class to store the results needed
class images:
    """Container for one scraped icon listing.

    Each attribute stores the list of regex matches found in the icon's
    <div> block: `title` (title attributes), `file` (alt attributes) and
    `picture` (img src URLs). Consumers index element [0].
    """
    def __init__(self, title, file, picture):
        self.picture = picture
        self.file = file
        self.title = title
# Finds the elements needed and put them in an array
def finder(html):
    """Extract every icon <div> from the page source and wrap its
    title/alt/src regex matches in an `images` record."""
    listings = []
    # Each findall result is a tuple (whole <div> block, last line group).
    all_text = re.findall(r'(\<div class\=\"icons col-lg-2 col-md-2 col-sm-4 col-xs-6\".*\>(\n.*){14}</div>)', html)
    for block, _ in all_text:
        title = re.findall(r'title\=\"(.*)\"', block)
        file = re.findall(r'alt\=\"([^\"]*)\"', block)
        picture = re.findall(r'src\=\"(.*)\"', block)
        listings.append(images(title, file, picture))
    return listings
# Paths: target S3 bucket and the local folder of replacement images.
directory = "s3://busyliving"
local = "C:\\Users\\emilf\\Downloads\\Ringley\\Images\\"
# File names the user may type as replacements.
files_names = os.listdir("C:\\Users\\emilf\\Downloads\\Ringley\\Images\\")
# Public-read ACL flag for `aws s3 cp` (appears unused in this script).
grant = " --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers"
# Change the file
def name_changer(temp_item):
    """Prompt for a local replacement image and copy it over the S3
    object whose key suffix is temp_item[0]; logs each substitution.

    :param temp_item: list whose first element is the S3 key (path after
        the bucket host) of the picture being replaced
    :returns: None (also when the user types "skip")
    """
    print(f"\nWith which file do you want to replace it with? (enter the name of the file comprehensive of the extension and be wary of caps lock)\n")
    sub_item = input()
    if (sub_item == "skip"):
        # Leave this picture untouched.
        return None
    # Re-prompt until the name matches a file in the local folder.
    while sub_item not in files_names:
        print("\nThe name of the file is wrong\n")
        sub_item = input()
    os.popen(f'aws s3 cp {local}{sub_item} {directory}{temp_item[0]}')
    print(f"\nThe picture has been replaced by {sub_item}\n\n\n")
    # Bug fix: mode was 'w', which truncated the log on every call and
    # kept only the last name. Append ('a') so the file really contains
    # all substituted file names, as the original comment intended.
    with open('substituted_files.txt', 'a') as filehandle:
        filehandle.write('%s\n' %sub_item)
# Function that does it all
def replace(listings):
    """Walk every scraped icon listing: show it in a browser window and
    interactively ask which local file should replace it.

    :param listings: iterable of `images` records from `finder`
    """
    for i in listings:
        print(f"\nThis picture is listed as {i.title[0]} on the website and the file is called {i.file[0]}\n")
        # Open the current picture so the user can see what is replaced.
        driver = webdriver.Chrome("C:/Users/emilf/Downloads/chromedriver.exe")
        driver.get(i.picture[0])
        # Extract the S3 key suffix (".../xxx.png") from the image URL.
        temp_item = re.findall(r'com(.*/.*png)', i.picture[0])
        name_changer(temp_item)
        driver.close()
# Entry point: log in, scrape the icon listings, then replace each one
# interactively.
replace(finder(site_login(url)))
946bab785021927c61c57cce0106b761b8595f91 | 1,461 | py | Python | evan/services/events/zoom.py | eillarra/evan | befe0f8daedd1b1f629097110d92e68534e43da1 | [
"MIT"
] | null | null | null | evan/services/events/zoom.py | eillarra/evan | befe0f8daedd1b1f629097110d92e68534e43da1 | [
"MIT"
] | 20 | 2021-03-31T20:10:46.000Z | 2022-02-15T09:58:13.000Z | evan/services/events/zoom.py | eillarra/evan | befe0f8daedd1b1f629097110d92e68534e43da1 | [
"MIT"
] | null | null | null | import csv
from datetime import datetime
from typing import Dict, Optional, Union
def attendee_report(csv_path: str) -> Dict:
"""Given a standard Zoom CSV attendee report, returns massaged information."""
report = []
field_map = {
"email": ["Email", str],
"first_name": ["First Name", str],
"last_name": ["Last Name", str],
"attended": ["Attended", to_bool],
"join_time": ["Join Time", to_datetime],
"leave_time": ["Leave Time", to_datetime],
"minutes": ["Time in Session (minutes)", to_minutes],
}
with open(csv_path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
section = None
tmp = None
for row in csv_reader:
if len(row) == 2 and row[0] == "Attendee Details":
section = "attendees"
continue
if not section:
continue
if not len(report) and row[0] == "Attended":
fields = row
continue
tmp = dict(zip(fields, row))
report.append({k: v[1](tmp[v[0]]) for k, v in field_map.items()})
return report
| 27.566038 | 82 | 0.557153 | import csv
from datetime import datetime
from typing import Dict, Optional, Union
def to_bool(val: str) -> bool:
    """Convert Zoom's "Yes"/"No" attendance column to a boolean."""
    attended = val == "Yes"
    return attended
def to_datetime(dt: str) -> Optional[datetime]:
    """Parse Zoom's timestamp format (e.g. "Sep 01, 2021 14:30:00");
    the placeholder "--" maps to None."""
    if dt == "--":
        return None
    return datetime.strptime(dt, "%b %d, %Y %H:%M:%S")
def to_minutes(minutes: Union[str, int]) -> int:
    """Coerce Zoom's session-minutes field to an int; "--" means zero."""
    if minutes == "--":
        return 0
    return int(minutes)
def attendee_report(csv_path: str) -> list:
    """Parse a standard Zoom CSV attendee report.

    Reads the "Attendee Details" section and converts each data row into
    a dict with normalized keys and typed values.

    (Fixed: the return annotation was `Dict` although a list is
    returned, and a data row appearing before the header row raised
    NameError on the unbound `fields` variable.)

    :param csv_path: path to the Zoom-exported CSV file
    :returns: list of attendee dicts with keys: email, first_name,
        last_name, attended, join_time, leave_time, minutes
    """
    report = []
    # output key -> (Zoom column header, converter callable)
    field_map = {
        "email": ["Email", str],
        "first_name": ["First Name", str],
        "last_name": ["Last Name", str],
        "attended": ["Attended", to_bool],
        "join_time": ["Join Time", to_datetime],
        "leave_time": ["Leave Time", to_datetime],
        "minutes": ["Time in Session (minutes)", to_minutes],
    }
    with open(csv_path) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=",")
        section = None
        fields = None
        for row in csv_reader:
            # The attendee table starts after this two-column marker row.
            if len(row) == 2 and row[0] == "Attendee Details":
                section = "attendees"
                continue
            if not section:
                continue
            # The first row of the section is the column-header row.
            if fields is None and row[0] == "Attended":
                fields = row
                continue
            if fields is None:
                # Defensive: skip malformed rows appearing before the
                # header (previously a NameError).
                continue
            record = dict(zip(fields, row))
            report.append({k: v[1](record[v[0]]) for k, v in field_map.items()})
    return report
| 215 | 0 | 69 |
1c030e32a152ec1a34ad3b544ff9fd528e088b4e | 4,811 | py | Python | convert_model.py | NoVarlok/sova-tts-vocoder | 1d44a7247341e404e503fb0de386af5f16d36806 | [
"Apache-2.0"
] | 45 | 2020-08-26T05:50:27.000Z | 2021-01-27T15:40:09.000Z | convert_model.py | NoVarlok/sova-tts-vocoder | 1d44a7247341e404e503fb0de386af5f16d36806 | [
"Apache-2.0"
] | null | null | null | convert_model.py | NoVarlok/sova-tts-vocoder | 1d44a7247341e404e503fb0de386af5f16d36806 | [
"Apache-2.0"
] | 3 | 2021-01-05T17:24:23.000Z | 2022-03-31T15:20:21.000Z | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import sys
import copy
import torch
if __name__ == '__main__':
old_model_path = sys.argv[1]
new_model_path = sys.argv[2]
model = torch.load(old_model_path, map_location='cpu')
model['model'] = update_model(model['model'])
torch.save(model, new_model_path)
| 47.166667 | 108 | 0.676782 | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import sys
import copy
import torch
def _check_model_old_version(model):
    # Returns True when the first WaveNet block still carries the legacy
    # per-layer module lists ('res_layers'/'cond_layers') that the new
    # layout fuses into single layers.
    if hasattr(model.WN[0], 'res_layers') or hasattr(model.WN[0], 'cond_layers'):
        return True
    else:
        return False
def _update_model_res_skip(old_model, new_model):
    """Fuse each WaveNet block's separate res_layers/skip_layers into
    single res_skip_layers convolutions (mutates new_model in place)."""
    for idx in range(0, len(new_model.WN)):
        wavenet = new_model.WN[idx]
        n_channels = wavenet.n_channels
        n_layers = wavenet.n_layers
        wavenet.res_skip_layers = torch.nn.ModuleList()
        for i in range(0, n_layers):
            # The last layer has no residual branch, only skip channels.
            if i < n_layers - 1:
                res_skip_channels = 2 * n_channels
            else:
                res_skip_channels = n_channels
            res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)
            # Strip weight norm before copying raw weights/biases.
            skip_layer = torch.nn.utils.remove_weight_norm(wavenet.skip_layers[i])
            if i < n_layers - 1:
                res_layer = torch.nn.utils.remove_weight_norm(wavenet.res_layers[i])
                # Concatenate res weights first, then skip weights.
                res_skip_layer.weight = torch.nn.Parameter(torch.cat([res_layer.weight, skip_layer.weight]))
                res_skip_layer.bias = torch.nn.Parameter(torch.cat([res_layer.bias, skip_layer.bias]))
            else:
                res_skip_layer.weight = torch.nn.Parameter(skip_layer.weight)
                res_skip_layer.bias = torch.nn.Parameter(skip_layer.bias)
            # Re-apply weight norm on the fused layer.
            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
            wavenet.res_skip_layers.append(res_skip_layer)
        # Remove the legacy module lists.
        del wavenet.res_layers
        del wavenet.skip_layers
def _update_model_cond(old_model, new_model):
    """Fuse each WaveNet block's per-layer cond_layers into one wide
    cond_layer convolution (mutates new_model in place)."""
    for idx in range(0, len(new_model.WN)):
        wavenet = new_model.WN[idx]
        n_channels = wavenet.n_channels
        n_layers = wavenet.n_layers
        # Infer mel-channel count from the first legacy layer's weights.
        n_mel_channels = wavenet.cond_layers[0].weight.shape[1]
        cond_layer = torch.nn.Conv1d(n_mel_channels, 2 * n_channels * n_layers, 1)
        cond_layer_weight = []
        cond_layer_bias = []
        # Collect raw (weight-norm-stripped) parameters of every layer.
        for i in range(0, n_layers):
            _cond_layer = torch.nn.utils.remove_weight_norm(wavenet.cond_layers[i])
            cond_layer_weight.append(_cond_layer.weight)
            cond_layer_bias.append(_cond_layer.bias)
        cond_layer.weight = torch.nn.Parameter(torch.cat(cond_layer_weight))
        cond_layer.bias = torch.nn.Parameter(torch.cat(cond_layer_bias))
        # Re-apply weight norm on the fused layer.
        cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
        wavenet.cond_layer = cond_layer
        del wavenet.cond_layers
def update_model(old_model):
    """Return a WaveGlow model in the new fused-layer layout.

    Models already in the new layout are returned unchanged (no copy);
    legacy models are deep-copied and converted.
    """
    if not _check_model_old_version(old_model):
        return old_model
    converted = copy.deepcopy(old_model)
    # Convert whichever legacy layouts are actually present.
    if hasattr(old_model.WN[0], 'res_layers'):
        _update_model_res_skip(old_model, converted)
    if hasattr(old_model.WN[0], 'cond_layers'):
        _update_model_cond(old_model, converted)
    return converted
# CLI entry point: python convert_model.py <old_checkpoint> <new_checkpoint>
if __name__ == '__main__':
    old_model_path = sys.argv[1]
    new_model_path = sys.argv[2]
    # Load on CPU so conversion works without a GPU.
    model = torch.load(old_model_path, map_location='cpu')
    model['model'] = update_model(model['model'])
    torch.save(model, new_model_path)
| 2,699 | 0 | 92 |
8f4588bb23009cbf6d229ec7164249f2d69ad1bd | 2,878 | py | Python | python/pavement.py | Oskarmal/svlab3 | c9ca65e8ef2d7d7b2938229901a88fa71dbbeb7e | [
"Apache-2.0"
] | null | null | null | python/pavement.py | Oskarmal/svlab3 | c9ca65e8ef2d7d7b2938229901a88fa71dbbeb7e | [
"Apache-2.0"
] | null | null | null | python/pavement.py | Oskarmal/svlab3 | c9ca65e8ef2d7d7b2938229901a88fa71dbbeb7e | [
"Apache-2.0"
] | null | null | null | #
# Copyright IBM Corp. 2014
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from paver.easy import *
from shutil import rmtree, copytree, copyfile
from subprocess import call
import os
import getpass
@task
@task
@task
@task
@task
@task
@task
@task
@task
@needs('clean', 'copy_client_code', 'copy_mongo_manifest', 'create_mongo_service', 'deploy_to_bluemix')
@task
@needs('clean', 'copy_client_code', 'copy_cloudant_manifest', 'create_cloudant_service', 'deploy_to_bluemix') | 31.282609 | 109 | 0.54378 | #
# Copyright IBM Corp. 2014
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from paver.easy import *
from shutil import rmtree, copytree, copyfile
from subprocess import call
import os
import getpass
@task
def clean():
    """Remove build artifacts: the static/ client copy and manifest.yml."""
    print '************************************'
    print '* Cleaning                         *'
    print '************************************'
    # Second arg True = ignore_errors, so a missing static/ is not fatal.
    rmtree('static', True)
    try:
        os.remove('manifest.yml')
    except OSError:
        # manifest.yml did not exist; nothing to clean up.
        pass
@task
def copy_client_code():
    """Copy the frontend sources into static/ for deployment."""
    print '************************************'
    print '* Copying Client Code              *'
    print '************************************'
    # copytree fails if static/ already exists -- run the clean task first.
    copytree('../frontend', 'static')
@task
def copy_mongo_manifest():
    """Install the MongoDB deployment manifest as manifest.yml."""
    print '************************************'
    print '* Copying Mongo Manifest           *'
    print '************************************'
    copyfile('deploy/mongo-manifest.yml', 'manifest.yml')
@task
def copy_cloudant_manifest():
    """Install the Cloudant deployment manifest as manifest.yml."""
    print '************************************'
    print '* Copying Cloudant Manifest        *'
    print '************************************'
    copyfile('deploy/cloudant-manifest.yml', 'manifest.yml')
@task
def create_mongo_service():
    """Provision the 'todo-mongo-db' MongoDB service via the cf CLI."""
    print '************************************'
    print '* Creating Mongo DB Service        *'
    print '************************************'
    call(["cf", "create-service", "mongodb", "100", "todo-mongo-db"])
@task
def create_cloudant_service():
    """Provision the 'todo-couch-db' Cloudant service via the cf CLI."""
    print '************************************'
    print '* Creating Cloudant Service        *'
    print '************************************'
    call(["cf", "create-service", "cloudantNoSQLDB", "Shared", "todo-couch-db"])
@task
def cf_login():
    """Interactively log in to Bluemix with the cf CLI."""
    print '************************************'
    print '* Logging Into BlueMix             *'
    print '************************************'
    call(["cf", "login", "-a", "https://api.ng.bluemix.net"])
@task
def deploy_to_bluemix():
    """Push the app to Bluemix, prompting the user for an app name."""
    print '************************************'
    print '* Pushing App To BlueMix           *'
    print '************************************'
    name = raw_input("What would you like to call this app?\n")
    call(["cf", "push", name])
@task
@needs('clean', 'copy_client_code', 'copy_mongo_manifest', 'create_mongo_service', 'deploy_to_bluemix')
def deploy_mongo_todo():
    """Aggregate task: full clean/copy/provision/deploy cycle for MongoDB."""
    pass
@task
@needs('clean', 'copy_client_code', 'copy_cloudant_manifest', 'create_cloudant_service', 'deploy_to_bluemix')
def deploy_cloudant_todo():
    """Aggregate task: full clean/copy/provision/deploy cycle for Cloudant."""
    pass
5a5680130054180b553975fb2552c8384b096654 | 11,166 | py | Python | frontend/animation/display.py | eldstal/CTF | 29ae870a362257c132ee729befbed86473cfb21d | [
"MIT"
] | null | null | null | frontend/animation/display.py | eldstal/CTF | 29ae870a362257c132ee729befbed86473cfb21d | [
"MIT"
] | 2 | 2021-02-07T21:31:09.000Z | 2021-02-18T10:33:01.000Z | frontend/animation/display.py | eldstal/CTF | 29ae870a362257c132ee729befbed86473cfb21d | [
"MIT"
] | null | null | null | from asciimatics.effects import Effect
from asciimatics.screen import Screen
from pyfiglet import Figlet
import re
import ftfy
import unicodedata
# XXX: Should this be a Renderer?
| 34.462963 | 139 | 0.515135 | from asciimatics.effects import Effect
from asciimatics.screen import Screen
from pyfiglet import Figlet
import re
import ftfy
import unicodedata
class FirstBloodDisplay(Effect):
    """Full-screen 'FIRST BLOOD' announcement effect for asciimatics.

    Four pieces of text (FIRST, BLOOD, team name, challenge name) are
    slammed in from the right edge one after another, then drift slowly
    leftward together until the effect's duration elapses.
    """
    def __init__(self, screen, team, challenge, duration=None, color=None, shade_colors=None, team_color=None, chall_color=None, **kwargs):
        """Set up the four drifting text parts and their animation timing.

        team/challenge are dicts; only their "name" keys are read here.
        duration is the total effect length in frames (default 60).
        """
        super(FirstBloodDisplay, self).__init__(screen, **kwargs)
        if duration is None: duration = 60
        if color is None: color = Screen.COLOUR_RED
        if shade_colors is None: shade_colors = []
        if team_color is None: team_color = Screen.COLOUR_WHITE
        if chall_color is None: chall_color = Screen.COLOUR_WHITE
        self._duration = duration
        # This screen has four things on it.
        # 0 and 1: FIRST and BLOOD
        # 2: Team name
        # 3: Challenge name
        # The animation smacks each part in quickly from the right,
        # and once in place it drifts slowly leftward
        # Keep trying different fonts until one fits on screen
        for font in [ "poison",
                      "cricket",
                      "rectangles",
                      "thin",
                      None # No figlet at all. Boo.
                    ]:
            render = lambda txt: txt
            if font is not None:
                figlet = Figlet(font=font)
                render = figlet.renderText
            drift_dist = 5
            FIRST = self._new_drifter(screen, render("First"), color, shade_colors, 5, 0, drift_dist)
            BLOOD = self._new_drifter(screen, render("Blood"), color, shade_colors, FIRST["x1"] + FIRST["w"] + 2, 2, drift_dist)
            TEAM = self._new_drifter(screen, team["name"], team_color, [], 12, FIRST["h"]+2, drift_dist)
            CHALL = self._new_drifter(screen, challenge["name"], chall_color, [], TEAM["x1"] + 4, TEAM["y"]+1, drift_dist)
            if FIRST["w"] + BLOOD["w"] + 7 < screen.width:
                break
            continue
        self._v1 = 12 # Chars per frame (Fast speed)
        self._v2 = 15 # Frames per char (Slow speed)
        # Set up the timing of the animations:
        # each part starts its fast entry at t1 and settles into the
        # slow drift at t2; the parts are chained back to back.
        FIRST["t1"] = 1
        FIRST["t2"] = FIRST["t1"] + duration // 10
        BLOOD["t1"] = FIRST["t2"]
        BLOOD["t2"] = BLOOD["t1"] + duration // 10
        TEAM["t1"] = BLOOD["t2"]
        TEAM["t2"] = TEAM["t1"] + duration // 10
        CHALL["t1"] = TEAM["t2"]
        CHALL["t2"] = CHALL["t1"] + duration // 10
        self._drifters = [ FIRST, BLOOD, TEAM, CHALL ]
    # Pass in the x and y where the text should start drifting slowly
    def _new_drifter(self, screen, text, color, shade_colors, x, y, drift_dist):
        """Build the mutable state dict for one animated text part."""
        # Phase 0: Invisible
        # Phase 1: Smashing in at light speed
        # Phase 2: Drifting slowly leftward
        lines = text.split("\n")
        width = max([ len(l) for l in lines] )
        return {
            "text": text, "w": width, "h": len(lines),
            "color": color, "shade_colors": shade_colors,
            "phase": 0, "x": screen.width, "y": y,
            "x1": x, "x2": x-drift_dist,
        }
    def reset(self):
        """No per-run state to reset; all state advances with frame_no."""
        pass
    @property
    def stop_frame(self):
        # Effect ends after the configured duration (in frames).
        return self._duration
    def _update(self, frame_no):
        """Render one animation frame: advance phases, draw each part."""
        H,W = self._screen.dimensions
        if (frame_no == 1):
            self._screen.clear()
            self._screen.refresh()
        for d in self._drifters:
            # Phase transitions are keyed to absolute frame numbers.
            if frame_no == d["t1"]: d["phase"] = 1
            if frame_no == d["t2"]: d["phase"] = 2
            if d["phase"] == 0:
                continue
            if d["phase"] == 1:
                # Fast entry: position counts down toward x1 as t2 nears.
                dt = d["t2"] - frame_no - 1
                d["x"] = d["x1"] + (dt * self._v1)
            # Sync up phase 2 so they drift together
            elif d["phase"] == 2:
                if frame_no % self._v2 == 0:
                    d["x"] -= 1
            lines = d["text"].split("\n")
            for dy in range(len(lines)):
                txt = lines[dy]
                x = int(d["x"] + 0.5)
                y = d["y"] + dy
                # Pad out with whitespace to properly clear stuff to the right
                txt += " "*(W - len(txt))
                txt = txt[:W - x]
                self._screen.print_at(txt, x, y, d["color"], transparent=False)
                # Optionally, drifting text gets a shady background around its edges
                # to help with the illusion of a slower drift. It's pretty neat.
                if d["phase"] == 2:
                    if len(d["shade_colors"]) > 0 and len(txt.strip()) > 0:
                        # Identify every leading and trailing edge of the text
                        lead_edges = [ i for i in range(-1, len(txt)-1) if (i == -1 or txt[i] == " ") and txt[i+1] != " " ]
                        trail_edges = [ i for i in range(1, len(txt)) if (txt[i] == " " or i == len(txt)-1) and txt[i-1] != " " ]
                        shades = d["shade_colors"]
                        # How far through the current one-cell drift we are,
                        # mapped onto an index into the shade palette.
                        drift_progress = (frame_no % self._v2) / self._v2
                        shade_idx = int((len(shades) + 1) * drift_progress) - 1
                        #
                        # Leading shade
                        #
                        # Overwrite the last whitespace just before the text begins
                        for text_start in lead_edges:
                            lead_text = txt[text_start+1]
                            lead_x = x + text_start
                            if shade_idx >= 0:
                                self._screen.print_at(lead_text, lead_x, y, shades[shade_idx], transparent=False)
                        #
                        # Trailing shade (uses the palette in reverse)
                        #
                        for text_end in trail_edges:
                            trail_text = txt[text_end-1]
                            trail_x = x + text_end - 1
                            if shade_idx >= 0:
                                self._screen.print_at(trail_text, trail_x, y, shades[len(shades) - shade_idx - 1], transparent=False)
# XXX: Should this be a Renderer?
class ScoreboardDisplay(Effect):
    """asciimatics effect rendering a CTF scoreboard table.

    Shows a ranked top list (place, score, awards, team name), keeps
    configured "focus" teams visible even when they rank below the
    visible cutoff, and re-renders from self.teams on every update.
    """
    def __init__(self, screen, conf, attr=None, focused_attr=None, awards_attr=None, **kwargs):
        """conf is a dict; keys read here: "max-length", "focus-teams".

        attr/focused_attr/awards_attr optionally override the default
        colour attribute dicts passed to Screen.print_at.
        """
        super(ScoreboardDisplay, self).__init__(screen, **kwargs)
        self.conf = conf
        self.attr = {
                "default": { "colour": 7, "bg": Screen.COLOUR_BLACK },
                "focused": { "colour": 213, "bg": Screen.COLOUR_BLACK },
                "awards": { "colour": 220, "bg": Screen.COLOUR_BLACK },
        }
        if attr is not None: self.attr["default"] = attr
        if focused_attr is not None: self.attr["focused"] = focused_attr
        if awards_attr is not None: self.attr["awards"] = awards_attr
        self._limits(screen)
        self.teams = {}
    def update_scores(self, teams):
        """Replace the team data (dict keyed by team id) to render."""
        self.teams = teams
    def reset(self):
        """Stateless between runs; nothing to reset."""
        pass
    @property
    def stop_frame(self):
        # A single frame is enough; the scene driver re-invokes us.
        return 1
    def _sanitize(self, text):
        """Normalize text and strip combining marks that break alignment."""
        cleaned = ftfy.fix_text(text, normalization="NFKC")
        # Remove all that line-crossing garbage in the Marks characters
        cleaned = u"".join( x for x in cleaned if not unicodedata.category(x).startswith("M") )
        return cleaned
    def _limits(self, screen):
        """Compute self.max_count, the number of rows we can show."""
        # At most, as many places as will fit in the window
        max_len = screen.height - 1
        if "max-length" in self.conf:
            self.max_count = min(self.conf["max-length"], max_len)
        else:
            # Nobody specified, so let's default to a full screen of scores
            self.max_count = max_len
    # Provide a team object
    def _team_is_focused(self, team):
        """True if the team name matches any configured focus regex."""
        for expr in self.conf["focus-teams"]:
            if re.match(expr, team["name"]) != None: return True
        return False
    def _attr_by_team(self, team):
        """Pick the name-column attribute for a team (focused or default)."""
        if self._team_is_focused(team):
            return self.attr["focused"]
        else:
            return self.attr["default"]
    # Generate two lists of teams which should go on the toplist
    # This takes care of cropping the top list to fit the focused teams underneath
    # returns (toplist, extra_focused_teams)
    def _make_toplist(self):
        # Pick out which teams to even show
        ranking = [ (team["place"], team) for tid,team in self.teams.items() ]
        ranking = sorted(ranking, key=lambda x: x[0])
        boundary = self.max_count
        toplist = ranking[:boundary]
        focused = []
        for r,team in ranking[boundary:]:
            if self._team_is_focused(team):
                focused.append((r,team))
        # Shrink the top list so toplist + focused still fit on screen.
        if len(toplist) + len(focused) > boundary:
            toplist = toplist[:boundary - len(focused)]
        return toplist, focused
    def _print_table(self, screen):
        """Lay out and draw the score table, centered in the window."""
        # (header text, team-dict field) per column, left to right.
        columns = [
            (" ", "marker"),
            ("#", "place"),
            ("Score", "score"),
            (" ", "awards"), # Prints up to 5x unicode trophy, which may be quite wide.
            ("Team", "name"),
        ]
        toplist, focused = self._make_toplist()
        toplist = toplist + focused
        # Each cell is a tuple of (text, attr)
        # The +1 gives us a header line at the top
        table = [ [ "" for c in columns ] for _ in range(len(toplist) + 1) ]
        for c in range(len(columns)):
            header,field = columns[c]
            table[0][c] = ( header, self.attr["default"])
            for i in range(len(toplist)):
                team = toplist[i][1]
                text = self._sanitize(str(team[field]))
                # Some fields are colored differently
                attr = self.attr["default"]
                if field == "name": attr = self._attr_by_team(team)
                elif field == "awards": attr = self.attr["awards"]
                table[i+1][c] = (text, attr)
        column_widths = [ max([ len(row[c][0]) for row in table ]) for c in range(len(columns)) ]
        # X-coordinate of each column, based on the widest of all preceding columns
        padding = 1
        col_offset = lambda c: sum(column_widths[:c]) + (padding * (c - 1))
        h,w = self.screen.dimensions
        # Center the table
        x0 = (w - col_offset(len(columns))) // 2
        y0 = (h - len(table)) // 2
        # Super big tables don't fall outside the window
        x0 = max(0, x0)
        y0 = max(0, y0)
        for r in range(len(table)):
            for c in range(len(columns)):
                text,attr = table[r][c]
                x = x0 + col_offset(c)
                y = y0 + r
                # Don't overflow the window
                if y > h: break
                if len(text) > w-x:
                    text = text[:w-x-3] + "..."
                screen.print_at(text, x, y, transparent=True, **attr)
    def _update(self, frame_no):
        """Clear the buffer, redraw the table and push it to the screen."""
        self.screen.clear_buffer(self.attr["default"]["colour"],
                                 Screen.A_NORMAL,
                                 self.attr["default"]["bg"])
        self._print_table(self.screen)
        self.screen.refresh()
| 10,160 | 775 | 45 |
416cbbc595a1d461047ca88d1b55531361f0ea82 | 10,031 | py | Python | benchmarks/benchmark.py | findvid/main | dd9bd14255af8c642b39b08d59e64cbfa314d908 | [
"MIT"
] | null | null | null | benchmarks/benchmark.py | findvid/main | dd9bd14255af8c642b39b08d59e64cbfa314d908 | [
"MIT"
] | null | null | null | benchmarks/benchmark.py | findvid/main | dd9bd14255af8c642b39b08d59e64cbfa314d908 | [
"MIT"
] | null | null | null | import pymongo
import shutil
import os
import argparse
import re
import urllib2
import subprocess
import time
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import kmeanstree
from sys import exit
# instanciate and configure an argument parser
PARSER = argparse.ArgumentParser(description='')
PARSER.add_argument('serverfile', metavar='SERVERFILE',
help='The full path to the cherrypy python file, which will be used for the benchmark (absolute!!!)')
PARSER.add_argument('port', metavar='PORT',
help='The port on which the webserver will run')
PARSER.add_argument('database', metavar='DB',
help='The name of the MongoDB Database on localhost')
PARSER.add_argument('collection', metavar='COLLECTION',
help='The name of the Collection in the Database')
PARSER.add_argument('origpath', metavar='ORIGINALPATH',
help='The original path, where the queries and targets are (absolute!!!). This is used for the informations in the output files and reading the groundtruth file.')
PARSER.add_argument('tolerance', metavar='TOLERANCE',
help='The tolerance (in frames) how many frames the found target can be away from ground truth')
# parse input arguments
ARGS = PARSER.parse_args()
SERVERFILE = ARGS.serverfile
PORT = ARGS.port
DBNAME = ARGS.database
COLNAME = ARGS.collection
ORIGPATH = ARGS.origpath
TOLERANCE = int(ARGS.tolerance)
# Directory of this file
ROOTDIR = os.path.abspath('.')
if (not os.path.exists(SERVERFILE)) or (not os.path.isfile(SERVERFILE)):
print "The webserver file: '" + SERVERFILE + "', doesn't exist or is not a file!"
sys.exit(1)
if (not os.path.exists(ORIGPATH)) or (not os.path.isdir(ORIGPATH)):
print "The given path: '" + ORIGPATH + "', doesn't exist or is not a directory!"
sys.exit(1)
GTFILE = os.path.join(ORIGPATH, 'BENCHMARK_FULL.TXT')
if (not os.path.exists(GTFILE)) or (not os.path.isfile(GTFILE)):
print "The groundtruth file: '" + GTFILE + "', doesn't exist or is not a file!"
sys.exit(1)
# Read Ground Truth file and split on lines and on spaces for each line
# Ground Truth looks like:
# <querypath> <targetpath> <position_from> <position_to>...
GTDATA = open(GTFILE, 'r').read().split('\n')
for lineNum in range(0, len(GTDATA)):
GTDATA[lineNum] = GTDATA[lineNum].split()
# Establish MongoDb Connection and get db and video collection
MONGOCLIENT = pymongo.MongoClient(port=8099)
DB = MONGOCLIENT[DBNAME]
VIDEOS = DB[COLNAME]
# Get config from MongoDb
CONFIG = VIDEOS.find_one({'_id': 'config'})
COLORNORMAL = '\033[0m'
COLORWARNING = '\033[93m'
COLORFAIL = '\033[91m'
# Search for all videos starting with "query" in database
# RegEx: Search for substring "query" in path, with digits after the string and a period after the digits
# (so we can be sure 'query' is not some directory name or else...)
if __name__ == '__main__':
outdir = os.path.join(ROOTDIR, 'out')
try:
os.mkdir(outdir)
except OSError:
print COLORWARNING + "WARNING: Output directory already exists. Existing data may be overwritten." + COLORNORMAL
benchmarkTreeBuild(outdir)
benchmarkSceneSearch(outdir) | 30.213855 | 165 | 0.692453 | import pymongo
import shutil
import os
import argparse
import re
import urllib2
import subprocess
import time
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import kmeanstree
from sys import exit
# instanciate and configure an argument parser
PARSER = argparse.ArgumentParser(description='')
PARSER.add_argument('serverfile', metavar='SERVERFILE',
help='The full path to the cherrypy python file, which will be used for the benchmark (absolute!!!)')
PARSER.add_argument('port', metavar='PORT',
help='The port on which the webserver will run')
PARSER.add_argument('database', metavar='DB',
help='The name of the MongoDB Database on localhost')
PARSER.add_argument('collection', metavar='COLLECTION',
help='The name of the Collection in the Database')
PARSER.add_argument('origpath', metavar='ORIGINALPATH',
help='The original path, where the queries and targets are (absolute!!!). This is used for the informations in the output files and reading the groundtruth file.')
PARSER.add_argument('tolerance', metavar='TOLERANCE',
help='The tolerance (in frames) how many frames the found target can be away from ground truth')
# parse input arguments
ARGS = PARSER.parse_args()
SERVERFILE = ARGS.serverfile
PORT = ARGS.port
DBNAME = ARGS.database
COLNAME = ARGS.collection
ORIGPATH = ARGS.origpath
TOLERANCE = int(ARGS.tolerance)
# Directory of this file
ROOTDIR = os.path.abspath('.')
if (not os.path.exists(SERVERFILE)) or (not os.path.isfile(SERVERFILE)):
print "The webserver file: '" + SERVERFILE + "', doesn't exist or is not a file!"
sys.exit(1)
if (not os.path.exists(ORIGPATH)) or (not os.path.isdir(ORIGPATH)):
print "The given path: '" + ORIGPATH + "', doesn't exist or is not a directory!"
sys.exit(1)
GTFILE = os.path.join(ORIGPATH, 'BENCHMARK_FULL.TXT')
if (not os.path.exists(GTFILE)) or (not os.path.isfile(GTFILE)):
print "The groundtruth file: '" + GTFILE + "', doesn't exist or is not a file!"
sys.exit(1)
# Read Ground Truth file and split on lines and on spaces for each line
# Ground Truth looks like:
# <querypath> <targetpath> <position_from> <position_to>...
GTDATA = open(GTFILE, 'r').read().split('\n')
for lineNum in range(0, len(GTDATA)):
GTDATA[lineNum] = GTDATA[lineNum].split()
# Establish MongoDb Connection and get db and video collection
MONGOCLIENT = pymongo.MongoClient(port=8099)
DB = MONGOCLIENT[DBNAME]
VIDEOS = DB[COLNAME]
# Get config from MongoDb
CONFIG = VIDEOS.find_one({'_id': 'config'})
COLORNORMAL = '\033[0m'
COLORWARNING = '\033[93m'
COLORFAIL = '\033[91m'
def startWebserver(ksplit, imax, dbfile):
    """Launch the cherrypy benchmark webserver as a child process.

    ksplit/imax are the k-means-tree parameters forwarded to the server;
    dbfile is the tree database path. Returns the subprocess.Popen
    handle so the caller can later stop it via stopWebserver().
    """
    # Build an argv list instead of a shell string: no quoting problems
    # with paths containing spaces, no shell-injection surface, and
    # Popen.terminate() reaches the python process directly -- the old
    # "exec ..." + shell=True trick only existed to achieve the latter.
    args = ['python', SERVERFILE, PORT, DBNAME, COLNAME,
            str(ksplit), str(imax), str(dbfile), '--quiet']
    return subprocess.Popen(args, stdout=subprocess.PIPE)
def stopWebserver(process):
    """Terminate the webserver child process and return its exit status."""
    process.terminate()
    # wait() reaps the child so no zombie process is left behind.
    return process.wait()
def benchmarkTreeBuild(outdir='./out'):
    """Benchmark tree construction over two parameter sweeps.

    Sweep 0 varies ksplit (2**0..2**7, imax fixed at 100); sweep 1
    varies imax (2**0..2**7, ksplit fixed at 8). For each setting the
    tree is built and timed, a webserver is started against it, the
    scene search is benchmarked, and build time / hitrate / average
    search time are plotted to <outdir>/<param>.png.
    """
    videos = VIDEOS.find({'_id': {'$not': {'$eq': 'config'}}}, {'scenes': 0})
    videocount = videos.count()
    scenecount = 0
    for video in videos:
        scenecount += len(video['cuts']) - 1
    print "Building the trees with " + str(videocount) + " Videos and " + str(scenecount) + " Scenes\n"
    for i in range(0, 2):
        paramValues = []
        timeValues = []
        hitValues = []
        searchTimeValues = []
        for power in range(0, 8):
            # Sweep 0: vary ksplit; sweep 1: vary imax.
            if i == 0:
                ksplit = 2**power
                imax = 100
            else:
                ksplit = 8
                imax = 2**power
            paramValues.append(power)
            print "Bulding tree - ksplit: " + str(ksplit) + " - imax: " + str(imax)
            starttime = time.time()
            kmeanstree.loadOrBuildAndSaveTree(videos=VIDEOS, filename=os.path.join(ROOTDIR, 'temptree.db'), k=ksplit, imax=imax)
            endtime = time.time()
            difftime = endtime-starttime
            print "Builded tree in " + str(difftime) + " seconds"
            timeValues.append(difftime)
            print "Testing SceneSearch..."
            process = startWebserver(ksplit=ksplit, imax=imax, dbfile=os.path.join(ROOTDIR, 'temptree.db'))
            searchBenchmark = sceneSearch(limit=10)
            stopWebserver(process)
            hitValues.append(searchBenchmark['correct'])
            searchTimeValues.append(searchBenchmark['averagetime'])
            print str(searchBenchmark['correct']) + "% hitrate"
            print "In average " + str(searchBenchmark['averagetime']) + " seconds per searchquery\n"
            # Remove the tree db so the next iteration rebuilds from scratch
            # (loadOrBuildAndSaveTree would otherwise just reload it).
            os.remove(os.path.join(ROOTDIR, 'temptree.db'))
        if i == 0:
            label = "ksplit"
        else:
            label = "imax"
        plt.subplot(311)
        plt.title('Time of tree builds')
        plt.plot(paramValues, timeValues, 'ro')
        plt.plot(paramValues, timeValues, 'b-')
        plt.xlabel(label + "(2**x)")
        plt.ylabel('time in seconds')
        plt.subplot(312)
        plt.title('Hitrate of trees')
        plt.plot(paramValues, hitValues, 'ro')
        plt.plot(paramValues, hitValues, 'b-')
        plt.xlabel(label + "(2**x)")
        plt.ylabel('hits in %')
        plt.subplot(313)
        plt.title('Average time per scenesearch of trees')
        plt.plot(paramValues, searchTimeValues, 'ro')
        plt.plot(paramValues, searchTimeValues, 'b-')
        plt.xlabel(label + "(2**x)")
        plt.ylabel('avrg. time in seconds')
        plt.tight_layout()
        plt.savefig(os.path.join(outdir, label + ".png"))
        # Clear the figure so the two sweeps do not draw on top of each other.
        plt.gcf().clear()
def benchmarkSceneSearch(outdir='./out', ksplit=8, imax=100):
    """Benchmark scene search hitrate/time over nnlimit = 100..10000.

    Builds one tree with the given ksplit/imax, runs the webserver once,
    and sweeps the nearest-neighbour limit in steps of 100, plotting
    hitrate and average query time to <outdir>/nnlimit.png.
    """
    kmeanstree.loadOrBuildAndSaveTree(videos=VIDEOS, filename=os.path.join(ROOTDIR, 'temptree.db'), k=ksplit, imax=imax)
    process = startWebserver(ksplit=ksplit, imax=imax, dbfile=os.path.join(ROOTDIR, 'temptree.db'))
    print "Benchmarking scenesearch - ksplit: " + str(ksplit) + " - imax: " + str(imax) + "\n"
    paramValues = []
    hitValues = []
    searchTimeValues = []
    # nnlimit from 100 to 10000
    for i in range(0, 100):
        nnlimit = 100+(100*i)
        paramValues.append(nnlimit)
        print "Testing SceneSearch - nnlimit: " + str(nnlimit)
        searchBenchmark = sceneSearch(limit=10, nnlimit=nnlimit)
        hitValues.append(searchBenchmark['correct'])
        searchTimeValues.append(searchBenchmark['averagetime'])
        print str(searchBenchmark['correct']) + "% hitrate"
        print "In average " + str(searchBenchmark['averagetime']) + " seconds per searchquery\n"
    stopWebserver(process)
    os.remove(os.path.join(ROOTDIR, 'temptree.db'))
    plt.subplot(211)
    plt.title('Hitrate of trees')
    plt.plot(paramValues, hitValues, 'ro')
    plt.plot(paramValues, hitValues, 'b-')
    plt.xlabel('nnlimit')
    plt.ylabel('hits in %')
    plt.subplot(212)
    plt.title('Average time per scenesearch of trees')
    plt.plot(paramValues, searchTimeValues, 'ro')
    plt.plot(paramValues, searchTimeValues, 'b-')
    plt.xlabel('nnlimit')
    plt.ylabel('avrg. time in seconds')
    plt.tight_layout()
    plt.savefig(os.path.join(outdir, "nnlimit.png"))
    plt.gcf().clear()
# Search for all videos starting with "query" in database
# RegEx: Search for substring "query" in path, with digits after the string and a period after the digits
# (so we can be sure 'query' is not some directory name or else...)
def sceneSearch(limit=100, nnlimit=1000):
    """Run every query video against the webserver and score the results.

    For each video whose filename matches 'query<digits>.', the longest
    scene is submitted to /searchSceneList and the ranked results are
    compared against the ground-truth table GTDATA. A result at rank r
    that matches the ground truth (within TOLERANCE frames on both
    start and end) scores 1.0 - 0.1*(r-1).

    Returns {'correct': hitrate in percent, 'averagetime': average
    query duration in seconds}.
    """
    queryvideos = VIDEOS.find({'filename': {'$regex': 'query\d*\.'}}, {'filename': 1, 'cuts': 1})
    count = queryvideos.count()
    totalTime = 0
    correct = 0
    for video in queryvideos:
        vidid = video['_id']
        filename = video['filename']
        scenecount = len(video['cuts']) - 1
        frame = 0
        # When the video has more than 1 scene, take the longest
        if scenecount > 1:
            length = 0
            for cut in range(0, scenecount):
                thislength = video['cuts'][cut+1]-1 - video['cuts'][cut]
                if thislength > length:
                    length = thislength
                    frame = video['cuts'][cut]
        response = False
        tries = 0
        # Retry until the webserver answers (it may still be starting up).
        # NOTE(review): 'tries' is never incremented -- this loop retries
        # forever if the server never comes up; confirm that is intended.
        while not response:
            try:
                starttime = time.time()
                response = urllib2.urlopen('http://localhost:'+str(PORT)+'/searchSceneList?vidid='+str(vidid)+'&frame='+str(frame)+'&limit='+str(limit)+'&nnlimit='+str(nnlimit))
                body = response.read()
                endtime = time.time()
            except urllib2.URLError:
                # Webserver is not ready - wait 10 seconds and try again
                time.sleep(10)
                response = False
        # Response body: one result per line, whitespace-separated fields
        # (<targetpath> <start_frame> <end_frame> ...).
        results = body.split('\n')
        for lineNum in range(0, len(results)):
            results[lineNum] = results[lineNum].split()
        gtline = None
        basename = os.path.splitext(os.path.basename(filename))[0]
        # Find this query's ground-truth line by matching base filenames.
        for query in GTDATA:
            # The last splitted entry can be empty
            if len(query) == 0:
                continue
            gtfilename = os.path.splitext(os.path.basename(query[0]))[0]
            if basename == gtfilename:
                gtline = query
                break
        if not gtline:
            print COLORFAIL + "ERROR: Video not found in Ground Truth! Skipping... ("+basename+")" + COLORNORMAL
            count-=1
            continue
        difftime = endtime-starttime
        totalTime += difftime
        # Rank-based partial credit: 1.0 for rank 1, decreasing by 0.1
        # for each non-empty result line inspected before the hit.
        counter = 1.0
        for result in results:
            # The last splitted entry can be empty
            if len(result) == 0:
                continue
            # Same target file
            if os.path.splitext(os.path.basename(result[0]))[0] == os.path.splitext(os.path.basename(gtline[1]))[0]:
                # Check if time values are within tolerance
                startResult = int(result[1])
                endResult = int(result[2])
                startGt = int(gtline[2])
                endGt = int(gtline[3])
                # If the start value is not in tolerance range of groundtruth value: continue
                if not ((startResult >= startGt-TOLERANCE) and (startResult <= startGt+TOLERANCE)):
                    continue
                # If the end value is not in tolerance range of groundtruth value: continue
                if not ((endResult >= endGt-TOLERANCE) and (endResult <= endGt+TOLERANCE)):
                    continue
                # If we got here, congratulation. We found a correct target.
                correct += counter
                break
            counter-=0.1
        #outfile.write(str(filename) + ' ' + str(difftime) + '\n')
        #outfile.write(body)
        #print "Searchquery took " + str(difftime) + " seconds\n"
    correctPerc = correct / float(count) * 100.0
    averagetime = totalTime / float(count)
    return {'correct': correctPerc, 'averagetime': averagetime}
    #outfile.close()
if __name__ == '__main__':
    # All benchmark output (plots) goes into ./out next to this script.
    outdir = os.path.join(ROOTDIR, 'out')
    try:
        os.mkdir(outdir)
    except OSError:
        # Directory already exists; warn that plots may be overwritten.
        print COLORWARNING + "WARNING: Output directory already exists. Existing data may be overwritten." + COLORNORMAL
    benchmarkTreeBuild(outdir)
    benchmarkSceneSearch(outdir)
530100f067e6af290aeb323e1a855fc74b1333d2 | 2,703 | py | Python | tests/unit/utils/test_AwkwardOps.py | shane-breeze/zinv-analysis | 496abf9cb0e77831d580be417bcad7845c347704 | [
"MIT"
] | 1 | 2019-02-06T12:15:42.000Z | 2019-02-06T12:15:42.000Z | tests/unit/utils/test_AwkwardOps.py | shane-breeze/zinv-analysis | 496abf9cb0e77831d580be417bcad7845c347704 | [
"MIT"
] | 12 | 2019-03-27T15:52:34.000Z | 2020-02-06T12:09:37.000Z | tests/unit/utils/test_AwkwardOps.py | shane-breeze/zinv-analysis | 496abf9cb0e77831d580be417bcad7845c347704 | [
"MIT"
] | 1 | 2019-03-14T17:23:33.000Z | 2019-03-14T17:23:33.000Z | import pytest
import mock
import numpy as np
import awkward as awk
from zinv.utils.AwkwardOps import (
get_nth_object,
get_nth_sorted_object_indices,
get_attr_for_min_ref,
jagged_prod,
)
@pytest.mark.parametrize("array,id,size,out", ([
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
0, 3,
np.array([0, 3, 5]),
], [
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
1, 3,
np.array([1, 4, 6]),
], [
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
2, 3,
np.array([2, np.nan, 7]),
], [
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
3, 3,
np.array([np.nan, np.nan, 8]),
], [
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
4, 3,
np.array([np.nan, np.nan, np.nan]),
]))
@pytest.mark.parametrize("array,ref,id,size,out", ([
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
awk.JaggedArray.fromiter([[3, 1, 2], [1, 2], [4, 1, 3, 2]]),
0, 3,
np.array([0, 4, 5]),
], [
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
awk.JaggedArray.fromiter([[3, 1, 2], [1, 2], [4, 1, 3, 2]]),
1, 3,
np.array([2, 3, 7]),
], [
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
awk.JaggedArray.fromiter([[3, 1, 2], [1, 2], [4, 1, 3, 2]]),
2, 3,
np.array([1, np.nan, 8]),
], [
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
awk.JaggedArray.fromiter([[3, 1, 2], [1, 2], [4, 1, 3, 2]]),
3, 3,
np.array([np.nan, np.nan, 6]),
], [
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
awk.JaggedArray.fromiter([[3, 1, 2], [1, 2], [4, 1, 3, 2]]),
4, 3,
np.array([np.nan, np.nan, np.nan]),
]))
@pytest.mark.parametrize("array,ref,size,out", ([
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
awk.JaggedArray.fromiter([[3, 1, 2], [1, 2], [4, 1, 3, 2]]),
3,
np.array([1, 3, 6]),
],))
@pytest.mark.parametrize("input_,output", ([
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]).astype(np.float32),
np.array([0, 12, 1680]),
],))
| 33.37037 | 107 | 0.557159 | import pytest
import mock
import numpy as np
import awkward as awk
from zinv.utils.AwkwardOps import (
get_nth_object,
get_nth_sorted_object_indices,
get_attr_for_min_ref,
jagged_prod,
)
@pytest.mark.parametrize("array,id,size,out", ([
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
0, 3,
np.array([0, 3, 5]),
], [
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
1, 3,
np.array([1, 4, 6]),
], [
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
2, 3,
np.array([2, np.nan, 7]),
], [
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
3, 3,
np.array([np.nan, np.nan, 8]),
], [
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
4, 3,
np.array([np.nan, np.nan, np.nan]),
]))
def test_get_nth_object(array, id, size, out):
    """Parametrized check of get_nth_object (NaN marks missing objects)."""
    assert np.allclose(get_nth_object(array, id, size), out, rtol=1e-5, equal_nan=True)
@pytest.mark.parametrize("array,ref,id,size,out", ([
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
awk.JaggedArray.fromiter([[3, 1, 2], [1, 2], [4, 1, 3, 2]]),
0, 3,
np.array([0, 4, 5]),
], [
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
awk.JaggedArray.fromiter([[3, 1, 2], [1, 2], [4, 1, 3, 2]]),
1, 3,
np.array([2, 3, 7]),
], [
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
awk.JaggedArray.fromiter([[3, 1, 2], [1, 2], [4, 1, 3, 2]]),
2, 3,
np.array([1, np.nan, 8]),
], [
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
awk.JaggedArray.fromiter([[3, 1, 2], [1, 2], [4, 1, 3, 2]]),
3, 3,
np.array([np.nan, np.nan, 6]),
], [
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
awk.JaggedArray.fromiter([[3, 1, 2], [1, 2], [4, 1, 3, 2]]),
4, 3,
np.array([np.nan, np.nan, np.nan]),
]))
def test_get_nth_sorted_object_indices(array, ref, id, size, out):
    """Parametrized check of get_nth_sorted_object_indices (sorted by ref desc)."""
    assert np.allclose(get_nth_sorted_object_indices(array, ref, id, size), out, rtol=1e-5, equal_nan=True)
@pytest.mark.parametrize("array,ref,size,out", ([
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
awk.JaggedArray.fromiter([[3, 1, 2], [1, 2], [4, 1, 3, 2]]),
3,
np.array([1, 3, 6]),
],))
def test_get_attr_for_min_ref(array, ref, size, out):
    """Parametrized check of get_attr_for_min_ref (attr at per-event min ref)."""
    assert np.allclose(get_attr_for_min_ref(array, ref, size), out, rtol=1e-5, equal_nan=True)
@pytest.mark.parametrize("input_,output", ([
awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]).astype(np.float32),
np.array([0, 12, 1680]),
],))
def test_jagged_prod(input_, output):
    """Parametrized check of jagged_prod (per-event product of elements)."""
    assert np.allclose(jagged_prod(input_), output, rtol=1e-5, equal_nan=True)
| 488 | 0 | 88 |
3fa7227dd1d0fac4716379482b210f70d2f71a80 | 3,534 | py | Python | kombucha_monitor.py | BeefKastle/Kombucha-Monitor | 5c1440876a281e3b23f877984130fa450a0ef09d | [
"MIT"
] | null | null | null | kombucha_monitor.py | BeefKastle/Kombucha-Monitor | 5c1440876a281e3b23f877984130fa450a0ef09d | [
"MIT"
] | null | null | null | kombucha_monitor.py | BeefKastle/Kombucha-Monitor | 5c1440876a281e3b23f877984130fa450a0ef09d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import datetime
import time
import os
from shutil import copyfile
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from grovepi import *
# NOTE(review): SLEEP_TIME appears unused in this script -- the loop
# delay is controlled by 'loopdelay' below; confirm before removing.
SLEEP_TIME = 3
# GrovePi DHT sensor wiring: digital port 7, type 0 = blue (DHT11).
dht_sensor_port = 7
dht_sensor_type = 0
# Google Drive OAuth: opens a local webserver for the consent flow,
# then wraps the authenticated session for uploads.
g_login = GoogleAuth()
g_login.LocalWebserverAuth()
drive = GoogleDrive(g_login)
# set loop delay time in seconds, must be less than 1000
loopdelay = 5
# set time at which to upload to google drive
upload_time = datetime.time(12, 0, 0)
# generate two time objects to represent a range of time using the loop delay
# to make sure the current time will only be within the update range once per day
upload_time_begin = datetime.time(upload_time.hour, upload_time.minute, upload_time.second)
# BUG FIX: use floor division so the minute offset stays an int. Under
# Python 3, ``loopdelay/60`` yields a float and datetime.time() below
# raises TypeError at startup (Python 2 truncated silently).
minuteoffset = loopdelay // 60
secondoffset = loopdelay % 60
# NOTE(review): assumes minute+minuteoffset and second+secondoffset stay
# <= 59; true for the defaults (12:00:00, loopdelay=5) -- confirm if
# either setting changes.
upload_time_end = datetime.time(upload_time.hour, (upload_time.minute + minuteoffset), (upload_time.second + secondoffset))
# objects to hold the current time and date
now = datetime.datetime.now().time()
today = datetime.datetime.now().date()
# check to see if there is an old log file; if there is delete it
if os.path.exists("logdata.tmp"):
    os.remove("logdata.tmp")
# initialize temp file to hold the log data as it is produced
tempfile = open("logdata.tmp", 'w')
# begin file with time program was started
print("Program started at %s on %s\n" % (now.strftime("%H:%M:%S"), today))
tempfile.write("Log started at %s on %s\n" % (now.strftime("%H:%M:%S"), today))
#print(upload_time_begin)
#print(upload_time_end)
# prime a first sensor reading before entering the main loop
[temperature, humidity] = dht(dht_sensor_port, dht_sensor_type)
# Main loop
while True:
#try block to catch if the user intrupts the script running
try:
#update the now time
now = datetime.datetime.now().time()
today = datetime.datetime.now().date()
# if now is between upload time and loopdelay seconds after that time:
if upload_time_begin < now < upload_time_end:
#close the file
tempfile.close()
# generate logfile final na,e
logfilename = "%s.dat" % datetime.datetime.now().date()
#copy contents of temp file to final log file form
copyfile("logdata.tmp", logfilename)
# will be uploading logic
upload(logfilename)
# delete old files
os.remove("logdata.tmp")
os.remove(logfilename)
# open new tempfile and write the first line
tempfile = open("logdata.tmp", 'w')
tempfile.write("Log started at %s on %s\n" % (now, today))
# get/write data to the file
[temperature, humidity] = dht(dht_sensor_port, dht_sensor_type)
tempstring = now.strftime("%H:%M:%S") + "| TEMPERATURE: " + str(temperature) + " | HUMIDITY: " + str(humidity) + "\n"
tempfile.write(tempstring)
print(tempstring)
# wait for a user difined number of seconds
time.sleep(loopdelay)
except KeyboardInterrupt:
tempfile.close()
interruptfile = "%s-interrupt.dat" % datetime.datetime.now().date()
copyfile("logdata.tmp", interruptfile)
upload(interruptfile)
os.remove(interruptfile)
break
| 30.465517 | 127 | 0.667233 | #!/usr/bin/env python
import datetime
import time
import os
from shutil import copyfile
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from grovepi import *
SLEEP_TIME = 3
dht_sensor_port = 7
dht_sensor_type = 0
g_login = GoogleAuth()
g_login.LocalWebserverAuth()
drive = GoogleDrive(g_login)
def upload(file):
print("uploaded", file)
#copyfile(file, "uploaded %s" % file)
with open(file, 'r') as upload_file:
file_drive = drive.CreateFile({'title:': 'temp'})
file_drive['title'] = file
file_drive.SetContentString(upload_file.read())
file_drive.Upload()
upload_file.close()
# set loop delay time in seconds, must be less than 1000
loopdelay = 5
# set time at which to upload to google drive
upload_time = datetime.time(12, 0, 0)
# generate two time objects to represent a range of time using the loop delay
# to make sure the current time will only be within the update range once per day
upload_time_begin = datetime.time(upload_time.hour, upload_time.minute, upload_time.second)
minuteoffset = loopdelay/60
secondoffset = loopdelay%60
upload_time_end = datetime.time(upload_time.hour, (upload_time.minute + minuteoffset), (upload_time.second + secondoffset))
# object to hold the currrent time
now = datetime.datetime.now().time()
today = datetime.datetime.now().date()
# check to see if there is an old log file; if there is delete it
if os.path.exists("logdata.tmp"):
os.remove("logdata.tmp")
#initalize temp file to hold the log data as it is produced
tempfile = open("logdata.tmp", 'w')
# begin file with time program was started
print("Program started at %s on %s\n" % (now.strftime("%H:%M:%S"), today))
tempfile.write("Log started at %s on %s\n" % (now.strftime("%H:%M:%S"), today))
#print(upload_time_begin)
#print(upload_time_end)
[temperature, humidity] = dht(dht_sensor_port, dht_sensor_type)
# Main loop
while True:
#try block to catch if the user intrupts the script running
try:
#update the now time
now = datetime.datetime.now().time()
today = datetime.datetime.now().date()
# if now is between upload time and loopdelay seconds after that time:
if upload_time_begin < now < upload_time_end:
#close the file
tempfile.close()
# generate logfile final na,e
logfilename = "%s.dat" % datetime.datetime.now().date()
#copy contents of temp file to final log file form
copyfile("logdata.tmp", logfilename)
# will be uploading logic
upload(logfilename)
# delete old files
os.remove("logdata.tmp")
os.remove(logfilename)
# open new tempfile and write the first line
tempfile = open("logdata.tmp", 'w')
tempfile.write("Log started at %s on %s\n" % (now, today))
# get/write data to the file
[temperature, humidity] = dht(dht_sensor_port, dht_sensor_type)
tempstring = now.strftime("%H:%M:%S") + "| TEMPERATURE: " + str(temperature) + " | HUMIDITY: " + str(humidity) + "\n"
tempfile.write(tempstring)
print(tempstring)
# wait for a user difined number of seconds
time.sleep(loopdelay)
except KeyboardInterrupt:
tempfile.close()
interruptfile = "%s-interrupt.dat" % datetime.datetime.now().date()
copyfile("logdata.tmp", interruptfile)
upload(interruptfile)
os.remove(interruptfile)
break
| 312 | 0 | 23 |
e78cd382c98ea849d056a08763555ee519c0dc04 | 145 | py | Python | src/colors.py | imesut/alfred-yeelight-pbc | 2678a2544d200c209afaa7d6dc383466f179966d | [
"Unlicense"
] | 1 | 2021-08-09T09:47:00.000Z | 2021-08-09T09:47:00.000Z | src/colors.py | imesut/alfred-yeelight-pbc | 2678a2544d200c209afaa7d6dc383466f179966d | [
"Unlicense"
] | 1 | 2022-03-22T10:42:39.000Z | 2022-03-30T19:46:53.000Z | src/colors.py | imesut/alfred-yeelight-pbc | 2678a2544d200c209afaa7d6dc383466f179966d | [
"Unlicense"
] | null | null | null | colors = {
"white": (255, 255, 255),
"red": (255, 0, 0),
"green": (0, 255, 0),
"blue": (0, 0, 255),
"orange": (255, 128, 0)
} | 20.714286 | 29 | 0.413793 | colors = {
"white": (255, 255, 255),
"red": (255, 0, 0),
"green": (0, 255, 0),
"blue": (0, 0, 255),
"orange": (255, 128, 0)
} | 0 | 0 | 0 |
7b03396009d2739320de791d9e4f0892e0b95d29 | 1,947 | py | Python | services/account.py | vulpemventures/gdk-ocean | 5691e61f1cee99ae23203d069ac6043f1a18f96a | [
"MIT"
] | null | null | null | services/account.py | vulpemventures/gdk-ocean | 5691e61f1cee99ae23203d069ac6043f1a18f96a | [
"MIT"
] | null | null | null | services/account.py | vulpemventures/gdk-ocean | 5691e61f1cee99ae23203d069ac6043f1a18f96a | [
"MIT"
] | 1 | 2022-02-07T08:45:27.000Z | 2022-02-07T08:45:27.000Z | from domain.address_details import AddressDetails
from domain.gdk_account import GdkAccount
from domain.gdk_wallet import GdkWallet
from typing import Dict, List
from domain.utxo import Utxo
from services.wallet import WalletService
class AccountService:
"""create a new GDK account"""
"""derive new addresses for an account"""
"""list all known addresses of the account"""
"""get the balance of the account, include only unspent where min_num_confs is met"""
"""list all the known unspents for an account""" | 41.425532 | 92 | 0.694402 | from domain.address_details import AddressDetails
from domain.gdk_account import GdkAccount
from domain.gdk_wallet import GdkWallet
from typing import Dict, List
from domain.utxo import Utxo
from services.wallet import WalletService
class AccountService:
def __init__(self, wallet_svc: WalletService) -> None:
self._wallet_svc = wallet_svc
"""create a new GDK account"""
def create_account(self, account_name: str) -> GdkAccount:
wallet = self._wallet_svc.get_wallet()
return wallet.create_new_account(account_name)
"""derive new addresses for an account"""
def derive_address(self, account_name: str, num_addresses: int) -> List[AddressDetails]:
wallet = self._wallet_svc.get_wallet()
account = wallet.get_account(account_name)
addresses = []
for _ in range(num_addresses):
addresses.append(account.get_new_address())
return addresses
"""list all known addresses of the account"""
def list_addresses(self, account_name: str) -> List[AddressDetails]:
wallet = self._wallet_svc.get_wallet()
account = wallet.get_account(account_name)
return account.list_all_addresses()
"""get the balance of the account, include only unspent where min_num_confs is met"""
def balance(self, account_name: str, min_num_confs: int) -> Dict[str, int]:
wallet = self._wallet_svc.get_wallet()
account = wallet.get_account(account_name)
return account.get_balance(min_num_confs)
"""list all the known unspents for an account"""
def list_utxos(self, account_name: str) -> List[Utxo]:
wallet = self._wallet_svc.get_wallet()
account = wallet.get_account(account_name)
utxosByAsset = account.get_all_utxos(False)
utxos: List[Utxo] = []
for utxosForAsset in utxosByAsset.values():
utxos.extend(utxosForAsset)
return utxos | 1,236 | 0 | 156 |
ed0e13f65780912f9e869e7db56a098453508cf9 | 537 | py | Python | python/ngsi_v2/ngsi_v2/api/__init__.py | orchestracities/sdk | 9dd1e618d6c013ab916f3880df84c7882f6beec6 | [
"Apache-2.0"
] | 2 | 2019-12-22T01:01:34.000Z | 2021-07-03T20:30:03.000Z | python/ngsi_v2/ngsi_v2/api/__init__.py | orchestracities/sdk | 9dd1e618d6c013ab916f3880df84c7882f6beec6 | [
"Apache-2.0"
] | 2 | 2019-06-06T05:45:45.000Z | 2019-06-06T09:03:10.000Z | python/ngsi_v2/ngsi_v2/api/__init__.py | orchestracities/sdk | 9dd1e618d6c013ab916f3880df84c7882f6beec6 | [
"Apache-2.0"
] | 2 | 2021-07-03T20:30:06.000Z | 2021-11-30T21:55:02.000Z | from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from ngsi_v2.api.api_entry_point_api import APIEntryPointApi
from ngsi_v2.api.attribute_value_api import AttributeValueApi
from ngsi_v2.api.attributes_api import AttributesApi
from ngsi_v2.api.batch_operations_api import BatchOperationsApi
from ngsi_v2.api.entities_api import EntitiesApi
from ngsi_v2.api.registrations_api import RegistrationsApi
from ngsi_v2.api.subscriptions_api import SubscriptionsApi
from ngsi_v2.api.types_api import TypesApi
| 38.357143 | 63 | 0.877095 | from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from ngsi_v2.api.api_entry_point_api import APIEntryPointApi
from ngsi_v2.api.attribute_value_api import AttributeValueApi
from ngsi_v2.api.attributes_api import AttributesApi
from ngsi_v2.api.batch_operations_api import BatchOperationsApi
from ngsi_v2.api.entities_api import EntitiesApi
from ngsi_v2.api.registrations_api import RegistrationsApi
from ngsi_v2.api.subscriptions_api import SubscriptionsApi
from ngsi_v2.api.types_api import TypesApi
| 0 | 0 | 0 |
c43fab04520df60172f22adeab2cfed564710d36 | 1,883 | py | Python | analysis/models.py | haticeaydinn/AmazonReviewAnalysis | 8ad2a2d894e7825ece76a16f4cc625db266e2a9a | [
"MIT"
] | 1 | 2021-06-21T01:26:56.000Z | 2021-06-21T01:26:56.000Z | analysis/models.py | haticeaydinn/AmazonReviewAnalysis | 8ad2a2d894e7825ece76a16f4cc625db266e2a9a | [
"MIT"
] | null | null | null | analysis/models.py | haticeaydinn/AmazonReviewAnalysis | 8ad2a2d894e7825ece76a16f4cc625db266e2a9a | [
"MIT"
] | null | null | null | from django.db import models
| 42.795455 | 77 | 0.744557 | from django.db import models
class ReviewTable(models.Model):
title = models.CharField(max_length=500)
content = models.CharField(max_length=6000)
date = models.CharField(max_length=20)
variant = models.CharField(max_length=1000, null=True)
images = models.CharField(max_length=2083)
verified = models.CharField(max_length=10)
author = models.CharField(max_length=300)
rating = models.FloatField(default=0.0)
product = models.CharField(max_length=500)
url = models.CharField(max_length=2083)
avg_polarity = models.FloatField(default=0.0)
sentiment = models.CharField(max_length=10, default='No Info')
product_id = models.CharField(max_length=50, default='No Product')
class ReviewTableNew(models.Model):
overall = models.IntegerField()
verified = models.CharField(max_length=10)
reviewTime = models.CharField(max_length=20)
reviewerID = models.CharField(max_length=100)
asin = models.CharField(max_length=50)
style = models.CharField(max_length=1000, null=True)
reviewerName = models.CharField(max_length=300)
reviewText = models.CharField(max_length=11000)
summary = models.CharField(max_length=500)
unixReviewTime = models.DateField()
vote = models.FloatField(null=True)
image = models.CharField(max_length=2083, null=True)
avg_polarity = models.FloatField(default=0.0, null=True)
sentiment = models.CharField(max_length=10, default='No Info', null=True)
pos_sentence_rate = models.FloatField(default=0.0, null=True)
neg_sentence_rate = models.FloatField(default=0.0, null=True)
class ProductTable(models.Model):
title = models.CharField(max_length=1000)
main_cat = models.CharField(max_length=50)
price = models.CharField(max_length=10, null=True)
asin = models.CharField(max_length=50)
imageURLHighRes = models.CharField(max_length=2000) | 0 | 1,783 | 69 |
b465cf8b3f874ec4178fbb6ee355ed6a404956a9 | 1,403 | py | Python | cli_stryket/read_input.py | mile95/cli-stryket | 3c4ea10c1937a179a17881b0b235b5daa3d6de91 | [
"MIT"
] | null | null | null | cli_stryket/read_input.py | mile95/cli-stryket | 3c4ea10c1937a179a17881b0b235b5daa3d6de91 | [
"MIT"
] | null | null | null | cli_stryket/read_input.py | mile95/cli-stryket | 3c4ea10c1937a179a17881b0b235b5daa3d6de91 | [
"MIT"
] | null | null | null | from __future__ import annotations
from cli_stryket.system_exception import InvalidSystemException
| 30.5 | 76 | 0.613685 | from __future__ import annotations
from cli_stryket.system_exception import InvalidSystemException
def read_input(filename: str) -> list(str):
raw_system = parse_input_file(filename)
validated_system = validate_system(raw_system)
formatted_system = [format_system_row(row) for row in validated_system]
return formatted_system
def parse_input_file(filename: str) -> list(str):
with open(filename) as f:
raw_system = f.read().split("\n")
return raw_system
def validate_system(system: list(str)) -> list(str):
system = [row for row in system if row] # Remove empty lines
if len(system) != 13:
raise InvalidSystemException("System must have 13 rows")
for i, row in enumerate(system):
if row not in ["1", "x", "2", "1x", "12", "x2", "1x2"]:
raise InvalidSystemException(f"Row {i + 1}: {row} is not valid")
return system
def format_system_row(system_row: str) -> str:
if len(system_row) == 3:
return system_row
if len(system_row) == 2:
if "1" not in system_row:
return " x2"
if "x" not in system_row:
return "1 2"
if "2" not in system_row:
return "1x "
if len(system_row) == 1:
if "1" in system_row:
return "1 "
if "x" in system_row:
return " x "
if "2" in system_row:
return " 2"
| 1,208 | 0 | 92 |
0d7f27fc43e340167cee28afdee347076b00555b | 3,709 | py | Python | politi_train/snoop.py | us241098/exif_consistency | d9bb27611564b5e566fc2b4593d3d22f967380bd | [
"Apache-2.0"
] | null | null | null | politi_train/snoop.py | us241098/exif_consistency | d9bb27611564b5e566fc2b4593d3d22f967380bd | [
"Apache-2.0"
] | 5 | 2021-03-19T13:30:41.000Z | 2022-03-12T00:35:05.000Z | politi_train/snoop.py | us241098/exif_consistency | d9bb27611564b5e566fc2b4593d3d22f967380bd | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import glob
txt_files = glob.glob("*.txt")
image_files=[]
for i in txt_files:
i=i.replace(".txt", "");
image_files.append(i)
print(image_files)
print(len(image_files))
#print((txt_files))
df=pd.read_csv('train.csv') # enter your filename with exiftags
df['Assessment']=''
df['Compression/Ratio']=''
df['BPP']=''
df['Signature']=''
df['Signature Rotated']=''
df['SW']=''
df['Luminance(QT)']=''
df['Chrominance(QT)']=''
df['Quality Factor(Luminance)']=''
df['Quality Factor(Chrominance)']=''
print (df.name)
for i in image_files:
#df2=df[df['name'].str.contains(i)]
#print(df2)
file_name=i+".txt"
sw=[]
with open(file_name, 'r') as f:
for line in f:
if 'Destination ID=0 (Luminance)' in line:
count=0
lum_list=[]
for line in f:
if count<9:
#print (line)
count=count+1
lum_list.append(line)
else:
break
if 'Destination ID=1 (Chrominance)' in line:
count=0
chrom_list=[]
for line in f:
if count<9:
#print (line)
count=count+1
chrom_list.append(line)
else:
break
#cr=''
if 'Compression Ratio:' in line:
cr=line
print (cr)
else:
cr=''
#bpp=''
if 'Bits per pixel:' in line:
bpp=line
#print (line)
else:
bpp=''
if 'Signature:' in line:
sign=line
#print (line)
if 'Signature (Rotated):' in line:
signR=line
#print (line)
if 'SW :' in line:
#print(line)
sw.append(line)
if 'ASSESSMENT:' in line:
assessment=line
#print (line)
lum_li=lum_list[:-1]
chrom_li=chrom_list[:-1]
#print (sw)
#print(lum_li)
#print(chrom_li)
#print (cr)
cr= cr
print (cr)
bpp = bpp.strip('Bits per pixel: ')
sign = sign.strip('Signature: ')
signR = signR.strip('Signature (Rotated): ')
assessment=assessment.strip('ASSESSMENT: Class 1 - ')
lum_qf= (lum_list[-1])
chrom_qf= (chrom_list[-1])
lum_qf=lum_qf.strip('Approx quality factor = ')
chrom_qf=chrom_qf.strip('Approx quality factor = ')
df.loc[df['name'].str.contains(i), 'Assessment'] = assessment
df.loc[df['name'].str.contains(i), 'Compression/Ratio'] = str(cr)
df.loc[df['name'].str.contains(i), 'BPP'] = str(bpp)
df.loc[df['name'].str.contains(i), 'Signature'] = sign
df.loc[df['name'].str.contains(i), 'Signature Rotated'] = signR
df.loc[df['name'].str.contains(i), 'SW'] = str(sw)
df.loc[df['name'].str.contains(i), 'Luminance(QT)'] = str(lum_li)
df.loc[df['name'].str.contains(i), 'Chrominance(QT)'] = str(chrom_li)
df.loc[df['name'].str.contains(i), 'Quality Factor(Luminance)'] = lum_qf
df.loc[df['name'].str.contains(i), 'Quality Factor(Chrominance)'] = chrom_qf
df.to_csv('updated_test.csv', index=False) #your csv with exif and jpegsnoop data
print (df.shape) | 30.154472 | 84 | 0.465624 | import pandas as pd
import glob
txt_files = glob.glob("*.txt")
image_files=[]
for i in txt_files:
i=i.replace(".txt", "");
image_files.append(i)
print(image_files)
print(len(image_files))
#print((txt_files))
df=pd.read_csv('train.csv') # enter your filename with exiftags
df['Assessment']=''
df['Compression/Ratio']=''
df['BPP']=''
df['Signature']=''
df['Signature Rotated']=''
df['SW']=''
df['Luminance(QT)']=''
df['Chrominance(QT)']=''
df['Quality Factor(Luminance)']=''
df['Quality Factor(Chrominance)']=''
print (df.name)
for i in image_files:
#df2=df[df['name'].str.contains(i)]
#print(df2)
file_name=i+".txt"
sw=[]
with open(file_name, 'r') as f:
for line in f:
if 'Destination ID=0 (Luminance)' in line:
count=0
lum_list=[]
for line in f:
if count<9:
#print (line)
count=count+1
lum_list.append(line)
else:
break
if 'Destination ID=1 (Chrominance)' in line:
count=0
chrom_list=[]
for line in f:
if count<9:
#print (line)
count=count+1
chrom_list.append(line)
else:
break
#cr=''
if 'Compression Ratio:' in line:
cr=line
print (cr)
else:
cr=''
#bpp=''
if 'Bits per pixel:' in line:
bpp=line
#print (line)
else:
bpp=''
if 'Signature:' in line:
sign=line
#print (line)
if 'Signature (Rotated):' in line:
signR=line
#print (line)
if 'SW :' in line:
#print(line)
sw.append(line)
if 'ASSESSMENT:' in line:
assessment=line
#print (line)
lum_li=lum_list[:-1]
chrom_li=chrom_list[:-1]
#print (sw)
#print(lum_li)
#print(chrom_li)
#print (cr)
cr= cr
print (cr)
bpp = bpp.strip('Bits per pixel: ')
sign = sign.strip('Signature: ')
signR = signR.strip('Signature (Rotated): ')
assessment=assessment.strip('ASSESSMENT: Class 1 - ')
lum_qf= (lum_list[-1])
chrom_qf= (chrom_list[-1])
lum_qf=lum_qf.strip('Approx quality factor = ')
chrom_qf=chrom_qf.strip('Approx quality factor = ')
df.loc[df['name'].str.contains(i), 'Assessment'] = assessment
df.loc[df['name'].str.contains(i), 'Compression/Ratio'] = str(cr)
df.loc[df['name'].str.contains(i), 'BPP'] = str(bpp)
df.loc[df['name'].str.contains(i), 'Signature'] = sign
df.loc[df['name'].str.contains(i), 'Signature Rotated'] = signR
df.loc[df['name'].str.contains(i), 'SW'] = str(sw)
df.loc[df['name'].str.contains(i), 'Luminance(QT)'] = str(lum_li)
df.loc[df['name'].str.contains(i), 'Chrominance(QT)'] = str(chrom_li)
df.loc[df['name'].str.contains(i), 'Quality Factor(Luminance)'] = lum_qf
df.loc[df['name'].str.contains(i), 'Quality Factor(Chrominance)'] = chrom_qf
df.to_csv('updated_test.csv', index=False) #your csv with exif and jpegsnoop data
print (df.shape) | 0 | 0 | 0 |
972f9280ed74904eaa019b25f28e79a5477d0743 | 1,426 | py | Python | lib/modules/twitter.py | nkrios/omnibus | d73c1e720f1d97aa104a4286187f785ef0dcaae5 | [
"MIT"
] | 251 | 2018-05-08T20:40:37.000Z | 2022-03-22T22:31:17.000Z | lib/modules/twitter.py | samyoyo/omnibus | 65f6251137d6e38128c19120aa204a577b2cbcaf | [
"MIT"
] | 33 | 2018-05-08T21:30:54.000Z | 2020-08-19T16:24:28.000Z | lib/modules/twitter.py | samyoyo/omnibus | 65f6251137d6e38128c19120aa204a577b2cbcaf | [
"MIT"
] | 70 | 2018-05-16T12:53:05.000Z | 2022-03-22T22:31:20.000Z | #!/usr/bin/env python
##
# omnibus - deadbits
# Twitter username search
##
from BeautifulSoup import BeautifulSoup
from http import get
| 30.340426 | 148 | 0.586255 | #!/usr/bin/env python
##
# omnibus - deadbits
# Twitter username search
##
from BeautifulSoup import BeautifulSoup
from http import get
class Plugin(object):
def __init__(self, artifact):
self.artifact = artifact
self.artifact['data']['twitter'] = None
self.headers = {'User-Agent': 'OSINT Omnibus (https://github.com/InQuest/Omnibus)'}
def run(self):
url = 'https://www.twitter.com/%s' % self.artifact['name']
try:
status, response = get(url, headers=self.headers)
if status:
soup = BeautifulSoup(response.content, 'lxml')
self.artifact['data']['twitter'] = {}
self.artifact['data']['twitter']['name'] = soup.find('h1').contents[1].text
try:
self.artifact['data']['twitter']['location'] = soup.find('span', class_='ProfileHeaderCard-locationText u-dir').contents[1].text
except:
self.artifact['data']['twitter']['location'] = None
self.artifact['data']['twitter']['description'] = soup.find('div', class_='ProfileHeaderCard').contents[5].text
self.artifact['data']['twitter']['created'] = soup.find('div', class_='ProfileHeaderCard-joinDate').contents[3].text
except:
pass
def main(artifact):
plugin = Plugin(artifact)
plugin.run()
return plugin.artifact
| 1,186 | 0 | 99 |
5e8ff68482070eb5109267cce3591f87f0e356e1 | 473 | py | Python | tests/__main__.py | NiftyPET/NIPET | b1c3799c4a795e1db34dc36decd197ab74e95c6e | [
"Apache-2.0"
] | 10 | 2019-12-11T07:05:07.000Z | 2021-12-21T13:52:56.000Z | tests/__main__.py | NiftyPET/NIPET | b1c3799c4a795e1db34dc36decd197ab74e95c6e | [
"Apache-2.0"
] | 21 | 2019-12-11T11:47:07.000Z | 2022-02-08T19:42:40.000Z | tests/__main__.py | pjmark/NIPET | b1c3799c4a795e1db34dc36decd197ab74e95c6e | [
"Apache-2.0"
] | 3 | 2019-02-19T21:19:00.000Z | 2019-08-27T02:26:31.000Z | import logging
from niftypet.ninst import install_tools as tls
from .conftest import HOME
log = logging.getLogger(__name__)
DATA_URL = "https://zenodo.org/record/3877529/files/amyloidPET_FBP_TP0_extra.zip?download=1"
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
| 23.65 | 92 | 0.72093 | import logging
from niftypet.ninst import install_tools as tls
from .conftest import HOME
log = logging.getLogger(__name__)
DATA_URL = "https://zenodo.org/record/3877529/files/amyloidPET_FBP_TP0_extra.zip?download=1"
def main():
log.info(f"Downloading {DATA_URL}\nto ${{DATA_ROOT:-~}} ({HOME})")
with tls.urlopen_cached(DATA_URL, HOME) as fd:
tls.extractall(fd, HOME)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
| 145 | 0 | 23 |
26386774389ead7024b7022fe9cb7770be70ac81 | 466 | py | Python | solution_cid_check.py | nmansard/wsmemmo_pinocchio | 42722cc07e700140748f1d87a2671672d28cecc4 | [
"MIT"
] | 5 | 2019-01-28T15:02:50.000Z | 2020-08-08T01:25:28.000Z | solution_cid_check.py | nmansard/wsmemmo_pinocchio | 42722cc07e700140748f1d87a2671672d28cecc4 | [
"MIT"
] | 2 | 2019-01-28T13:07:48.000Z | 2019-01-30T14:14:28.000Z | solution_cid_check.py | nmansard/wsmemmo_pinocchio | 42722cc07e700140748f1d87a2671672d28cecc4 | [
"MIT"
] | 4 | 2019-05-05T19:29:20.000Z | 2022-02-16T23:57:40.000Z | forces = pinocchio.StdVect_Force()
for i in range(rmodel.njoints): forces.append(pinocchio.Force.Zero())
tau,fr,fl = pbm.x2vars(res)
Mr = rmodel.frames[pbm.idR].placement
jr = rmodel.frames[pbm.idR].parent
forces[jr] = Mr.act(pinocchio.Force(fr))
Ml = rmodel.frames[pbm.idL].placement
jl = rmodel.frames[pbm.idL].parent
fl = pinocchio.Force(fl)
forces[jl] = Mr.act(pinocchio.Force(fl))
print(pinocchio.rnea(rmodel,rdata,pbm.q,pbm.vq,zero(rmodel.nv),forces)-tau)
| 29.125 | 75 | 0.742489 | forces = pinocchio.StdVect_Force()
for i in range(rmodel.njoints): forces.append(pinocchio.Force.Zero())
tau,fr,fl = pbm.x2vars(res)
Mr = rmodel.frames[pbm.idR].placement
jr = rmodel.frames[pbm.idR].parent
forces[jr] = Mr.act(pinocchio.Force(fr))
Ml = rmodel.frames[pbm.idL].placement
jl = rmodel.frames[pbm.idL].parent
fl = pinocchio.Force(fl)
forces[jl] = Mr.act(pinocchio.Force(fl))
print(pinocchio.rnea(rmodel,rdata,pbm.q,pbm.vq,zero(rmodel.nv),forces)-tau)
| 0 | 0 | 0 |
c567ceae1a3c6867872194127272d492f0ef6adb | 1,782 | py | Python | boxdb/support_litebase.py | kshitij1235/boxdb | 4aa121f856c148c5136368041a610a584fb1dbc6 | [
"MIT"
] | 1 | 2022-01-31T17:21:02.000Z | 2022-01-31T17:21:02.000Z | boxdb/support_litebase.py | kshitij1235/boxdb | 4aa121f856c148c5136368041a610a584fb1dbc6 | [
"MIT"
] | null | null | null | boxdb/support_litebase.py | kshitij1235/boxdb | 4aa121f856c148c5136368041a610a584fb1dbc6 | [
"MIT"
] | null | null | null | '''
boxdb/support_litebase -> v0.3
This file contain code for
1)get the data from file, and get row data
[ ]get_content() speed optimization
[ ]get_element() function added
'''
def get_content(context, target):
"""
It gets the content from any file with
data in it(auto generated) and returns in list
"""
lines = []
try:
with open(target,encoding='UTF-8') as file:
for line in file:
line = line.strip()
lines.append(line)
except FileNotFoundError:
print(f"{context} file missing")
return lines
def get_columns(table_name):
"""
It gets the content from any file with
data in it(auto generated) and returns in list
"""
lines = []
try:
with open(f"{table_name}/{table_name}_data.txt",encoding='UTF-8') as file:
for line in file:
line = line.strip()
try:
lines.append(line.removesuffix("-P").strip())
except Exception:
lines.append(line)
except FileNotFoundError:
print("column file missing")
return lines
def get_primary_column(table_name):
"""
This gets all the primary key rows from the table
"""
#FIXME optimization need takes 0.009 secs
columns= get_content("row", f"{table_name}/{table_name}_data.txt")
return [
elements[: len(elements) - 2].strip()
for elements in columns
if elements.find("-P") > 0]
def get_elements(table_name,column):
"""
get values from column
"""
with open(f'.\\{table_name}\\tables\\{column}.txt','r+',encoding="UTF-8") as files:
line=files.readlines()
return [elements.removesuffix('\n').strip() for elements in line]
| 27.415385 | 87 | 0.594837 | '''
boxdb/support_litebase -> v0.3
This file contain code for
1)get the data from file, and get row data
[ ]get_content() speed optimization
[ ]get_element() function added
'''
def get_content(context, target):
"""
It gets the content from any file with
data in it(auto generated) and returns in list
"""
lines = []
try:
with open(target,encoding='UTF-8') as file:
for line in file:
line = line.strip()
lines.append(line)
except FileNotFoundError:
print(f"{context} file missing")
return lines
def get_columns(table_name):
"""
It gets the content from any file with
data in it(auto generated) and returns in list
"""
lines = []
try:
with open(f"{table_name}/{table_name}_data.txt",encoding='UTF-8') as file:
for line in file:
line = line.strip()
try:
lines.append(line.removesuffix("-P").strip())
except Exception:
lines.append(line)
except FileNotFoundError:
print("column file missing")
return lines
def get_primary_column(table_name):
"""
This gets all the primary key rows from the table
"""
#FIXME optimization need takes 0.009 secs
columns= get_content("row", f"{table_name}/{table_name}_data.txt")
return [
elements[: len(elements) - 2].strip()
for elements in columns
if elements.find("-P") > 0]
def get_elements(table_name,column):
"""
get values from column
"""
with open(f'.\\{table_name}\\tables\\{column}.txt','r+',encoding="UTF-8") as files:
line=files.readlines()
return [elements.removesuffix('\n').strip() for elements in line]
| 0 | 0 | 0 |
5322bc5aa344a86a7e2d830119a5f46c1675a541 | 1,960 | py | Python | city_translit.py | SergeiMinaev/city_translit | 632e7c71a66e11591a41be8e6157c6d197c370ba | [
"MIT"
] | null | null | null | city_translit.py | SergeiMinaev/city_translit | 632e7c71a66e11591a41be8e6157c6d197c370ba | [
"MIT"
] | null | null | null | city_translit.py | SergeiMinaev/city_translit | 632e7c71a66e11591a41be8e6157c6d197c370ba | [
"MIT"
] | null | null | null | import re
CITY_RE = re.compile('[^a-zA-Z- ]')
DATA = {
'a': {
'anzherosudzhensk':'анжеро-судженск',
'ashukino':'ашукино',
},
'b': {
'bronnitsy':'бронницы',
'biysk':'бийск',
},
'c': {
'chelyabinsk':'челябинск',
},
'd': {
},
'e': {
'elektrostal':'электросталь',
},
'f': {
},
'g': {
},
'h': {
},
'i': {
'ivanteyevka':'ивантеевка',
'irkutsk':'иркутск',
'ivanovo':'иваново',
},
'j': {
},
'k': {
'kirov':'киров',
'krasnoarmeysk':'красноармейск',
'korolyov':'королев',
'kashira':'кашира',
'kazan':'казань',
'kozhevnikovo':'кожевниково',
'klin':'клин',
'klimovsk':'климовск',
'krasnodar':'краснодар',
},
'l': {
'leninsk_kuznetsky':'ленинск-кузнецкий',
},
'm': {
'moscow':'москва',
'mezhdurechensk':'междуреченск',
},
'n': {
'novoaltaysk':'новоалтайск',
'nizhniy_novgorod':'нижний новгород',
},
'o': {
'oktyabrsky':'октябрьский',
'orenburg':'оренбург',
},
'p': {
'podolsk':'подольск',
'plavsk':'плавск',
'prokopyevsk':'прокопьевск',
},
'q': {
},
'r': {
'ramenskoye':'раменское',
},
's': {
'st_petersburg':'санкт-петербург',
'staraya yurga':'старая юрга',
'sverdlovskiy':'свердловский',
},
't': {
'tayga':'тайга',
'tula':'тула',
},
'v': {
'volgograd':'волгоград',
},
'u': {
},
'w': {
},
'x': {
},
'y': {
'yaroslavl':'ярославль',
'yekaterinburg':'екатеринбург',
},
'z': {
'zhukovskiy':'жуковский',
},
}
| 19.405941 | 56 | 0.448469 | import re
CITY_RE = re.compile('[^a-zA-Z- ]')
DATA = {
'a': {
'anzherosudzhensk':'анжеро-судженск',
'ashukino':'ашукино',
},
'b': {
'bronnitsy':'бронницы',
'biysk':'бийск',
},
'c': {
'chelyabinsk':'челябинск',
},
'd': {
},
'e': {
'elektrostal':'электросталь',
},
'f': {
},
'g': {
},
'h': {
},
'i': {
'ivanteyevka':'ивантеевка',
'irkutsk':'иркутск',
'ivanovo':'иваново',
},
'j': {
},
'k': {
'kirov':'киров',
'krasnoarmeysk':'красноармейск',
'korolyov':'королев',
'kashira':'кашира',
'kazan':'казань',
'kozhevnikovo':'кожевниково',
'klin':'клин',
'klimovsk':'климовск',
'krasnodar':'краснодар',
},
'l': {
'leninsk_kuznetsky':'ленинск-кузнецкий',
},
'm': {
'moscow':'москва',
'mezhdurechensk':'междуреченск',
},
'n': {
'novoaltaysk':'новоалтайск',
'nizhniy_novgorod':'нижний новгород',
},
'o': {
'oktyabrsky':'октябрьский',
'orenburg':'оренбург',
},
'p': {
'podolsk':'подольск',
'plavsk':'плавск',
'prokopyevsk':'прокопьевск',
},
'q': {
},
'r': {
'ramenskoye':'раменское',
},
's': {
'st_petersburg':'санкт-петербург',
'staraya yurga':'старая юрга',
'sverdlovskiy':'свердловский',
},
't': {
'tayga':'тайга',
'tula':'тула',
},
'v': {
'volgograd':'волгоград',
},
'u': {
},
'w': {
},
'x': {
},
'y': {
'yaroslavl':'ярославль',
'yekaterinburg':'екатеринбург',
},
'z': {
'zhukovskiy':'жуковский',
},
}
def get_city_ru(name_en: str) -> str:
name_en = CITY_RE.sub('', name_en.lower())
name_en = name_en.replace('-', '_').replace(' ','_')
return DATA[name_en[0]].get(name_en, None)
| 167 | 0 | 23 |
5b1105e73c18792d36bae18e66c4af3e8663d757 | 22,966 | py | Python | checkpoint/sequences.py | rahul-netizen/checkpoint | 6b42bd3ace57ffd91589109b7691ccdc68801d6b | [
"MIT"
] | 17 | 2021-06-17T14:05:55.000Z | 2022-03-10T19:14:15.000Z | checkpoint/sequences.py | rahul-netizen/checkpoint | 6b42bd3ace57ffd91589109b7691ccdc68801d6b | [
"MIT"
] | 25 | 2021-06-18T15:31:35.000Z | 2022-03-28T04:22:47.000Z | checkpoint/sequences.py | rahul-netizen/checkpoint | 6b42bd3ace57ffd91589109b7691ccdc68801d6b | [
"MIT"
] | 10 | 2021-07-11T15:43:27.000Z | 2022-03-20T14:12:56.000Z | import json
import os
from collections import OrderedDict
from itertools import count
from multiprocessing import cpu_count
from tempfile import TemporaryDirectory as InTemporaryDirectory
from types import MethodType
from joblib import Parallel, delayed
from checkpoint import __version__ as version
from checkpoint.crypt import Crypt, generate_key
from checkpoint.io import IO
from checkpoint.readers import get_all_readers
from checkpoint.utils import LogColors, Logger, get_reader_by_extension
class Sequence:
    """Class to represent a sequence of operations.

    Member functions whose names start with ``seq`` are collected
    automatically and executed according to their order in ``order_dict``.
    """

    def __init__(self, sequence_name, order_dict=None, logger=None, terminal_log=False):
        """Initialize the sequence class.

        Parameters
        ----------
        sequence_name: str
            Name of the sequence.
        order_dict: dict, optional
            Dictionary of function names and their order in the sequence.
        logger: `checkpoint.utils.Logger`, optional
            Logger for the sequence class.
        terminal_log: bool, optional
            If True, messages are logged to the terminal instead of a file.
        """
        self.terminal_log = terminal_log
        self.log_mode = 't' if self.terminal_log else 'f'
        self.logger = logger or Logger(log_mode=self.log_mode)
        self.sequence_name = sequence_name
        self.sequence_dict = OrderedDict()
        self.order_dict = order_dict or {}
        self._sequence_functions = self.sequence_dict.items()
        self.sequence_functions = []
        self.get_sequence_functions()
        # User hooks triggered when the whole sequence / a single sequence
        # function has finished executing.
        self.on_sequence_end = lambda seq: None
        self.on_sequence_function_end = lambda seq: None

    def __repr__(self):
        """Return the string representation of the Sequence."""
        _member_functions = [
            _func.__name__ for _func in self.sequence_dict.values()]
        return f'Name: {self.name}, Member Function: {_member_functions}'

    def add_sequence_function(self, func, order=0):
        """Add a member function to the sequence.

        Parameters
        ----------
        func: method
            Function that is to be added to the sequence.
        order: int, optional
            The order of the function in the sequence.
        """
        if not func.__name__.startswith('seq'):
            raise ValueError('Function name must start with "seq"')
        if order in self.sequence_dict:
            # Registering a second function at the same order silently
            # replaces the first; warn about the override.
            _msg = f'Warning: overriting {self.sequence_dict[order].__name__} with {func.__name__}'
            self.logger.log(
                _msg, LogColors.WARNING, timestamp=True, log_caller=True, log_type="INFO")
        self.sequence_dict[order] = func

    def add_sub_sequence(self, sequence, order=0):
        """Add a sub sequence to the current sequence.

        Parameters
        ----------
        sequence: :class: `Sequence`
            The sub sequence that is to be added.
        order: int, optional
            Order assigned to the first function of the sub sequence;
            subsequent functions get consecutive orders.
        """
        if not isinstance(sequence, Sequence):
            raise TypeError('Sub sequence must be of type Sequence')
        _iterator = (count(start=order, step=1))
        for func_obj in sequence.sequence_dict.items():
            self.add_sequence_function(func_obj[1], order=next(_iterator))

    def execute_sequence(self, execution_policy='decreasing_order', pass_args=False):
        """Execute all functions in the current sequence.

        Parameters
        ----------
        execution_policy: str
            The policy to be followed while executing the functions.
            Possible values are 'increasing_order' or 'decreasing_order'.
        pass_args: bool
            If True, the return value of each executed function is passed
            to the next function.

        Returns
        -------
        list
            Return values of the executed functions, in execution order.
        """
        self.update_order()
        _return_values = []
        if execution_policy == 'decreasing_order':
            # Highest order number runs first.
            _sorted_sequence = sorted(self.sequence_dict.items(), reverse=True)
            for func_obj in _sorted_sequence:
                # Human-readable name, e.g. 'seq_walk_directories' ->
                # 'Walk Directories', used for log messages.
                context_text = func_obj[1].__name__.split(
                    'seq_')[-1].replace('_', ' ').title()
                try:
                    if pass_args:
                        if len(_return_values) > 0:
                            _return_value = func_obj[1](_return_values[-1])
                        else:
                            # First function gets no argument to chain.
                            _return_value = func_obj[1]()
                    else:
                        _return_value = func_obj[1]()
                except Exception as e:
                    _msg = f'{context_text}'
                    self.logger.log(
                        _msg, [LogColors.ERROR, LogColors.UNDERLINE],
                        timestamp=True, log_type="ERROR")
                    # Re-raise with the failing step's name prepended,
                    # preserving the original exception type.
                    raise type(e)(f'{context_text} failed with error: {e}')
                _msg = f'{context_text}'
                self.logger.log(
                    _msg, [LogColors.SUCCESS, LogColors.UNDERLINE],
                    timestamp=True, log_type="SUCCESS")
                self.on_sequence_function_end(self)
                _return_values.append(_return_value)
            self.on_sequence_end(self)
        elif execution_policy == 'increasing_order':
            for _, func in self.sequence_dict.items():
                if pass_args:
                    _return_value = func(_return_values[-1])
                else:
                    _return_value = func()
                _return_values.append(_return_value)
            self.on_sequence_end(self)
        else:
            raise ValueError(
                f'{execution_policy} is an invalid execution policy')
        return _return_values

    def update_order(self):
        """Update the order of sequence functions in sequence dict."""
        self.sequence_dict = OrderedDict(sorted(self.sequence_dict.items()))

    def flush_sequence(self):
        """Flush the sequence."""
        self.sequence_dict.clear()

    def get_sequence_functions(self):
        """Collect all bound methods whose names start with ``seq``."""
        self.sequence_functions.clear()
        for name in dir(self):
            if name.startswith('seq') and isinstance(getattr(self, name), MethodType):
                _func = getattr(self, name)
                if name not in self.order_dict:
                    # Unlisted functions are appended after the known ones.
                    self.order_dict[name] = len(self.sequence_functions)
                self.sequence_functions.append(_func)
        self.generate_sequence()

    def generate_sequence(self):
        """Generate a sequence from all member functions."""
        for func in self.sequence_functions:
            _name = func.__name__
            _order = self.order_dict[_name]
            self.add_sequence_function(func, _order)

    # NOTE(review): the two read-only properties below were missing from
    # this copy of the class (bare stacked decorators remained), which made
    # the class body raise at definition time; restored from the intact
    # definition of this class elsewhere in the file.
    @property
    def name(self):
        """str: Name of the sequence."""
        return self.sequence_name

    @property
    def sequence_functions(self):
        """list: Sequence functions collected from this instance."""
        return self._sequence_functions

    @sequence_functions.setter
    def sequence_functions(self, functions):
        """Set the value of sequence functions to a list.

        Parameters
        ----------
        functions: list of methods
            List of methods that are to be assigned (a shallow copy is
            stored).
        """
        self._sequence_functions = functions[:]
class IOSequence(Sequence):
    """Class to represent a sequence of IO operations."""

    def __init__(self, sequence_name='IO_Sequence', order_dict=None,
                 root_dir=None, ignore_dirs=None, num_cores=None,
                 terminal_log=False):
        """Initialize the IO sequence class.

        Default execution sequence is:

        1. Walk through the root directory
        2. Group files by extension
        3. Map readers based on extension
        4. Read files
        5. Encrypt the files

        Parameters
        ----------
        sequence_name: str
            Name of the sequence.
        order_dict: dict, optional
            Dictionary of function names and their order in the sequence.
        root_dir: str, optional
            The root directory.
        ignore_dirs: list of str, optional
            List of directories to be ignored.
        num_cores: int, optional
            Number of cores to be used for parallel processing.
        terminal_log: bool, optional
            If True, messages will be logged to the terminal
        """
        # Higher number == executed earlier under the default
        # 'decreasing_order' policy: walk first, encrypt last.
        self.default_order_dict = {
            'seq_walk_directories': 4,
            'seq_group_files': 3,
            'seq_map_readers': 2,
            'seq_read_files': 1,
            'seq_encrypt_files': 0,
        }
        super(IOSequence, self).__init__(sequence_name,
                                         order_dict or self.default_order_dict,
                                         terminal_log=terminal_log)
        self.root_dir = root_dir or os.getcwd()
        self.ignore_dirs = ignore_dirs or []
        # The checkpoint metadata directory itself must never be snapshotted.
        self.ignore_dirs.append('.checkpoint')
        self.io = IO(self.root_dir, ignore_dirs=self.ignore_dirs)
        self.num_cores = num_cores or cpu_count()

    def seq_walk_directories(self):
        """Walk through all directories in the root directory.

        Returns
        -------
        dict
            Mapping of directory path to the list of file paths inside it.
        """
        directory2files = {}
        for root, file in self.io.walk_directory():
            if root in directory2files:
                directory2files[root].append(os.path.join(root, file))
            else:
                directory2files[root] = [os.path.join(root, file)]
        return directory2files

    def seq_group_files(self, directory2files):
        """Group files by their (lower-cased) file extension.

        Parameters
        ----------
        directory2files: dict
            Dictionary of directory names and their files.

        Returns
        -------
        dict
            Mapping of extension (without dot) to the list of file paths.
        """
        extensions_dict = {}
        for files in directory2files.items():
            for file in files[1]:
                base_file = os.path.basename(file)
                # Extension = text after the last dot of the basename.
                extension = base_file.split('.')[-1].lower()
                if extension not in extensions_dict:
                    extensions_dict[extension] = [file]
                else:
                    extensions_dict[extension].append(file)
        return extensions_dict

    def seq_map_readers(self, extensions_dict):
        """Map the extensions to their respective Readers.

        Parameters
        ----------
        extensions_dict: dict
            Dictionary of extensions and their files.

        Returns
        -------
        list
            ``[readers, extensions]`` dicts; extensions for which no usable
            reader could be found are dropped from both.
        """
        _readers = {}
        unavailabe_extensions = []
        for extension, _ in extensions_dict.items():
            _readers[extension] = get_reader_by_extension(extension)
            if not _readers[extension]:
                # No registered reader for this extension -- probe every
                # known reader against a scratch file and keep the last one
                # that reads it without raising.
                all_readers = get_all_readers()
                with InTemporaryDirectory() as temp_dir:
                    temp_file = os.path.join(temp_dir, f'temp.{extension}')
                    self.io.write(temp_file, 'w+', 'test content')
                    selected_reader = None
                    for reader in all_readers:
                        try:
                            _msg = f'Trying {reader.__name__} for extension {extension}'
                            self.logger.log(
                                _msg, colors=LogColors.BOLD, log_caller=True, log_type="INFO")
                            reader = reader()
                            reader.read(temp_file, validate=False)
                            selected_reader = reader
                        except Exception:
                            selected_reader = None
                            continue
                    if selected_reader:
                        _msg = f'{selected_reader.__class__.__name__} selected'
                        self.logger.log(
                            _msg, colors=LogColors.SUCCESS, timestamp=True, log_type="SUCCESS")
                        _readers[extension] = selected_reader
                    else:
                        unavailabe_extensions.append(extension)
                        del _readers[extension]
                        self.logger.log(
                            f'No reader found for extension {extension}, skipping',
                            colors=LogColors.ERROR, log_caller=True, log_type="ERROR")
        # Drop the unreadable extensions so later steps never see them.
        for extension in unavailabe_extensions:
            del extensions_dict[extension]
        return [_readers, extensions_dict]

    def seq_read_files(self, readers_extension):
        """Read the gathered files using their respective reader.

        Parameters
        ----------
        readers_extension: list
            Readers dict and extensions dict packed in a list.

        Returns
        -------
        list
            Per-extension results returned by the readers (one parallel
            job per extension).
        """
        readers_dict, extension_dict = readers_extension
        contents = \
            Parallel(self.num_cores)(delayed(readers_dict[ext].read)(files,
                                     validate=False) for (ext, files) in
                                     extension_dict.items())
        return contents

    def seq_encrypt_files(self, contents):
        """Encrypt the read files.

        Parameters
        ----------
        contents: list
            Per-extension content lists produced by ``seq_read_files``.

        Returns
        -------
        dict
            Dictionary of file paths and their encrypted content.
        """
        # TODO: Parallelize this
        path2content = {}
        crypt_obj = Crypt(key='crypt.key', key_path=os.path.join(
            self.root_dir, '.checkpoint'))
        for content in contents:
            for obj in content:
                # Each obj is a single-entry {path: content} mapping.
                # NOTE(review): Crypt.encrypt is handed the *path*, not the
                # content -- presumably it reads the file itself; confirm.
                path = list(obj.keys())[0]
                path2content[path] = crypt_obj.encrypt(path)
        return path2content
class CheckpointSequence(Sequence):
    """Sequence to perform checkpoint operations."""

    def __init__(self, sequence_name, order_dict, root_dir, ignore_dirs,
                 terminal_log=False):
        """Initialize the CheckpointSequence class.

        Parameters
        ----------
        sequence_name: str
            Name of the sequence (doubles as the checkpoint name).
        order_dict: dict
            Dictionary of function names and their order in the sequence.
        root_dir: str
            The root directory.
        ignore_dirs: list of str
            List of directories to be ignored.
        terminal_log: bool, optional
            If True, messages will be logged to the terminal
        """
        self.sequence_name = sequence_name
        self.order_dict = order_dict
        self.root_dir = root_dir
        self.ignore_dirs = ignore_dirs
        super(CheckpointSequence, self).__init__(sequence_name, order_dict,
                                                 terminal_log=terminal_log)

    def _validate_checkpoint(self):
        """Validate if a checkpoint is valid.

        Raises
        ------
        ValueError
            If no checkpoint directory exists for this sequence name.
        """
        checkpoint_path = os.path.join(self.root_dir, '.checkpoint', self.sequence_name)
        if not os.path.isdir(checkpoint_path):
            raise ValueError(f'Checkpoint {self.sequence_name} does not exist')

    def seq_init_checkpoint(self):
        """Initialize the checkpoint directory."""
        _io = IO(path=self.root_dir, mode="a",
                 ignore_dirs=self.ignore_dirs)
        path = _io.make_dir('.checkpoint')
        # Encryption key used by later create/restore operations.
        generate_key('crypt.key', path)
        checkpoint_config = {
            'current_checkpoint': None,
            'checkpoints': [],
            'ignore_dirs': self.ignore_dirs,
            'root_dir': self.root_dir,
        }
        config_path = os.path.join(self.root_dir, '.checkpoint', '.config')
        _io.write(config_path, 'w+', json.dumps(checkpoint_config))

    def seq_create_checkpoint(self):
        """Create a new checkpoint for the target directory."""
        checkpoint_path = os.path.join(self.root_dir, '.checkpoint', self.sequence_name)
        if os.path.isdir(checkpoint_path):
            raise ValueError(f'Checkpoint {self.sequence_name} already exists')
        _io = IO(path=self.root_dir, mode="a",
                 ignore_dirs=self.ignore_dirs)
        _io_sequence = IOSequence(root_dir=self.root_dir,
                                  ignore_dirs=self.ignore_dirs,
                                  terminal_log=self.terminal_log)
        # The last return value of the IO sequence is the mapping of file
        # paths to their encrypted contents.
        enc_files = _io_sequence.execute_sequence(pass_args=True)[-1]
        checkpoint_path = os.path.join(
            self.root_dir, '.checkpoint', self.sequence_name)
        checkpoint_path = _io.make_dir(checkpoint_path)
        checkpoint_file_path = os.path.join(
            checkpoint_path, f'{self.sequence_name}.json')
        config_path = os.path.join(self.root_dir, '.checkpoint', '.config')
        with open(checkpoint_file_path, 'w+') as checkpoint_file:
            json.dump(enc_files, checkpoint_file, indent=4)
        # Register the new checkpoint and mark it current in the config.
        with open(config_path, 'r') as config_file:
            checkpoint_config = json.load(config_file)
        checkpoint_config['checkpoints'].append(self.sequence_name)
        checkpoint_config['current_checkpoint'] = self.sequence_name
        with open(config_path, 'w+') as config_file:
            json.dump(checkpoint_config, config_file, indent=4)
        # Snapshot the directory layout alongside the encrypted contents.
        root2file = {}
        for root, file in _io.walk_directory():
            if root in root2file:
                root2file[root].append(os.path.join(root, file))
            else:
                root2file[root] = [os.path.join(root, file)]
        with open(os.path.join(checkpoint_path, '.metadata'), 'w+') as metadata_file:
            json.dump(root2file, metadata_file, indent=4)

    def seq_delete_checkpoint(self):
        """Delete the checkpoint for the target directory."""
        self._validate_checkpoint()
        _io = IO(path=self.root_dir, mode="a",
                 ignore_dirs=self.ignore_dirs)
        checkpoint_path = os.path.join(
            self.root_dir, '.checkpoint', self.sequence_name)
        config_path = os.path.join(self.root_dir, '.checkpoint', '.config')
        with open(config_path, 'r') as config_file:
            checkpoint_config = json.load(config_file)
        checkpoint_config['checkpoints'].remove(self.sequence_name)
        # The most recently registered remaining checkpoint (if any)
        # becomes the new current one.
        if len(checkpoint_config['checkpoints']):
            _new_current_checkpoint = checkpoint_config['checkpoints'][-1]
        else:
            _new_current_checkpoint = None
        checkpoint_config['current_checkpoint'] = _new_current_checkpoint
        with open(config_path, 'w+') as config_file:
            json.dump(checkpoint_config, config_file, indent=4)
        _io.delete_dir(checkpoint_path)

    def seq_restore_checkpoint(self):
        """Restore back to a specific checkpoint."""
        self._validate_checkpoint()
        _io = IO(path=self.root_dir, mode="a",
                 ignore_dirs=self.ignore_dirs)
        _key = os.path.join(self.root_dir, '.checkpoint')
        crypt = Crypt(key='crypt.key', key_path=_key)
        checkpoint_path = os.path.join(self.root_dir, '.checkpoint',
                                       self.sequence_name, f'{self.sequence_name}.json')
        config_path = os.path.join(self.root_dir, '.checkpoint', '.config')
        with open(checkpoint_path, 'r') as checkpoint_file:
            checkpoint_dict = json.load(checkpoint_file)
        with open(config_path, 'r') as config_file:
            checkpoint_config = json.load(config_file)
        checkpoint_config['current_checkpoint'] = self.sequence_name
        with open(config_path, 'w+') as config_file:
            json.dump(checkpoint_config, config_file, indent=4)
        # Decrypt every stored file and write it back in place.
        for file, content in checkpoint_dict.items():
            content = crypt.decrypt(content)
            _io.write(file, 'wb+', content)

    def seq_version(self):
        """Print the version of the sequence."""
        _msg = f'Running version {version}'
        self.logger.log(_msg, timestamp=True, log_type="INFO")
class CLISequence(Sequence):
    """Sequence for the CLI environment."""

    def __init__(self, sequence_name='CLI_Sequence', order_dict=None,
                 arg_parser=None, args=None, terminal_log=False):
        """Initialize the CLISequence class.

        The default execution order is: parse the CLI arguments, determine
        the requested action, then perform it.

        Parameters
        ----------
        sequence_name: str
            Name of the sequence.
        order_dict: dict
            Dictionary of the order of the functions in the sequence.
        arg_parser: ArgumentParser
            Argument parser for the CLI.
        args: list, optional
            Raw argument strings; when None the parser reads ``sys.argv``.
        terminal_log: bool, optional
            If True, messages are logged to the terminal.
        """
        self.default_order_dict = {
            'seq_parse_args': 2,
            'seq_determine_action': 1,
            'seq_perform_action': 0,
        }
        self.args = args
        self.arg_parser = arg_parser
        super(CLISequence, self).__init__(
            sequence_name=sequence_name,
            order_dict=order_dict or self.default_order_dict,
            terminal_log=terminal_log)

    def seq_parse_args(self):
        """Parse the arguments from the CLI."""
        # A stored args list means we parse programmatically supplied
        # arguments; otherwise the parser falls back to sys.argv.
        supplied = self.args
        if supplied is None:
            return self.arg_parser.parse_args()
        return self.arg_parser.parse_args(supplied)

    def seq_determine_action(self, args):
        """Determine the action to be performed.

        Parameters
        ----------
        args: Namespace
            Parsed arguments from the CLI.

        Returns
        -------
        list
            ``[method_name, args]`` where ``method_name`` is the
            CheckpointSequence method implementing the action.
        """
        dispatch = {
            'create': 'seq_create_checkpoint',
            'restore': 'seq_restore_checkpoint',
            'delete': 'seq_delete_checkpoint',
            'init': 'seq_init_checkpoint',
            'version': 'seq_version',
        }
        if args.action not in dispatch:
            raise ValueError('Invalid action.')
        return [dispatch[args.action], args]

    def seq_perform_action(self, action_args):
        """Perform the action.

        Parameters
        ----------
        action_args: list
            List containing action and args Namespace.
        """
        action, args = action_args
        ckpt_name = args.name
        ckpt_path = args.path
        skip_dirs = args.ignore_dirs or []
        # These actions work without a named checkpoint.
        standalone_actions = ['seq_init_checkpoint', 'seq_version']
        if not (ckpt_name and ckpt_path) and action not in standalone_actions:
            raise ValueError(f'{args.action} requires a valid name and a path')
        ckpt_sequence = CheckpointSequence(
            ckpt_name, {action: 0}, ckpt_path, skip_dirs,
            terminal_log=self.terminal_log)
        getattr(ckpt_sequence, action)()
| 36.570064 | 99 | 0.587347 | import json
import os
from collections import OrderedDict
from itertools import count
from multiprocessing import cpu_count
from tempfile import TemporaryDirectory as InTemporaryDirectory
from types import MethodType
from joblib import Parallel, delayed
from checkpoint import __version__ as version
from checkpoint.crypt import Crypt, generate_key
from checkpoint.io import IO
from checkpoint.readers import get_all_readers
from checkpoint.utils import LogColors, Logger, get_reader_by_extension
class Sequence:
    """Class to represent a sequence of operations."""

    def __init__(self, sequence_name, order_dict=None, logger=None, terminal_log=False):
        """Initialize the sequence class.

        Parameters
        ----------
        sequence_name: str
            Name of the sequence.
        order_dict: dict, optional
            Dictionary of function names and their order in the sequence.
        logger: `checkpoint.utils.Logger`, optional
            Logger for the sequence class
        terminal_log: bool, optional
            If True, messages are logged to the terminal instead of a file.
        """
        self.terminal_log = terminal_log
        self.log_mode = 't' if self.terminal_log else 'f'
        self.logger = logger or Logger(log_mode=self.log_mode)
        self.sequence_name = sequence_name
        self.sequence_dict = OrderedDict()
        self.order_dict = order_dict or {}
        self._sequence_functions = self.sequence_dict.items()
        self.sequence_functions = []
        self.get_sequence_functions()
        # User hook that is triggered when the sequence/sequence function has finished
        self.on_sequence_end = lambda seq: None
        self.on_sequence_function_end = lambda seq: None

    def __repr__(self):
        """Return the string representation of the Sequence."""
        _member_functions = [
            _func.__name__ for _func in self.sequence_dict.values()]
        return f'Name: {self.name}, Member Function: {_member_functions}'

    def add_sequence_function(self, func, order=0):
        """Add a member function to the sequence.

        Parameters
        ----------
        func: method
            Function that is to be added to the sequence.
        order: int, optional
            The order of the function in the sequence
        """
        if not func.__name__.startswith('seq'):
            raise ValueError('Function name must start with "seq"')
        if order in self.sequence_dict:
            # A second function at the same order replaces the first; warn.
            _msg = f'Warning: overriting {self.sequence_dict[order].__name__} with {func.__name__}'
            self.logger.log(
                _msg, LogColors.WARNING, timestamp=True, log_caller=True, log_type="INFO")
        self.sequence_dict[order] = func

    def add_sub_sequence(self, sequence, order=0):
        """Add a sub sequence to the current sequence.

        Parameters
        ----------
        sequence: :class: `Sequence`
            The sub sequence that is to be added
        order: int, optional
            Order of the first sub-sequence function; the rest get
            consecutive orders.
        """
        if not isinstance(sequence, Sequence):
            raise TypeError('Sub sequence must be of type Sequence')
        _iterator = (count(start=order, step=1))
        for func_obj in sequence.sequence_dict.items():
            self.add_sequence_function(func_obj[1], order=next(_iterator))

    def execute_sequence(self, execution_policy='decreasing_order', pass_args=False):
        """Execute all functions in the current sequence.

        Parameters
        ----------
        execution_policy: str
            The policy to be followed while executing the functions.
            Possible values are 'increasing_order' or 'decreasing_order'.
        pass_args: bool
            If True, the arguments of the executed function will be passed to the next function.

        Returns
        -------
        list
            Return values of the executed functions, in execution order.
        """
        self.update_order()
        _return_values = []
        if execution_policy == 'decreasing_order':
            # Highest order number runs first.
            _sorted_sequence = sorted(self.sequence_dict.items(), reverse=True)
            for func_obj in _sorted_sequence:
                # 'seq_walk_directories' -> 'Walk Directories' for logging.
                context_text = func_obj[1].__name__.split(
                    'seq_')[-1].replace('_', ' ').title()
                try:
                    if pass_args:
                        if len(_return_values) > 0:
                            _return_value = func_obj[1](_return_values[-1])
                        else:
                            # First function has nothing to chain from.
                            _return_value = func_obj[1]()
                    else:
                        _return_value = func_obj[1]()
                except Exception as e:
                    _msg = f'{context_text}'
                    self.logger.log(
                        _msg, [LogColors.ERROR, LogColors.UNDERLINE],
                        timestamp=True, log_type="ERROR")
                    # Re-raise with the failing step named, keeping the type.
                    raise type(e)(f'{context_text} failed with error: {e}')
                _msg = f'{context_text}'
                self.logger.log(
                    _msg, [LogColors.SUCCESS, LogColors.UNDERLINE],
                    timestamp=True, log_type="SUCCESS")
                self.on_sequence_function_end(self)
                _return_values.append(_return_value)
            self.on_sequence_end(self)
        elif execution_policy == 'increasing_order':
            for _, func in self.sequence_dict.items():
                if pass_args:
                    _return_value = func(_return_values[-1])
                else:
                    _return_value = func()
                _return_values.append(_return_value)
            self.on_sequence_end(self)
        else:
            raise ValueError(
                f'{execution_policy} is an invalid execution policy')
        return _return_values

    def update_order(self):
        """Update the order of sequence functions in sequence dict."""
        self.sequence_dict = OrderedDict(sorted(self.sequence_dict.items()))

    def flush_sequence(self):
        """Flush the sequence."""
        self.sequence_dict.clear()

    def get_sequence_functions(self):
        """Get all the sequence functions (bound methods named ``seq*``)."""
        self.sequence_functions.clear()
        for name in dir(self):
            if name.startswith('seq') and isinstance(getattr(self, name), MethodType):
                _func = getattr(self, name)
                if name not in self.order_dict:
                    # Unlisted functions are appended after the known ones.
                    self.order_dict[name] = len(self.sequence_functions)
                self.sequence_functions.append(_func)
        self.generate_sequence()

    def generate_sequence(self):
        """Generate a sequence from all memeber functions."""
        for func in self.sequence_functions:
            _name = func.__name__
            _order = self.order_dict[_name]
            self.add_sequence_function(func, _order)

    @property
    def name(self):
        """str: Name of the sequence."""
        return self.sequence_name

    @property
    def sequence_functions(self):
        """list: Sequence functions collected from this instance."""
        return self._sequence_functions

    @sequence_functions.setter
    def sequence_functions(self, functions):
        """Set the value of sequence functions to a list.

        Parameters
        ----------
        functions: list of methods
            List of methods that are to be assigned (a shallow copy is
            stored).
        """
        self._sequence_functions = functions[:]
class IOSequence(Sequence):
    """Class to represent a sequence of IO operations."""

    def __init__(self, sequence_name='IO_Sequence', order_dict=None,
                 root_dir=None, ignore_dirs=None, num_cores=None,
                 terminal_log=False):
        """Initialize the IO sequence class.

        Default execution sequence is:

        1. Walk through the root directory
        2. Group files by extension
        3. Map readers based on extension
        4. Read files
        5. Encrypt the files

        Parameters
        ----------
        sequence_name: str
            Name of the sequence.
        order_dict: dict, optional
            Dictionary of function names and their order in the sequence.
        root_dir: str, optional
            The root directory.
        ignore_dirs: list of str, optional
            List of directories to be ignored.
        num_cores: int, optional
            Number of cores to be used for parallel processing.
        terminal_log: bool, optional
            If True, messages will be logged to the terminal
        """
        # Higher number == executed earlier under the default
        # 'decreasing_order' policy: walk first, encrypt last.
        self.default_order_dict = {
            'seq_walk_directories': 4,
            'seq_group_files': 3,
            'seq_map_readers': 2,
            'seq_read_files': 1,
            'seq_encrypt_files': 0,
        }
        super(IOSequence, self).__init__(sequence_name,
                                         order_dict or self.default_order_dict,
                                         terminal_log=terminal_log)
        self.root_dir = root_dir or os.getcwd()
        self.ignore_dirs = ignore_dirs or []
        # The checkpoint metadata directory itself must never be snapshotted.
        self.ignore_dirs.append('.checkpoint')
        self.io = IO(self.root_dir, ignore_dirs=self.ignore_dirs)
        self.num_cores = num_cores or cpu_count()

    def seq_walk_directories(self):
        """Walk through all directories in the root directory.

        Returns
        -------
        dict
            Mapping of directory path to the list of file paths inside it.
        """
        directory2files = {}
        for root, file in self.io.walk_directory():
            if root in directory2files:
                directory2files[root].append(os.path.join(root, file))
            else:
                directory2files[root] = [os.path.join(root, file)]
        return directory2files

    def seq_group_files(self, directory2files):
        """Group files by their (lower-cased) file extension.

        Parameters
        ----------
        directory2files: dict
            Dictionary of directory names and their files.

        Returns
        -------
        dict
            Mapping of extension (without dot) to the list of file paths.
        """
        extensions_dict = {}
        for files in directory2files.items():
            for file in files[1]:
                base_file = os.path.basename(file)
                # Extension = text after the last dot of the basename.
                extension = base_file.split('.')[-1].lower()
                if extension not in extensions_dict:
                    extensions_dict[extension] = [file]
                else:
                    extensions_dict[extension].append(file)
        return extensions_dict

    def seq_map_readers(self, extensions_dict):
        """Map the extensions to their respective Readers.

        Parameters
        ----------
        extensions_dict: dict
            Dictionary of extensions and their files.

        Returns
        -------
        list
            ``[readers, extensions]`` dicts; extensions for which no usable
            reader could be found are dropped from both.
        """
        _readers = {}
        unavailabe_extensions = []
        for extension, _ in extensions_dict.items():
            _readers[extension] = get_reader_by_extension(extension)
            if not _readers[extension]:
                # No registered reader for this extension -- probe every
                # known reader against a scratch file and keep the last one
                # that reads it without raising.
                all_readers = get_all_readers()
                with InTemporaryDirectory() as temp_dir:
                    temp_file = os.path.join(temp_dir, f'temp.{extension}')
                    self.io.write(temp_file, 'w+', 'test content')
                    selected_reader = None
                    for reader in all_readers:
                        try:
                            _msg = f'Trying {reader.__name__} for extension {extension}'
                            self.logger.log(
                                _msg, colors=LogColors.BOLD, log_caller=True, log_type="INFO")
                            reader = reader()
                            reader.read(temp_file, validate=False)
                            selected_reader = reader
                        except Exception:
                            selected_reader = None
                            continue
                    if selected_reader:
                        _msg = f'{selected_reader.__class__.__name__} selected'
                        self.logger.log(
                            _msg, colors=LogColors.SUCCESS, timestamp=True, log_type="SUCCESS")
                        _readers[extension] = selected_reader
                    else:
                        unavailabe_extensions.append(extension)
                        del _readers[extension]
                        self.logger.log(
                            f'No reader found for extension {extension}, skipping',
                            colors=LogColors.ERROR, log_caller=True, log_type="ERROR")
        # Drop the unreadable extensions so later steps never see them.
        for extension in unavailabe_extensions:
            del extensions_dict[extension]
        return [_readers, extensions_dict]

    def seq_read_files(self, readers_extension):
        """Read the gathered files using their respective reader.

        Parameters
        ----------
        readers_extension: list
            Readers dict and extensions dict packed in a list.

        Returns
        -------
        list
            Per-extension results returned by the readers (one parallel
            job per extension).
        """
        readers_dict, extension_dict = readers_extension
        contents = \
            Parallel(self.num_cores)(delayed(readers_dict[ext].read)(files,
                                     validate=False) for (ext, files) in
                                     extension_dict.items())
        return contents

    def seq_encrypt_files(self, contents):
        """Encrypt the read files.

        Parameters
        ----------
        contents: list
            Per-extension content lists produced by ``seq_read_files``.

        Returns
        -------
        dict
            Dictionary of file paths and their encrypted content.
        """
        # TODO: Parallelize this
        path2content = {}
        crypt_obj = Crypt(key='crypt.key', key_path=os.path.join(
            self.root_dir, '.checkpoint'))
        for content in contents:
            for obj in content:
                # Each obj is a single-entry {path: content} mapping.
                # NOTE(review): Crypt.encrypt is handed the *path*, not the
                # content -- presumably it reads the file itself; confirm.
                path = list(obj.keys())[0]
                path2content[path] = crypt_obj.encrypt(path)
        return path2content
class CheckpointSequence(Sequence):
    """Sequence to perform checkpoint operations."""

    def __init__(self, sequence_name, order_dict, root_dir, ignore_dirs,
                 terminal_log=False):
        """Initialize the CheckpointSequence class.

        Parameters
        ----------
        sequence_name: str
            Name of the sequence (doubles as the checkpoint name).
        order_dict: dict
            Dictionary of function names and their order in the sequence.
        root_dir: str
            The root directory.
        ignore_dirs: list of str
            List of directories to be ignored.
        terminal_log: bool, optional
            If True, messages will be logged to the terminal
        """
        self.sequence_name = sequence_name
        self.order_dict = order_dict
        self.root_dir = root_dir
        self.ignore_dirs = ignore_dirs
        super(CheckpointSequence, self).__init__(sequence_name, order_dict,
                                                 terminal_log=terminal_log)

    def _validate_checkpoint(self):
        """Validate if a checkpoint is valid.

        Raises
        ------
        ValueError
            If no checkpoint directory exists for this sequence name.
        """
        checkpoint_path = os.path.join(self.root_dir, '.checkpoint', self.sequence_name)
        if not os.path.isdir(checkpoint_path):
            raise ValueError(f'Checkpoint {self.sequence_name} does not exist')

    def seq_init_checkpoint(self):
        """Initialize the checkpoint directory."""
        _io = IO(path=self.root_dir, mode="a",
                 ignore_dirs=self.ignore_dirs)
        path = _io.make_dir('.checkpoint')
        # Encryption key used by later create/restore operations.
        generate_key('crypt.key', path)
        checkpoint_config = {
            'current_checkpoint': None,
            'checkpoints': [],
            'ignore_dirs': self.ignore_dirs,
            'root_dir': self.root_dir,
        }
        config_path = os.path.join(self.root_dir, '.checkpoint', '.config')
        _io.write(config_path, 'w+', json.dumps(checkpoint_config))

    def seq_create_checkpoint(self):
        """Create a new checkpoint for the target directory."""
        checkpoint_path = os.path.join(self.root_dir, '.checkpoint', self.sequence_name)
        if os.path.isdir(checkpoint_path):
            raise ValueError(f'Checkpoint {self.sequence_name} already exists')
        _io = IO(path=self.root_dir, mode="a",
                 ignore_dirs=self.ignore_dirs)
        _io_sequence = IOSequence(root_dir=self.root_dir,
                                  ignore_dirs=self.ignore_dirs,
                                  terminal_log=self.terminal_log)
        # The last return value of the IO sequence is the mapping of file
        # paths to their encrypted contents.
        enc_files = _io_sequence.execute_sequence(pass_args=True)[-1]
        checkpoint_path = os.path.join(
            self.root_dir, '.checkpoint', self.sequence_name)
        checkpoint_path = _io.make_dir(checkpoint_path)
        checkpoint_file_path = os.path.join(
            checkpoint_path, f'{self.sequence_name}.json')
        config_path = os.path.join(self.root_dir, '.checkpoint', '.config')
        with open(checkpoint_file_path, 'w+') as checkpoint_file:
            json.dump(enc_files, checkpoint_file, indent=4)
        # Register the new checkpoint and mark it current in the config.
        with open(config_path, 'r') as config_file:
            checkpoint_config = json.load(config_file)
        checkpoint_config['checkpoints'].append(self.sequence_name)
        checkpoint_config['current_checkpoint'] = self.sequence_name
        with open(config_path, 'w+') as config_file:
            json.dump(checkpoint_config, config_file, indent=4)
        # Snapshot the directory layout alongside the encrypted contents.
        root2file = {}
        for root, file in _io.walk_directory():
            if root in root2file:
                root2file[root].append(os.path.join(root, file))
            else:
                root2file[root] = [os.path.join(root, file)]
        with open(os.path.join(checkpoint_path, '.metadata'), 'w+') as metadata_file:
            json.dump(root2file, metadata_file, indent=4)

    def seq_delete_checkpoint(self):
        """Delete the checkpoint for the target directory."""
        self._validate_checkpoint()
        _io = IO(path=self.root_dir, mode="a",
                 ignore_dirs=self.ignore_dirs)
        checkpoint_path = os.path.join(
            self.root_dir, '.checkpoint', self.sequence_name)
        config_path = os.path.join(self.root_dir, '.checkpoint', '.config')
        with open(config_path, 'r') as config_file:
            checkpoint_config = json.load(config_file)
        checkpoint_config['checkpoints'].remove(self.sequence_name)
        # The most recently registered remaining checkpoint (if any)
        # becomes the new current one.
        if len(checkpoint_config['checkpoints']):
            _new_current_checkpoint = checkpoint_config['checkpoints'][-1]
        else:
            _new_current_checkpoint = None
        checkpoint_config['current_checkpoint'] = _new_current_checkpoint
        with open(config_path, 'w+') as config_file:
            json.dump(checkpoint_config, config_file, indent=4)
        _io.delete_dir(checkpoint_path)

    def seq_restore_checkpoint(self):
        """Restore back to a specific checkpoint."""
        self._validate_checkpoint()
        _io = IO(path=self.root_dir, mode="a",
                 ignore_dirs=self.ignore_dirs)
        _key = os.path.join(self.root_dir, '.checkpoint')
        crypt = Crypt(key='crypt.key', key_path=_key)
        checkpoint_path = os.path.join(self.root_dir, '.checkpoint',
                                       self.sequence_name, f'{self.sequence_name}.json')
        config_path = os.path.join(self.root_dir, '.checkpoint', '.config')
        with open(checkpoint_path, 'r') as checkpoint_file:
            checkpoint_dict = json.load(checkpoint_file)
        with open(config_path, 'r') as config_file:
            checkpoint_config = json.load(config_file)
        checkpoint_config['current_checkpoint'] = self.sequence_name
        with open(config_path, 'w+') as config_file:
            json.dump(checkpoint_config, config_file, indent=4)
        # Decrypt every stored file and write it back in place.
        for file, content in checkpoint_dict.items():
            content = crypt.decrypt(content)
            _io.write(file, 'wb+', content)

    def seq_version(self):
        """Print the version of the sequence."""
        _msg = f'Running version {version}'
        self.logger.log(_msg, timestamp=True, log_type="INFO")
class CLISequence(Sequence):
    """Sequence specialised for the command-line environment.

    Runs three steps: parse the CLI arguments, map them to a checkpoint
    action name, then execute that action through a CheckpointSequence.
    """

    def __init__(self, sequence_name='CLI_Sequence', order_dict=None,
                 arg_parser=None, args=None, terminal_log=False):
        """Initialize the CLISequence.

        Parameters
        ----------
        sequence_name: str
            Name of the sequence.
        order_dict: dict
            Execution order of the sequence functions; defaults to
            parse -> determine -> perform.
        arg_parser: ArgumentParser
            Argument parser for the CLI.
        args: list
            Pre-supplied argument list (otherwise sys.argv is used).
        terminal_log: bool
            Whether to also log to the terminal.
        """
        self.default_order_dict = {
            'seq_parse_args': 2,
            'seq_determine_action': 1,
            'seq_perform_action': 0,
        }
        self.args = args
        self.arg_parser = arg_parser
        super().__init__(sequence_name=sequence_name,
                         order_dict=order_dict or self.default_order_dict,
                         terminal_log=terminal_log)

    def seq_parse_args(self):
        """Parse the CLI arguments (explicit list when one was supplied)."""
        if self.args is None:
            return self.arg_parser.parse_args()
        return self.arg_parser.parse_args(self.args)

    def seq_determine_action(self, args):
        """Map the parsed 'action' argument to a sequence method name.

        Parameters
        ----------
        args: Namespace
            Parsed arguments from the CLI.
        """
        action_by_name = {
            'create': 'seq_create_checkpoint',
            'restore': 'seq_restore_checkpoint',
            'delete': 'seq_delete_checkpoint',
            'init': 'seq_init_checkpoint',
            'version': 'seq_version',
        }
        action = action_by_name.get(args.action)
        if action is None:
            raise ValueError('Invalid action.')
        return [action, args]

    def seq_perform_action(self, action_args):
        """Execute the chosen action through a fresh CheckpointSequence.

        Parameters
        ----------
        action_args: list
            Two items: the action method name and the parsed Namespace.
        """
        action, args = action_args
        name = args.name
        path = args.path
        ignore_dirs = args.ignore_dirs or []
        # 'init' and 'version' work without a target checkpoint.
        helper_actions = ('seq_init_checkpoint', 'seq_version')
        if not (name and path) and action not in helper_actions:
            raise ValueError(f'{args.action} requires a valid name and a path')
        checkpoint_sequence = CheckpointSequence(
            name, {action: 0}, path, ignore_dirs,
            terminal_log=self.terminal_log)
        getattr(checkpoint_sequence, action)()
| 76 | 0 | 52 |
f070a3fbb33a80cabd688d5c6bc9b192e7c3cc5b | 1,168 | py | Python | Gathered CTF writeups/ctf-7867/2020/cyber_security_rumble/hashfun/generate.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:00:41.000Z | 2022-03-27T06:00:41.000Z | Gathered CTF writeups/ctf-7867/2020/cyber_security_rumble/hashfun/generate.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | null | null | null | Gathered CTF writeups/ctf-7867/2020/cyber_security_rumble/hashfun/generate.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:01:42.000Z | 2022-03-27T06:01:42.000Z | # from secret import FLAG
FLAG = "CSR{fuckingIdiotShitStupid}"
c = hashfun(FLAG)
print(c)
print(len(FLAG))
print(len(c))
m = revhash(c)
print(m)
c = [10, 30, 31, 62, 27, 9, 4, 0, 1, 1, 4, 4, 7, 13, 8, 12, 21, 28, 12, 6, 60]
m = revhash(c)
print(m)
| 20.491228 | 78 | 0.52226 | # from secret import FLAG
FLAG = "CSR{fuckingIdiotShitStupid}"
def hashfun(msg):
    """Digest *msg* as the XOR of each character with the one 4 positions ahead."""
    return [ord(left) ^ ord(right) for left, right in zip(msg, msg[4:])]
def revhash(cipher):
    """Invert hashfun(): recover the plaintext from its XOR-pair digest.

    Each digest byte is plaintext[i] ^ plaintext[i + 4], so knowing the
    first four characters ("CSR{", the flag prefix) determines every later
    character uniquely: plaintext[i] = cipher[i - 4] ^ ord(plaintext[i - 4]).

    Parameters
    ----------
    cipher : list of int
        Digest produced by hashfun().

    Returns
    -------
    str
        The recovered message (length len(cipher) + 4); reconstruction
        stops early if a computed character falls outside printable ASCII.
    """
    # Bug fix: the length must come from the *parameter*, not from the
    # module-level global `c` that the original implementation read.
    msg_len = len(cipher) + 4
    msg = list("CSR{") + ["."] * (msg_len - 4)
    for i in range(4, msg_len):
        # XOR is its own inverse, so the character is determined directly --
        # no need to brute-force the printable range as the original did.
        guess = cipher[i - 4] ^ ord(msg[i - 4])
        if not ord(' ') <= guess <= ord('~'):
            # Outside printable ASCII: digest inconsistent with the prefix.
            print("Couldn't find char")
            break
        msg[i] = chr(guess)
    return "".join(msg)
# Sanity check: digest the known flag, then invert the digest back.
c = hashfun(FLAG)
print(c)
print(len(FLAG))
print(len(c))
m = revhash(c)
print(m)
# The challenge digest; recover its plaintext the same way.
c = [10, 30, 31, 62, 27, 9, 4, 0, 1, 1, 4, 4, 7, 13, 8, 12, 21, 28, 12, 6, 60]
m = revhash(c)
print(m)
| 867 | 0 | 46 |
453f691706e832e723117b6a568b51a1c72e861c | 164 | py | Python | 2019/01/part1.py | mihaip/adventofcode | 3725668595bfcf619fe6c97d12e2f14b42e3f0cb | [
"Apache-2.0"
] | null | null | null | 2019/01/part1.py | mihaip/adventofcode | 3725668595bfcf619fe6c97d12e2f14b42e3f0cb | [
"Apache-2.0"
] | null | null | null | 2019/01/part1.py | mihaip/adventofcode | 3725668595bfcf619fe6c97d12e2f14b42e3f0cb | [
"Apache-2.0"
] | null | null | null | #!/usr/local/bin/python3
# Advent of Code 2019 day 1, part 1: each module needs floor(mass / 3) - 2
# units of fuel; sum that requirement over every mass listed in input.txt.
total_fuel = 0
with open("input.txt") as f:
    for line in f.readlines():
        total_fuel += int(int(line) / 3) - 2
print(total_fuel)
| 20.5 | 44 | 0.634146 | #!/usr/local/bin/python3
# Fuel requirement: floor(mass / 3) - 2, summed over every module mass.
with open("input.txt") as mass_file:
    total_fuel = sum(int(int(mass) / 3) - 2 for mass in mass_file)
print(total_fuel)
| 0 | 0 | 0 |
915393967b0cdd20d663859048d87ba6b77ee1b8 | 3,134 | py | Python | models/BugReport.py | SibylLab/Task-based-Bug-Report-Summerizer | 2acd5d9a3d3ed6b009ab322dd5c73e39786fdfc5 | [
"MIT"
] | null | null | null | models/BugReport.py | SibylLab/Task-based-Bug-Report-Summerizer | 2acd5d9a3d3ed6b009ab322dd5c73e39786fdfc5 | [
"MIT"
] | null | null | null | models/BugReport.py | SibylLab/Task-based-Bug-Report-Summerizer | 2acd5d9a3d3ed6b009ab322dd5c73e39786fdfc5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
__author__ = "Akalanka Galappaththi"
__email__ = "a.galappaththi@uleth.ca"
__copyright__ = "Copyright 2020, The Bug Report Summarization Project @ Sybil-Lab"
__license__ = "MIT"
__maintainer__ = "Akalanka Galappaththi"
from models.Turn import Turn
class BugReport:
    """Represents a bug report and its conversation (list of turns).

    Parameters
    ----------
    title : str
        Title of the bug report (One sentence summary)
    bug_id : int
        bug_id is a unique identifier
    product : str
        Software product name
    list_of_turns : list of Turn
        The report's comments (turns), in insertion order
    topics : list of str
        Topic words attached to the report
    """

    def __init__(self, title, bug_id, product):
        """Create a bug report with no turns and no topics.

        Parameters
        ----------
        title : str
            Title of the bug report
        bug_id : int
            Unique identifier of the bug
        product : str
            Software product name
        """
        # Fix: the class documented these attributes but never initialized
        # them, so every accessor raised AttributeError on a fresh instance.
        self.title = title
        self.bug_id = bug_id
        self.product = product
        self.list_of_turns = list()
        self.topics = list()

    def add_topics(self, topics):
        """Add topic list to the bug report

        Parameters
        ----------
        topics : list
            List of topic words
        """
        self.topics.extend(topics)

    def add_a_turn(self, turn):
        """Add a comment to the list

        Parameters
        ----------
        turn : object
            Turn object
        """
        self.list_of_turns.append(turn)

    def number_of_turns(self):
        """Return the number of turns in the bug report

        Returns
        -------
        len : int
            Length of the list
        """
        return len(self.list_of_turns)

    def get_turns(self):
        """Returns a list of turns

        Returns
        -------
        list_of_turns : list
            List of turns
        """
        return self.list_of_turns

    def get_a_turn(self, turn_id):
        """Return a turn with a matching ID (None when no turn matches)

        Parameters
        ----------
        turn_id : int
            Turn ID

        Returns
        -------
        t : object
            Turn object
        """
        for t in self.list_of_turns:
            if t.get_id() == turn_id:
                return t

    def get_title(self):
        """Get title

        Returns
        -------
        title : str
            Title of the bug report
        """
        return self.title

    def set_title(self, title):
        """Set title

        Parameters
        ----------
        title : str
            Title of the bug report
        """
        self.title = title

    def set_bug_id(self, bug_id):
        """Set bug ID

        Parameters
        ----------
        bug_id : int
            Bug ID
        """
        self.bug_id = bug_id

    def get_bug_id(self):
        """Get bug ID

        Returns
        -------
        bug_id : int
            Bug ID
        """
        return self.bug_id

    def get_product(self):
        """Get product name

        Returns
        -------
        product : str
            Product name
        """
        return self.product

    def set_product(self, product):
        """Set product name

        Parameters
        ----------
        product : str
            Product name
        """
        self.product = product
| 21.175676 | 82 | 0.49298 | #!/usr/bin/env python
__author__ = "Akalanka Galappaththi"
__email__ = "a.galappaththi@uleth.ca"
__copyright__ = "Copyright 2020, The Bug Report Summarization Project @ Sybil-Lab"
__license__ = "MIT"
__maintainer__ = "Akalanka Galappaththi"
from models.Turn import Turn
class BugReport:
    """A bug report: its title, numeric id, product, and comment turns."""

    def __init__(self, title, id, product):
        """Store the title, bug id and product; start with no turns/topics."""
        self.title = title
        self.bug_id = id
        self.product = product
        self.list_of_turns = list()
        self.topics = list()

    def add_topics(self, topics):
        """Extend the report's topic-word list with *topics*."""
        self.topics.extend(topics)

    def add_a_turn(self, turn):
        """Append a Turn object to the report's conversation."""
        self.list_of_turns.append(turn)

    def number_of_turns(self):
        """Return how many turns are attached to the report."""
        return len(self.list_of_turns)

    def get_turns(self):
        """Return all turns, in insertion order."""
        return self.list_of_turns

    def get_a_turn(self, turn_id):
        """Return the first turn whose get_id() equals *turn_id*, else None."""
        return next(
            (turn for turn in self.list_of_turns if turn.get_id() == turn_id),
            None)

    def get_title(self):
        """Return the one-sentence title of the report."""
        return self.title

    def set_title(self, title):
        """Replace the report's title."""
        self.title = title

    def set_bug_id(self, bug_id):
        """Replace the report's unique bug id."""
        self.bug_id = bug_id

    def get_bug_id(self):
        """Return the report's unique bug id."""
        return self.bug_id

    def get_product(self):
        """Return the software product name."""
        return self.product

    def set_product(self, product):
        """Replace the software product name."""
        self.product = product
| 166 | 0 | 27 |
5c4e6bb88bbc889fc1681cf32a3207b80306f5fe | 1,540 | py | Python | app_tools/tool_manager.py | planktontoolbox/plankton-toolbox | 626930120329983fb9419a9aed94712148bac219 | [
"MIT"
] | 5 | 2016-12-02T08:24:35.000Z | 2021-02-24T08:41:41.000Z | app_tools/tool_manager.py | planktontoolbox/plankton-toolbox | 626930120329983fb9419a9aed94712148bac219 | [
"MIT"
] | 53 | 2016-11-14T13:11:41.000Z | 2022-01-13T09:28:11.000Z | app_tools/tool_manager.py | planktontoolbox/plankton-toolbox | 626930120329983fb9419a9aed94712148bac219 | [
"MIT"
] | 1 | 2020-11-27T01:20:10.000Z | 2020-11-27T01:20:10.000Z | #!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: http://plankton-toolbox.org
# Copyright (c) 2010-2018 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import app_tools
import toolbox_utils
@toolbox_utils.singleton
class ToolManager(object):
    """
    The tool manager is used to set up available tools.

    Holds the parent widget handed to each tool and the list of created
    tool instances; tools are looked up by their objectName()
    (presumably the Qt object name -- confirm against the tool classes).
    """
    def __init__(self):
        """Create an empty manager with no parent widget."""
        self._parent = None
        self._toollist = []  # List of tools derived from ToolsBase.

    def set_parent(self, parentwidget):
        """Store the widget used as parent when tools are created."""
        self._parent = parentwidget

    def init_tools(self):
        """ Tool activator. """
        # Instantiate the standard tools with the stored parent widget.
        self._toollist.append(app_tools.DatasetViewerTool('Dataset viewer', self._parent))
        self._toollist.append(app_tools.GraphPlotterTool('Graph plotter', self._parent))
        self._toollist.append(app_tools.LogTool('Toolbox logging', self._parent))

    def get_tool_by_name(self, object_name):
        """ Returns the tool. """
        for tool in self._toollist:
            if tool.objectName() == object_name:
                return tool
        # No registered tool carries that name.
        return None

    def show_tool_by_name(self, object_name):
        """ Makes a tool visible. """
        for tool in self._toollist:
            if tool.objectName() == object_name:
                tool.show_tool()
                return

    def get_tool_list(self):
        """ Return all registered tools, in creation order. """
        return self._toollist
| 32.083333 | 91 | 0.601299 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: http://plankton-toolbox.org
# Copyright (c) 2010-2018 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import app_tools
import toolbox_utils
@toolbox_utils.singleton
class ToolManager(object):
    """Registers the application's tool windows and looks them up by name."""

    def __init__(self):
        """Start with no parent widget and an empty tool list."""
        self._parent = None
        self._toollist = []  # Tools derived from ToolsBase.

    def set_parent(self, parentwidget):
        """Remember the widget used as parent when tools are created."""
        self._parent = parentwidget

    def init_tools(self):
        """Instantiate the standard set of tools."""
        tool_specs = [
            (app_tools.DatasetViewerTool, 'Dataset viewer'),
            (app_tools.GraphPlotterTool, 'Graph plotter'),
            (app_tools.LogTool, 'Toolbox logging'),
        ]
        for tool_class, tool_title in tool_specs:
            self._toollist.append(tool_class(tool_title, self._parent))

    def get_tool_by_name(self, object_name):
        """Return the tool whose objectName() matches, or None."""
        return next(
            (tool for tool in self._toollist
             if tool.objectName() == object_name),
            None)

    def show_tool_by_name(self, object_name):
        """Make the named tool visible, if it is registered."""
        tool = self.get_tool_by_name(object_name)
        if tool is not None:
            tool.show_tool()

    def get_tool_list(self):
        """Return all registered tools."""
        return self._toollist
| 0 | 0 | 0 |
127f168e8ac3a233174a2e259c95e146ff00c3c9 | 4,724 | py | Python | sdk/containerregistry/azure-containerregistry/azure/containerregistry/_generated/_container_registry.py | romahamu/azure-sdk-for-python | a57c9f73b9121f79d317e1679b81fd460d6a25b8 | [
"MIT"
] | 1 | 2021-04-05T17:38:42.000Z | 2021-04-05T17:38:42.000Z | sdk/containerregistry/azure-containerregistry/azure/containerregistry/_generated/_container_registry.py | romahamu/azure-sdk-for-python | a57c9f73b9121f79d317e1679b81fd460d6a25b8 | [
"MIT"
] | null | null | null | sdk/containerregistry/azure-containerregistry/azure/containerregistry/_generated/_container_registry.py | romahamu/azure-sdk-for-python | a57c9f73b9121f79d317e1679b81fd460d6a25b8 | [
"MIT"
] | 1 | 2021-12-18T20:01:22.000Z | 2021-12-18T20:01:22.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import ContainerRegistryConfiguration
from .operations import ContainerRegistryOperations
from .operations import ContainerRegistryRepositoryOperations
from .operations import ContainerRegistryBlobOperations
from .operations import RefreshTokensOperations
from .operations import AccessTokensOperations
from . import models
class ContainerRegistry(object):
    """Metadata API definition for the Azure Container Registry runtime.

    :ivar container_registry: ContainerRegistryOperations operations
    :vartype container_registry: azure.containerregistry.operations.ContainerRegistryOperations
    :ivar container_registry_repository: ContainerRegistryRepositoryOperations operations
    :vartype container_registry_repository: azure.containerregistry.operations.ContainerRegistryRepositoryOperations
    :ivar container_registry_blob: ContainerRegistryBlobOperations operations
    :vartype container_registry_blob: azure.containerregistry.operations.ContainerRegistryBlobOperations
    :ivar refresh_tokens: RefreshTokensOperations operations
    :vartype refresh_tokens: azure.containerregistry.operations.RefreshTokensOperations
    :ivar access_tokens: AccessTokensOperations operations
    :vartype access_tokens: azure.containerregistry.operations.AccessTokensOperations
    :param url: Registry login URL.
    :type url: str
    """

    # NOTE(review): AutoRest-generated client (see the file header);
    # manual edits will be lost when the client is regenerated. This chunk
    # lacks __init__, which presumably sets _config/_client/_serialize.

    def _send_request(self, http_request, **kwargs):
        # type: (HttpRequest, Any) -> HttpResponse
        """Runs the network request through the client's chained policies.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.HttpResponse
        """
        # Fill the '{url}' placeholder in the request URL with the
        # configured registry login URL.
        path_format_arguments = {
            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
        }
        http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
        # Stream the response body by default; callers may pass stream=False.
        stream = kwargs.pop("stream", True)
        pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response
| 46.313725 | 116 | 0.71613 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import ContainerRegistryConfiguration
from .operations import ContainerRegistryOperations
from .operations import ContainerRegistryRepositoryOperations
from .operations import ContainerRegistryBlobOperations
from .operations import RefreshTokensOperations
from .operations import AccessTokensOperations
from . import models
class ContainerRegistry(object):
    """Metadata API definition for the Azure Container Registry runtime.

    :ivar container_registry: ContainerRegistryOperations operations
    :vartype container_registry: azure.containerregistry.operations.ContainerRegistryOperations
    :ivar container_registry_repository: ContainerRegistryRepositoryOperations operations
    :vartype container_registry_repository: azure.containerregistry.operations.ContainerRegistryRepositoryOperations
    :ivar container_registry_blob: ContainerRegistryBlobOperations operations
    :vartype container_registry_blob: azure.containerregistry.operations.ContainerRegistryBlobOperations
    :ivar refresh_tokens: RefreshTokensOperations operations
    :vartype refresh_tokens: azure.containerregistry.operations.RefreshTokensOperations
    :ivar access_tokens: AccessTokensOperations operations
    :vartype access_tokens: azure.containerregistry.operations.AccessTokensOperations
    :param url: Registry login URL.
    :type url: str
    """

    # NOTE(review): AutoRest-generated client (see the file header);
    # manual edits will be lost when the client is regenerated.

    def __init__(
        self,
        url,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # The base URL is a template; the '{url}' placeholder is filled
        # with the configured registry login URL in _send_request().
        base_url = '{url}'
        self._config = ContainerRegistryConfiguration(url, **kwargs)
        self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Register every generated model class for (de)serialization.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # All operation groups share the one pipeline client and serializers.
        self.container_registry = ContainerRegistryOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.container_registry_repository = ContainerRegistryRepositoryOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.container_registry_blob = ContainerRegistryBlobOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.refresh_tokens = RefreshTokensOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.access_tokens = AccessTokensOperations(
            self._client, self._config, self._serialize, self._deserialize)

    def _send_request(self, http_request, **kwargs):
        # type: (HttpRequest, Any) -> HttpResponse
        """Runs the network request through the client's chained policies.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.HttpResponse
        """
        path_format_arguments = {
            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
        }
        http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
        # Stream the response body by default; callers may pass stream=False.
        stream = kwargs.pop("stream", True)
        pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response

    def close(self):
        # type: () -> None
        """Close the underlying pipeline client and its transport."""
        self._client.close()

    def __enter__(self):
        # type: () -> ContainerRegistry
        """Enter the underlying client's context and return self."""
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details):
        # type: (Any) -> None
        """Delegate context exit (and cleanup) to the underlying client."""
        self._client.__exit__(*exc_details)
| 1,474 | 0 | 108 |
d201337848c0e5c871726fae3c443daff8fb932e | 1,062 | py | Python | motor/motor.py | donaldong/RPIdrone | 5e33597b1a8cfa6d4417dae14cd5a6bf2b9b8ec3 | [
"MIT"
] | 6 | 2018-11-11T09:11:06.000Z | 2021-02-07T19:46:57.000Z | motor/motor.py | donaldong/RPIdrone | 5e33597b1a8cfa6d4417dae14cd5a6bf2b9b8ec3 | [
"MIT"
] | null | null | null | motor/motor.py | donaldong/RPIdrone | 5e33597b1a8cfa6d4417dae14cd5a6bf2b9b8ec3 | [
"MIT"
] | 4 | 2019-04-04T13:23:10.000Z | 2020-04-09T05:57:37.000Z | from RPIO import PWM
| 27.947368 | 86 | 0.619586 | from RPIO import PWM
class Motor:
    """PWM-driven motor wrapper around one RPIO servo channel."""

    V = 11.1    # supply voltage -- presumably the battery voltage; confirm
    KV = 920    # motor velocity constant (rpm per volt) -- confirm
    __dma_channel = 0
    __subcycle_time_us = 20000  # PWM period in microseconds
    __pulse_incr_us = 10        # pulse-width granularity in microseconds

    def __init__(self, gpio, name):
        """Create a servo on *gpio* and start with a zero pulse width."""
        self.__gpio = gpio
        self.name = name
        self.__servo = PWM.Servo(
            self.__dma_channel,
            self.__subcycle_time_us,
            self.__pulse_incr_us)
        self.__width = 0
        self.set_pulse_width(0)

    def __str__(self):
        """Render name, current pulse width, and the implied rpm as a dict."""
        rpm = float(self.__width) / self.__subcycle_time_us * self.KV * self.V
        return str({"name": self.name,
                    "pulse_width": self.__width,
                    "rpm": rpm})

    def set_pulse_width(self, us):
        """Clamp *us* into the subcycle, snap to the increment, and apply."""
        clamped = min(self.__subcycle_time_us - self.__pulse_incr_us, us)
        self.__width = clamped - clamped % self.__pulse_incr_us
        self.__servo.set_servo(self.__gpio, self.__width)

    def set_percentage(self, percent):
        """Set the pulse width as a fraction of the subcycle time."""
        self.set_pulse_width(percent * self.__subcycle_time_us)

    def stop(self):
        """Stop servo output on this motor's GPIO pin."""
        self.__servo.stop_servo(self.__gpio)
| 783 | 234 | 23 |
5b4245a2b2b3e0e6cfc23ca588cf782c0dadae2d | 2,254 | py | Python | day14.py | davidfpc/AoC2021 | b526e606dbf1cc59de4951a321aa9b98d04fde4c | [
"MIT"
] | null | null | null | day14.py | davidfpc/AoC2021 | b526e606dbf1cc59de4951a321aa9b98d04fde4c | [
"MIT"
] | null | null | null | day14.py | davidfpc/AoC2021 | b526e606dbf1cc59de4951a321aa9b98d04fde4c | [
"MIT"
] | null | null | null | # program to compute the time
# of execution of any python code
import time
from collections import Counter
if __name__ == "__main__":
    # Solve both parts against the real puzzle input and time the run.
    puzzle_input = read_input("day14.txt")
    start = time.time()
    print(f"Part 1: {part1(puzzle_input)}")
    print(f"Part 2: {part2(puzzle_input)}")
    end = time.time()
    print(f"Took {round(end - start, 5)} to process the puzzle")
| 30.459459 | 68 | 0.61535 | # program to compute the time
# of execution of any python code
import time
from collections import Counter
def read_input(file_name: str) -> (str, {str: str}):
    """Parse the puzzle file: line 1 is the polymer template, lines 3+
    map element pairs to the element inserted between them."""
    with open("inputFiles/" + file_name, "r") as puzzle_file:
        lines = puzzle_file.read().splitlines()
    rules = dict(line.split(" -> ") for line in lines[2:])
    return lines[0], rules
def part1(input_value: (str, {str: str})) -> int:
    """Apply 10 insertion steps literally; return max minus min letter count."""
    template, rules = input_value
    for _ in range(10):
        grown = []
        for left, right in zip(template, template[1:]):
            grown.append(left)
            grown.append(rules[left + right])
        # The final character never takes part in an insertion.
        grown.append(template[-1])
        template = "".join(grown)
    counts = Counter(template).values()
    return max(counts) - min(counts)
def add_to_dict(dictionary, key, value):
    """Accumulate *value* into dictionary[key], creating the entry if absent."""
    dictionary[key] = dictionary[key] + value if key in dictionary else value
def part2(input_value: (str, {str: str})) -> int:
    """Forty insertion steps tracked as pair counts (the string would be huge)."""
    template, rules = input_value
    # Count adjacent pairs instead of materialising the polymer.
    pair_count = Counter(a + b for a, b in zip(template, template[1:]))
    for _ in range(40):
        stepped = Counter()
        for pair, count in pair_count.items():
            inserted = rules[pair]
            # Pair 'XY' becomes 'X i Y': two new pairs, same multiplicity.
            stepped[pair[0] + inserted] += count
            stepped[inserted + pair[1]] += count
        pair_count = stepped
    # Every letter is the first element of exactly one pair, except the
    # template's last letter, which never changes and is counted once.
    letter_count = Counter()
    for pair, count in pair_count.items():
        letter_count[pair[0]] += count
    letter_count[template[-1]] += 1
    return max(letter_count.values()) - min(letter_count.values())
if __name__ == "__main__":
    # Solve both parts against the real puzzle input and time the run.
    puzzle_input = read_input("day14.txt")
    start = time.time()
    print(f"Part 1: {part1(puzzle_input)}")
    print(f"Part 2: {part2(puzzle_input)}")
    end = time.time()
    print(f"Took {round(end - start, 5)} to process the puzzle")
0768781c51dc064f20678bf1f84eed7a31493726 | 3,259 | py | Python | models.py | aldragan0/voice-recognition | 452bf9b9d1e2ad2a8e1c68ef960a82d6171708f0 | [
"MIT"
] | 12 | 2019-07-29T08:18:59.000Z | 2021-09-17T09:34:25.000Z | models.py | aldragan0/voice-recognition | 452bf9b9d1e2ad2a8e1c68ef960a82d6171708f0 | [
"MIT"
] | null | null | null | models.py | aldragan0/voice-recognition | 452bf9b9d1e2ad2a8e1c68ef960a82d6171708f0 | [
"MIT"
] | 4 | 2019-08-02T17:51:52.000Z | 2022-03-12T17:59:59.000Z | import numpy as np
from sklearn.model_selection import train_test_split
from file_io import features_from_file, labels_from_file, add_history
from utils import labels_to_categorical, transpose_vector, get_count, get_mean_stddev, normalize_data
BATCH_SIZE = 128
| 35.423913 | 106 | 0.684873 | import numpy as np
from sklearn.model_selection import train_test_split
from file_io import features_from_file, labels_from_file, add_history
from utils import labels_to_categorical, transpose_vector, get_count, get_mean_stddev, normalize_data
BATCH_SIZE = 128
def train_deepnn(model_file, inputs, outputs, model, num_epochs):
    """Train a Keras-style *model* on (inputs, outputs) for *num_epochs*.

    Splits the data 80/20 (fixed random_state=36), normalizes both splits
    with the training set's per-feature mean/stddev, saves those statistics
    as .npy files next to *model_file* so inference can normalize the same
    way, and logs per-epoch train/validation metrics via add_history().
    Returns the trained model.
    """
    x_train, x_valid, y_train, y_valid = train_test_split(inputs, outputs, test_size=0.2, random_state=36)
    means, std_dev = get_mean_stddev(x_train)
    # Statistics file name: directory of model_file, plus the third
    # '_'-separated token of the path and the feature count.
    # NOTE(review): assumes model_file contains at least two underscores,
    # and that inputs are 3-D with shape[2] as the feature axis -- confirm.
    filepath = '/'.join(model_file.split("/")[:-1])
    filename = model_file.split("_")[2] + "_" + str(x_train.shape[2])
    np.save(filepath + "/means_" + filename + ".npy", means)
    np.save(filepath + "/stddev_" + filename + ".npy", std_dev)
    x_train = normalize_data(x_train, means, std_dev)
    x_valid = normalize_data(x_valid, means, std_dev)
    y_train = labels_to_categorical(y_train)
    y_valid = labels_to_categorical(y_valid)
    for epoch in range(num_epochs):
        # One epoch per fit() call so metrics can be logged after each epoch.
        history_train = model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=1, verbose=0)
        history_valid = model.evaluate(x_valid, y_valid, verbose=0, batch_size=BATCH_SIZE)
        key_list = list(history_train.history.keys())
        score_train = history_train.history["loss"][0]
        acc_train = history_train.history["acc"][0]
        print()
        print("Epoch {}/{}".format(epoch + 1, num_epochs))
        print(" - loss: {:.4f} - acc: {:.4f}".format(score_train, acc_train))
        print()
        print("logloss score: %.4f" % history_valid[0])
        print("Validation set Accuracy: %.4f" % history_valid[1])
        add_history(model_file, history_train.history, history_valid, key_list)
    return model
def train_model(dataset, model_type, train_fn, model_file, **kwargs):
    """Load a dataset from disk, train one model on it, and save it.

    Parameters
    ----------
    dataset : str
        Directory containing '<file_prefix>_in' / '<file_prefix>_out' files.
    model_type : callable
        Factory taking the number of classes and returning an untrained model.
    train_fn : callable
        Training routine with signature like
        train_fn(model_file, inputs, outputs, model, num_epochs=...).
    model_file : str
        Path (without extension) where the trained model is saved
        (as '<model_file>.model').
    **kwargs
        Must supply 'num_epochs', 'num_features' and 'file_prefix'.
    """
    num_epochs = kwargs['num_epochs']
    num_features = kwargs['num_features']
    file_prefix = kwargs['file_prefix']
    print("Loading dataset...")
    input_train_file = dataset + "/" + file_prefix + "_in"
    output_train_file = dataset + "/" + file_prefix + "_out"
    inputs = features_from_file(input_train_file, num_features)
    inputs = transpose_vector(inputs)
    outputs = labels_from_file(output_train_file)
    # Presumably per-label counts (see utils.get_count); its length is
    # used as the number of output classes below.
    label_count = get_count(outputs)
    print(label_count)
    print("Finished loading dataset")
    print(inputs.shape)
    model = model_type(len(label_count))
    print("Training model..")
    model = train_fn(model_file, inputs, outputs, model, num_epochs=num_epochs)
    print("Done training model...")
    model.save(model_file + ".model")
def train_multi_epoch(dataset, filepath, model, train_fn, num_epoch_start, num_epoch_end=0,
                      delta_epochs=10, **kwargs):
    """Train the same model type several times with increasing epoch counts.

    Trains at num_epoch_start, num_epoch_start + delta_epochs, ... up to
    max(num_epoch_start, num_epoch_end) inclusive, saving each run as
    '<filepath>/<model_name>_<epochs>'.

    NOTE(review): the model name is taken from the second path component
    of *filepath* (filepath.split('/')[1]) -- assumes a 'dir/name' layout.
    """
    # '+ delta_epochs' makes the exclusive range() end include the final step.
    num_epoch_end = max(num_epoch_start, num_epoch_end) + delta_epochs
    print("Training on: " + dataset)
    print("Output file: " + filepath)
    model_name = filepath.split('/')[1]
    # Normalize separators so the model_file concatenation is well-formed.
    if model_name[-1] != '_':
        model_name += '_'
    if filepath[-1] != '/':
        filepath += '/'
    for epochs in range(num_epoch_start, num_epoch_end, delta_epochs):
        model_file = filepath + model_name + str(epochs)
        print("Model file: ", model_file)
        train_model(dataset, model, train_fn, model_file, **kwargs, num_epochs=epochs)
| 2,923 | 0 | 69 |
84efcf3645933b64b01ee37feb768493c8936a16 | 6,927 | py | Python | CalibrationHelpers.py | omarubilla/3D | 87820b3db4ee9789fd22b89cd4a0d61868eed02f | [
"BSD-3-Clause"
] | null | null | null | CalibrationHelpers.py | omarubilla/3D | 87820b3db4ee9789fd22b89cd4a0d61868eed02f | [
"BSD-3-Clause"
] | null | null | null | CalibrationHelpers.py | omarubilla/3D | 87820b3db4ee9789fd22b89cd4a0d61868eed02f | [
"BSD-3-Clause"
] | null | null | null | import cv2
import numpy as np
import glob
# This function records images from the connected camera to specified directory
# when the "Space" key is pressed.
# directory: should be a string corresponding to the name of an existing
# directory
print("Hello!")
folder = 'calibration_data' ####### THE FOLDER YOU CREATED GOES HERE!
print(folder)
# This function calls OpenCV's camera calibration on the directory of images
# created above.
# Returns the following values
# intrinsics: the current camera intrinsic calibration matrix
# distortion: the current distortion coefficients
# roi: the region of the image with full data
# new_intrinsics: the intrinsic calibration matrix of an image after
# undistortion and roi cropping
# This function will save the calibration data to a file in the specified
# directory
# This function will load the calibration data from a file in the specified
# directory
| 47.445205 | 80 | 0.656561 | import cv2
import numpy as np
import glob
# This function records images from the connected camera to specified directory
# when the "Space" key is pressed.
# directory: should be a string corresponding to the name of an existing
# directory
print("Hello!")
folder = 'calibration_data' ####### THE FOLDER YOU CREATED GOES HERE!
print(folder)
def CaptureImages(folder):
    """Stream webcam frames; Space saves a calibration image into *folder*,
    Esc or 'q' quits. Saved files are named calib_image_<n>.png."""
    print("inside capture images")
    # Open the camera for capture
    # the 0 value should default to the webcam, but you may need to change this
    # for your camera, especially if you are using a camera besides the default
    cam = cv2.VideoCapture(0)
    img_counter = 0
    # Read until user quits
    while True:
        ret, frame = cam.read()
        if not ret:
            # Camera read failed (no device / disconnected) -- stop.
            break
        # display the current image
        cv2.imshow("Display", frame)
        # wait for 1ms or key press
        k = cv2.waitKey(1)  # k is the key pressed
        if k == 27 or k == 113:  # 27, 113 are ascii for escape and q respectively
            # exit
            break
        elif k == 32:  # 32 is ascii for space
            # record image
            print("space key pressed")
            img_name = "calib_image_{}.png".format(img_counter)
            cv2.imwrite(folder + '/' + img_name, frame)
            print("Writing: {}".format(folder + '/' + img_name))
            img_counter += 1
    # Release the camera handle so other processes can use it.
    cam.release()
# This function calls OpenCV's camera calibration on the directory of images
# created above.
# Returns the following values
# intrinsics: the current camera intrinsic calibration matrix
# distortion: the current distortion coefficients
# roi: the region of the image with full data
# new_intrinsics: the intrinsic calibration matrix of an image after
# undistortion and roi cropping
def CalibrateCamera(directory,visualize=False):
    """Calibrate the camera from 9x6 checkerboard *.png images in `directory`.

    directory: folder containing the captured calibration images
    visualize: when True, draw detected corners for 500ms per image
    Returns (intrinsics, distortion, roi, new_intrinsics).

    NOTE(review): if `directory` contains no readable *.png image, or no image
    with a detectable checkerboard, `gray`/`img` are referenced before
    assignment below and this function raises NameError -- confirm inputs.
    """
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    # here we only care about computing the intrinsic parameters of the camera
    # and not the true positions of the checkerboard, so we can do everything
    # up to a scale factor, this means we can prepare our object points as
    # (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) representing the coordinates
    # of the corners in the checkerboard's local coordinate frame
    # if we cared about exact position we would need to scale these according
    # to the true size of the checkerboard
    objp = np.zeros((9*6,3), np.float32)
    objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
    # Set up arrays to store object points and image points from all the images
    # Here the image points are the 2d positions of the corners in each image
    # the object points are the true 3d positions of the corners in the
    # checkerboards coordinate frame
    objpoints = [] # 3d point in the checkerboard's coordinate frame
    imgpoints = [] # 2d points in image plane.
    # Grab all images in the directory
    images = glob.glob(directory+'/*.png')
    for fname in images:
        # read the image
        img = cv2.imread(fname)
        # convert to grayscale (this simplifies corner detection)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (9,6), None)
        # If found, add object points, image points (after refining them)
        # This ensures that only images where all corners could be detected are
        # used for calibration
        if ret == True:
            # the object points in the checkerboard frame are always the same
            # so we just duplicate them for each image
            objpoints.append(objp)
            # refine corner locations, initial corner detection is performed by
            # sliding a convultional filter across the image, so accuracy is
            # at best 1 pixel, but from the image gradient we can compute the
            # location of the corner at sub-pixel accuracy
            corners2 = \
                cv2.cornerSubPix(gray,corners, (11,11), (-1,-1), criteria)
            # append the refined pixel location to our image points array
            imgpoints.append(corners2)
            # if visualization is enabled, draw and display the corners
            if visualize==True:
                cv2.drawChessboardCorners(img, (9,6), corners2, ret)
                cv2.imshow('Display', img)
                cv2.waitKey(500)
    # Perform camera calibration
    # Here I have fixed K3, the highest order distortion coefficient
    # This simplifies camera calibration and makes it easier to get a good
    # result, however this is only works under the assumption that your camera
    # does not have too much distortion, if your camera is very wide field of
    # view, you should remove this flag
    ret, intrinsics, distortion, rvecs, tvecs = \
        cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, \
                            None,flags=cv2.CALIB_FIX_K3,criteria=criteria)
    # print error if calibration unsuccessful
    # NOTE(review): on failure this only prints -- execution continues and the
    # (invalid) results are still returned below.
    if not ret:
        print("Calibration failed, recollect images and try again")
    # if successful, compute an print reprojection error, this is a good metric
    # for how good the calibration is. If your result is greater than 1px you
    # should probably recalibrate
    total_error = 0
    for i in range(len(objpoints)):
        # project the object points onto each camera image and compare
        # against the detected image positions
        imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], \
                                          intrinsics, distortion)
        error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2)/len(imgpoints2)
        total_error += error
    print( "mean error: {}".format(total_error/len(objpoints)) )
    # compute the region for where we have full information and the resulting
    # intrinsic calibration matrix
    h, w = img.shape[:2]
    new_intrinsics, roi = cv2.getOptimalNewCameraMatrix(intrinsics, \
                                                        distortion, (w,h), 1,\
                                                        (w,h))
    # return only the information we will need going forward
    return intrinsics, distortion, roi, new_intrinsics
# This function will save the calibration data to a file in the specified
# directory
def SaveCalibrationData(directory, intrinsics, distortion, new_intrinsics,
                        roi):
    """Persist calibration results to <directory>/calib.npz.

    directory: destination folder (must exist)
    intrinsics / distortion: raw calibration outputs
    new_intrinsics / roi: post-undistortion camera matrix and valid region
    """
    np.savez(directory + '/calib', intrinsics=intrinsics, distortion=distortion,
             new_intrinsics=new_intrinsics, roi=roi)
    # Fix: message typo ("clib" -> "calib")
    print("calib data saved")
# This function will load the calibration data from a file in the specified
# directory
def LoadCalibrationData(directory):
    """Read calibration arrays back from <directory>/calib.npz.

    Returns (intrinsics, distortion, new_intrinsics, roi).
    """
    archive = np.load(directory + '/calib.npz')
    return (archive['intrinsics'], archive['distortion'],
            archive['new_intrinsics'], archive['roi'])
| 5,912 | 0 | 89 |
981e79f6d2a6d52ef160903b42183ea6a22283d5 | 260 | py | Python | puzzle1/part_one.py | Tomer23/advent-of-code-2021 | 6781616807c9e5910cd4fe512aa9a3a9ec6738e2 | [
"Apache-2.0"
] | null | null | null | puzzle1/part_one.py | Tomer23/advent-of-code-2021 | 6781616807c9e5910cd4fe512aa9a3a9ec6738e2 | [
"Apache-2.0"
] | null | null | null | puzzle1/part_one.py | Tomer23/advent-of-code-2021 | 6781616807c9e5910cd4fe512aa9a3a9ec6738e2 | [
"Apache-2.0"
] | null | null | null | FILENAME = './puzzle1/data/input'
# Advent of Code 2021 day 1 part 1: count measurements that increase
# relative to the previous one.
s = 0  # number of increases seen so far
previous_value = None
with open(FILENAME) as file:
    for line in file:
        # Fix: compare against None explicitly -- the original truthiness
        # test (`if previous_value:`) wrongly skipped the comparison when
        # the previous measurement was 0.
        if previous_value is not None:
            if int(line) > previous_value:
                s += 1
        previous_value = int(line)
print(s) | 23.636364 | 43 | 0.565385 | FILENAME = './puzzle1/data/input'
# Advent of Code 2021 day 1 part 1: count measurements that increase
# relative to the previous one.
s = 0  # number of increases seen so far
previous_value = None
with open(FILENAME) as file:
    for line in file:
        # Fix: compare against None explicitly -- the original truthiness
        # test (`if previous_value:`) wrongly skipped the comparison when
        # the previous measurement was 0.
        if previous_value is not None:
            if int(line) > previous_value:
                s += 1
        previous_value = int(line)
print(s)
adbf72a8eeb268e24a003c60a1b99687307e3bca | 754 | py | Python | Leetcode/0251-0300/0260-single-number-iii.py | MiKueen/Data-Structures-and-Algorithms | 8788bde5349f326aac0267531f39ac7a2a708ee6 | [
"MIT"
] | null | null | null | Leetcode/0251-0300/0260-single-number-iii.py | MiKueen/Data-Structures-and-Algorithms | 8788bde5349f326aac0267531f39ac7a2a708ee6 | [
"MIT"
] | null | null | null | Leetcode/0251-0300/0260-single-number-iii.py | MiKueen/Data-Structures-and-Algorithms | 8788bde5349f326aac0267531f39ac7a2a708ee6 | [
"MIT"
] | 1 | 2019-10-06T15:46:14.000Z | 2019-10-06T15:46:14.000Z | '''
Author : MiKueen
Level : Medium
Problem Statement : Single Number III
Given an array of numbers nums, in which exactly two elements appear only once and all the other elements appear exactly twice. Find the two elements that appear only once.
Example:
Input: [1,2,1,3,2,5]
Output: [3,5]
Note:
The order of the result is not important. So in the above example, [5, 3] is also correct.
Your algorithm should run in linear runtime complexity. Could you implement it using only constant space complexity?
'''
| 29 | 172 | 0.655172 | '''
Author : MiKueen
Level : Medium
Problem Statement : Single Number III
Given an array of numbers nums, in which exactly two elements appear only once and all the other elements appear exactly twice. Find the two elements that appear only once.
Example:
Input: [1,2,1,3,2,5]
Output: [3,5]
Note:
The order of the result is not important. So in the above example, [5, 3] is also correct.
Your algorithm should run in linear runtime complexity. Could you implement it using only constant space complexity?
'''
class Solution:
    def singleNumber(self, nums: List[int]) -> List[int]:
        """Return the two values that appear exactly once in `nums`.

        Every other value appears exactly twice. A set is used as a
        symmetric-difference accumulator: the second occurrence of a value
        cancels the first, leaving only the two singletons.
        O(n) time, O(n) extra space.
        """
        singles = set()
        for value in nums:
            if value in singles:
                singles.remove(value)  # second occurrence cancels the first
            else:
                singles.add(value)
        # Fix: return a list to honor the declared List[int] return type
        # (the original returned the set itself).
        return list(singles)
| 194 | -6 | 49 |
cad8825299209b87e93310e4923bb67289f4e2f4 | 142 | py | Python | HelloWord/demo1.py | smockgithub/MyPythonDemo | 583d4c4da4f68e4e19ed8a926c445d46fd743d25 | [
"MIT"
] | null | null | null | HelloWord/demo1.py | smockgithub/MyPythonDemo | 583d4c4da4f68e4e19ed8a926c445d46fd743d25 | [
"MIT"
] | null | null | null | HelloWord/demo1.py | smockgithub/MyPythonDemo | 583d4c4da4f68e4e19ed8a926c445d46fd743d25 | [
"MIT"
] | null | null | null | print('hello word','sub2')
name = input()
print('hello,', name)
if name=='test':
print('hello1,', name)
else:
print('hello2,', name) | 15.777778 | 26 | 0.598592 | print('hello word','sub2')
# Read a name from stdin and greet it; the literal name 'test' receives a
# distinct greeting so both branches can be exercised manually.
name = input()
print('hello,', name)
if name=='test':
    print('hello1,', name)
else:
    print('hello2,', name)
c080bd9163a638975d4b96614dd410fd7ace058d | 87 | py | Python | blinds/apps.py | BoraDowon/BackendBlackberry | e177ac6a986e57534734c009edc12faa595415cc | [
"MIT"
] | 2 | 2018-07-14T15:15:15.000Z | 2018-07-28T03:37:55.000Z | blinds/apps.py | BoraDowon/BackendBlackberry | e177ac6a986e57534734c009edc12faa595415cc | [
"MIT"
] | null | null | null | blinds/apps.py | BoraDowon/BackendBlackberry | e177ac6a986e57534734c009edc12faa595415cc | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 14.5 | 33 | 0.747126 | from django.apps import AppConfig
class BlindsConfig(AppConfig):
    """Django AppConfig registering the `blinds` application."""
    name = 'blinds'
| 0 | 29 | 23 |
cc09c9fd8563ee68ca002bb4d2e90c0b50d291c7 | 53 | py | Python | Modulo_1/semana3/Ciclos/package-unpackage/package-unpackage.py | rubens233/cocid_python | 492ebdf21817e693e5eb330ee006397272f2e0cc | [
"MIT"
] | null | null | null | Modulo_1/semana3/Ciclos/package-unpackage/package-unpackage.py | rubens233/cocid_python | 492ebdf21817e693e5eb330ee006397272f2e0cc | [
"MIT"
] | null | null | null | Modulo_1/semana3/Ciclos/package-unpackage/package-unpackage.py | rubens233/cocid_python | 492ebdf21817e693e5eb330ee006397272f2e0cc | [
"MIT"
] | 1 | 2022-03-04T00:57:18.000Z | 2022-03-04T00:57:18.000Z | t = (1, 2, 3)
a, b, c = t
print(a)
print(b)
print(c) | 8.833333 | 13 | 0.490566 | t = (1, 2, 3)
# Tuple unpacking demo: bind the three items of `t` (defined above) and
# print each one on its own line.
a, b, c = t
print(a)
print(b)
print(c)
28365e241198cda38a59954fc4967d8d7545ac81 | 1,803 | py | Python | datautil.py | ThorntheLetter/turbulent-guacamole | 51b2541aa8dbaed1a22493289de67fbea8eaf709 | [
"MIT"
] | null | null | null | datautil.py | ThorntheLetter/turbulent-guacamole | 51b2541aa8dbaed1a22493289de67fbea8eaf709 | [
"MIT"
] | null | null | null | datautil.py | ThorntheLetter/turbulent-guacamole | 51b2541aa8dbaed1a22493289de67fbea8eaf709 | [
"MIT"
] | null | null | null | import numpy as np
import wave
#takes input frame and width of sample in bytes and transforms it into a number between -1 and 1
#reverses squash()
vunsquash = np.vectorize(unsquash)
vsquash = np.vectorize(squash)
#Gets next frame in the file
#Arranges the file into numpy matrix for input using every possible sequence.
#Arranges the file into numpy matrix for input sequentially.
| 33.388889 | 96 | 0.715474 | import numpy as np
import wave
#takes input frame and width of sample in bytes and transforms it into a number between -1 and 1
def squash(input, width = 2):
    """Normalize a signed `width`-byte PCM sample into the [-1, 1) range."""
    full_scale = 1 << (8 * width - 1)
    return input / full_scale
#reverses squash()
def unsquash(input, width = 2):
    """Inverse of squash: map a [-1, 1) value back to the integer sample range."""
    full_scale = 1 << (8 * width - 1)
    return input * full_scale
vunsquash = np.vectorize(unsquash)
vsquash = np.vectorize(squash)
#Gets next frame in the file
def get_next_frames(file, nframes):
    """Read the next `nframes` frames from an open wave reader as int16.

    file: an open wave.Wave_read object (assumes 16-bit samples -- TODO confirm)
    Returns a 1-D numpy array of dtype int16 (read-only view of the buffer).
    """
    # Fix: np.fromstring is deprecated for binary data; np.frombuffer is the
    # supported (and zero-copy) equivalent.
    return np.frombuffer(file.readframes(nframes), dtype='int16')
#Arranges the file into numpy matrix for input using every possible sequence.
def arrange_samples_full(file, sample_length):
    """Build (input, target) matrices using every possible sliding window.

    file: path to a wav file to read.  Fix: the original opened an undefined
        global `filename` (NameError); it now opens the argument actually
        passed in.
    sample_length: number of frames per input window
    Returns (x, y) squashed to [-1, 1): x has shape
    (num_samples, sample_length, 1) and y holds the frame that follows
    each window.
    """
    wav = wave.open(file, 'rb')
    wav.rewind()
    position = wav.tell()
    number_of_frames = wav.getnframes()
    # one window per start position, sliding by a single frame
    number_of_samples = number_of_frames - sample_length - 1
    x = np.zeros((number_of_samples, sample_length, 1), dtype='float32')
    y = np.zeros((number_of_samples, 1), dtype='float32')
    for i in range(number_of_samples):
        print(i, "/", number_of_samples, end='\r')
        wav.setpos(position)
        x[i, :, 0] = get_next_frames(wav, sample_length)
        y[i, :] = get_next_frames(wav, 1)
        # Fix: advance the start position by exactly one frame.  The original
        # saved tell() *after* consuming the whole window, so windows jumped
        # by sample_length+1 frames -- contradicting the "every possible
        # sequence" intent and overrunning the file before the loop finished.
        position = position + 1
    print()
    return (vsquash(x), vsquash(y))
#Arranges the file into numpy matrix for input sequentially.
def arrange_samples_sequential(filename, sample_length):
    """Split the wav file into consecutive, non-overlapping training samples.

    filename: path of a wav file (assumes 16-bit mono samples -- TODO confirm)
    sample_length: number of frames per input window
    Returns (x, y) squashed to [-1, 1): x has shape
    (num_samples, sample_length, 1) and y holds the frame following
    each window.
    """
    file = wave.open(filename, 'rb')
    file.rewind()
    number_of_frames = file.getnframes()
    # each sample consumes sample_length input frames plus 1 target frame
    number_of_samples = number_of_frames // (sample_length + 1)
    x = np.zeros((number_of_samples, sample_length, 1), dtype = 'float32')
    y = np.zeros((number_of_samples, 1), dtype = 'float32')
    for i in range(number_of_samples):
        print(i, "/", number_of_samples, end ='\r')
        x[i,:,0] = get_next_frames(file, sample_length)
        y[i,:] = get_next_frames(file, 1)
    print()
    # NOTE(review): the wave file is never closed here
    return (vsquash(x), vsquash(y))
| 1,304 | 0 | 110 |
6de75ede73cb4358ec40e29b11707cd5219d4a6a | 2,970 | py | Python | polr/cli.py | kylieCat/polr-py | 0cf268fad8fa979b4d76fec1a984aa4c52f8c5ca | [
"MIT"
] | null | null | null | polr/cli.py | kylieCat/polr-py | 0cf268fad8fa979b4d76fec1a984aa4c52f8c5ca | [
"MIT"
] | 48 | 2020-10-18T03:35:58.000Z | 2022-03-31T19:48:03.000Z | polr/cli.py | kylieCat/polr-py | 0cf268fad8fa979b4d76fec1a984aa4c52f8c5ca | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Console script for polr."""
import sys
import click
from .polr import Polr
from . import utils
from . import settings
_client = None
@click.group()
def polr(args=None):
"""
Console script for polr.
"""
return 0
SHORTEN_HELP_STR = "Return an error if a link with the desired customending already exists"
@polr.command(name="shorten")
@click.argument("url")
@click.option("-e", "--ending", "ending", help="A custom ending for the shortened link.")
@click.option("-f", "--fail", "raise_on_exists", is_flag=True, help=SHORTEN_HELP_STR)
def shorten(url, ending="", raise_on_exists=False):
"""
Shorten a link with the option to give it a custom ending. Checks to see if a link with
the given ending exists. Can be configured to fail if it already exists with [-f|--fail].
Usage:
jinc go shorten URL [(-e|--ending=)ending] [(-f|--fail)]
Examples:
\b
# Use default ending
$ polr shorten https://example.com
http://go/ad14gfwe
\b
# Use custom ending, if ending already exists don't return error, return link with that ending.
$ polr shorten https://example.com -e my-custom-ending
http://go/my-custom-ending
\b
# Use custom ending, return error if it already exists.
polr shorten https://example.com -e my-custom-ending -f
"""
client = get_client()
try:
shortened = client.shorten(url, ending=ending, raise_on_exists=raise_on_exists)
click.echo(shortened)
except client.ShortenerException as err:
utils.print_error_and_exit(f"{err}")
@polr.command(name="shorten-bulk")
@click.argument("links")
@polr.command(name="exists", help="Check to see if a link with the given ending already exists.")
@click.argument("ending")
@polr.command(name="lookup")
@click.argument("ending")
@polr.command(name="data")
@click.argument("ending")
if __name__ == "__main__":
sys.exit(polr()) # pragma: no cover
| 25.826087 | 103 | 0.662963 | # -*- coding: utf-8 -*-
"""Console script for polr."""
import sys
import click
from .polr import Polr
from . import utils
from . import settings
_client = None
def get_client():
    """Return the module-wide Polr client, constructing it on first use."""
    global _client
    if _client is not None:
        return _client
    _client = Polr(settings.POLR_URL, settings.POLR_API_KEY)
    return _client
@click.group()
def polr(args=None):
    """
    Console script for polr.
    """
    # Click group entry point; the subcommands below register themselves
    # via @polr.command.  (The docstring above is the CLI help text.)
    return 0
SHORTEN_HELP_STR = "Return an error if a link with the desired customending already exists"
@polr.command(name="shorten")
@click.argument("url")
@click.option("-e", "--ending", "ending", help="A custom ending for the shortened link.")
@click.option("-f", "--fail", "raise_on_exists", is_flag=True, help=SHORTEN_HELP_STR)
def shorten(url, ending="", raise_on_exists=False):
    """
    Shorten a link with the option to give it a custom ending. Checks to see if a link with
    the given ending exists. Can be configured to fail if it already exists with [-f|--fail].
    Usage:
        jinc go shorten URL [(-e|--ending=)ending] [(-f|--fail)]
    Examples:
    \b
    # Use default ending
    $ polr shorten https://example.com
    http://go/ad14gfwe
    \b
    # Use custom ending, if ending already exists don't return error, return link with that ending.
    $ polr shorten https://example.com -e my-custom-ending
    http://go/my-custom-ending
    \b
    # Use custom ending, return error if it already exists.
    polr shorten https://example.com -e my-custom-ending -f
    """
    # Delegate to the shared client; echo the shortened URL on success,
    # otherwise exit non-zero with the API error message.
    client = get_client()
    try:
        shortened = client.shorten(url, ending=ending, raise_on_exists=raise_on_exists)
        click.echo(shortened)
    except client.ShortenerException as err:
        utils.print_error_and_exit(f"{err}")
@polr.command(name="shorten-bulk")
@click.argument("links")
def shorten_bulk(links):
    # Shorten several links in one API call and echo the result.
    # NOTE(review): this catches DuplicateEndingException while the sibling
    # commands catch ShortenerException -- confirm the client's exception
    # hierarchy covers connection/API errors here too.
    client = get_client()
    try:
        shortened = client.shorten_bulk(links)
        click.echo(shortened)
    except client.DuplicateEndingException as err:
        utils.print_error_and_exit(f"{err}")
@polr.command(name="exists", help="Check to see if a link with the given ending already exists.")
@click.argument("ending")
def exists(ending):
    # Query the API and exit with a code mirroring the boolean result so
    # shells can branch on `polr exists ...`.  The local is renamed so it
    # no longer shadows this command function.
    client = get_client()
    ending_taken = client.exists(ending)
    utils.print_and_exit(ending_taken, code=int(ending_taken))
@polr.command(name="lookup")
@click.argument("ending")
def lookup(ending):
    # Resolve an ending to its long URL; exit 0 with the result, or exit
    # non-zero with the API error message.
    client = get_client()
    try:
        data = client.lookup(ending)
        utils.print_and_exit(data, code=0)
    except client.ShortenerException as err:
        utils.print_error_and_exit(f"{err}")
@polr.command(name="data")
@click.argument("ending")
def data(ending):
    # Fetch analytics data for the given ending.  The local is renamed so
    # it no longer shadows this command function.
    client = get_client()
    try:
        link_data = client.data(ending)
        utils.print_and_exit(link_data, code=0)
    except client.ShortenerException as err:
        utils.print_error_and_exit(f"{err}")
# Script entry point: run the click group and propagate its exit status.
if __name__ == "__main__":
    sys.exit(polr()) # pragma: no cover
| 846 | 0 | 111 |
be9f23e001c451d16d1dccd3c47505e909f38e70 | 839 | py | Python | src/pynwb/tests/test_associated_files.py | NovelaNeuro/ndx-franklab-novela | d5b87f742cf5aeb2c88c6f3df699614ec3fb2328 | [
"BSD-3-Clause"
] | null | null | null | src/pynwb/tests/test_associated_files.py | NovelaNeuro/ndx-franklab-novela | d5b87f742cf5aeb2c88c6f3df699614ec3fb2328 | [
"BSD-3-Clause"
] | 4 | 2021-03-18T22:34:00.000Z | 2021-12-09T00:33:53.000Z | src/pynwb/tests/test_associated_files.py | NovelaNeuro/ndx-franklab-novela | d5b87f742cf5aeb2c88c6f3df699614ec3fb2328 | [
"BSD-3-Clause"
] | 2 | 2020-12-07T22:35:42.000Z | 2021-02-17T18:55:07.000Z | from unittest import TestCase
from src.pynwb.ndx_franklab_novela.associated_files import AssociatedFiles
| 38.136364 | 95 | 0.703218 | from unittest import TestCase
from src.pynwb.ndx_franklab_novela.associated_files import AssociatedFiles
class TestApparatus(TestCase):
    """Unit tests for the AssociatedFiles NWB extension container."""
    def setUp(self):
        """Create a fully-populated AssociatedFiles instance for each test."""
        self.associated_files = AssociatedFiles(
            name='file1',
            description='description of file1',
            content='1 2 3 content of file test',
            task_epochs='1, 2'
        )
    def test_successfull_associated_files_creation_true(self):
        """All constructor arguments should round-trip through `fields`."""
        self.assertIsInstance(self.associated_files, AssociatedFiles)
        self.assertEqual('file1', self.associated_files.name)
        self.assertEqual('description of file1', self.associated_files.fields['description'])
        self.assertEqual('1 2 3 content of file test', self.associated_files.fields['content'])
        self.assertEqual('1, 2', self.associated_files.fields['task_epochs'])
| 646 | 9 | 77 |
a23c43bba7834ddfeda3fcc6a85fe6ae7593ab6f | 2,840 | py | Python | core/dbt/clients/registry.py | f1fe/dbt | e943b9fc842535e958ef4fd0b8703adc91556bc6 | [
"Apache-2.0"
] | 3,156 | 2017-03-05T09:59:23.000Z | 2021-06-30T01:27:52.000Z | core/dbt/clients/registry.py | f1fe/dbt | e943b9fc842535e958ef4fd0b8703adc91556bc6 | [
"Apache-2.0"
] | 2,608 | 2017-02-27T15:39:40.000Z | 2021-06-30T01:49:20.000Z | core/dbt/clients/registry.py | f1fe/dbt | e943b9fc842535e958ef4fd0b8703adc91556bc6 | [
"Apache-2.0"
] | 693 | 2017-03-13T03:04:49.000Z | 2021-06-25T15:57:41.000Z | import functools
import requests
from dbt.events.functions import fire_event
from dbt.events.types import (
RegistryProgressMakingGETRequest,
RegistryProgressGETResponse
)
from dbt.utils import memoized, _connection_exception_retry as connection_exception_retry
from dbt import deprecations
import os
if os.getenv('DBT_PACKAGE_HUB_URL'):
DEFAULT_REGISTRY_BASE_URL = os.getenv('DBT_PACKAGE_HUB_URL')
else:
DEFAULT_REGISTRY_BASE_URL = 'https://hub.getdbt.com/'
index_cached = memoized(index)
| 32.643678 | 91 | 0.737676 | import functools
import requests
from dbt.events.functions import fire_event
from dbt.events.types import (
RegistryProgressMakingGETRequest,
RegistryProgressGETResponse
)
from dbt.utils import memoized, _connection_exception_retry as connection_exception_retry
from dbt import deprecations
import os
if os.getenv('DBT_PACKAGE_HUB_URL'):
DEFAULT_REGISTRY_BASE_URL = os.getenv('DBT_PACKAGE_HUB_URL')
else:
DEFAULT_REGISTRY_BASE_URL = 'https://hub.getdbt.com/'
def _get_url(url, registry_base_url=None):
    """Prefix a registry-relative path with the (possibly overridden) base URL."""
    base = DEFAULT_REGISTRY_BASE_URL if registry_base_url is None else registry_base_url
    return '{}{}'.format(base, url)
def _get_with_retries(path, registry_base_url=None):
    """GET a registry path, retrying up to 5 times on connection errors."""
    get_fn = functools.partial(_get, path, registry_base_url)
    return connection_exception_retry(get_fn, 5)
def _get(path, registry_base_url=None):
    """Perform a single GET against the registry and return the decoded JSON.

    Fires progress events around the request; raises requests.HTTPError for
    non-2xx responses via raise_for_status().
    """
    url = _get_url(path, registry_base_url)
    fire_event(RegistryProgressMakingGETRequest(url=url))
    resp = requests.get(url, timeout=30)
    fire_event(RegistryProgressGETResponse(url=url, resp_code=resp.status_code))
    resp.raise_for_status()
    # NOTE(review): requests.get never returns None and raise_for_status()
    # has already run, so this branch is unreachable dead code -- confirm
    # whether the intent was to guard resp.json() returning None instead.
    if resp is None:
        raise requests.exceptions.ContentDecodingError(
            'Request error: The response is None', response=resp
        )
    return resp.json()
def index(registry_base_url=None):
    """Fetch the registry's full package index (with retries)."""
    index_path = 'api/v1/index.json'
    return _get_with_retries(index_path, registry_base_url)
index_cached = memoized(index)
def packages(registry_base_url=None):
    """Fetch the registry's package listing (with retries)."""
    return _get_with_retries('api/v1/packages.json', registry_base_url)
def package(name, registry_base_url=None):
    """Fetch metadata for `name`, warning when the hub reports a redirect.

    Returns the raw JSON response.  When the package has been renamed or
    moved to another namespace, a 'package-redirect' deprecation warning is
    emitted pointing at the new namespace/name.
    """
    response = _get_with_retries('api/v1/{}.json'.format(name), registry_base_url)
    # Either redirectnamespace or redirectname in the JSON response indicate a redirect
    # redirectnamespace redirects based on package ownership
    # redirectname redirects based on package name
    # Both can be present at the same time, or neither. Fails gracefully to old name
    if ('redirectnamespace' in response) or ('redirectname' in response):
        if ('redirectnamespace' in response) and response['redirectnamespace'] is not None:
            use_namespace = response['redirectnamespace']
        else:
            use_namespace = response['namespace']
        if ('redirectname' in response) and response['redirectname'] is not None:
            use_name = response['redirectname']
        else:
            use_name = response['name']
        new_nwo = use_namespace + "/" + use_name
        deprecations.warn('package-redirect', old_name=name, new_name=new_nwo)
    return response
def package_version(name, version, registry_base_url=None):
    """Fetch the metadata for one published version of `name` (with retries)."""
    return _get_with_retries('api/v1/{}/{}.json'.format(name, version), registry_base_url)
def get_available_versions(name):
    """Return the list of version strings published for `name`."""
    return list(package(name)['versions'])
| 2,139 | 0 | 184 |
6ad6d5451532f3f997a99686b38bd43e2437f651 | 1,273 | py | Python | colorblend.py | shaunco/MoodMirror | 6627ba9fb1df5d5db693cb9838a87f4c1c13a1d2 | [
"Apache-2.0"
] | null | null | null | colorblend.py | shaunco/MoodMirror | 6627ba9fb1df5d5db693cb9838a87f4c1c13a1d2 | [
"Apache-2.0"
] | null | null | null | colorblend.py | shaunco/MoodMirror | 6627ba9fb1df5d5db693cb9838a87f4c1c13a1d2 | [
"Apache-2.0"
] | null | null | null | #from https://stackoverflow.com/questions/726549/algorithm-for-additive-color-mixing-for-rgb-values/726578
rgb_scale = 255
cmyk_scale = 100
def cmyk_to_rgb(c, m, y, k):
"""
"""
r = rgb_scale*(1.0 - (c + k) / float(cmyk_scale))
g = rgb_scale*(1.0 - (m + k) / float(cmyk_scale))
b = rgb_scale*(1.0 - (y + k) / float(cmyk_scale))
return int(r), int(g), int(b)
def ink_add_for_rgb(list_of_colors):
"""input: list of rgb, opacity (r,g,b,o) colors to be added, o acts as weights.
output (r,g,b)
"""
C = 0
M = 0
Y = 0
K = 0
for (r, g, b, o) in list_of_colors:
c, m, y, k = rgb_to_cmyk(r, g, b)
C += o * c
M += o * m
Y += o * y
K += o * k
return cmyk_to_rgb(C, M, Y, K)
| 24.480769 | 106 | 0.527101 | #from https://stackoverflow.com/questions/726549/algorithm-for-additive-color-mixing-for-rgb-values/726578
rgb_scale = 255
cmyk_scale = 100
def rgb_to_cmyk(r, g, b):
if (r == 0) and (g == 0) and (b == 0):
# black
return 0, 0, 0, cmyk_scale
# rgb [0,255] -> cmy [0,1]
c = 1 - r / float(rgb_scale)
m = 1 - g / float(rgb_scale)
y = 1 - b / float(rgb_scale)
# extract out k [0,1]
min_cmy = min(c, m, y)
c = (c - min_cmy)
m = (m - min_cmy)
y = (y - min_cmy)
k = min_cmy
# rescale to the range [0,cmyk_scale]
return c * cmyk_scale, m * cmyk_scale, y * cmyk_scale, k * cmyk_scale
def cmyk_to_rgb(c, m, y, k):
"""
"""
r = rgb_scale*(1.0 - (c + k) / float(cmyk_scale))
g = rgb_scale*(1.0 - (m + k) / float(cmyk_scale))
b = rgb_scale*(1.0 - (y + k) / float(cmyk_scale))
return int(r), int(g), int(b)
def ink_add_for_rgb(list_of_colors):
"""input: list of rgb, opacity (r,g,b,o) colors to be added, o acts as weights.
output (r,g,b)
"""
C = 0
M = 0
Y = 0
K = 0
for (r, g, b, o) in list_of_colors:
c, m, y, k = rgb_to_cmyk(r, g, b)
C += o * c
M += o * m
Y += o * y
K += o * k
return cmyk_to_rgb(C, M, Y, K)
| 482 | 0 | 23 |
b23ee3eb9a4f49b0aac4c2ffab6ce5c60165893e | 12,658 | py | Python | openpifpaf/plugins/apollocar3d/apollo_to_coco.py | anhvth/openpifpaf | a88edd744b50f76dfdae9dbb180d4a403cbfd060 | [
"CC-BY-2.0",
"CC-BY-4.0"
] | null | null | null | openpifpaf/plugins/apollocar3d/apollo_to_coco.py | anhvth/openpifpaf | a88edd744b50f76dfdae9dbb180d4a403cbfd060 | [
"CC-BY-2.0",
"CC-BY-4.0"
] | null | null | null | openpifpaf/plugins/apollocar3d/apollo_to_coco.py | anhvth/openpifpaf | a88edd744b50f76dfdae9dbb180d4a403cbfd060 | [
"CC-BY-2.0",
"CC-BY-4.0"
] | null | null | null | """
Convert txt files of ApolloCar3D into json file with COCO format
"""
import glob
import os
import time
from shutil import copyfile
import json
import argparse
import numpy as np
from PIL import Image
# Packages for data processing, crowd annotations and histograms
try:
import matplotlib.pyplot as plt # pylint: disable=import-error
except ModuleNotFoundError as err:
if err.name != 'matplotlib':
raise err
plt = None
try:
import cv2 # pylint: disable=import-error
except ModuleNotFoundError as err:
if err.name != 'cv2':
raise err
cv2 = None # pylint: disable=invalid-name
from .constants import CAR_KEYPOINTS_24, CAR_SKELETON_24,\
CAR_KEYPOINTS_66, CAR_SKELETON_66, KPS_MAPPING
from .transforms import skeleton_mapping
if __name__ == "__main__":
main()
| 38.012012 | 99 | 0.55143 | """
Convert txt files of ApolloCar3D into json file with COCO format
"""
import glob
import os
import time
from shutil import copyfile
import json
import argparse
import numpy as np
from PIL import Image
# Packages for data processing, crowd annotations and histograms
try:
import matplotlib.pyplot as plt # pylint: disable=import-error
except ModuleNotFoundError as err:
if err.name != 'matplotlib':
raise err
plt = None
try:
import cv2 # pylint: disable=import-error
except ModuleNotFoundError as err:
if err.name != 'cv2':
raise err
cv2 = None # pylint: disable=invalid-name
from .constants import CAR_KEYPOINTS_24, CAR_SKELETON_24,\
CAR_KEYPOINTS_66, CAR_SKELETON_66, KPS_MAPPING
from .transforms import skeleton_mapping
def cli():
    """Parse the command-line options for the Apollo-to-COCO converter."""
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument('--dir_data', default='data-apollocar3d/train',
                            help='dataset directory')
    arg_parser.add_argument('--dir_out', default='data-apollocar3d',
                            help='where to save annotations and files')
    arg_parser.add_argument('--sample', action='store_true',
                            help='Whether to only process the first 50 images')
    arg_parser.add_argument('--single_sample', action='store_true',
                            help='Whether to only process the first image')
    arg_parser.add_argument('--split_images', action='store_true',
                            help='Whether to copy images into train val split folder')
    arg_parser.add_argument('--histogram', action='store_true',
                            help='Whether to show keypoints histogram')
    return arg_parser.parse_args()
class ApolloToCoco:
# Prepare json format
map_sk = skeleton_mapping(KPS_MAPPING)
sample = False
single_sample = False
split_images = False
histogram = False
def __init__(self, dir_dataset, dir_out):
"""
:param dir_dataset: Original dataset directory
:param dir_out: Processed dataset directory
"""
assert os.path.isdir(dir_dataset), 'dataset directory not found'
self.dir_dataset = dir_dataset
self.dir_mask = os.path.join(dir_dataset, 'ignore_mask')
assert os.path.isdir(self.dir_mask), 'crowd annotations not found: ' + self.dir_mask
self.dir_out_im = os.path.join(dir_out, 'images')
self.dir_out_ann = os.path.join(dir_out, 'annotations')
os.makedirs(self.dir_out_im, exist_ok=True)
os.makedirs(self.dir_out_ann, exist_ok=True)
self.json_file_24 = {}
self.json_file_66 = {}
# Load train val split
path_train = os.path.join(self.dir_dataset, 'split', 'train-list.txt')
path_val = os.path.join(self.dir_dataset, 'split', 'validation-list.txt', )
self.splits = {}
for name, path in zip(('train', 'val'), (path_train, path_val)):
with open(path, "r") as ff:
lines = ff.readlines()
self.splits[name] = [os.path.join(self.dir_dataset, 'images', line.strip())
for line in lines]
assert self.splits[name], "specified path is empty"
def process(self):
"""Parse and process the txt dataset into a single json file compatible with coco format"""
for phase, im_paths in self.splits.items(): # Train and Val
cnt_images = 0
cnt_instances = 0
cnt_kps = [0] * 66
self.initiate_json() # Initiate json file at each phase
# save only 50 images
if self.sample:
im_paths = im_paths[:50]
if self.split_images:
path_dir = (os.path.join(self.dir_out_im, phase))
os.makedirs(path_dir, exist_ok=True)
assert not os.listdir(path_dir), "Directory to save images is not empty. " \
"Remove flag --split_images ?"
elif self.single_sample:
im_paths = self.splits['train'][:1]
print(f'Single sample for train/val:{im_paths}')
for im_path in im_paths:
im_size, im_name, im_id = self._process_image(im_path)
cnt_images += 1
# Process its annotations
txt_paths = glob.glob(os.path.join(self.dir_dataset, 'keypoints', im_name,
im_name + '*.txt'))
for txt_path in txt_paths:
data = np.loadtxt(txt_path, delimiter='\t', ndmin=2)
cnt_kps = self._process_annotation(data, txt_path, im_size, im_id, cnt_kps)
cnt_instances += 1
# Split the image in a new folder
if self.split_images:
dst = os.path.join(self.dir_out_im, phase, os.path.split(im_path)[-1])
copyfile(im_path, dst)
# Add crowd annotations
mask_path = os.path.join(self.dir_mask, im_name + '.jpg')
self._process_mask(mask_path, im_id)
# Count
if (cnt_images % 1000) == 0:
text = ' and copied to new directory' if self.split_images else ''
print(f'Parsed {cnt_images} images' + text)
self.save_json_files(phase)
print(f'\nPhase:{phase}')
print(f'Average number of keypoints labelled: {sum(cnt_kps) / cnt_instances:.1f} / 66')
print(f'JSON files directory: {self.dir_out_ann}')
print(f'Saved {cnt_instances} instances over {cnt_images} images ')
if self.histogram:
histogram(cnt_kps)
def save_json_files(self, phase):
for j_file, n_kps in [(self.json_file_24, 24), (self.json_file_66, 66)]:
name = 'apollo_keypoints_' + str(n_kps) + '_'
if self.sample:
name = name + 'sample_'
elif self.single_sample:
name = name + 'single_sample_'
path_json = os.path.join(self.dir_out_ann, name + phase + '.json')
with open(path_json, 'w') as outfile:
json.dump(j_file, outfile)
def _process_image(self, im_path):
"""Update image field in json file"""
file_name = os.path.basename(im_path)
im_name = os.path.splitext(file_name)[0]
im_id = int(im_name.split(sep='_')[1]) # Numeric code in the image
im = Image.open(im_path)
width, height = im.size
dict_ann = {
'coco_url': "unknown",
'file_name': file_name,
'id': im_id,
'license': 1,
'date_captured': "unknown",
'width': width,
'height': height}
self.json_file_24["images"].append(dict_ann)
self.json_file_66["images"].append(dict_ann)
return (width, height), im_name, im_id
def _process_mask(self, mask_path, im_id):
"""Mask crowd annotations"""
if cv2 is None:
raise Exception('OpenCV')
assert os.path.isfile(mask_path), mask_path
im_gray = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
blur = cv2.GaussianBlur(im_gray, (0, 0), sigmaX=3, sigmaY=3, borderType=cv2.BORDER_DEFAULT)
contours, _ = cv2.findContours(blur, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
for idx, mask in enumerate(contours):
box = cv2.boundingRect(mask)
mask_id = int(str(im_id) + '00' + str(idx)) # different id from crowds
dict_mask = {
'image_id': im_id,
'category_id': 1,
'iscrowd': 1,
'id': mask_id,
'area': box[2] * box[3],
'bbox': box,
'num_keypoints': 0,
'keypoints': [],
'segmentation': []}
self.json_file_24["annotations"].append(dict_mask)
self.json_file_66["annotations"].append(dict_mask)
def _process_annotation(self, all_kps, txt_path, im_size, im_id, cnt_kps):
    """Process single instance.

    Builds the enclosing bounding box from the labelled keypoints, converts
    the keypoints to the 24-kp and 66-kp formats, and appends one annotation
    to each JSON file.
    :param all_kps: array of [keypoint-number, x, y] rows for one car
    :param txt_path: annotation text file path; its numeric suffix becomes
        part of the unique annotation id
    :param im_size: (width, height) used to clamp the enlarged box
    :param cnt_kps: running histogram of keypoint numbers; returned updated
    """
    # Enlarge box
    box_tight = [np.min(all_kps[:, 1]), np.min(all_kps[:, 2]),
                 np.max(all_kps[:, 1]), np.max(all_kps[:, 2])]
    w, h = box_tight[2] - box_tight[0], box_tight[3] - box_tight[1]
    # Pad the tight box by 10% on each side, clamped to the image bounds.
    x_o = max(box_tight[0] - 0.1 * w, 0)
    y_o = max(box_tight[1] - 0.1 * h, 0)
    x_i = min(box_tight[0] + 1.1 * w, im_size[0])
    y_i = min(box_tight[1] + 1.1 * h, im_size[1])
    box = [int(x_o), int(y_o), int(x_i - x_o), int(y_i - y_o)]  # (x, y, w, h)
    txt_id = os.path.splitext(txt_path.split(sep='_')[-1])[0]
    car_id = int(str(im_id) + str(int(txt_id)))  # include the specific annotation id
    kps, num = self._transform_keypoints_24(all_kps)
    self.json_file_24["annotations"].append({
        'image_id': im_id,
        'category_id': 1,
        'iscrowd': 0,
        'id': car_id,
        'area': box[2] * box[3],
        'bbox': box,
        'num_keypoints': num,
        'keypoints': kps,
        'segmentation': []})
    kps, num = self._transform_keypoints_66(all_kps)
    self.json_file_66["annotations"].append({
        'image_id': im_id,
        'category_id': 1,
        'iscrowd': 0,
        'id': car_id,
        'area': box[2] * box[3],
        'bbox': box,
        'num_keypoints': num,
        'keypoints': kps,
        'segmentation': []})
    # Stats
    # NOTE(review): `num` is reused here as the loop variable, clobbering
    # the keypoint count above (harmless, but easy to misread).
    for num in all_kps[:, 0]:
        cnt_kps[int(num)] += 1
    return cnt_kps
def _transform_keypoints_24(self, kps):
    """
    24 keypoint version
    Map, filter keypoints and add visibility
    :array of [[#, x, y], ...]
    :return [x, y, visibility, x, y, visibility, .. ]
    """
    out = np.zeros((len(CAR_KEYPOINTS_24), 3))
    labelled = 0
    for kp in kps:
        target = self.map_sk[int(kp[0])]
        # Values >= 100 mark 66-kp points with no 24-kp counterpart: skip.
        if target >= 100:
            continue
        out[target] = (kp[1], kp[2], 2)  # visibility 2 = labelled & visible
        labelled += 1
    return list(out.reshape(-1)), labelled
@staticmethod
def _transform_keypoints_66(kps):
    """
    66 keypoint version
    Add visibility
    :array of [[#, x, y], ...]
    :return [x, y, visibility, x, y, visibility, .. ]
    """
    out = np.zeros((len(CAR_KEYPOINTS_66), 3))
    labelled = 0
    for kp in kps:
        # Keypoint number is used directly as the row index.
        out[int(kp[0])] = (kp[1], kp[2], 2)  # visibility 2 = labelled & visible
        labelled += 1
    return list(out.reshape(-1)), labelled
def initiate_json(self):
    """
    Initiate Json for training and val phase for the 24 kp and the 66 kp version
    """
    for j_file, n_kp in [(self.json_file_24, 24), (self.json_file_66, 66)]:
        # BUG FIX: the description previously lacked an f-string prefix, so
        # the literal text "{n_kp}" was written into every JSON file.
        j_file["info"] = dict(url="https://github.com/openpifpaf/openpifpaf",
                              date_created=time.strftime("%a, %d %b %Y %H:%M:%S +0000",
                                                         time.localtime()),
                              description=("Conversion of ApolloCar3D dataset into MS-COCO"
                                           f" format with {n_kp} keypoints"))
        skel = CAR_SKELETON_24 if n_kp == 24 else CAR_SKELETON_66
        car_kps = CAR_KEYPOINTS_24 if n_kp == 24 else CAR_KEYPOINTS_66
        j_file["categories"] = [dict(name='car',
                                     id=1,
                                     skeleton=skel,
                                     supercategory='car',
                                     keypoints=car_kps)]
        j_file["images"] = []
        j_file["annotations"] = []
def histogram(cnt_kps):
    """Plot the per-keypoint annotation counts as a bar chart.

    :param cnt_kps: sequence of counts, one per keypoint index.
    Blocks until the matplotlib window is closed (plt.show()).
    """
    bins = np.arange(len(cnt_kps))
    data = np.array(cnt_kps)
    plt.figure()
    plt.title("Distribution of the keypoints")
    plt.bar(bins, data)
    plt.xticks(np.arange(len(cnt_kps), step=5))
    plt.show()
def main():
    """Entry point: parse CLI args, set class-level flags, run the conversion."""
    args = cli()
    # configure
    # Flags are stored on the class itself, so they apply to the instance
    # created below.
    ApolloToCoco.sample = args.sample
    ApolloToCoco.single_sample = args.single_sample
    ApolloToCoco.split_images = args.split_images
    ApolloToCoco.histogram = args.histogram
    apollo_coco = ApolloToCoco(args.dir_data, args.dir_out)
    apollo_coco.process()


if __name__ == "__main__":
    main()
| 1,973 | 9,773 | 92 |
40cebe3a0b3916621ef060c49209893847634a5f | 1,204 | py | Python | .ci/prep_azure.py | ome/mitogen | a154b7b134d954ea34de4c1beaca3aaa6f7514b9 | [
"BSD-3-Clause"
] | null | null | null | .ci/prep_azure.py | ome/mitogen | a154b7b134d954ea34de4c1beaca3aaa6f7514b9 | [
"BSD-3-Clause"
] | null | null | null | .ci/prep_azure.py | ome/mitogen | a154b7b134d954ea34de4c1beaca3aaa6f7514b9 | [
"BSD-3-Clause"
] | 1 | 2020-09-18T05:46:01.000Z | 2020-09-18T05:46:01.000Z | #!/usr/bin/env python
import os
import sys
import ci_lib

# Each batch is a list of shell commands executed by ci_lib.run_batches.
batches = []

# NOTE(review): the `0 and` guard permanently disables this local-sshd
# bootstrap block; it is kept for reference only.
if 0 and os.uname()[0] == 'Linux':
    batches += [
        [
            "sudo chown `whoami`: ~",
            "chmod u=rwx,g=rx,o= ~",
            "sudo mkdir /var/run/sshd",
            "sudo /etc/init.d/ssh start",
            "mkdir -p ~/.ssh",
            "chmod u=rwx,go= ~/.ssh",
            "ssh-keyscan -H localhost >> ~/.ssh/known_hosts",
            "chmod u=rw,go= ~/.ssh/known_hosts",
            "cat tests/data/docker/mitogen__has_sudo_pubkey.key > ~/.ssh/id_rsa",
            "chmod u=rw,go= ~/.ssh/id_rsa",
            "cat tests/data/docker/mitogen__has_sudo_pubkey.key.pub > ~/.ssh/authorized_keys",
            "chmod u=rw,go=r ~/.ssh/authorized_keys",
        ]
    ]

# Install the interpreter version named by $PYTHONVERSION on apt systems.
if ci_lib.have_apt():
    batches.append([
        'echo force-unsafe-io | sudo tee /etc/dpkg/dpkg.cfg.d/nosync',
        'sudo add-apt-repository ppa:deadsnakes/ppa',
        'sudo apt-get update',
        'sudo apt-get -y install '
        'python{pv} '
        'python{pv}-dev '
        'libsasl2-dev '
        'libldap2-dev '
        .format(pv=os.environ['PYTHONVERSION'])
    ])

ci_lib.run_batches(batches)
| 25.617021 | 94 | 0.524917 | #!/usr/bin/env python
import os
import sys
import ci_lib

# Each batch is a list of shell commands executed by ci_lib.run_batches.
batches = []

# NOTE(review): the `0 and` guard permanently disables this local-sshd
# bootstrap block; it is kept for reference only.
if 0 and os.uname()[0] == 'Linux':
    batches += [
        [
            "sudo chown `whoami`: ~",
            "chmod u=rwx,g=rx,o= ~",
            "sudo mkdir /var/run/sshd",
            "sudo /etc/init.d/ssh start",
            "mkdir -p ~/.ssh",
            "chmod u=rwx,go= ~/.ssh",
            "ssh-keyscan -H localhost >> ~/.ssh/known_hosts",
            "chmod u=rw,go= ~/.ssh/known_hosts",
            "cat tests/data/docker/mitogen__has_sudo_pubkey.key > ~/.ssh/id_rsa",
            "chmod u=rw,go= ~/.ssh/id_rsa",
            "cat tests/data/docker/mitogen__has_sudo_pubkey.key.pub > ~/.ssh/authorized_keys",
            "chmod u=rw,go=r ~/.ssh/authorized_keys",
        ]
    ]

# Install the interpreter version named by $PYTHONVERSION on apt systems.
if ci_lib.have_apt():
    batches.append([
        'echo force-unsafe-io | sudo tee /etc/dpkg/dpkg.cfg.d/nosync',
        'sudo add-apt-repository ppa:deadsnakes/ppa',
        'sudo apt-get update',
        'sudo apt-get -y install '
        'python{pv} '
        'python{pv}-dev '
        'libsasl2-dev '
        'libldap2-dev '
        .format(pv=os.environ['PYTHONVERSION'])
    ])

ci_lib.run_batches(batches)
| 0 | 0 | 0 |
530f15fe9f040b2b08aa2e112c827e721e867894 | 3,610 | py | Python | ubereats/views.py | ronchen0927/Django-logistics | 916fac813a62abffc0499a7b7a06859622e254dd | [
"MIT"
] | null | null | null | ubereats/views.py | ronchen0927/Django-logistics | 916fac813a62abffc0499a7b7a06859622e254dd | [
"MIT"
] | null | null | null | ubereats/views.py | ronchen0927/Django-logistics | 916fac813a62abffc0499a7b7a06859622e254dd | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404
from .models import Order, _get_all_order
from .forms import DriverModelForm, OrderModelForm, StoreModelForm
| 25.971223 | 67 | 0.654848 | from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404
from .models import Order, _get_all_order
from .forms import DriverModelForm, OrderModelForm, StoreModelForm
def index(request):
    """List all orders, promoting an order to completed once both the
    store step and the driver step are done."""
    orders = _get_all_order()
    for order in orders:
        # Derive the overall flag from the two partial flags and persist it.
        # NOTE(review): this issues one UPDATE per completed order on every
        # page load — consider a queryset-level update if the table grows.
        if order.is_store_completed and order.is_driver_completed:
            order.is_completed = True
            order.save()
    context = {
        'orders': orders
    }
    return render(request, 'index.html', context)
def create_order(request):
    """Render the order-creation form; on a valid POST, persist the order
    and return to the index page."""
    form = OrderModelForm()
    if request.method == 'POST':
        form = OrderModelForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect("/ubereats/")
    return render(request, 'create_order.html', {'order_form': form})
def detail_order(request, pk):
    """Show a single order; raise 404 when the id is unknown."""
    try:
        order = Order.objects.get(id=pk)
    except Order.DoesNotExist:
        raise Http404("No OrderModel matches the given query.")
    return render(request, 'detail_order.html', {"order": order})
def update_order(request, pk):
    """Edit an order via three ModelForms bound to the same Order instance.

    NOTE(review): only order_form.is_valid() is checked before saving all
    three forms — confirm the store/driver fields need no validation here.
    """
    try:
        order = Order.objects.get(id=pk)
    except Order.DoesNotExist:
        raise Http404("No OrderModel matches the given query.")
    order_form = OrderModelForm()
    store_form = StoreModelForm()
    driver_form = DriverModelForm()
    if request.method == "POST":
        order_form = OrderModelForm(request.POST, instance=order)
        store_form = StoreModelForm(request.POST, instance=order)
        driver_form = DriverModelForm(request.POST, instance=order)
        if order_form.is_valid():
            order_form.save()
            store_form.save()
            driver_form.save()
            return redirect("/ubereats/")
    context = {
        'order_form': order_form,
        'store_form': store_form,
        'driver_form': driver_form
    }
    return render(request, 'update_order.html', context)
def delete_order(request, pk):
    """Confirmation page for deleting an order; a POST performs the delete."""
    try:
        order = Order.objects.get(id=pk)
    except Order.DoesNotExist:
        raise Http404("No OrderModel matches the given query.")
    if request.method == "POST":
        order.delete()
        return redirect("/ubereats/")
    return render(request, 'delete_order.html', {'order': order})
def dispatch_store_to_order(request, pk):
    """Assign a store to an order; a valid POST also marks the store step done."""
    try:
        order = Order.objects.get(id=pk)
    except Order.DoesNotExist:
        raise Http404("No OrderModel matches the given query.")
    store_form = StoreModelForm()
    if request.method == 'POST':
        # NOTE(review): the flag is flipped before validation; it is only
        # persisted through store_form.save(), so an invalid form leaves
        # the database untouched — confirm this is intended.
        order.is_store_completed = True
        store_form = StoreModelForm(request.POST, instance=order)
        if store_form.is_valid():
            store_form.save()
            return redirect("/ubereats/")
    context = {'store_form': store_form}
    return render(request, 'dispatch_store.html', context)
def dispatch_driver_to_order(request, pk):
    """Assign a driver to an order; only allowed after the store step is done."""
    try:
        order = Order.objects.get(id=pk)
    except Order.DoesNotExist:
        raise Http404("No OrderModel matches the given query.")
    if order.is_store_completed:
        # NOTE(review): flag is flipped on GET as well; it is only persisted
        # via driver_form.save() on a valid POST — confirm this is intended.
        order.is_driver_completed = True
    else:
        # User-facing message (zh): "Please wait until the store finishes
        # the meal before dispatching a driver."
        return HttpResponse('請先等待商家餐點完成後,再發派司機')
    driver_form = DriverModelForm()
    if request.method == 'POST':
        driver_form = DriverModelForm(request.POST, instance=order)
        if driver_form.is_valid():
            driver_form.save()
            return redirect("/ubereats/")
    context = {'driver_form': driver_form}
    return render(request, 'dispatch_driver.html', context)
| 3,275 | 0 | 161 |
a3ee50cf266ce13ddb09c4a9f165d1706e075509 | 1,500 | py | Python | ngramchecker.py | ayimechan/writingwidgets | 73e4751dfdded14206b9d53692a25f0eede2cfb6 | [
"MIT"
] | null | null | null | ngramchecker.py | ayimechan/writingwidgets | 73e4751dfdded14206b9d53692a25f0eede2cfb6 | [
"MIT"
] | null | null | null | ngramchecker.py | ayimechan/writingwidgets | 73e4751dfdded14206b9d53692a25f0eede2cfb6 | [
"MIT"
] | null | null | null | # a simple Ngram finder
# only feasible for 3.x version
# original text file must be plain text
# NOTE(review): this module-level code calls search_ngram(); that function
# must be defined before this script section runs.
n=1 # min N
howmany = 5 # max N
outlength = 200  # max rows written per n-gram length
white_list = []  # n-grams to exclude from the report
white_punc = [',','。','“','”','—','?','!','…','"',"'",':','?','!','.',',']
file_raw = open('put full path to youre text file here','r',encoding='utf-8')
file_out = open('output file with or without full path','w',encoding='utf-8')
full_text = ''
for line in file_raw: #read and remove punctuation
    _ = line[:-1].strip().replace(' ','')
    for ch in white_punc:
        if ch in _:
            _ = _.replace(ch, '')
    full_text+=_
file_raw.close()
# For each length n, write the most frequent punctuation-free n-grams.
while n<=howmany:
    file_out.write('====='+str(n)+'=====\n')
    d = search_ngram(n, full_text)
    values = list(d.values())
    values.sort(key=lambda x:x[0], reverse=True)
    outcount = 0
    for element in values:
        if element[1] not in white_list:
            countp = 0
            # keep the n-gram only if it contains no punctuation at all
            for p in white_punc:
                if p not in element[1]:
                    countp+=1
            if countp==len(white_punc):
                file_out.write(element[1]+'\t'+str(element[0])+'\n')
                outcount+=1
                if outcount>outlength:
                    break
    n+=1
file_out.flush()
file_out.close()
| 25.862069 | 77 | 0.532 | # a simple Ngram finder
# only feasible for 3.x version
# original text file must be plain text
n=1 # min N
howmany = 5 # max N
outlength = 200  # max rows written per n-gram length
white_list = []  # n-grams to exclude from the report
white_punc = [',','。','“','”','—','?','!','…','"',"'",':','?','!','.',',']
file_raw = open('put full path to youre text file here','r',encoding='utf-8')
file_out = open('output file with or without full path','w',encoding='utf-8')
full_text = ''
# Strip trailing newline, spaces, and all listed punctuation from every line.
for line in file_raw: #read and remove punctuation
    _ = line[:-1].strip().replace(' ','')
    for ch in white_punc:
        if ch in _:
            _ = _.replace(ch, '')
    full_text+=_
file_raw.close()
def search_ngram(n, fulltext):
    """Count every length-n substring of *fulltext*.

    Returns a dict mapping each n-gram to ``[count, ngram]`` (the redundant
    second element lets callers sort dict values without losing the key).
    """
    counts = {}
    for offset in range(len(fulltext) - n + 1):
        gram = fulltext[offset:offset + n]
        entry = counts.get(gram)
        if entry is None:
            counts[gram] = [1, gram]
        else:
            entry[0] += 1
    return counts
# For each length n, write the most frequent punctuation-free n-grams,
# capped at `outlength` rows per section.
while n<=howmany:
    file_out.write('====='+str(n)+'=====\n')
    d = search_ngram(n, full_text)
    values = list(d.values())
    # values are [count, ngram]; sort by count, most frequent first.
    values.sort(key=lambda x:x[0], reverse=True)
    outcount = 0
    for element in values:
        if element[1] not in white_list:
            countp = 0
            # keep the n-gram only if it contains no punctuation at all
            for p in white_punc:
                if p not in element[1]:
                    countp+=1
            if countp==len(white_punc):
                file_out.write(element[1]+'\t'+str(element[0])+'\n')
                outcount+=1
                if outcount>outlength:
                    break
    n+=1
file_out.flush()
file_out.close()
| 232 | 0 | 23 |
067862330c1fbab9370310f28ae2252927f597a0 | 2,585 | py | Python | python/tHome/sma/report.py | ZigmundRat/T-Home | 5dc8689f52d87dac890051e540b338b009293ced | [
"BSD-2-Clause"
] | 18 | 2016-04-17T19:39:28.000Z | 2020-11-19T06:55:20.000Z | python/tHome/sma/report.py | ZigmundRat/T-Home | 5dc8689f52d87dac890051e540b338b009293ced | [
"BSD-2-Clause"
] | 6 | 2016-10-31T13:53:45.000Z | 2019-03-20T20:47:03.000Z | python/tHome/sma/report.py | ZigmundRat/T-Home | 5dc8689f52d87dac890051e540b338b009293ced | [
"BSD-2-Clause"
] | 12 | 2016-10-31T12:29:08.000Z | 2021-12-28T12:18:28.000Z | #===========================================================================
#
# Report functions
#
#===========================================================================
import time
from ..util import Data
from .Link import Link
#===========================================================================
def power( *args, **kwargs ):
    """Return instantaneous AC and DC power generation.

    Inputs are the same as Link() constructor:

       obj = report.power( '192.168.1.15' )
       print( obj )
    """
    with Link( *args, **kwargs ) as link:
        # Fetch raw payloads plus their decoders, then merge into one record.
        link.decode = False
        link.raw = False
        dcBytes, dc = link.dcPower()
        acBytes, ac = link.acTotalPower()
        now = time.time()
        obj = dc.decode( dcBytes )
        obj.update( ac.decode( acBytes ) )
        obj.time = now
        # Total DC power is the sum of the two string inputs.
        obj.dcPower = obj.dcPower1 + obj.dcPower2
        return obj
#===========================================================================
def energy( *args, **kwargs ):
    """Return instantaneous power and total energy status.

    Get instantaneous AC and DC power generation and energy created for
    the day.

    Inputs are the same as Link() constructor:

       obj = report.energy( '192.168.1.15' )
       print( obj )
    """
    with Link( *args, **kwargs ) as link:
        # Fetch raw payloads plus their decoders, then merge into one record.
        link.decode = False
        dcBytes, dc = link.dcPower()
        acBytes, ac = link.acTotalPower()
        totBytes, total = link.acTotalEnergy()
        now = time.time()
        obj = dc.decode( dcBytes )
        obj.update( ac.decode( acBytes ) )
        obj.update( total.decode( totBytes ) )
        obj.time = now
        # Total DC power is the sum of the two string inputs.
        obj.dcPower = obj.dcPower1 + obj.dcPower2
        return obj
#===========================================================================
def full( *args, **kwargs ):
    """Return all possible fields.

    Runs every known query over a single connection and merges the decoded
    results into one Data record.

    Inputs are the same as Link() constructor:

       obj = report.full( '192.168.1.15' )
       print( obj )
    """
    funcs = [
        Link.info,
        Link.status,
        Link.gridRelayStatus,
        Link.temperature,
        Link.version,
        Link.acTotalEnergy,
        Link.acTotalPower,
        Link.acPower,
        Link.acMaxPower,
        Link.operationTime,
        Link.dcPower,
        Link.dcVoltage,
        Link.acVoltage,
        Link.gridFrequency,
        ]
    with Link( *args, **kwargs ) as link:
        link.decode = False
        results = [ f( link ) for f in funcs ]
        now = time.time()
        obj = Data()
        # FIX: loop variable renamed from `bytes` (shadowed the builtin).
        for raw, decoder in results:
            obj.update( decoder.decode( raw ) )
        obj.time = now
        # Total DC power is the sum of the two string inputs.
        obj.dcPower = obj.dcPower1 + obj.dcPower2
        return obj
#===========================================================================
| 25.85 | 76 | 0.504836 | #===========================================================================
#
# Report functions
#
#===========================================================================
import time
from ..util import Data
from .Link import Link
#===========================================================================
def power( *args, **kwargs ):
    """Return instantaneous AC and DC power generation.

    Inputs are the same as Link() constructor:

       obj = report.power( '192.168.1.15' )
       print( obj )
    """
    with Link( *args, **kwargs ) as link:
        # Fetch raw payloads plus their decoders, then merge into one record.
        link.decode = False
        link.raw = False
        dcBytes, dc = link.dcPower()
        acBytes, ac = link.acTotalPower()
        now = time.time()
        obj = dc.decode( dcBytes )
        obj.update( ac.decode( acBytes ) )
        obj.time = now
        # Total DC power is the sum of the two string inputs.
        obj.dcPower = obj.dcPower1 + obj.dcPower2
        return obj
#===========================================================================
def energy( *args, **kwargs ):
    """Return instantaneous power and total energy status.

    Get instantaneous AC and DC power generation and energy created for
    the day.

    Inputs are the same as Link() constructor:

       obj = report.energy( '192.168.1.15' )
       print( obj )
    """
    with Link( *args, **kwargs ) as link:
        # Fetch raw payloads plus their decoders, then merge into one record.
        link.decode = False
        dcBytes, dc = link.dcPower()
        acBytes, ac = link.acTotalPower()
        totBytes, total = link.acTotalEnergy()
        now = time.time()
        obj = dc.decode( dcBytes )
        obj.update( ac.decode( acBytes ) )
        obj.update( total.decode( totBytes ) )
        obj.time = now
        # Total DC power is the sum of the two string inputs.
        obj.dcPower = obj.dcPower1 + obj.dcPower2
        return obj
#===========================================================================
def full( *args, **kwargs ):
    """Return all possible fields.

    Runs every known query over a single connection and merges the decoded
    results into one Data record.

    Inputs are the same as Link() constructor:

       obj = report.full( '192.168.1.15' )
       print( obj )
    """
    funcs = [
        Link.info,
        Link.status,
        Link.gridRelayStatus,
        Link.temperature,
        Link.version,
        Link.acTotalEnergy,
        Link.acTotalPower,
        Link.acPower,
        Link.acMaxPower,
        Link.operationTime,
        Link.dcPower,
        Link.dcVoltage,
        Link.acVoltage,
        Link.gridFrequency,
        ]
    with Link( *args, **kwargs ) as link:
        link.decode = False
        results = [ f( link ) for f in funcs ]
        now = time.time()
        obj = Data()
        # NOTE(review): `bytes` shadows the builtin here.
        for bytes, decoder in results:
            obj.update( decoder.decode( bytes ) )
        obj.time = now
        # Total DC power is the sum of the two string inputs.
        obj.dcPower = obj.dcPower1 + obj.dcPower2
        return obj
#===========================================================================
| 0 | 0 | 0 |
c9f6b2d99f1b19c404fefc6847e08b20ec621d9b | 630 | py | Python | test/integration/071_commented_yaml_regression_3568_test/test_all_comment_yml_files.py | jankytara2/dbt | 3f4069ab6d4d5b3fc34f8fe785761b5617357b0f | [
"Apache-2.0"
] | 3,156 | 2017-03-05T09:59:23.000Z | 2021-06-30T01:27:52.000Z | test/integration/071_commented_yaml_regression_3568_test/test_all_comment_yml_files.py | jankytara2/dbt | 3f4069ab6d4d5b3fc34f8fe785761b5617357b0f | [
"Apache-2.0"
] | 2,608 | 2017-02-27T15:39:40.000Z | 2021-06-30T01:49:20.000Z | test/integration/071_commented_yaml_regression_3568_test/test_all_comment_yml_files.py | jankytara2/dbt | 3f4069ab6d4d5b3fc34f8fe785761b5617357b0f | [
"Apache-2.0"
] | 693 | 2017-03-13T03:04:49.000Z | 2021-06-25T15:57:41.000Z | from test.integration.base import DBTIntegrationTest, use_profile
| 30 | 116 | 0.671429 | from test.integration.base import DBTIntegrationTest, use_profile
class TestAllCommentYMLIsOk(DBTIntegrationTest):
    """Regression test: `dbt parse` must not crash on a schema YAML file
    that consists entirely of comments (issue 3568)."""
    @property
    def schema(self):
        return "071_commented_yaml"
    @property
    def models(self):
        return "models"
    @use_profile('postgres')
    def test_postgres_parses_with_all_comment_yml(self):
        try:
            self.run_dbt(['parse'])
        except TypeError:
            # The exact exception type observed in issue 3568.
            assert False, '`dbt parse` failed with a yaml file that is all comments with the same exception as 3568'
        except:  # noqa: E722 — bare except presumably intended to catch any failure mode; confirm
            assert False, '`dbt parse` failed with a yaml file that is all comments'
| 376 | 164 | 23 |
afe690113b48cacd55d0b5f1e8f1492bae11b8c2 | 35 | py | Python | pygraylog/__init__.py | zmallen/pygraylog | cda2c6b583e8c7de47e98458b3faeae7d05a94d3 | [
"Apache-2.0"
] | 14 | 2016-08-29T16:31:14.000Z | 2021-11-30T10:39:29.000Z | pygraylog/__init__.py | zmallen/pygraylog | cda2c6b583e8c7de47e98458b3faeae7d05a94d3 | [
"Apache-2.0"
] | 9 | 2016-08-28T15:23:47.000Z | 2018-02-07T20:11:18.000Z | pygraylog/__init__.py | zmallen/pygraylog | cda2c6b583e8c7de47e98458b3faeae7d05a94d3 | [
"Apache-2.0"
] | 16 | 2016-10-04T17:37:42.000Z | 2021-07-08T15:43:50.000Z | import endpoints
import graylogapi
| 11.666667 | 17 | 0.885714 | import endpoints
import graylogapi
| 0 | 0 | 0 |
c6899828cc13d6f77a3160c032666acbf80b7ec7 | 6,664 | py | Python | app/controllers/api/private/v1/install.py | arxcdr/silverback | 212139cbc1a648d1f877d60f2d7c4d750eefc3da | [
"BSD-3-Clause"
] | null | null | null | app/controllers/api/private/v1/install.py | arxcdr/silverback | 212139cbc1a648d1f877d60f2d7c4d750eefc3da | [
"BSD-3-Clause"
] | null | null | null | app/controllers/api/private/v1/install.py | arxcdr/silverback | 212139cbc1a648d1f877d60f2d7c4d750eefc3da | [
"BSD-3-Clause"
] | null | null | null | """
Install API Endpoint
"""
# Third Party Library
from django.views import View
from django.http import JsonResponse
from django.utils.translation import gettext as _
# Local Library
from pyvalitron.form import Form
from app.modules.util.helpers import Helpers
from app.modules.core.request import Request
from app.modules.core.response import Response
from app.modules.validation.extension import ExtraRules
from app.modules.core.install import Install as InstallModule
from app.modules.core.decorators import stop_request_if_installed
from app.modules.core.notification import Notification as NotificationModule
| 34.890052 | 142 | 0.484094 | """
Install API Endpoint
"""
# Third Party Library
from django.views import View
from django.http import JsonResponse
from django.utils.translation import gettext as _
# Local Library
from pyvalitron.form import Form
from app.modules.util.helpers import Helpers
from app.modules.core.request import Request
from app.modules.core.response import Response
from app.modules.validation.extension import ExtraRules
from app.modules.core.install import Install as InstallModule
from app.modules.core.decorators import stop_request_if_installed
from app.modules.core.notification import Notification as NotificationModule
class Install(View):
    """Private API endpoint that performs first-time application install.

    Validates the submitted application/admin fields, stores them through
    the Install core module, and creates an admin notification on success.
    """

    __request = None
    __response = None
    __helpers = None
    __form = None
    __install = None
    __logger = None
    __notification = None
    __correlation_id = None

    def __init__(self):
        self.__request = Request()
        self.__response = Response()
        self.__helpers = Helpers()
        self.__form = Form()
        self.__install = InstallModule()
        self.__notification = NotificationModule()
        self.__logger = self.__helpers.get_logger(__name__)
        self.__form.add_validator(ExtraRules())

    @stop_request_if_installed
    def post(self, request):
        """Handle POST /install: validate inputs and run the installer."""
        self.__correlation_id = request.META["X-Correlation-ID"] if "X-Correlation-ID" in request.META else ""
        # Defense in depth: the decorator should already block this case.
        if self.__install.is_installed():
            return JsonResponse(self.__response.send_private_failure([{
                "type": "error",
                "message": _("Error! Application is already installed.")
            }], {}, self.__correlation_id))

        self.__request.set_request(request)
        request_data = self.__request.get_request_data("post", {
            "app_name": "",
            "app_email": "",
            "app_url": "",
            "admin_username": "",
            "admin_email": "",
            "admin_password": ""
        })

        # Sanitization and validation rules for every submitted field.
        self.__form.add_inputs({
            'app_name': {
                'value': request_data["app_name"],
                'sanitize': {
                    'escape': {},
                    'strip': {}
                },
                'validate': {
                    'alpha_numeric': {
                        'error': _('Error! Application name must be alpha numeric.')
                    },
                    'length_between': {
                        'param': [2, 30],
                        'error': _('Error! Application name must be 2 to 30 characters long.')
                    }
                }
            },
            'app_email': {
                'value': request_data["app_email"],
                'sanitize': {
                    'escape': {},
                    'strip': {}
                },
                'validate': {
                    'sv_email': {
                        'error': _('Error! Application email is invalid.')
                    }
                }
            },
            'app_url': {
                'value': request_data["app_url"],
                'sanitize': {
                    'escape': {},
                    'strip': {}
                },
                'validate': {
                    'sv_url': {
                        'error': _('Error! Application url is invalid.')
                    }
                }
            },
            'admin_username': {
                'value': request_data["admin_username"],
                'sanitize': {
                    'escape': {},
                    'strip': {}
                },
                'validate': {
                    'alpha_numeric': {
                        'error': _('Error! Username must be alpha numeric.')
                    },
                    'length_between': {
                        'param': [4, 10],
                        'error': _('Error! Username must be 5 to 10 characters long.')
                    }
                }
            },
            'admin_email': {
                'value': request_data["admin_email"],
                'sanitize': {
                    'escape': {},
                    'strip': {}
                },
                'validate': {
                    'sv_email': {
                        'error': _('Error! Admin email is invalid.')
                    }
                }
            },
            'admin_password': {
                'value': request_data["admin_password"],
                'validate': {
                    'sv_password': {
                        'error': _('Error! Password must contain at least uppercase letter, lowercase letter, numbers and special character.')
                    },
                    'length_between': {
                        'param': [7, 20],
                        'error': _('Error! Password length must be from 8 to 20 characters.')
                    }
                }
            }
        })

        self.__form.process()
        if not self.__form.is_passed():
            return JsonResponse(self.__response.send_errors_failure(self.__form.get_errors(), {}, self.__correlation_id))

        self.__install.set_app_data(
            self.__form.get_sinput("app_name"),
            self.__form.get_sinput("app_email"),
            self.__form.get_sinput("app_url")
        )
        self.__install.set_admin_data(
            self.__form.get_sinput("admin_username"),
            self.__form.get_sinput("admin_email"),
            self.__form.get_sinput("admin_password")
        )

        # BUG FIX: user_id was previously unbound when install() raised,
        # producing an UnboundLocalError at the `if user_id` check below.
        user_id = None
        try:
            user_id = self.__install.install()
        except Exception as exception:
            self.__logger.error(_("Internal server error during installation: %(exception)s {'correlationId':'%(correlationId)s'}") % {
                "exception": exception,
                "correlationId": self.__correlation_id
            })

        if user_id:
            self.__notification.create_notification({
                "highlight": _('Installation'),
                "notification": _('Silverback installed successfully'),
                "url": "#",
                "type": NotificationModule.MESSAGE,
                "delivered": False,
                "user_id": user_id,
                "task_id": None
            })
            return JsonResponse(self.__response.send_private_success([{
                "type": "success",
                "message": _("Application installed successfully.")
            }], {}, self.__correlation_id))
        else:
            return JsonResponse(self.__response.send_private_failure([{
                "type": "error",
                "message": _("Error! Something goes wrong during installing.")
            }], {}, self.__correlation_id))
| 5,761 | 262 | 23 |
eaaf40f98644521dbdb576e92beeb38100def095 | 2,071 | py | Python | tests/test_result.py | YoRHazzz/Python_Crawler | 0b46baea32c2bb74593e02b4f6b4c9534a95444d | [
"MIT"
] | null | null | null | tests/test_result.py | YoRHazzz/Python_Crawler | 0b46baea32c2bb74593e02b4f6b4c9534a95444d | [
"MIT"
] | 1 | 2021-12-13T20:55:38.000Z | 2021-12-13T20:55:38.000Z | tests/test_result.py | YoRHazzz/Python_Crawler | 0b46baea32c2bb74593e02b4f6b4c9534a95444d | [
"MIT"
] | null | null | null | import pytest
from crawler.core import Downloader, Config, UrlManager
import os
from shutil import rmtree
DEFAULT_INI_PATH = "./tests/config/default.ini"
CONFIG_DIR_PATH = "./tests/config"
test_failed_urls = ["http://www.google.com"]
test_finished_urls = ["http://www.baidu.com"]
test_repeated_urls = []
for i in range(10):
test_repeated_urls.append("http://www.baidu.com")
test_repeated_urls.append("http://www.hubianluanzao2131231231.com")
| 36.982143 | 94 | 0.689039 | import pytest
from crawler.core import Downloader, Config, UrlManager
import os
from shutil import rmtree
DEFAULT_INI_PATH = "./tests/config/default.ini"
CONFIG_DIR_PATH = "./tests/config"
test_failed_urls = ["http://www.google.com"]
test_finished_urls = ["http://www.baidu.com"]
test_repeated_urls = []
for i in range(10):
test_repeated_urls.append("http://www.baidu.com")
test_repeated_urls.append("http://www.hubianluanzao2131231231.com")
class TestResult:
@staticmethod
def setup_method():
if os.path.exists(CONFIG_DIR_PATH):
rmtree(CONFIG_DIR_PATH)
os.mkdir(CONFIG_DIR_PATH)
Config.make_default_ini(DEFAULT_INI_PATH)
@staticmethod
def fast_download(downloader: Downloader):
downloader.config.ini['multi']['delay'] = '0'
downloader.config.ini['proxy']['timeout'] = '3'
downloader.config.ini['proxy']['retry'] = '1'
def test_retry_failed_urls(self):
url_manger = UrlManager()
downloader = Downloader(Config(DEFAULT_INI_PATH))
self.fast_download(downloader)
result = downloader.get_result(test_repeated_urls, url_manger)
assert len(result.failed_urls) == 1
assert len(result.finished_urls) == 1
result.retry_failed_urls()
assert len(result.failed_urls) == 1
assert len(result.finished_urls) == 1
result = downloader.get_result(test_finished_urls)
result.retry_failed_urls()
result.show_urls_status()
assert len(result.failed_urls) == 0
assert len(result.finished_urls) == 1
downloader.config.update_config_and_ini('proxy', 'proxy_url', '255.255.255.255:65535')
downloader.config.make_default_ini(DEFAULT_INI_PATH)
result = downloader.get_result(test_finished_urls)
assert len(result.failed_urls) == 1
assert len(result.finished_urls) == 0
result.retry_failed_urls(Config(DEFAULT_INI_PATH))
result.show_urls_status()
assert len(result.failed_urls) == 0
assert len(result.finished_urls) == 1
| 1,483 | 112 | 23 |
8bd5eaf35ce1a725bace783655316c953bf23e47 | 991 | py | Python | backend/youngun/youngun/apps/campaigns/migrations/0004_auto_20200701_1759.py | aakashbajaj/Youngun-Campaign-Tracking | a3b4f283b22cefb92c72f7638ee2a9da72622de0 | [
"Apache-2.0"
] | null | null | null | backend/youngun/youngun/apps/campaigns/migrations/0004_auto_20200701_1759.py | aakashbajaj/Youngun-Campaign-Tracking | a3b4f283b22cefb92c72f7638ee2a9da72622de0 | [
"Apache-2.0"
] | 1 | 2021-09-22T19:27:06.000Z | 2021-09-22T19:27:06.000Z | backend/youngun/youngun/apps/campaigns/migrations/0004_auto_20200701_1759.py | aakashbajaj/Youngun-Campaign-Tracking | a3b4f283b22cefb92c72f7638ee2a9da72622de0 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.0.7 on 2020-07-01 17:59
from django.db import migrations, models
| 30.030303 | 96 | 0.611504 | # Generated by Django 3.0.7 on 2020-07-01 17:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('campaigns', '0003_auto_20200701_1354'),
]
operations = [
migrations.RemoveField(
model_name='campaign',
name='stories_fetch_ctrl',
),
migrations.AddField(
model_name='campaign',
name='fb_stories_fetch_ctrl',
field=models.BooleanField(default=False, verbose_name='Facebook Stories fetched?'),
),
migrations.AddField(
model_name='campaign',
name='in_stories_fetch_ctrl',
field=models.BooleanField(default=False, verbose_name='Instagram Stories fetched?'),
),
migrations.AddField(
model_name='campaign',
name='tw_stories_fetch_ctrl',
field=models.BooleanField(default=False, verbose_name='Twitter Stories fetched?'),
),
]
| 0 | 877 | 23 |
7f8eecb8c68b5a66de42d3e38c778e99161cd2b3 | 7,917 | py | Python | tests/cogctl/cli/test_relay.py | operable/cogctl | bec66471189376e0c33be88edb68f3af8797fc8c | [
"Apache-2.0"
] | 3 | 2016-05-09T23:14:47.000Z | 2017-01-15T20:41:25.000Z | tests/cogctl/cli/test_relay.py | operable/cogctl | bec66471189376e0c33be88edb68f3af8797fc8c | [
"Apache-2.0"
] | 59 | 2016-03-10T20:53:04.000Z | 2021-09-03T17:26:02.000Z | tests/cogctl/cli/test_relay.py | operable/cogctl | bec66471189376e0c33be88edb68f3af8797fc8c | [
"Apache-2.0"
] | 7 | 2016-03-09T21:43:33.000Z | 2019-01-24T15:44:06.000Z | import pytest
from cogctl.cli.relay import relay
import responses
@pytest.fixture(autouse=True)
| 35.030973 | 96 | 0.506379 | import pytest
from cogctl.cli.relay import relay
import responses
@pytest.fixture(autouse=True)
def mocks(request, cli_state):
    """Stub every Cog API endpoint the relay CLI commands touch.

    Applied with autouse=True, so all tests in this module run against
    these canned responses instead of a live Cog server.
    """
    with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:
        base_url = "%s/v1" % cli_state.profile["url"]
        relay_id = "a6f95083-78e2-4ace-b034-725d80d91717"
        # Token
        rsps.add(responses.POST,
                 "%s/token" % base_url,
                 json={"token": {"value": "lolwut"}},
                 status=200)
        # Relays Index
        rsps.add(responses.GET,
                 "%s/relays" % base_url,
                 json={"relays": [
                     {"id": "0e8ec7e2-7014-4991-9c5e-36115cb7bc67",
                      "name": "relay",
                      "enabled": True},
                     {"id": "a6f95083-78e2-4ace-b034-725d80d91717",
                      "name": "another-relay",
                      "enabled": False}]},
                 status=200)
        # Relays Create
        rsps.add(responses.POST,
                 "%s/relays" % base_url,
                 json={"relay": {"id": "639a4eb3-2b69-46c5-90f5-c53bc8930aae",
                                 "name": "prod-relay",
                                 "description": "Does a thing",
                                 "enabled": True,
                                 "groups": []}},
                 status=201)
        # Relays Show
        rsps.add(responses.GET,
                 "%s/relays/%s" % (base_url, relay_id),
                 json={"relay": {"id": "a6f95083-78e2-4ace-b034-725d80d91717",
                                 "name": "another-relay",
                                 "description": "Does a thing updated",
                                 "enabled": True,
                                 "groups": [{"name": "prod-group"},
                                            {"name": "dev-group"}]}},
                 status=200)
        rsps.add(responses.GET,
                 "%s/relays/%s" % (base_url, "639a4eb3-2b69-46c5-90f5-c53bc8930aae"),  # noqa
                 json={"relay": {"id": "639a4eb3-2b69-46c5-90f5-c53bc8930aae",
                                 "name": "prod-relay",
                                 "description": "Does a thing",
                                 "enabled": True,
                                 "groups": [{"name": "prod-group"},
                                            {"name": "dev-group"}]}},
                 status=201)
        # Relays Update
        rsps.add(responses.PUT,
                 "%s/relays/%s" % (base_url, relay_id),
                 json={"relay": {"id": "a6f95083-78e2-4ace-b034-725d80d91717",
                                 "name": "another-relay",
                                 "description": "Does a thing updated",
                                 "enabled": True,
                                 "groups": [{"name": "prod-group"},
                                            {"name": "dev-group"}]}},
                 status=200)
        # Relays Delete
        rsps.add(responses.DELETE, "%s/relays/%s" % (base_url, relay_id),
                 status=204)
        # Relay Groups Index
        rsps.add(responses.GET, "%s/relay_groups" % base_url,
                 json={"relay_groups": [{
                     "id": "10fe4a9d-b5cc-4a42-a7d9-8bfba19573ae",
                     "name": "prod-group"}, {
                     "id": "96677a38-bb6e-4b18-bc50-dc7f32805293",
                     "name": "dev-group"}]},
                 status=200)
        # Relay Groups Member Add
        rsps.add(responses.POST,
                 "%s/relay_groups/10fe4a9d-b5cc-4a42-a7d9-8bfba19573ae/relays" % base_url,  # noqa
                 json={"relay_group": {
                     "id": "10fe4a9d-b5cc-4a42-a7d9-8bfba19573ae",
                     "name": "prod-group",
                     "relays": [{
                         "id": "a6f95083-78e2-4ace-b034-725d80d91717"}]}},
                 status=201)
        rsps.add(responses.POST,
                 "%s/relay_groups/96677a38-bb6e-4b18-bc50-dc7f32805293/relays" % base_url,  # noqa
                 json={"relay_group": {
                     "id": "96677a38-bb6e-4b18-bc50-dc7f32805293",
                     "name": "dev-group",
                     "relays": [{
                         "id": "a6f95083-78e2-4ace-b034-725d80d91717"}]}},
                 status=201)
        # Yield the mock so tests can add/inspect calls if needed.
        yield rsps
def test_relay_list(cogctl):
    """`cogctl relay` lists every relay with its status and id."""
    result = cogctl(relay.relay)

    assert result.exit_code == 0
    # NOTE(review): column alignment inside this expected table may have been
    # collapsed in this copy of the file — verify spacing against real CLI output.
    assert result.output == """\
NAME STATUS ID
relay enabled 0e8ec7e2-7014-4991-9c5e-36115cb7bc67
another-relay disabled a6f95083-78e2-4ace-b034-725d80d91717
"""
def test_relay_create(cogctl):
    """Creating a relay prints the new relay's details."""
    result = cogctl(relay.create,
                    ["prod-relay", "639a4eb3-2b69-46c5-90f5-c53bc8930aae",
                     "sekret", "--description", "Does a thing", "--enable",
                     "--relay-group", "prod-group", "--relay-group",
                     "dev-group"])

    # NOTE(review): the exit-code assertion is commented out, so this test
    # only verifies the rendered output — confirm why and re-enable if possible.
    # assert result.exit_code == 0
    assert result.output == """\
Name prod-relay
ID 639a4eb3-2b69-46c5-90f5-c53bc8930aae
Status enabled
Description Does a thing
Groups prod-group, dev-group
"""
def test_relay_create_with_existing_relay_name(cogctl):
    """Creating a relay whose name is already taken fails with a validation error."""
    args = ["another-relay", "639a4eb3-2b69-46c5-90f5-c53bc8930aae", "sekret"]
    result = cogctl(relay.create, args)

    assert result.exit_code != 0
    expected = 'Error: Invalid value for "name": Relay "another-relay" already exists'
    assert expected in result.output
def test_relay_create_with_invalid_relay_id(cogctl):
    """A relay id that is not a UUID is rejected by option validation."""
    result = cogctl(relay.create,
                    ["prod-relay", "pony",
                     "sekret"])

    assert not result.exit_code == 0
    bad_param = "Invalid value for \"relay_id\""
    error = "pony is not a valid UUID value"
    assert "Error: %s: %s" % (bad_param, error) in result.output
def test_relay_create_with_existing_relay_id(cogctl):
    """Creating a relay with an id that already exists fails with a validation error."""
    result = cogctl(relay.create,
                    ["prod-relay", "a6f95083-78e2-4ace-b034-725d80d91717",
                     "sekret"])

    assert not result.exit_code == 0
    bad_param = "Invalid value for \"relay_id\""
    error = "Relay with ID \"a6f95083-78e2-4ace-b034-725d80d91717\" already exists"
    assert "Error: %s: %s" % (bad_param, error) in result.output
def test_relay_enable(cogctl):
    """Enabling a relay succeeds quietly (no output)."""
    result = cogctl(relay.enable, ["another-relay"])

    assert result.exit_code == 0
    assert result.output == ""
def test_relay_disable(cogctl):
    """Disabling a relay succeeds quietly (no output).

    Bug fix: this test previously invoked ``relay.enable`` (a copy/paste of
    test_relay_enable above), so the disable command was never exercised.
    It now calls ``relay.disable`` — assumes the relay module exposes a
    ``disable`` command mirroring ``enable`` (TODO confirm).
    """
    result = cogctl(relay.disable, ["another-relay"])

    assert result.exit_code == 0
    assert result.output == ""
def test_relay_info(cogctl):
    """`cogctl relay info` shows full details for a single relay."""
    result = cogctl(relay.info, ["another-relay"])

    assert result.exit_code == 0
    # NOTE(review): field/value spacing in this expected block may have been
    # collapsed in this copy of the file — verify against real CLI output.
    assert result.output == """\
Name another-relay
ID a6f95083-78e2-4ace-b034-725d80d91717
Status enabled
Description Does a thing updated
Groups prod-group, dev-group
"""
def test_relay_update(cogctl):
    """Updating a relay's token and description prints the updated details."""
    result = cogctl(relay.update,
                    ["another-relay", "--token", "diff-sekret", "--description",
                     "Does a thing updated"])

    assert result.exit_code == 0
    assert result.output == """\
Name another-relay
ID a6f95083-78e2-4ace-b034-725d80d91717
Status enabled
Description Does a thing updated
Groups prod-group, dev-group
"""
def test_relay_rename(cogctl):
    """Renaming a relay succeeds quietly (no output)."""
    result = cogctl(relay.rename,
                    ["another-relay", "renamed-relay"])

    assert result.exit_code == 0
    assert result.output == ""
def test_relay_delete(cogctl):
    """Deleting a relay succeeds quietly (no output)."""
    result = cogctl(relay.delete, ["another-relay"])

    assert result.exit_code == 0
    assert result.output == ""
| 7,533 | 0 | 275 |
777c1f85b5da471695d52bab6592cdc2f616c13b | 1,918 | py | Python | Problem Set 2 - Student Version/game.py | MuhammeedAlaa/MI-Assignemnets | c90deb44c609d55ac5f7be343fed93d32e44c1e8 | [
"MIT"
] | null | null | null | Problem Set 2 - Student Version/game.py | MuhammeedAlaa/MI-Assignemnets | c90deb44c609d55ac5f7be343fed93d32e44c1e8 | [
"MIT"
] | null | null | null | Problem Set 2 - Student Version/game.py | MuhammeedAlaa/MI-Assignemnets | c90deb44c609d55ac5f7be343fed93d32e44c1e8 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from typing import Callable, Generic, Iterable, List, Optional, Tuple, TypeVar, Union
from helpers.utils import CacheContainer, with_cache
# S and A are used for generic typing where S represents the state type and A represents the action type
S = TypeVar("S")
A = TypeVar("A")
# Game is a generic abstract class for game definitions
# It also implements 'CacheContainer' which allows you to call the "cache" method
# which returns a dictionary in which you can store any data you want to cache
# A heuristic function which estimates the value of a given state for a certain agent within a certain game.
# E.g. if the heuristic function returns a high value for a certain agent, it should return low values for their enemies.
HeuristicFunction = Callable[[Game[S, A], S, int], float] | 40.808511 | 121 | 0.715328 | from abc import ABC, abstractmethod
from typing import Callable, Generic, Iterable, List, Optional, Tuple, TypeVar, Union
from helpers.utils import CacheContainer, with_cache
# S and A are used for generic typing where S represents the state type and A represents the action type
S = TypeVar("S")
A = TypeVar("A")
# Game is a generic abstract class for game definitions
# It also implements 'CacheContainer' which allows you to call the "cache" method
# which returns a dictionary in which you can store any data you want to cache
class Game(ABC, Generic[S, A], CacheContainer):
    """Abstract definition of a (possibly multi-agent) game.

    Implementations describe states of type ``S`` and actions of type ``A``.
    ``CacheContainer`` supplies a ``cache`` method returning a dictionary in
    which implementations can store anything they want to memoize.
    """

    # This function returns the initial state
    @abstractmethod
    def get_initial_state(self) -> S:
        pass

    # How many agents are playing this game
    @property
    def agent_count(self) -> int:
        return 1

    # This function checks whether the given state is terminal or not
    # if it is a terminal state, the second return value will be a list of terminal values for all agents
    # if it is not a terminal state, the second return value will be None
    @abstractmethod
    def is_terminal(self, state: S) -> Tuple[bool, Optional[List[float]]]:
        pass

    # This function returns the index of the agent whose turn in now
    @abstractmethod
    def get_turn(self, state: S) -> int:
        pass

    # This function returns all the possible actions from the given state
    @abstractmethod
    def get_actions(self, state: S) -> List[A]:
        pass

    # Given a state and an action, this function returns the next state
    @abstractmethod
    def get_successor(self, state: S, action: A) -> S:
        pass


# A heuristic function which estimates the value of a given state for a certain agent within a certain game.
# E.g. if the heuristic function returns a high value for a certain agent, it should return low values for their enemies.
HeuristicFunction = Callable[[Game[S, A], S, int], float]
a969f8b821d12efcfc53eb351086358fc1bd1c35 | 325 | py | Python | app/models/configuration.py | ComputeCanada/mc-hub | 92b4c212ba8f7b5b1c8b8700f981275605a07067 | [
"BSD-3-Clause"
] | 5 | 2020-09-04T16:34:36.000Z | 2020-09-25T19:14:59.000Z | app/models/configuration.py | ComputeCanada/mc-hub | 92b4c212ba8f7b5b1c8b8700f981275605a07067 | [
"BSD-3-Clause"
] | 39 | 2020-09-12T17:37:14.000Z | 2022-03-10T17:49:57.000Z | app/models/configuration.py | ComputeCanada/mc-hub | 92b4c212ba8f7b5b1c8b8700f981275605a07067 | [
"BSD-3-Clause"
] | 1 | 2021-03-29T15:42:13.000Z | 2021-03-29T15:42:13.000Z | from models.constants import CONFIGURATION_FILE_PATH, CONFIGURATION_FILENAME
from os import path
import json
try:
    # Full path to the JSON configuration file.
    config = path.join(CONFIGURATION_FILE_PATH, CONFIGURATION_FILENAME)
    with open(config) as configuration_file:
        # Rebind ``config`` from the path string to the parsed settings dict.
        config = json.load(configuration_file)
except FileNotFoundError:
    # No configuration file on disk: fall back to an empty configuration.
    config = dict()
from os import path
import json
try:
    # Full path to the JSON configuration file.
    config = path.join(CONFIGURATION_FILE_PATH, CONFIGURATION_FILENAME)
    with open(config) as configuration_file:
        # Rebind ``config`` from the path string to the parsed settings dict.
        config = json.load(configuration_file)
except FileNotFoundError:
    # No configuration file on disk: fall back to an empty configuration.
    config = dict()
81d2da92d3e1e8341c227c909668831664692b3d | 1,613 | py | Python | api/web/encoder.py | scitran/nimsapi | a4203cf6c6d29aa15d33011250ee69ff929fcb0d | [
"MIT"
] | 13 | 2016-05-31T14:32:58.000Z | 2021-09-17T07:18:11.000Z | api/web/encoder.py | scitran/core | a4203cf6c6d29aa15d33011250ee69ff929fcb0d | [
"MIT"
] | 911 | 2016-02-16T18:40:27.000Z | 2018-08-07T17:50:29.000Z | api/web/encoder.py | scitran/nimsapi | a4203cf6c6d29aa15d33011250ee69ff929fcb0d | [
"MIT"
] | 16 | 2016-02-17T15:54:34.000Z | 2021-04-07T05:30:34.000Z | from pymongo.cursor import Cursor
import bson.objectid
import datetime
import json
import pytz
from ..jobs.jobs import Job
def sse_pack(d):
    """Serialize *d* into Server-Sent-Events wire format.

    Only the SSE-meaningful keys ('retry', 'id', 'event', 'data') are
    emitted, in that fixed order; the record is terminated by a blank line.

    For reading on web usage: http://www.html5rocks.com/en/tutorials/eventsource/basics
    For reading on the format: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format
    """
    fields = ('retry', 'id', 'event', 'data')
    lines = ['%s: %s\n' % (name, d[name]) for name in fields if name in d]
    return ''.join(lines) + '\n'
def json_sse_pack(d):
    """
    Variant of sse_pack that will json-encode your data blob.
    """
    # NOTE(review): mutates the caller's dict in place before packing.
    d['data'] = json.dumps(d['data'], default=custom_json_serializer)

    return sse_pack(d)
def pseudo_consistent_json_encode(d):
    """Encode *d* as pretty-printed JSON (sorted keys) plus a trailing newline.

    Some parts of our system rely upon consistently-produced JSON encoding.
    This implementation is not guaranteed to be consistent, but it's good
    enough for now.
    """
    encoded = json.dumps(d, sort_keys=True, indent=4, separators=(',', ': '))
    return encoded + '\n'
| 29.87037 | 144 | 0.6708 | from pymongo.cursor import Cursor
import bson.objectid
import datetime
import json
import pytz
from ..jobs.jobs import Job
def custom_json_serializer(obj):
    """``json.dumps`` ``default`` hook for types used across the API.

    Handles Mongo ObjectIds (as strings), datetimes (as UTC ISO-8601
    strings), Jobs (via their ``map()``), and pymongo Cursors (as lists).
    Raises TypeError for anything else, matching the stdlib contract.
    """
    if isinstance(obj, bson.objectid.ObjectId):
        return str(obj)
    elif isinstance(obj, datetime.datetime):
        # Naive datetimes are treated as UTC before formatting.
        return pytz.timezone('UTC').localize(obj).isoformat()
    elif isinstance(obj, Job):
        return obj.map()
    elif isinstance(obj, Cursor):
        return list(obj)
    raise TypeError(repr(obj) + " is not JSON serializable")
def sse_pack(d):
    """Serialize *d* into Server-Sent-Events wire format.

    Only the SSE-meaningful keys ('retry', 'id', 'event', 'data') are
    emitted, in that fixed order; the record is terminated by a blank line.

    For reading on web usage: http://www.html5rocks.com/en/tutorials/eventsource/basics
    For reading on the format: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format
    """
    fields = ('retry', 'id', 'event', 'data')
    lines = ['%s: %s\n' % (name, d[name]) for name in fields if name in d]
    return ''.join(lines) + '\n'
def json_sse_pack(d):
    """
    Variant of sse_pack that will json-encode your data blob.
    """
    # NOTE(review): mutates the caller's dict in place before packing.
    d['data'] = json.dumps(d['data'], default=custom_json_serializer)

    return sse_pack(d)
def pseudo_consistent_json_encode(d):
    """Encode *d* as pretty-printed JSON (sorted keys) plus a trailing newline.

    Some parts of our system rely upon consistently-produced JSON encoding.
    This implementation is not guaranteed to be consistent, but it's good
    enough for now.
    """
    encoded = json.dumps(d, sort_keys=True, indent=4, separators=(',', ': '))
    return encoded + '\n'
| 366 | 0 | 23 |
ab58f25864314da53acc8ff2833933638094f80d | 7,611 | py | Python | kitsune/wiki/tests/test_notifications.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | null | null | null | kitsune/wiki/tests/test_notifications.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | null | null | null | kitsune/wiki/tests/test_notifications.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django.core import mail
from nose.tools import eq_
from kitsune.sumo.tests import post
from kitsune.users.tests import add_permission, user
from kitsune.wiki.config import (
SIGNIFICANCES, MEDIUM_SIGNIFICANCE, TYPO_SIGNIFICANCE)
from kitsune.wiki.events import (
ReadyRevisionEvent, ApproveRevisionInLocaleEvent)
from kitsune.wiki.models import Revision
from kitsune.wiki.tests import revision, TestCaseBase
def _set_up_ready_watcher():
    """Make a user who watches for revision readiness."""
    # Saved user subscribed to the "revision ready for l10n" event.
    ready_watcher = user(email='ready@example.com', save=True)
    ReadyRevisionEvent.notify(ready_watcher)
    return ready_watcher
class ReviewTests(TestCaseBase):
    """Tests for notifications sent during revision review"""

    def setUp(self):
        """Have a user watch for revision approval. Log in."""
        self.approved_watcher = user(email='approved@example.com', save=True)
        ApproveRevisionInLocaleEvent.notify(self.approved_watcher,
                                            locale='en-US')
        # The reviewer needs both review and ready-for-l10n permissions.
        approver = user(save=True)
        add_permission(approver, Revision, 'review_revision')
        add_permission(approver, Revision, 'mark_ready_for_l10n')
        self.client.login(username=approver.username, password='testpass')

    def _review_revision(self, is_approved=True, is_ready=False,
                         significance=SIGNIFICANCES[0][0], r=None,
                         comment=None):
        """Make a revision, and approve or reject it through the view."""
        if not r:
            r = revision(is_approved=False,
                         is_ready_for_localization=False,
                         significance=significance,
                         save=True)

        # Figure out POST data:
        data = {'comment': 'đSome comment'}
        if is_approved:
            data['approve'] = 'Approve Revision'
            data['significance'] = significance
            if is_ready:
                data['is_ready_for_localization'] = 'on'
            if comment:
                data['comment'] = comment
        else:
            data['reject'] = 'Reject Revision'

        response = post(self.client,
                        'wiki.review_revision',
                        data,
                        args=[r.document.slug, r.id])
        eq_(200, response.status_code)

    def test_ready(self):
        """Show that a ready(-and-approved) rev mails Ready watchers a Ready
        notification and Approved watchers an Approved one."""
        _set_up_ready_watcher()
        self._review_revision(is_ready=True, significance=MEDIUM_SIGNIFICANCE)
        # 1 mail to each watcher, 1 to the creator, and one to the reviewer
        eq_(4, len(mail.outbox))
        _assert_ready_mail(mail.outbox[0])
        _assert_approved_mail(mail.outbox[1])
        _assert_creator_mail(mail.outbox[2])

    def test_approved(self):
        """Show that an approved rev mails Ready watchers nothing and Approved
        watchers an Approved notification."""
        _set_up_ready_watcher()
        self._review_revision(is_ready=False)
        # 1 mail to Approved watcher, 1 to creator, 1 for reviewer
        eq_(3, len(mail.outbox))
        assert 'new approved revision' in mail.outbox[0].subject
        assert 'Your revision has been approved' in mail.outbox[1].subject

    def test_neither(self):
        """Show that neither an Approved nor a Ready mail is sent if a rev is
        rejected."""
        _set_up_ready_watcher()
        self._review_revision(is_approved=False)
        eq_(2, len(mail.outbox))  # 1 mail to creator, one to the reviewer.
        assert mail.outbox[0].subject.startswith(
            'Your revision has been reviewed')

    def test_user_watching_both(self):
        """If a single person is watching ready and approved revisions and a
        revision becomes ready, send only the readiness email, not the approval
        one."""
        # Have the Approved watcher watch Ready as well:
        ReadyRevisionEvent.notify(self.approved_watcher)

        self._review_revision(is_ready=True, significance=MEDIUM_SIGNIFICANCE)
        # 1 mail to watcher, 1 to creator, 1 to reviewer
        eq_(3, len(mail.outbox))
        _assert_ready_mail(mail.outbox[0])
        _assert_creator_mail(mail.outbox[1])

    def test_new_lines_in_review_message(self):
        """Test that newlines in a review message are properly displayed."""
        _set_up_ready_watcher()
        self._review_revision(comment='foo\n\nbar\nbaz')
        assert 'foo<br><br>bar<br>baz' in mail.outbox[1].alternatives[0][0]
class ReadyForL10nTests(TestCaseBase):
    """Tests for notifications sent during ready for l10n"""

    def setUp(self):
        """Have a user watch for revision approval. Log in."""
        self.ready_watcher = user(email='approved@example.com', save=True)
        ReadyRevisionEvent.notify(self.ready_watcher)
        # The marking user only needs the ready-for-l10n permission.
        readyer = user(save=True)
        add_permission(readyer, Revision, 'mark_ready_for_l10n')
        self.client.login(username=readyer.username, password='testpass')

    def _mark_as_ready_revision(self):
        """Make a revision, and approve or reject it through the view."""
        r = revision(is_approved=True,
                     is_ready_for_localization=False,
                     significance=MEDIUM_SIGNIFICANCE,
                     save=True)

        # Figure out POST data:
        data = {'comment': 'something'}

        response = post(self.client,
                        'wiki.mark_ready_for_l10n_revision',
                        data,
                        args=[r.document.slug, r.id])
        eq_(200, response.status_code)

    def test_ready(self):
        """Show that a ready(-and-approved) rev mails Ready watchers a Ready
        notification and Approved watchers an Approved one."""
        _set_up_ready_watcher()
        self._mark_as_ready_revision()
        eq_(2, len(mail.outbox))  # 1 mail to each watcher, none to marker
        _assert_ready_mail(mail.outbox[0])
        _assert_ready_mail(mail.outbox[1])
| 39.848168 | 79 | 0.637893 | # -*- coding: utf-8 -*-
from django.core import mail
from nose.tools import eq_
from kitsune.sumo.tests import post
from kitsune.users.tests import add_permission, user
from kitsune.wiki.config import (
SIGNIFICANCES, MEDIUM_SIGNIFICANCE, TYPO_SIGNIFICANCE)
from kitsune.wiki.events import (
ReadyRevisionEvent, ApproveRevisionInLocaleEvent)
from kitsune.wiki.models import Revision
from kitsune.wiki.tests import revision, TestCaseBase
def _assert_ready_mail(mail):
assert 'ready for localization' in mail.subject
def _assert_approved_mail(mail):
assert 'new approved revision' in mail.subject
def _assert_creator_mail(mail):
assert mail.subject.startswith('Your revision has been approved')
def _set_up_ready_watcher():
    """Make a user who watches for revision readiness."""
    # Saved user subscribed to the "revision ready for l10n" event.
    ready_watcher = user(email='ready@example.com', save=True)
    ReadyRevisionEvent.notify(ready_watcher)
    return ready_watcher
class ReviewTests(TestCaseBase):
    """Tests for notifications sent during revision review"""

    def setUp(self):
        """Have a user watch for revision approval. Log in."""
        self.approved_watcher = user(email='approved@example.com', save=True)
        ApproveRevisionInLocaleEvent.notify(self.approved_watcher,
                                            locale='en-US')
        # The reviewer needs both review and ready-for-l10n permissions.
        approver = user(save=True)
        add_permission(approver, Revision, 'review_revision')
        add_permission(approver, Revision, 'mark_ready_for_l10n')
        self.client.login(username=approver.username, password='testpass')

    def _review_revision(self, is_approved=True, is_ready=False,
                         significance=SIGNIFICANCES[0][0], r=None,
                         comment=None):
        """Make a revision, and approve or reject it through the view."""
        if not r:
            r = revision(is_approved=False,
                         is_ready_for_localization=False,
                         significance=significance,
                         save=True)

        # Figure out POST data:
        data = {'comment': 'đSome comment'}
        if is_approved:
            data['approve'] = 'Approve Revision'
            data['significance'] = significance
            if is_ready:
                data['is_ready_for_localization'] = 'on'
            if comment:
                data['comment'] = comment
        else:
            data['reject'] = 'Reject Revision'

        response = post(self.client,
                        'wiki.review_revision',
                        data,
                        args=[r.document.slug, r.id])
        eq_(200, response.status_code)

    def test_ready(self):
        """Show that a ready(-and-approved) rev mails Ready watchers a Ready
        notification and Approved watchers an Approved one."""
        _set_up_ready_watcher()
        self._review_revision(is_ready=True, significance=MEDIUM_SIGNIFICANCE)
        # 1 mail to each watcher, 1 to the creator, and one to the reviewer
        eq_(4, len(mail.outbox))
        _assert_ready_mail(mail.outbox[0])
        _assert_approved_mail(mail.outbox[1])
        _assert_creator_mail(mail.outbox[2])

    def test_typo_significance_ignore(self):
        """Typo-level revisions should not trigger the localization mail."""
        _set_up_ready_watcher()
        self._review_revision(is_ready=True, significance=TYPO_SIGNIFICANCE)
        # This is the same as test_ready, except we miss 1 mail, that is the
        # localization mail.
        eq_(3, len(mail.outbox))

    def test_approved(self):
        """Show that an approved rev mails Ready watchers nothing and Approved
        watchers an Approved notification."""
        _set_up_ready_watcher()
        self._review_revision(is_ready=False)
        # 1 mail to Approved watcher, 1 to creator, 1 for reviewer
        eq_(3, len(mail.outbox))
        assert 'new approved revision' in mail.outbox[0].subject
        assert 'Your revision has been approved' in mail.outbox[1].subject

    def test_based_on_approved(self):
        """Approving a rev based on another notifies both contributors."""
        u1 = user()
        u1.save()
        r1 = revision(is_approved=False,
                      creator=u1,
                      is_ready_for_localization=False,
                      save=True)
        u2 = user()
        u2.save()
        r2 = revision(document=r1.document, based_on=r1, is_approved=False,
                      creator=u2,
                      is_ready_for_localization=False,
                      save=True)
        eq_(0, len(mail.outbox))
        self._review_revision(r=r2)
        # 1 mail for each watcher, 1 for creator, and one for reviewer.
        eq_(4, len(mail.outbox))
        assert 'has a new approved revision' in mail.outbox[0].subject
        assert 'Your revision has been approved' in mail.outbox[1].subject
        assert 'Your revision has been approved' in mail.outbox[2].subject
        assert 'A revision you contributed to has' in mail.outbox[3].subject

    def test_neither(self):
        """Show that neither an Approved nor a Ready mail is sent if a rev is
        rejected."""
        _set_up_ready_watcher()
        self._review_revision(is_approved=False)
        eq_(2, len(mail.outbox))  # 1 mail to creator, one to the reviewer.
        assert mail.outbox[0].subject.startswith(
            'Your revision has been reviewed')

    def test_user_watching_both(self):
        """If a single person is watching ready and approved revisions and a
        revision becomes ready, send only the readiness email, not the approval
        one."""
        # Have the Approved watcher watch Ready as well:
        ReadyRevisionEvent.notify(self.approved_watcher)

        self._review_revision(is_ready=True, significance=MEDIUM_SIGNIFICANCE)
        # 1 mail to watcher, 1 to creator, 1 to reviewer
        eq_(3, len(mail.outbox))
        _assert_ready_mail(mail.outbox[0])
        _assert_creator_mail(mail.outbox[1])

    def test_new_lines_in_review_message(self):
        """Test that newlines in a review message are properly displayed."""
        _set_up_ready_watcher()
        self._review_revision(comment='foo\n\nbar\nbaz')
        assert 'foo<br><br>bar<br>baz' in mail.outbox[1].alternatives[0][0]
class ReadyForL10nTests(TestCaseBase):
    """Tests for notifications sent during ready for l10n"""

    def setUp(self):
        """Have a user watch for revision approval. Log in."""
        self.ready_watcher = user(email='approved@example.com', save=True)
        ReadyRevisionEvent.notify(self.ready_watcher)
        # The marking user only needs the ready-for-l10n permission.
        readyer = user(save=True)
        add_permission(readyer, Revision, 'mark_ready_for_l10n')
        self.client.login(username=readyer.username, password='testpass')

    def _mark_as_ready_revision(self):
        """Make a revision, and approve or reject it through the view."""
        r = revision(is_approved=True,
                     is_ready_for_localization=False,
                     significance=MEDIUM_SIGNIFICANCE,
                     save=True)

        # Figure out POST data:
        data = {'comment': 'something'}

        response = post(self.client,
                        'wiki.mark_ready_for_l10n_revision',
                        data,
                        args=[r.document.slug, r.id])
        eq_(200, response.status_code)

    def test_ready(self):
        """Show that a ready(-and-approved) rev mails Ready watchers a Ready
        notification and Approved watchers an Approved one."""
        _set_up_ready_watcher()
        self._mark_as_ready_revision()
        eq_(2, len(mail.outbox))  # 1 mail to each watcher, none to marker
        _assert_ready_mail(mail.outbox[0])
        _assert_ready_mail(mail.outbox[1])
| 1,390 | 0 | 123 |
ba82c7d03047efa772b1660459c2a454bd211965 | 2,567 | py | Python | signal.py | edart76/tree | 743d14a819c057c50bd4f84ed0712768cb4ba582 | [
"Apache-2.0"
] | null | null | null | signal.py | edart76/tree | 743d14a819c057c50bd4f84ed0712768cb4ba582 | [
"Apache-2.0"
] | null | null | null | signal.py | edart76/tree | 743d14a819c057c50bd4f84ed0712768cb4ba582 | [
"Apache-2.0"
] | null | null | null |
from sys import version_info
import inspect
from weakref import WeakSet, WeakKeyDictionary
from collections import deque
from functools import partial
class Signal(object):
    """ basic signal emitter

    Connected listeners would be held weakly (WeakSet / WeakKeyDictionary),
    so connecting does not keep them alive.

    also includes mode to add function calls to queue
    instead of directly firing connnected functions

    queue support not complete yet, as nothing I use needs it.
    """
    # Named event queues shared by ALL Signal instances (class attribute).
    queues = {"default" : deque()}

    def __init__(self, queue="", useQueue=False):
        """:param queue : name of queue to use, or external queue object """
        self._functions = WeakSet()
        self._methods = WeakKeyDictionary()

        # is signal active
        self._active = True

        # event queue support
        self._useQueue = useQueue
        self._queue = queue or "default"

    def getQueue(self, name="default", create=True):
        """return one of the event queues attended by signal objects"""
        # Precedence: explicit name, then this signal's queue, then "default".
        name = name or self._queue or "default"
        if not name in self.queues and create:
            self.queues[name] = deque()
        return self.queues[name]

    def setQueue(self, queueName):
        """ set signal to use given queue """
        self._queue = queueName

    def emit(self, *args, **kwargs):
        """ brings this object up to rough parity with qt signals """
        self(*args, **kwargs)
from sys import version_info
import inspect
from weakref import WeakSet, WeakKeyDictionary
from collections import deque
from functools import partial
class Signal(object):
    """ basic signal emitter

    Connected listeners are held weakly (WeakSet / WeakKeyDictionary),
    so connecting does not keep them alive.

    also includes mode to add function calls to queue
    instead of directly firing connnected functions

    queue support not complete yet, as nothing I use needs it.
    """
    # Named event queues shared by ALL Signal instances (class attribute).
    queues = {"default" : deque()}

    def __init__(self, queue="", useQueue=False):
        """:param queue : name of queue to use, or external queue object """
        self._functions = WeakSet()
        self._methods = WeakKeyDictionary()

        # is signal active
        self._active = True

        # event queue support
        self._useQueue = useQueue
        self._queue = queue or "default"

    def __call__(self, *args, **kwargs):
        """Fire the signal: invoke (or enqueue) every connected listener."""
        if not self._active:
            return
        queue = self.getQueue()
        # Call handler functions
        for func in self._functions:
            if self._useQueue:
                queue.append(partial(func, *args, **kwargs))
            else:
                func(*args, **kwargs)

        # Call handler methods
        for obj, funcs in self._methods.items():
            for func in funcs:
                if self._useQueue:
                    # Bind the instance back in front of the call arguments.
                    queue.append(partial(func, obj, *args, **kwargs))
                else:
                    func(obj, *args, **kwargs)

    def activate(self):
        # Re-enable firing after mute().
        self._active = True

    def mute(self):
        # Suppress all firing until activate().
        self._active = False

    def getQueue(self, name="default", create=True):
        """return one of the event queues attended by signal objects"""
        # Precedence: explicit name, then this signal's queue, then "default".
        name = name or self._queue or "default"
        if not name in self.queues and create:
            self.queues[name] = deque()
        return self.queues[name]

    def setQueue(self, queueName):
        """ set signal to use given queue """
        self._queue = queueName

    def emit(self, *args, **kwargs):
        """ brings this object up to rough parity with qt signals """
        self(*args, **kwargs)

    def connect(self, slot):
        """Register *slot* (function or bound method) as a listener."""
        if inspect.ismethod(slot):
            try:
                # Unhashable instances can't key the WeakKeyDictionary;
                # fall back to storing the bound method as a plain function.
                hash(slot.__self__)
                if slot.__self__ not in self._methods:
                    self._methods[slot.__self__] = set()
                self._methods[slot.__self__].add(slot.__func__)
            except TypeError:
                self._functions.add(slot)
                pass
        else:
            self._functions.add(slot)

    def disconnect(self, slot):
        """Unregister *slot*; unknown slots are ignored where checked."""
        if inspect.ismethod(slot):
            try:
                hash(slot.__self__)
                if slot.__self__ in self._methods:
                    self._methods[slot.__self__].remove(slot.__func__)
            except TypeError:
                # NOTE(review): unlike the branch below, this remove() raises
                # KeyError if the slot was never connected — confirm intended.
                self._functions.remove(slot)
                pass
        else:
            if slot in self._functions:
                self._functions.remove(slot)

    def clear(self):
        """Drop every connected listener."""
        self._functions.clear()
        self._methods.clear()
2b448f9d2c5f053ad9ec07d32972e64200a1cbe4 | 2,344 | py | Python | gemini/classes/Order.py | amishmenon1/gemini-trading-bot | 1fd368f95e0c159f4b5d85509aa028a236cdbe34 | [
"MIT"
] | null | null | null | gemini/classes/Order.py | amishmenon1/gemini-trading-bot | 1fd368f95e0c159f4b5d85509aa028a236cdbe34 | [
"MIT"
] | null | null | null | gemini/classes/Order.py | amishmenon1/gemini-trading-bot | 1fd368f95e0c159f4b5d85509aa028a236cdbe34 | [
"MIT"
] | null | null | null | from json import JSONEncoder
import json
| 41.122807 | 115 | 0.69198 | from json import JSONEncoder
import json
class Order:
    """View of a Gemini API order response.

    Each attribute mirrors a field of the decoded JSON order payload;
    fields missing from the payload default to None.
    """

    # Class-level defaults so every attribute exists even before __init__.
    order_id = None
    id = None
    symbol = None
    exchange = None
    avg_execution_price = None
    side = None
    # NOTE: attribute name kept as 'tyoe' (historical typo for "type") for
    # backward compatibility with existing callers and serialized output.
    tyoe = None
    timestamp = None
    is_live = None
    is_cancelled = None
    is_hidden = None
    was_forced = None
    executed_amount = None
    options = None
    price = None
    original_amount = None
    remaining_amount = None
    timestampms = None

    def __init__(self, api_order):
        """Populate attributes from *api_order*, a decoded JSON dict.

        Uses dict.get so absent keys simply yield None, replacing the
        repetitive ``x if key in d else None`` pattern.
        """
        self.order_id = api_order.get('order_id')
        self.id = api_order.get('id')
        self.symbol = api_order.get('symbol')
        self.exchange = api_order.get('exchange')
        self.avg_execution_price = api_order.get('avg_execution_price')
        self.side = api_order.get('side')
        # Bug fix: the original guarded on the misspelled key 'tyoe' while
        # reading 'type', so this attribute was always None for normal
        # payloads (and raised KeyError if only 'tyoe' were present).
        self.tyoe = api_order.get('type')
        self.timestamp = api_order.get('timestamp')
        self.timestampms = api_order.get('timestampms')
        self.is_live = api_order.get('is_live')
        self.is_cancelled = api_order.get('is_cancelled')
        self.is_hidden = api_order.get('is_hidden')
        self.was_forced = api_order.get('was_forced')
        self.executed_amount = api_order.get('executed_amount')
        self.options = api_order.get('options')
        self.price = api_order.get('price')
        self.original_amount = api_order.get('original_amount')
        self.remaining_amount = api_order.get('remaining_amount')

    def to_string(self):
        """Serialize this order as a JSON string via OrderEncoder."""
        return OrderEncoder().encode(self)
class OrderEncoder(JSONEncoder):
    """JSONEncoder that knows how to serialize Order instances."""

    def default(self, order_object):
        # Orders serialize as their attribute dict; anything else defers to
        # the base implementation, which raises TypeError.
        if isinstance(order_object, Order):
            return order_object.__dict__
        else:
            return json.JSONEncoder.default(self, order_object)
d1041f84e0b284f1c6cf9ef99386ef0b0763ac36 | 1,427 | py | Python | demo.py | luther38/XivDbWeb | 204d066232c04dce0ea5a03ec55f160cfbc62659 | [
"MIT"
] | null | null | null | demo.py | luther38/XivDbWeb | 204d066232c04dce0ea5a03ec55f160cfbc62659 | [
"MIT"
] | null | null | null | demo.py | luther38/XivDbWeb | 204d066232c04dce0ea5a03ec55f160cfbc62659 | [
"MIT"
] | null | null | null |
# Demo script: look up scraped WHM arms in the DB and import weapon CSV data
# for any that are missing.
# NOTE(review): leading indentation was lost in this copy of the file; the
# block structure below (especially which statements sit inside the except
# clauses) is reconstructed and should be confirmed against the original.
from xivdb.sql import DB, Base, Weapon, Repair, Materia, Stats
from xivdb.importCsv import importCsv
from typing import List
from sqlalchemy.orm import sessionmaker
from XivDbReader import Reader
import sqlalchemy.orm

d = DB(Base)
session: sessionmaker = d.newSession()
w = d.newWeapon()

read: Reader = Reader(job='whm')
whm = read.getArms(recordLimit=1)

for i in whm:
    try:
        res: Weapon = session.query(Weapon).filter(Weapon.name == i.name).one()
    except Exception as e:
        # Weapon not found: fall back to the CSV importer.
        #print(f"{i.name} was not found in the DB.")
        ic = importCsv()

counter: int = 1
weapons: List[Weapon] = ic.getAllWeapons()
stats: List[Stats] = ic.getAllStats()
repairs: List[Repair] = ic.getAllRepairs()
materias: List[Materia] = ic.getAllMateria()

counter: int = 0
for i in weapons:
    try:
        res: Weapon = session.query(Weapon).filter(Weapon.name == i.name).one()
        counter = counter + 1
        if res.name != None:
            print(f"Skiped - {i.name}")
            continue
    # NOTE(review): bare except — catches everything, including the
    # "no row found" case this branch appears to target.
    except:
        w: Weapon = i
        s: Stats = stats[counter]
        r: Repair = repairs[counter]
        m: Materia = materias[counter]

        w.stats = s
        w.repair = r
        w.materia = m

        session.add(w)
        counter = counter + 1

        try:
            session.commit()
            print(f"Added - {w.name}")
        except Exception as e:
            print(e)

session.close()
| 24.186441 | 79 | 0.602663 |
# Demo script: look up scraped WHM arms in the DB and import weapon CSV data
# for any that are missing.
# NOTE(review): leading indentation was lost in this copy of the file; the
# block structure below (especially which statements sit inside the except
# clauses) is reconstructed and should be confirmed against the original.
from xivdb.sql import DB, Base, Weapon, Repair, Materia, Stats
from xivdb.importCsv import importCsv
from typing import List
from sqlalchemy.orm import sessionmaker
from XivDbReader import Reader
import sqlalchemy.orm

d = DB(Base)
session: sessionmaker = d.newSession()
w = d.newWeapon()

read: Reader = Reader(job='whm')
whm = read.getArms(recordLimit=1)

for i in whm:
    try:
        res: Weapon = session.query(Weapon).filter(Weapon.name == i.name).one()
    except Exception as e:
        # Weapon not found: fall back to the CSV importer.
        #print(f"{i.name} was not found in the DB.")
        ic = importCsv()

counter: int = 1
weapons: List[Weapon] = ic.getAllWeapons()
stats: List[Stats] = ic.getAllStats()
repairs: List[Repair] = ic.getAllRepairs()
materias: List[Materia] = ic.getAllMateria()

counter: int = 0
for i in weapons:
    try:
        res: Weapon = session.query(Weapon).filter(Weapon.name == i.name).one()
        counter = counter + 1
        if res.name != None:
            print(f"Skiped - {i.name}")
            continue
    # NOTE(review): bare except — catches everything, including the
    # "no row found" case this branch appears to target.
    except:
        w: Weapon = i
        s: Stats = stats[counter]
        r: Repair = repairs[counter]
        m: Materia = materias[counter]

        w.stats = s
        w.repair = r
        w.materia = m

        session.add(w)
        counter = counter + 1

        try:
            session.commit()
            print(f"Added - {w.name}")
        except Exception as e:
            print(e)

session.close()
f8dd9101a0c14362d495693796971d912eed2201 | 10,854 | py | Python | causallearn/search/FCMBased/lingam/multi_group_direct_lingam.py | softsys4ai/causal-config-labyrinth | 4f50f9ff15429b0ac6ad0a99fbe4cfdd17e360fc | [
"MIT"
] | 15 | 2022-01-20T12:35:35.000Z | 2022-03-24T16:25:24.000Z | causallearn/search/FCMBased/lingam/multi_group_direct_lingam.py | softsys4ai/unicorn | 4f50f9ff15429b0ac6ad0a99fbe4cfdd17e360fc | [
"MIT"
] | 14 | 2022-01-23T00:20:00.000Z | 2022-02-22T01:40:43.000Z | causallearn/search/FCMBased/lingam/multi_group_direct_lingam.py | softsys4ai/causal-config-labyrinth | 4f50f9ff15429b0ac6ad0a99fbe4cfdd17e360fc | [
"MIT"
] | 1 | 2022-02-23T08:59:24.000Z | 2022-02-23T08:59:24.000Z | """
Python implementation of the LiNGAM algorithms.
The LiNGAM Project: https://sites.google.com/site/sshimizu06/lingam
"""
import itertools
import numbers
import warnings
import numpy as np
from sklearn.utils import check_array, resample
from .bootstrap import BootstrapResult
from .direct_lingam import DirectLiNGAM
from .hsic import hsic_test_gamma
from .utils import predict_adaptive_lasso
class MultiGroupDirectLiNGAM(DirectLiNGAM):
    """Implementation of DirectLiNGAM Algorithm with multiple groups [1]_
    References
    ----------
    .. [1] S. Shimizu. Joint estimation of linear non-Gaussian acyclic models. Neurocomputing, 81: 104-107, 2012.
    """
    def __init__(self, random_state=None, prior_knowledge=None, apply_prior_knowledge_softly=False):
        """Construct a model.
        Parameters
        ----------
        random_state : int, optional (default=None)
            ``random_state`` is the seed used by the random number generator.
        prior_knowledge : array-like, shape (n_features, n_features), optional (default=None)
            Prior background_knowledge used for causal discovery, where ``n_features`` is the number of features.
            The elements of prior background_knowledge matrix are defined as follows [1]_:
            * ``0`` : :math:`x_i` does not have a directed path to :math:`x_j`
            * ``1`` : :math:`x_i` has a directed path to :math:`x_j`
            * ``-1`` : No prior background_knowledge is available to know if either of the two cases above (0 or 1) is true.
        apply_prior_knowledge_softly : boolean, optional (default=False)
            If True, apply prior background_knowledge softly.
        """
        # All option handling lives in the single-group DirectLiNGAM base class.
        super().__init__(random_state, prior_knowledge, apply_prior_knowledge_softly)
    def fit(self, X_list):
        """Fit the model to multiple datasets.
        Parameters
        ----------
        X_list : list, shape [X, ...]
            Multiple datasets for training, where ``X`` is an dataset.
            The shape of ''X'' is (n_samples, n_features),
            where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Check parameters
        X_list = self._check_X_list(X_list)
        if self._Aknw is not None:
            if (self._n_features, self._n_features) != self._Aknw.shape:
                raise ValueError(
                    'The shape of prior background_knowledge must be (n_features, n_features)')
        # Causal discovery
        U = np.arange(self._n_features)
        K = []
        # Work on copies: the residualization below mutates the data in place.
        X_list_ = [np.copy(X) for X in X_list]
        for _ in range(self._n_features):
            # Choose the next variable of the causal ordering, scored jointly
            # over every dataset (see _search_causal_order).
            m = self._search_causal_order(X_list_, U)
            for i in U:
                if i != m:
                    # Regress the chosen variable m out of each remaining
                    # variable, in every dataset.
                    for d in range(len(X_list_)):
                        X_list_[d][:, i] = self._residual(
                            X_list_[d][:, i], X_list_[d][:, m])
            K.append(m)
            U = U[U != m]
            if (self._Aknw is not None) and (not self._apply_prior_knowledge_softly):
                # Hard prior knowledge: retire ordering constraints whose
                # first (source) entry is m, now that m has been placed.
                self._partial_orders = self._partial_orders[self._partial_orders[:, 0] != m]
        self._causal_order = K
        self._adjacency_matrices = []
        # One adjacency matrix per dataset, fitted on the original
        # (non-residualized) data.
        for X in X_list:
            self._estimate_adjacency_matrix(X, prior_knowledge=self._Aknw)
            self._adjacency_matrices.append(self._adjacency_matrix)
        return self
    def bootstrap(self, X_list, n_sampling):
        """Evaluate the statistical reliability of DAG based on the bootstrapping.
        Parameters
        ----------
        X_list : array-like, shape (X, ...)
            Multiple datasets for training, where ``X`` is an dataset.
            The shape of ''X'' is (n_samples, n_features),
            where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
        n_sampling : int
            Number of bootstrapping samples.
        Returns
        -------
        results : array-like, shape (BootstrapResult, ...)
            Returns the results of bootstrapping for multiple datasets.
        """
        # Check parameters
        X_list = self._check_X_list(X_list)
        if isinstance(n_sampling, (numbers.Integral, np.integer)):
            if not 0 < n_sampling:
                raise ValueError(
                    'n_sampling must be an integer greater than 0.')
        else:
            raise ValueError('n_sampling must be an integer greater than 0.')
        # Bootstrapping
        adjacency_matrices_list = np.zeros(
            [len(X_list), n_sampling, self._n_features, self._n_features])
        total_effects_list = np.zeros(
            [len(X_list), n_sampling, self._n_features, self._n_features])
        for n in range(n_sampling):
            # Resample each dataset independently and refit the joint model.
            resampled_X_list = [resample(X) for X in X_list]
            self.fit(resampled_X_list)
            for i, am in enumerate(self._adjacency_matrices):
                adjacency_matrices_list[i][n] = am
            # Calculate total effects
            for c, from_ in enumerate(self._causal_order):
                for to in self._causal_order[c + 1:]:
                    effects = self.estimate_total_effect(
                        resampled_X_list, from_, to)
                    for i, effect in enumerate(effects):
                        total_effects_list[i, n, to, from_] = effect
        result_list = []
        for am, te in zip(adjacency_matrices_list, total_effects_list):
            result_list.append(BootstrapResult(am, te))
        return result_list
    def estimate_total_effect(self, X_list, from_index, to_index):
        """Estimate total effect using causal model.
        Parameters
        ----------
        X_list : array-like, shape (X, ...)
            Multiple datasets for training, where ``X`` is an dataset.
            The shape of ''X'' is (n_samples, n_features),
            where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
        from_index :
            Index of source variable to estimate total effect.
        to_index :
            Index of destination variable to estimate total effect.
        Returns
        -------
        total_effect : float
            Estimated total effect.
        """
        # Check parameters
        X_list = self._check_X_list(X_list)
        # Check from/to causal order
        from_order = self._causal_order.index(from_index)
        to_order = self._causal_order.index(to_index)
        if from_order > to_order:
            warnings.warn(f'The estimated causal effect may be incorrect because '
                          f'the causal order of the destination variable (to_index={to_index}) '
                          f'is earlier than the source variable (from_index={from_index}).')
        effects = []
        # One total-effect estimate per dataset.
        for X, am in zip(X_list, self._adjacency_matrices):
            # from_index + parents indices
            parents = np.where(np.abs(am[from_index]) > 0)[0]
            predictors = [from_index]
            predictors.extend(parents)
            # Estimate total effect
            coefs = predict_adaptive_lasso(X, predictors, to_index)
            effects.append(coefs[0])
        return effects
    def get_error_independence_p_values(self, X_list):
        """Calculate the p-value matrix of independence between error variables.
        Parameters
        ----------
        X_list : array-like, shape (X, ...)
            Multiple datasets for training, where ``X`` is an dataset.
            The shape of ''X'' is (n_samples, n_features),
            where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
        Returns
        -------
        independence_p_values : array-like, shape (n_datasets, n_features, n_features)
            p-value matrix of independence between error variables.
        """
        # Check parameters
        X_list = self._check_X_list(X_list)
        p_values = np.zeros([len(X_list), self._n_features, self._n_features])
        for d, (X, am) in enumerate(zip(X_list, self._adjacency_matrices)):
            n_samples = X.shape[0]
            # Residuals of the fitted linear model: E = X - B X.
            E = X - np.dot(am, X.T).T
            for i, j in itertools.combinations(range(self._n_features), 2):
                _, p_value = hsic_test_gamma(np.reshape(E[:, i], [n_samples, 1]),
                                             np.reshape(E[:, j], [n_samples, 1]))
                # Store the same p-value in both triangles of the matrix.
                p_values[d, i, j] = p_value
                p_values[d, j, i] = p_value
        return p_values
    def _check_X_list(self, X_list):
        """Check input X list."""
        if not isinstance(X_list, list):
            raise ValueError('X_list must be a list.')
        if len(X_list) < 2:
            raise ValueError(
                'X_list must be a list containing at least two items')
        # Side effect: the expected feature count is taken from the first
        # dataset and cached on the instance.
        self._n_features = check_array(X_list[0]).shape[1]
        X_list_ = []
        for X in X_list:
            X_ = check_array(X)
            if X_.shape[1] != self._n_features:
                raise ValueError(
                    'X_list must be a list with the same number of features')
            X_list_.append(X_)
        # NOTE(review): np.array over the datasets only stacks into a 3-D
        # array when every dataset has the same n_samples — confirm intended.
        return np.array(X_list_)
    def _search_causal_order(self, X_list, U):
        """Search the causal ordering."""
        Uc, Vj = self._search_candidate(U)
        if len(Uc) == 1:
            return Uc[0]
        total_size = 0
        for X in X_list:
            total_size += len(X)
        MG_list = []
        for i in Uc:
            MG = 0
            for X in X_list:
                M = 0
                for j in U:
                    if i != j:
                        # Standardize both columns before measuring dependence.
                        xi_std = (X[:, i] - np.mean(X[:, i])) / np.std(X[:, i])
                        xj_std = (X[:, j] - np.mean(X[:, j])) / np.std(X[:, j])
                        ri_j = xi_std if i in Vj and j in Uc else self._residual(
                            xi_std, xj_std)
                        rj_i = xj_std if j in Vj and i in Uc else self._residual(
                            xj_std, xi_std)
                        M += np.min([0, self._diff_mutual_info(xi_std,
                                                               xj_std, ri_j, rj_i)]) ** 2
                # Pool the per-dataset score, weighted by dataset size.
                MG += M * (len(X) / total_size)
            MG_list.append(-1.0 * MG)
        # Scores were negated above, so argmax picks the candidate with the
        # smallest aggregated dependence measure.
        return Uc[np.argmax(MG_list)]
    @property
    def adjacency_matrices_(self):
        """Estimated adjacency matrices.
        Returns
        -------
        adjacency_matrices_ : array-like, shape (B, ...)
            The list of adjacency matrix B for multiple datasets.
            The shape of B is (n_features, n_features), where
            n_features is the number of features.
        """
        return self._adjacency_matrices
| 38.626335 | 124 | 0.570849 | """
Python implementation of the LiNGAM algorithms.
The LiNGAM Project: https://sites.google.com/site/sshimizu06/lingam
"""
import itertools
import numbers
import warnings
import numpy as np
from sklearn.utils import check_array, resample
from .bootstrap import BootstrapResult
from .direct_lingam import DirectLiNGAM
from .hsic import hsic_test_gamma
from .utils import predict_adaptive_lasso
class MultiGroupDirectLiNGAM(DirectLiNGAM):
    """Implementation of DirectLiNGAM Algorithm with multiple groups [1]_
    References
    ----------
    .. [1] S. Shimizu. Joint estimation of linear non-Gaussian acyclic models. Neurocomputing, 81: 104-107, 2012.
    """
    def __init__(self, random_state=None, prior_knowledge=None, apply_prior_knowledge_softly=False):
        """Construct a model.
        Parameters
        ----------
        random_state : int, optional (default=None)
            ``random_state`` is the seed used by the random number generator.
        prior_knowledge : array-like, shape (n_features, n_features), optional (default=None)
            Prior background_knowledge used for causal discovery, where ``n_features`` is the number of features.
            The elements of prior background_knowledge matrix are defined as follows [1]_:
            * ``0`` : :math:`x_i` does not have a directed path to :math:`x_j`
            * ``1`` : :math:`x_i` has a directed path to :math:`x_j`
            * ``-1`` : No prior background_knowledge is available to know if either of the two cases above (0 or 1) is true.
        apply_prior_knowledge_softly : boolean, optional (default=False)
            If True, apply prior background_knowledge softly.
        """
        # All option handling lives in the single-group DirectLiNGAM base class.
        super().__init__(random_state, prior_knowledge, apply_prior_knowledge_softly)
    def fit(self, X_list):
        """Fit the model to multiple datasets.
        Parameters
        ----------
        X_list : list, shape [X, ...]
            Multiple datasets for training, where ``X`` is an dataset.
            The shape of ''X'' is (n_samples, n_features),
            where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Check parameters
        X_list = self._check_X_list(X_list)
        if self._Aknw is not None:
            if (self._n_features, self._n_features) != self._Aknw.shape:
                raise ValueError(
                    'The shape of prior background_knowledge must be (n_features, n_features)')
        # Causal discovery
        U = np.arange(self._n_features)
        K = []
        # Work on copies: the residualization below mutates the data in place.
        X_list_ = [np.copy(X) for X in X_list]
        for _ in range(self._n_features):
            # Choose the next variable of the causal ordering, scored jointly
            # over every dataset (see _search_causal_order).
            m = self._search_causal_order(X_list_, U)
            for i in U:
                if i != m:
                    # Regress the chosen variable m out of each remaining
                    # variable, in every dataset.
                    for d in range(len(X_list_)):
                        X_list_[d][:, i] = self._residual(
                            X_list_[d][:, i], X_list_[d][:, m])
            K.append(m)
            U = U[U != m]
            if (self._Aknw is not None) and (not self._apply_prior_knowledge_softly):
                # Hard prior knowledge: retire ordering constraints whose
                # first (source) entry is m, now that m has been placed.
                self._partial_orders = self._partial_orders[self._partial_orders[:, 0] != m]
        self._causal_order = K
        self._adjacency_matrices = []
        # One adjacency matrix per dataset, fitted on the original
        # (non-residualized) data.
        for X in X_list:
            self._estimate_adjacency_matrix(X, prior_knowledge=self._Aknw)
            self._adjacency_matrices.append(self._adjacency_matrix)
        return self
    def bootstrap(self, X_list, n_sampling):
        """Evaluate the statistical reliability of DAG based on the bootstrapping.
        Parameters
        ----------
        X_list : array-like, shape (X, ...)
            Multiple datasets for training, where ``X`` is an dataset.
            The shape of ''X'' is (n_samples, n_features),
            where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
        n_sampling : int
            Number of bootstrapping samples.
        Returns
        -------
        results : array-like, shape (BootstrapResult, ...)
            Returns the results of bootstrapping for multiple datasets.
        """
        # Check parameters
        X_list = self._check_X_list(X_list)
        if isinstance(n_sampling, (numbers.Integral, np.integer)):
            if not 0 < n_sampling:
                raise ValueError(
                    'n_sampling must be an integer greater than 0.')
        else:
            raise ValueError('n_sampling must be an integer greater than 0.')
        # Bootstrapping
        adjacency_matrices_list = np.zeros(
            [len(X_list), n_sampling, self._n_features, self._n_features])
        total_effects_list = np.zeros(
            [len(X_list), n_sampling, self._n_features, self._n_features])
        for n in range(n_sampling):
            # Resample each dataset independently and refit the joint model.
            resampled_X_list = [resample(X) for X in X_list]
            self.fit(resampled_X_list)
            for i, am in enumerate(self._adjacency_matrices):
                adjacency_matrices_list[i][n] = am
            # Calculate total effects
            for c, from_ in enumerate(self._causal_order):
                for to in self._causal_order[c + 1:]:
                    effects = self.estimate_total_effect(
                        resampled_X_list, from_, to)
                    for i, effect in enumerate(effects):
                        total_effects_list[i, n, to, from_] = effect
        result_list = []
        for am, te in zip(adjacency_matrices_list, total_effects_list):
            result_list.append(BootstrapResult(am, te))
        return result_list
    def estimate_total_effect(self, X_list, from_index, to_index):
        """Estimate total effect using causal model.
        Parameters
        ----------
        X_list : array-like, shape (X, ...)
            Multiple datasets for training, where ``X`` is an dataset.
            The shape of ''X'' is (n_samples, n_features),
            where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
        from_index :
            Index of source variable to estimate total effect.
        to_index :
            Index of destination variable to estimate total effect.
        Returns
        -------
        total_effect : float
            Estimated total effect.
        """
        # Check parameters
        X_list = self._check_X_list(X_list)
        # Check from/to causal order
        from_order = self._causal_order.index(from_index)
        to_order = self._causal_order.index(to_index)
        if from_order > to_order:
            warnings.warn(f'The estimated causal effect may be incorrect because '
                          f'the causal order of the destination variable (to_index={to_index}) '
                          f'is earlier than the source variable (from_index={from_index}).')
        effects = []
        # One total-effect estimate per dataset.
        for X, am in zip(X_list, self._adjacency_matrices):
            # from_index + parents indices
            parents = np.where(np.abs(am[from_index]) > 0)[0]
            predictors = [from_index]
            predictors.extend(parents)
            # Estimate total effect
            coefs = predict_adaptive_lasso(X, predictors, to_index)
            effects.append(coefs[0])
        return effects
    def get_error_independence_p_values(self, X_list):
        """Calculate the p-value matrix of independence between error variables.
        Parameters
        ----------
        X_list : array-like, shape (X, ...)
            Multiple datasets for training, where ``X`` is an dataset.
            The shape of ''X'' is (n_samples, n_features),
            where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
        Returns
        -------
        independence_p_values : array-like, shape (n_datasets, n_features, n_features)
            p-value matrix of independence between error variables.
        """
        # Check parameters
        X_list = self._check_X_list(X_list)
        p_values = np.zeros([len(X_list), self._n_features, self._n_features])
        for d, (X, am) in enumerate(zip(X_list, self._adjacency_matrices)):
            n_samples = X.shape[0]
            # Residuals of the fitted linear model: E = X - B X.
            E = X - np.dot(am, X.T).T
            for i, j in itertools.combinations(range(self._n_features), 2):
                _, p_value = hsic_test_gamma(np.reshape(E[:, i], [n_samples, 1]),
                                             np.reshape(E[:, j], [n_samples, 1]))
                # Store the same p-value in both triangles of the matrix.
                p_values[d, i, j] = p_value
                p_values[d, j, i] = p_value
        return p_values
    def _check_X_list(self, X_list):
        """Check input X list."""
        if not isinstance(X_list, list):
            raise ValueError('X_list must be a list.')
        if len(X_list) < 2:
            raise ValueError(
                'X_list must be a list containing at least two items')
        # Side effect: the expected feature count is taken from the first
        # dataset and cached on the instance.
        self._n_features = check_array(X_list[0]).shape[1]
        X_list_ = []
        for X in X_list:
            X_ = check_array(X)
            if X_.shape[1] != self._n_features:
                raise ValueError(
                    'X_list must be a list with the same number of features')
            X_list_.append(X_)
        # NOTE(review): np.array over the datasets only stacks into a 3-D
        # array when every dataset has the same n_samples — confirm intended.
        return np.array(X_list_)
    def _search_causal_order(self, X_list, U):
        """Search the causal ordering."""
        Uc, Vj = self._search_candidate(U)
        if len(Uc) == 1:
            return Uc[0]
        total_size = 0
        for X in X_list:
            total_size += len(X)
        MG_list = []
        for i in Uc:
            MG = 0
            for X in X_list:
                M = 0
                for j in U:
                    if i != j:
                        # Standardize both columns before measuring dependence.
                        xi_std = (X[:, i] - np.mean(X[:, i])) / np.std(X[:, i])
                        xj_std = (X[:, j] - np.mean(X[:, j])) / np.std(X[:, j])
                        ri_j = xi_std if i in Vj and j in Uc else self._residual(
                            xi_std, xj_std)
                        rj_i = xj_std if j in Vj and i in Uc else self._residual(
                            xj_std, xi_std)
                        M += np.min([0, self._diff_mutual_info(xi_std,
                                                               xj_std, ri_j, rj_i)]) ** 2
                # Pool the per-dataset score, weighted by dataset size.
                MG += M * (len(X) / total_size)
            MG_list.append(-1.0 * MG)
        # Scores were negated above, so argmax picks the candidate with the
        # smallest aggregated dependence measure.
        return Uc[np.argmax(MG_list)]
    @property
    def adjacency_matrices_(self):
        """Estimated adjacency matrices.
        Returns
        -------
        adjacency_matrices_ : array-like, shape (B, ...)
            The list of adjacency matrix B for multiple datasets.
            The shape of B is (n_features, n_features), where
            n_features is the number of features.
        """
        return self._adjacency_matrices
| 0 | 0 | 0 |
af4aef31b29eb7fac373d7af381d6a06f930385f | 83 | py | Python | ibkr/apps.py | westonplatter/pnl_reporting | 8cf58cf16977bc5cb0109f047f0681e10e09abbc | [
"BSD-3-Clause"
] | 12 | 2020-10-16T14:36:53.000Z | 2022-01-29T10:11:19.000Z | ibkr/apps.py | westonplatter/pnl_reporting | 8cf58cf16977bc5cb0109f047f0681e10e09abbc | [
"BSD-3-Clause"
] | 9 | 2021-01-21T02:38:52.000Z | 2022-01-08T22:42:04.000Z | ibkr/apps.py | westonplatter/pnl_reporting | 8cf58cf16977bc5cb0109f047f0681e10e09abbc | [
"BSD-3-Clause"
] | 4 | 2020-11-30T13:00:22.000Z | 2021-11-30T09:33:02.000Z | from django.apps import AppConfig
| 13.833333 | 33 | 0.73494 | from django.apps import AppConfig
class IbkrConfig(AppConfig):
    """Django AppConfig registering the ``ibkr`` application."""
    name = "ibkr"
| 0 | 25 | 23 |
7e5ca3f9e7f33bdcb4fe3780578db3eb48b9c9dc | 11,476 | py | Python | bravado/client.py | analogue/swagger-py | ded3a811579ea4e0d1adb3582b90d8d941b7fb17 | [
"BSD-3-Clause"
] | null | null | null | bravado/client.py | analogue/swagger-py | ded3a811579ea4e0d1adb3582b90d8d941b7fb17 | [
"BSD-3-Clause"
] | null | null | null | bravado/client.py | analogue/swagger-py | ded3a811579ea4e0d1adb3582b90d8d941b7fb17 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The :class:`SwaggerClient` provides an interface for making API calls based on
a swagger spec, and returns responses of python objects which build from the
API response.
Structure Diagram::
+---------------------+
| |
| SwaggerClient |
| |
+------+--------------+
|
| has many
|
+------v--------------+
| |
| Resource +------------------+
| | |
+------+--------------+ has many |
| |
| has many |
| |
+------v--------------+ +------v--------------+
| | | |
| Operation | | SwaggerModel |
| | | |
+------+--------------+ +---------------------+
|
| uses
|
+------v--------------+
| |
| HttpClient |
| |
+---------------------+
To get a client
.. code-block:: python
client = bravado.client.SwaggerClient.from_url(swagger_spec_url)
"""
import logging
from bravado_core.docstring import create_operation_docstring
from bravado_core.exception import SwaggerMappingError
from bravado_core.formatter import SwaggerFormat # noqa
from bravado_core.param import marshal_param
from bravado_core.spec import Spec
from six import iteritems, itervalues
from bravado.docstring_property import docstring_property
from bravado.requests_client import RequestsClient
from bravado.swagger_model import Loader
from bravado.warning import warn_for_deprecated_op
# Module-level logger, named after this module per logging convention.
log = logging.getLogger(__name__)
# Bravado-level config defaults; merged beneath any user-supplied config in
# SwaggerClient.from_spec.
CONFIG_DEFAULTS = {
    # See the constructor of :class:`bravado.http_future.HttpFuture` for an
    # in depth explanation of what this means.
    'also_return_response': False,
}
# Per-call defaults; merged beneath the '_request_options' dict a caller may
# pass when invoking an operation.
REQUEST_OPTIONS_DEFAULTS = {
    # List of callbacks that are executed after the incoming response has been
    # validated and the swagger_result has been unmarshalled.
    #
    # The callback should expect two arguments:
    # param : incoming_response
    # type : subclass of class:`bravado_core.response.IncomingResponse`
    # param : operation
    # type : class:`bravado_core.operation.Operation`
    'response_callbacks': [],
}
class SwaggerClient(object):
    """A client for accessing a Swagger-documented RESTful service.
    :type swagger_spec: :class:`bravado_core.spec.Spec`
    """
    # BUGFIX: ``__init__`` was missing, so ``from_spec``'s ``cls(swagger_spec)``
    # raised TypeError and ``__getattr__`` read an unset ``self.swagger_spec``.
    def __init__(self, swagger_spec):
        """
        :param swagger_spec: parsed spec this client is driven by
        :type swagger_spec: :class:`bravado_core.spec.Spec`
        """
        self.swagger_spec = swagger_spec
    @classmethod
    def from_url(cls, spec_url, http_client=None, request_headers=None,
                 config=None):
        """Build a :class:`SwaggerClient` from a url to the Swagger
        specification for a RESTful API.
        :param spec_url: url pointing at the swagger API specification
        :type spec_url: str
        :param http_client: an HTTP client used to perform requests
        :type http_client: :class:`bravado.http_client.HttpClient`
        :param request_headers: Headers to pass with http requests
        :type request_headers: dict
        :param config: Config dict for bravado and bravado_core.
            See CONFIG_DEFAULTS in :module:`bravado_core.spec`.
            See CONFIG_DEFAULTS in :module:`bravado.client`.
        :rtype: :class:`bravado_core.spec.Spec`
        """
        log.debug(u"Loading from %s" % spec_url)
        http_client = http_client or RequestsClient()
        loader = Loader(http_client, request_headers=request_headers)
        spec_dict = loader.load_spec(spec_url)
        # RefResolver may have to download additional json files (remote refs)
        # via http. Wrap http_client's request() so that request headers are
        # passed along with the request transparently. Yeah, this is not ideal,
        # but since RefResolver has new found responsibilities, it is
        # functional.
        if request_headers is not None:
            http_client.request = inject_headers_for_remote_refs(
                http_client.request, request_headers)
        return cls.from_spec(spec_dict, spec_url, http_client, config)
    @classmethod
    def from_spec(cls, spec_dict, origin_url=None, http_client=None,
                  config=None):
        """
        Build a :class:`SwaggerClient` from a Swagger spec in dict form.
        :param spec_dict: a dict with a Swagger spec in json-like form
        :param origin_url: the url used to retrieve the spec_dict
        :type origin_url: str
        :param config: Configuration dict - see spec.CONFIG_DEFAULTS
        :rtype: :class:`bravado_core.spec.Spec`
        """
        http_client = http_client or RequestsClient()
        # Apply bravado config defaults
        config = dict(CONFIG_DEFAULTS, **(config or {}))
        swagger_spec = Spec.from_dict(
            spec_dict, origin_url, http_client, config)
        return cls(swagger_spec)
    def get_model(self, model_name):
        """Return the model registered under ``model_name`` in the spec's
        definitions.
        """
        return self.swagger_spec.definitions[model_name]
    def __repr__(self):
        """Render as ``SwaggerClient(<api_url>)`` for debugging."""
        return u"%s(%s)" % (self.__class__.__name__, self.swagger_spec.api_url)
    def __getattr__(self, item):
        """
        :param item: name of the resource to return
        :return: :class:`Resource`
        """
        resource = self.swagger_spec.resources.get(item)
        if not resource:
            raise AttributeError(
                'Resource {0} not found. Available resources: {1}'
                .format(item, ', '.join(dir(self))))
        # Wrap bravado-core's Resource and Operation objects in order to
        # execute a service call via the http_client.
        return ResourceDecorator(resource)
    def __dir__(self):
        """Expose resource names for introspection and tab-completion."""
        return self.swagger_spec.resources.keys()
def inject_headers_for_remote_refs(request_callable, request_headers):
    """Inject request_headers only when the request is to retrieve the
    remote refs in the swagger spec (vs being a request for a service call).
    :param request_callable: method on http_client to make a http request
    :param request_headers: headers to inject when retrieving remote refs
    """
    # BUGFIX: the inner wrapper was missing, so ``return request_wrapper``
    # referenced an undefined name and calling this function raised NameError.
    def request_wrapper(request_params, *args, **kwargs):
        # Service calls pass an 'operation' kwarg; remote-ref fetches do not.
        def is_remote_ref_request(request_kwargs):
            return request_kwargs.get('operation') is None
        if is_remote_ref_request(kwargs):
            request_params['headers'] = request_headers
        return request_callable(request_params, *args, **kwargs)
    return request_wrapper
class ResourceDecorator(object):
    """Thin proxy around :class:`bravado_core.resource.Resource`.
    Attribute access for an operation name yields a
    :class:`CallableOperation` so the operation can be invoked directly.
    """
    def __init__(self, resource):
        """:type resource: :class:`bravado_core.resource.Resource`"""
        self.resource = resource
    def __getattr__(self, name):
        """Look up ``name`` on the wrapped resource and make it callable.
        :rtype: :class:`CallableOperation`
        """
        delegate = getattr(self.resource, name)
        return CallableOperation(delegate)
    def __dir__(self):
        """Delegate tab-completion candidates to the wrapped resource."""
        return self.resource.__dir__()
class CallableOperation(object):
    """Wraps an operation to make it callable and provides a docstring. Calling
    the operation uses the configured http_client.
    :type operation: :class:`bravado_core.operation.Operation`
    """
    # BUGFIX: ``__init__`` was missing (instances had no ``operation``
    # attribute) and ``docstring_property`` was wrongly decorating
    # ``__getattr__``; it must decorate a ``__doc__`` method instead.
    def __init__(self, operation):
        """:type operation: :class:`bravado_core.operation.Operation`"""
        self.operation = operation
    @docstring_property(__doc__)
    def __doc__(self):
        # Build the per-operation docstring from the spec on access.
        return create_operation_docstring(self.operation)
    def __getattr__(self, name):
        """Forward requests for attrs not found on this decorator to the
        delegate.
        """
        return getattr(self.operation, name)
    def __call__(self, **op_kwargs):
        """Invoke the actual HTTP request and return a future.
        :rtype: :class:`bravado.http_future.HTTPFuture`
        """
        log.debug(u"%s(%s)" % (self.operation.operation_id, op_kwargs))
        warn_for_deprecated_op(self.operation)
        # Apply request_options defaults
        request_options = dict(
            REQUEST_OPTIONS_DEFAULTS,
            **(op_kwargs.pop('_request_options', {})))
        # After the pop, op_kwargs holds only operation parameters.
        request_params = construct_request(
            self.operation, request_options, **op_kwargs)
        config = self.operation.swagger_spec.config
        http_client = self.operation.swagger_spec.http_client
        # Per-request config overrides client wide config
        also_return_response = request_options.get(
            'also_return_response',
            config['also_return_response'])
        return http_client.request(
            request_params,
            operation=self.operation,
            response_callbacks=request_options['response_callbacks'],
            also_return_response=also_return_response)
def construct_request(operation, request_options, **op_kwargs):
    """Build the outgoing request dict for an operation invocation.
    :type operation: :class:`bravado_core.operation.Operation`
    :param request_options: the ``_request_options`` dict passed into the
        operation invocation.
    :param op_kwargs: parameter name/value pairs for the invocation.
    :return: request in dict form
    """
    base_url = operation.swagger_spec.api_url.rstrip('/')
    request = {
        'method': operation.http_method.upper(),
        'url': base_url + operation.path_name,
        'params': {},  # filled in downstream by parameter marshalling
        'headers': request_options.get('headers', {}),
    }
    # Carry through the optional per-request transport options, if given.
    for option_name in ('connect_timeout', 'timeout'):
        if option_name in request_options:
            request[option_name] = request_options[option_name]
    construct_params(operation, request, op_kwargs)
    return request
def construct_params(operation, request, op_kwargs):
    """Validate the caller-supplied parameters and marshal them into
    ``request``.
    :type operation: :class:`bravado_core.operation.Operation`
    :type request: dict
    :param op_kwargs: the kwargs passed to the operation invocation
    :raises: SwaggerMappingError on extra parameters or when a required
        parameter is not supplied.
    """
    unclaimed = operation.params.copy()
    for param_name, param_value in iteritems(op_kwargs):
        spec_param = unclaimed.pop(param_name, None)
        if spec_param is None:
            raise SwaggerMappingError(
                "{0} does not have parameter {1}"
                .format(operation.operation_id, param_name))
        marshal_param(spec_param, param_value, request)
    # Anything left unclaimed either had to be supplied (required) or may
    # carry a spec-level default that still needs to be marshalled.
    for leftover in itervalues(unclaimed):
        if leftover.required:
            raise SwaggerMappingError(
                '{0} is a required parameter'.format(leftover.name))
        if not leftover.required and leftover.has_default():
            marshal_param(leftover, None, request)
| 35.639752 | 79 | 0.622778 | # -*- coding: utf-8 -*-
"""
The :class:`SwaggerClient` provides an interface for making API calls based on
a swagger spec, and returns responses of python objects which build from the
API response.
Structure Diagram::
+---------------------+
| |
| SwaggerClient |
| |
+------+--------------+
|
| has many
|
+------v--------------+
| |
| Resource +------------------+
| | |
+------+--------------+ has many |
| |
| has many |
| |
+------v--------------+ +------v--------------+
| | | |
| Operation | | SwaggerModel |
| | | |
+------+--------------+ +---------------------+
|
| uses
|
+------v--------------+
| |
| HttpClient |
| |
+---------------------+
To get a client
.. code-block:: python
client = bravado.client.SwaggerClient.from_url(swagger_spec_url)
"""
import logging
from bravado_core.docstring import create_operation_docstring
from bravado_core.exception import SwaggerMappingError
from bravado_core.formatter import SwaggerFormat # noqa
from bravado_core.param import marshal_param
from bravado_core.spec import Spec
from six import iteritems, itervalues
from bravado.docstring_property import docstring_property
from bravado.requests_client import RequestsClient
from bravado.swagger_model import Loader
from bravado.warning import warn_for_deprecated_op
# Module-level logger, named after this module per logging convention.
log = logging.getLogger(__name__)
# Bravado-level config defaults; merged beneath any user-supplied config in
# SwaggerClient.from_spec.
CONFIG_DEFAULTS = {
    # See the constructor of :class:`bravado.http_future.HttpFuture` for an
    # in depth explanation of what this means.
    'also_return_response': False,
}
# Per-call defaults; merged beneath the '_request_options' dict a caller may
# pass when invoking an operation.
REQUEST_OPTIONS_DEFAULTS = {
    # List of callbacks that are executed after the incoming response has been
    # validated and the swagger_result has been unmarshalled.
    #
    # The callback should expect two arguments:
    # param : incoming_response
    # type : subclass of class:`bravado_core.response.IncomingResponse`
    # param : operation
    # type : class:`bravado_core.operation.Operation`
    'response_callbacks': [],
}
class SwaggerClient(object):
    """A client for accessing a Swagger-documented RESTful service.
    :type swagger_spec: :class:`bravado_core.spec.Spec`
    """
    def __init__(self, swagger_spec):
        """
        :param swagger_spec: parsed spec this client is driven by
        :type swagger_spec: :class:`bravado_core.spec.Spec`
        """
        self.swagger_spec = swagger_spec
    @classmethod
    def from_url(cls, spec_url, http_client=None, request_headers=None,
                 config=None):
        """Build a :class:`SwaggerClient` from a url to the Swagger
        specification for a RESTful API.
        :param spec_url: url pointing at the swagger API specification
        :type spec_url: str
        :param http_client: an HTTP client used to perform requests
        :type http_client: :class:`bravado.http_client.HttpClient`
        :param request_headers: Headers to pass with http requests
        :type request_headers: dict
        :param config: Config dict for bravado and bravado_core.
            See CONFIG_DEFAULTS in :module:`bravado_core.spec`.
            See CONFIG_DEFAULTS in :module:`bravado.client`.
        :rtype: :class:`bravado_core.spec.Spec`
        """
        log.debug(u"Loading from %s" % spec_url)
        http_client = http_client or RequestsClient()
        loader = Loader(http_client, request_headers=request_headers)
        spec_dict = loader.load_spec(spec_url)
        # RefResolver may have to download additional json files (remote refs)
        # via http. Wrap http_client's request() so that request headers are
        # passed along with the request transparently. Yeah, this is not ideal,
        # but since RefResolver has new found responsibilities, it is
        # functional.
        # NOTE: this monkeypatches http_client.request in place.
        if request_headers is not None:
            http_client.request = inject_headers_for_remote_refs(
                http_client.request, request_headers)
        return cls.from_spec(spec_dict, spec_url, http_client, config)
    @classmethod
    def from_spec(cls, spec_dict, origin_url=None, http_client=None,
                  config=None):
        """
        Build a :class:`SwaggerClient` from a Swagger spec in dict form.
        :param spec_dict: a dict with a Swagger spec in json-like form
        :param origin_url: the url used to retrieve the spec_dict
        :type origin_url: str
        :param config: Configuration dict - see spec.CONFIG_DEFAULTS
        :rtype: :class:`bravado_core.spec.Spec`
        """
        http_client = http_client or RequestsClient()
        # Apply bravado config defaults
        config = dict(CONFIG_DEFAULTS, **(config or {}))
        swagger_spec = Spec.from_dict(
            spec_dict, origin_url, http_client, config)
        return cls(swagger_spec)
    def get_model(self, model_name):
        """Return the model registered under ``model_name`` in the spec's
        definitions.
        """
        return self.swagger_spec.definitions[model_name]
    def __repr__(self):
        """Render as ``SwaggerClient(<api_url>)`` for debugging."""
        return u"%s(%s)" % (self.__class__.__name__, self.swagger_spec.api_url)
    def __getattr__(self, item):
        """
        :param item: name of the resource to return
        :return: :class:`Resource`
        """
        resource = self.swagger_spec.resources.get(item)
        if not resource:
            raise AttributeError(
                'Resource {0} not found. Available resources: {1}'
                .format(item, ', '.join(dir(self))))
        # Wrap bravado-core's Resource and Operation objects in order to
        # execute a service call via the http_client.
        return ResourceDecorator(resource)
    def __dir__(self):
        """Expose resource names for introspection and tab-completion."""
        return self.swagger_spec.resources.keys()
def inject_headers_for_remote_refs(request_callable, request_headers):
    """Wrap ``request_callable`` so that ``request_headers`` ride along on
    requests that fetch remote ``$ref`` documents, but not on service calls.
    :param request_callable: method on http_client to make a http request
    :param request_headers: headers to inject when retrieving remote refs
    """
    def request_wrapper(request_params, *args, **kwargs):
        # Only service-call invocations carry an 'operation' kwarg; its
        # absence marks a remote-ref fetch.
        if kwargs.get('operation') is None:
            request_params['headers'] = request_headers
        return request_callable(request_params, *args, **kwargs)
    return request_wrapper
class ResourceDecorator(object):
    """Thin proxy around :class:`bravado_core.resource.Resource`.
    Attribute access for an operation name yields a
    :class:`CallableOperation` so the operation can be invoked directly.
    """
    def __init__(self, resource):
        """:type resource: :class:`bravado_core.resource.Resource`"""
        self.resource = resource
    def __getattr__(self, name):
        """Look up ``name`` on the wrapped resource and make it callable.
        :rtype: :class:`CallableOperation`
        """
        delegate = getattr(self.resource, name)
        return CallableOperation(delegate)
    def __dir__(self):
        """Delegate tab-completion candidates to the wrapped resource."""
        return self.resource.__dir__()
class CallableOperation(object):
    """Wraps an operation to make it callable and provides a docstring. Calling
    the operation uses the configured http_client.

    :type operation: :class:`bravado_core.operation.Operation`
    """

    def __init__(self, operation):
        self.operation = operation

    @docstring_property(__doc__)
    def __doc__(self):
        # Lazily build the docstring from the operation's swagger metadata.
        return create_operation_docstring(self.operation)

    def __getattr__(self, name):
        """Forward requests for attrs not found on this decorator to the
        delegate.
        """
        return getattr(self.operation, name)

    def __call__(self, **op_kwargs):
        """Invoke the actual HTTP request and return a future.

        :param op_kwargs: operation parameter name/value pairs, plus an
            optional '_request_options' dict of per-request settings.
        :rtype: :class:`bravado.http_future.HTTPFuture`
        """
        log.debug(u"%s(%s)" % (self.operation.operation_id, op_kwargs))
        warn_for_deprecated_op(self.operation)

        # Apply request_options defaults
        request_options = dict(
            REQUEST_OPTIONS_DEFAULTS,
            **(op_kwargs.pop('_request_options', {})))

        request_params = construct_request(
            self.operation, request_options, **op_kwargs)

        config = self.operation.swagger_spec.config
        http_client = self.operation.swagger_spec.http_client

        # Per-request config overrides client wide config
        also_return_response = request_options.get(
            'also_return_response',
            config['also_return_response'])

        return http_client.request(
            request_params,
            operation=self.operation,
            response_callbacks=request_options['response_callbacks'],
            also_return_response=also_return_response)
def construct_request(operation, request_options, **op_kwargs):
    """Construct the outgoing request dict.

    :type operation: :class:`bravado_core.operation.Operation`
    :param request_options: _request_options passed into the operation
        invocation.
    :param op_kwargs: parameter name/value pairs to passed to the
        invocation of the operation.
    :return: request in dict form
    """
    base_url = operation.swagger_spec.api_url.rstrip('/')
    request = {
        'method': operation.http_method.upper(),
        'url': base_url + operation.path_name,
        'params': {},  # filled in downstream
        'headers': request_options.get('headers', {}),
    }

    # Copy over optional request options
    for option_name in ('connect_timeout', 'timeout'):
        if option_name in request_options:
            request[option_name] = request_options[option_name]

    construct_params(operation, request, op_kwargs)
    return request
def construct_params(operation, request, op_kwargs):
    """Given the parameters passed to the operation invocation, validates and
    marshals the parameters into the provided request dict.

    :type operation: :class:`bravado_core.operation.Operation`
    :type request: dict
    :param op_kwargs: the kwargs passed to the operation invocation
    :raises: SwaggerMappingError on extra parameters or when a required
        parameter is not supplied.
    """
    current_params = operation.params.copy()
    for param_name, param_value in iteritems(op_kwargs):
        param = current_params.pop(param_name, None)
        if param is None:
            raise SwaggerMappingError(
                "{0} does not have parameter {1}"
                .format(operation.operation_id, param_name))
        marshal_param(param, param_value, request)

    # Check required params and non-required params with a 'default' value
    for remaining_param in itervalues(current_params):
        if remaining_param.required:
            raise SwaggerMappingError(
                '{0} is a required parameter'.format(remaining_param.name))
        if not remaining_param.required and remaining_param.has_default():
            marshal_param(remaining_param, None, request)
| 711 | 0 | 185 |
488f805ead1ea81a412115f7c6e06f8427a0411c | 6,844 | py | Python | Login.py | MichaelToLearn/oh_my_course | ac00f132e931422430f5f8ed2079ce0fc21cfb37 | [
"Apache-2.0"
] | 1 | 2020-01-14T01:14:27.000Z | 2020-01-14T01:14:27.000Z | Login.py | MichaelToLearn/oh_my_course | ac00f132e931422430f5f8ed2079ce0fc21cfb37 | [
"Apache-2.0"
] | null | null | null | Login.py | MichaelToLearn/oh_my_course | ac00f132e931422430f5f8ed2079ce0fc21cfb37 | [
"Apache-2.0"
] | null | null | null | # coding:utf-8
import json
import pickle
import requests
import os
import re
from io import BytesIO
BASE_DIR = os.path.dirname(__file__)
# LOGIN_URL = 'http://grdms.bit.edu.cn/yjs/login_cas.jsp'
# LOGIN_URL = 'https://login.bit.edu.cn/cas/login?service=https://login.bit.edu.cn/campus-account/shiro-cas'
LOGIN_URL = 'https://login.bit.edu.cn/cas/login?service=http%3A%2F%2Fgrdms.bit.edu.cn%2Fyjs%2Flogin_cas.jsp'
# LOGIN_INDEX_URL = 'https://login.bit.edu.cn/cas/login?service=https://login.bit.edu.cn/campus-account/shiro-cas'
LOGIN_INDEX_URL = LOGIN_URL
# 验证码
CAPTCHA_URL = 'https://login.bit.edu.cn/cas/captcha.html'
NEED_CAPTCHA_URL = 'https://login.bit.edu.cn/cas/needCaptcha.html?username=%s'
| 30.690583 | 192 | 0.513296 | # coding:utf-8
import json
import pickle
import requests
import os
import re
from io import BytesIO
BASE_DIR = os.path.dirname(__file__)
# LOGIN_URL = 'http://grdms.bit.edu.cn/yjs/login_cas.jsp'
# LOGIN_URL = 'https://login.bit.edu.cn/cas/login?service=https://login.bit.edu.cn/campus-account/shiro-cas'
LOGIN_URL = 'https://login.bit.edu.cn/cas/login?service=http%3A%2F%2Fgrdms.bit.edu.cn%2Fyjs%2Flogin_cas.jsp'
# LOGIN_INDEX_URL = 'https://login.bit.edu.cn/cas/login?service=https://login.bit.edu.cn/campus-account/shiro-cas'
LOGIN_INDEX_URL = LOGIN_URL
# 验证码
CAPTCHA_URL = 'https://login.bit.edu.cn/cas/captcha.html'
NEED_CAPTCHA_URL = 'https://login.bit.edu.cn/cas/needCaptcha.html?username=%s'
class Login(object):
    """CAS login helper for the BIT graduate-school site (grdms.bit.edu.cn).

    Scrapes the hidden login-form parameters, handles optional captcha
    entry via stdin, and persists the session cookies to disk between runs.
    """

    stuid = ''            # student id (CAS username)
    pwd = ''              # password
    cookies = None        # cookie jar from the most recent response
    http_session_id = ''  # JSESSIONID value extracted from the cookies

    def __init__(self, stuid, pwd):
        r"""
        Initialize the login helper.

        :param stuid: student id
        :param pwd: password
        """
        self.pwd = pwd
        self.stuid = stuid

    def get_login_param(self):
        r"""
        Fetch the initial login page and scrape its hidden form parameters.

        :return: result in the form:
            {
                param: {
                    'lt': lt parameter,
                    'execution': execution parameter,
                    '_eventId': event parameter,
                    'rmShown': unknown
                },
                cookies: cookies produced by the request
            }
        """
        # NOTE(review): verify=False disables TLS certificate verification.
        r = requests.get(LOGIN_INDEX_URL, verify=False)
        html = r.text
        # Grab every name="..." value="..." pair from the login form.
        pattern = 'name="(\S+)"\svalue="(\S+)"'
        search_result = re.findall(pattern, html)
        param = {}
        for item in search_result:
            param[item[0]] = item[1]
        self.cookies = r.cookies
        self.http_session_id = self.get_http_session_id()
        return {
            "param": param,
            "cookies": r.cookies
        }

    def cookies_to_str(self, cookies):
        r"""
        Convert a cookie jar into a "k=v; k=v" header string.

        :param cookies: cookie jar object
        :return: cookie string
        """
        cookie_str = ''
        for index, cookie in enumerate(cookies.items()):
            if index > 0:
                cookie_str = cookie_str + '; '
            cookie_str = cookie_str + "%s=%s" % (cookie[0], cookie[1])
        return cookie_str

    def need_captcha(self):
        r"""
        Ask the server whether a captcha is required for this username.

        :return: True/False (JSON boolean returned by the server)
        """
        r = requests.get(NEED_CAPTCHA_URL % self.stuid, verify=False, headers=self.get_cookie_header())
        return r.json()

    def handle_captcha(self):
        r"""
        Download the captcha image and save it next to this file as
        captcha.jpg so the user can read it.

        :return: None
        """
        r = requests.get(CAPTCHA_URL, verify=False, headers=self.get_cookie_header())
        base_dir = os.path.dirname(__file__)
        file_path = os.path.join(base_dir, 'captcha.jpg')
        open(file_path, 'wb').write(r.content)

    def login(self, using_cache=True):
        r"""
        Log in, optionally reusing cookies cached from a previous run.

        :param using_cache: reuse saved cookies when available
        :return: login result:
            {
                "status": whether the login succeeded,
                "msg": human-readable result message
            }
        """
        # Reuse previously saved login info when allowed.
        if self.logined() and using_cache:
            return {
                'status': True,
                'msg': u'使用之前的缓存登录成功'
            }
        result = self.get_login_param()
        param = result['param']
        param['username'] = self.stuid
        param['password'] = self.pwd
        # Check whether a captcha must be entered.
        if self.need_captcha():
            # Captcha required: save the image and read the answer from stdin.
            self.handle_captcha()
            print(u'请输入验证码:')
            param['captchaResponse'] = input()
        r = requests.post(LOGIN_URL, data=param, verify=False, headers=self.get_cookie_header())
        html = r.text
        # Server-side error message (red text), if any.
        msgs = re.findall('id="msg"\sstyle="font-size:20px;color:red;">(.+)</div>', html)
        # Success is detected either by the redirect URL or a known
        # javascript snippet in the response body.
        url_changed = r.url.find('grdms.bit.edu.cn/yjs/login_cas.jsp') != -1
        key_word_contained = html.find(u"top.location = '/yjs/application/main.jsp'") != -1
        if url_changed or key_word_contained:
            print(r.cookies.items())
            # Login succeeded.
            self.cookies = r.cookies
            # Persist the cookies for later runs.
            self.save_cookies()
            return {
                'status': True,
                'msg': u'登录成功!'
            }
        else:
            # Login failed: surface the server's message when present.
            if msgs:
                return {
                    'status': False,
                    'msg': msgs[0]
                }
            else:
                return {
                    'status': False,
                    'msg': u'未知错误信息'
                }

    def get_http_session_id(self, prefix=False):
        r"""
        Extract the current JSESSIONID from the cookie jar.

        :param prefix: when True, prepend the 'ysj1app1~' node prefix
        :return: http session id (empty string when not present)
        """
        http_session_id = ''
        for item in self.cookies.items():
            if item[0] == 'JSESSIONID':
                http_session_id = item[1]
        if prefix:
            http_session_id = 'ysj1app1~' + http_session_id
        return http_session_id

    def logined(self):
        r"""
        Report whether cookies from a previous login are cached on disk.
        Loads them into self.cookies as a side effect when present.

        :return: True/False
        """
        path = os.path.join(BASE_DIR, 'cookie.txt')
        if os.path.exists(path):
            self.cookies = self.read_cookie()
            return True
        else:
            return False

    def read_cookie(self):
        r"""
        Read the pickled cookie jar from disk.

        :return: the unpickled cookies
        """
        # NOTE(review): unpickles a local cache file; only safe while
        # cookie.txt is trusted.
        path = os.path.join(BASE_DIR, 'cookie.txt')
        cookies = pickle.loads(open(path, 'rb').read())
        return cookies

    def save_cookies(self):
        r"""
        Pickle the current cookie jar to disk.

        :return: None
        """
        path = os.path.join(BASE_DIR, 'cookie.txt')
        open(path, 'wb').write(
            pickle.dumps(self.cookies)
        )

    def get_cookie_header(self):
        r"""
        Build a header dict carrying the current cookies.

        :return: {
            "Cookie": ...
        }
        """
        return {
            "Cookie": self.cookies_to_str(self.cookies)
        }

    def get_header_whole(self):
        r"""
        Build the full set of browser-like request headers, including
        the cookie header.

        :return: complete header dict
        """
        headers = self.get_cookie_header()
        headers_other = {
            'Accept': 'image/gif, image/jpeg, image/pjpeg, application/x-ms-application, application/xaml+xml, application/x-ms-xbap, */*',
            'Referer': 'http://grdms.bit.edu.cn/yjs/yanyuan/py/pyjxjh.do?method=queryListing',
            'Accept-Language': 'zh-CN',
            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; doyo 2.6.1)',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Accept-Encoding': 'gzip, deflate',
            'Content-Length': '668',
            'Host': 'grdms.bit.edu.cn',
            'Connection': 'Keep-Alive',
            'Pragma': 'no-cache'
        }
        headers = dict(headers, **headers_other)
        return headers
e1aad344e44883df09767703e8496275881b91e0 | 2,937 | py | Python | bot.py | VicOfDlt/JesStats | f787d3d87900934632ac44144760c273731565b0 | [
"MIT"
] | null | null | null | bot.py | VicOfDlt/JesStats | f787d3d87900934632ac44144760c273731565b0 | [
"MIT"
] | null | null | null | bot.py | VicOfDlt/JesStats | f787d3d87900934632ac44144760c273731565b0 | [
"MIT"
] | null | null | null | import discord, requests, os
from discord.ext import commands, tasks
from discord_components import DiscordComponents
from config import token, db
from typing import Union
from help_ import CustomHelpCommand
config = db["config"]
links = db["linked"]
presence_count = 0
intents = discord.Intents.all()
bot = commands.Bot(
command_prefix="/",
intents=intents,
help_command=CustomHelpCommand(),
case_insensitive=True,
)
DiscordComponents(bot)
@tasks.loop(seconds=20)
@bot.event
if __name__ == "__main__":
bot.load_extension("cogs.commands")
bot.load_extension("cogs.news")
bot.run(token)
| 26.459459 | 86 | 0.607423 | import discord, requests, os
from discord.ext import commands, tasks
from discord_components import DiscordComponents
from config import token, db
from typing import Union
from help_ import CustomHelpCommand
config = db["config"]
links = db["linked"]
presence_count = 0
async def get_embed(ctx: commands.Context, title=None, description=None, color=None):
    """Build a standard embed using the bot's configured footer/thumbnail.

    :param ctx: invocation context (used for the author line)
    :param title: optional embed title
    :param description: optional embed description
    :param color: optional color; falls back to the configured default
    :return: a ready-to-send discord.Embed
    """
    b_config = await config.find_one({"_id": "config"})
    if not color:
        color = b_config["color"]
    kwargs = {"color": color}
    if title:
        kwargs["title"] = title
    if description:
        kwargs["description"] = description
    embed = discord.Embed(**kwargs)
    embed.set_footer(
        text="Made by Vic__dtl and FusedTundra10",
        icon_url=b_config["footer"],
    )
    embed.set_author(
        name=ctx.message.author.display_name, icon_url=ctx.author.avatar.url
    )
    embed.set_thumbnail(url=b_config["thumbnail"])
    return embed
async def is_server_online(ctx):
    """Check the remote status API and report offline state to the channel.

    Returns True when the server is online; otherwise sends an embed to
    the channel and returns the sent message.
    """
    # NOTE(review): blocking requests call inside a coroutine, with no
    # timeout or error handling — confirm this is acceptable here.
    res = requests.get("https://jesapi2.herokuapp.com/4")
    active = res.json()["serverOnline"]
    if active:
        return True
    # NOTE(review): the returned Message object is truthy; callers that
    # treat any truthy value as "online" would misbehave — verify usage.
    return await ctx.send(
        embed=await get_embed(
            ctx, ":red_circle: The server is offline, can't find that info"
        )
    )
async def find_linked(user: Union[discord.User, int, commands.Context]):
    """Resolve a Discord user/context/id to the linked in-game name, if any.

    :param user: a User/Member, a Context, or a raw Discord id
    :return: the linked ign, or None when the user is not linked
    """
    if isinstance(user, commands.Context):
        id_ = user.message.author.id
    elif isinstance(user, (discord.User, discord.Member)):
        id_ = user.id
    else:
        id_ = user
    players = await links.find_one({"_id": "players"})
    # NOTE(review): this returns players["ign"] regardless of which id
    # matched — presumably the document maps ids to igns; verify the
    # schema, as this looks like it may return the wrong player's ign.
    if id_ in players:
        return players["ign"]
    else:
        return
# Gateway intents: request everything the bot may need.
intents = discord.Intents.all()
bot = commands.Bot(
    command_prefix="/",
    intents=intents,
    help_command=CustomHelpCommand(),
    case_insensitive=True,
)
# Enable discord-components (buttons/selects) support on this bot.
DiscordComponents(bot)
@tasks.loop(seconds=20)
async def change_presence():
    """Rotate the bot's presence every 20 seconds through a fixed cycle."""
    global presence_count
    presences = [
        {
            "activity": discord.Activity(
                type=discord.ActivityType.watching, name=f"{len(bot.guilds)} Servers"
            )
        },
        {
            "activity": discord.Activity(
                type=discord.ActivityType.watching, name=f"{len(bot.guilds)} Servers"
            )
        },
        {
            "activity": discord.Activity(
                type=discord.ActivityType.watching, name="ciel0"
            )
        },
    ]
    await bot.change_presence(**presences[presence_count])
    # Advance the rotation index, wrapping after the last entry.
    if presence_count == 2:
        presence_count = 0
    else:
        presence_count += 1
@bot.event
async def on_ready():
    # Log readiness and start the presence-rotation loop.
    print(f"{bot.user.name}: Bot loaded successfully. ID: {bot.user.id}")
    change_presence.start()
if __name__ == "__main__":
    # Load the command/news cogs, then connect to Discord.
    bot.load_extension("cogs.commands")
    bot.load_extension("cogs.news")
    bot.run(token)
| 2,147 | 0 | 121 |
2dd3cd31fed4537a30510222bf73cf19535656b0 | 1,121 | py | Python | Examples/Infovis/Python/boost_centrality.py | OpenGeoscience/VTK | a373e975b9284a022f43a062ebf5042bb17b4e44 | [
"BSD-3-Clause"
] | 1 | 2020-05-04T05:59:07.000Z | 2020-05-04T05:59:07.000Z | Examples/Infovis/Python/boost_centrality.py | OpenGeoscience/VTK | a373e975b9284a022f43a062ebf5042bb17b4e44 | [
"BSD-3-Clause"
] | null | null | null | Examples/Infovis/Python/boost_centrality.py | OpenGeoscience/VTK | a373e975b9284a022f43a062ebf5042bb17b4e44 | [
"BSD-3-Clause"
] | 5 | 2015-10-09T04:12:29.000Z | 2021-12-15T16:57:11.000Z | from vtk import *
source = vtkRandomGraphSource()
source.DirectedOff()
source.SetNumberOfVertices(50)
source.SetEdgeProbability(0.01)
source.SetUseEdgeProbability(True)
source.AllowParallelEdgesOn()
source.AllowSelfLoopsOn()
source.SetStartWithTree(True)
# Connect to the Boost centrality filter.
centrality = vtkBoostBrandesCentrality ()
centrality.SetInputConnection(source.GetOutputPort())
view = vtkGraphLayoutView()
view.AddRepresentationFromInputConnection(centrality.GetOutputPort())
view.SetVertexLabelArrayName("centrality")
view.SetVertexLabelVisibility(True)
view.SetVertexColorArrayName("centrality")
view.SetColorVertices(True)
view.SetEdgeLabelArrayName("centrality")
#view.SetEdgeLabelVisibility(True)
view.SetEdgeColorArrayName("centrality")
view.SetColorEdges(True)
view.SetLayoutStrategyToSimple2D()
theme = vtkViewTheme.CreateMellowTheme()
theme.SetLineWidth(5)
theme.SetPointSize(10)
theme.SetCellOpacity(1)
theme.SetVertexLabelColor(0, 0, 0)
view.ApplyViewTheme(theme)
theme.FastDelete()
view.GetRenderWindow().SetSize(600, 600)
view.ResetCamera()
view.Render()
view.GetInteractor().Start()
| 25.477273 | 69 | 0.831401 | from vtk import *
source = vtkRandomGraphSource()
source.DirectedOff()
source.SetNumberOfVertices(50)
source.SetEdgeProbability(0.01)
source.SetUseEdgeProbability(True)
source.AllowParallelEdgesOn()
source.AllowSelfLoopsOn()
source.SetStartWithTree(True)
# Connect to the Boost centrality filter.
centrality = vtkBoostBrandesCentrality ()
centrality.SetInputConnection(source.GetOutputPort())
view = vtkGraphLayoutView()
view.AddRepresentationFromInputConnection(centrality.GetOutputPort())
view.SetVertexLabelArrayName("centrality")
view.SetVertexLabelVisibility(True)
view.SetVertexColorArrayName("centrality")
view.SetColorVertices(True)
view.SetEdgeLabelArrayName("centrality")
#view.SetEdgeLabelVisibility(True)
view.SetEdgeColorArrayName("centrality")
view.SetColorEdges(True)
view.SetLayoutStrategyToSimple2D()
theme = vtkViewTheme.CreateMellowTheme()
theme.SetLineWidth(5)
theme.SetPointSize(10)
theme.SetCellOpacity(1)
theme.SetVertexLabelColor(0, 0, 0)
view.ApplyViewTheme(theme)
theme.FastDelete()
view.GetRenderWindow().SetSize(600, 600)
view.ResetCamera()
view.Render()
view.GetInteractor().Start()
| 0 | 0 | 0 |
b8e0783ae0bf6f0f8523e6b5ddc9acc14987632e | 2,731 | py | Python | exam14_02/script_Davide_Abbondandolo.py | Abbodavi/programming_davide_abbondandolo | b985cb0af0c5cd46d6b643410b0e106ddf4a1148 | [
"MIT"
] | null | null | null | exam14_02/script_Davide_Abbondandolo.py | Abbodavi/programming_davide_abbondandolo | b985cb0af0c5cd46d6b643410b0e106ddf4a1148 | [
"MIT"
] | null | null | null | exam14_02/script_Davide_Abbondandolo.py | Abbodavi/programming_davide_abbondandolo | b985cb0af0c5cd46d6b643410b0e106ddf4a1148 | [
"MIT"
] | null | null | null | import input_data #input import
BLOSUM52=input_data.BLOSUM52
seq2=input_data.seq2
seq1=input_data.seq1
matrix=NW_matrix(seq1,seq2,BLOSUM52,-2) #compute 2 matrix, gap penality is set to -2
result=allign(matrix[0],matrix[1],seq1,seq2) #use matrix obtained before to get the alignment
print(result[0],result[1]) #and print it with it's score | 47.912281 | 93 | 0.645185 | import input_data #input import
BLOSUM52=input_data.BLOSUM52
seq2=input_data.seq2
seq1=input_data.seq1
def NW_matrix(s1, s2, matrix, gap):
    """Build the Needleman-Wunsch score and traceback matrices.

    :param s1: first sequence (columns)
    :param s2: second sequence (rows)
    :param matrix: substitution scores keyed by two-residue strings (e.g. BLOSUM)
    :param gap: (negative) gap penalty
    :return: (score_matrix, move_matrix), each (len(s2)+1) x (len(s1)+1);
        moves are 'd' (align), 'v' (gap in s1), 'o' (gap in s2)
    """
    rows, cols = len(s2) + 1, len(s1) + 1
    score_m = [[0] * cols for _ in range(rows)]
    move_m = [[0] * cols for _ in range(rows)]

    # First row and first column represent leading gaps only.
    for col in range(1, cols):
        score_m[0][col] = col * gap
        move_m[0][col] = "o"
    for row in range(1, rows):
        score_m[row][0] = row * gap
        move_m[row][0] = "v"

    for row in range(1, rows):
        for col in range(1, cols):
            gap_in_s2 = score_m[row][col - 1] + gap
            gap_in_s1 = score_m[row - 1][col] + gap
            aligned = score_m[row - 1][col - 1] + matrix[s2[row - 1] + s1[col - 1]]
            best = max(gap_in_s2, gap_in_s1, aligned)
            score_m[row][col] = best
            # Tie-breaking preference matches the original: d, then v, then o.
            if best == aligned:
                move_m[row][col] = "d"
            elif best == gap_in_s1:
                move_m[row][col] = "v"
            else:
                move_m[row][col] = "o"
    return score_m, move_m
def allign(F, P, s1, s2):
    """Trace back through the Needleman-Wunsch matrices to build the alignment.

    :param F: score matrix from NW_matrix
    :param P: move matrix from NW_matrix ('d' align, 'v' gap in s1, 'o' gap in s2)
    :param s1: first sequence (columns)
    :param s2: second sequence (rows)
    :return: (alignment string "aligned_s1\naligned_s2", alignment score)
    """
    i = len(s2)  # row pointer, starts at the bottom-right corner
    j = len(s1)  # column pointer
    score = F[i][j]  # bottom-right cell holds the global alignment score
    all1 = ""
    all2 = ""
    # Bug fix: the traceback must continue until BOTH pointers reach the
    # origin ('or', not 'and'); with 'and' it stopped as soon as either
    # pointer hit 0, silently dropping the remaining residues of the other
    # sequence. The first row/column of P ('o'/'v') supplies the edge moves.
    while i != 0 or j != 0:
        if P[i][j] == "d":
            # Align the two residues.
            all1 = all1 + s1[j - 1]
            all2 = all2 + s2[i - 1]
            i -= 1
            j -= 1
        elif P[i][j] == "v":
            # Gap in s1, consume a residue of s2.
            all1 = all1 + "-"
            all2 = all2 + s2[i - 1]
            i -= 1
        elif P[i][j] == "o":
            # Gap in s2, consume a residue of s1.
            all1 = all1 + s1[j - 1]
            all2 = all2 + "-"
            j -= 1
    # Characters were collected right-to-left; reverse before output.
    all1 = all1[::-1]
    all2 = all2[::-1]
    res = all1 + "\n" + all2
    return res, score
matrix = NW_matrix(seq1, seq2, BLOSUM52, -2)  # compute the two matrices; gap penalty is set to -2
result = allign(matrix[0], matrix[1], seq1, seq2)  # use matrix obtained before to get the alignment
print(result[0], result[1])  # and print it with its score
5c365fe033cb87c245810ffc125f74663dd50b0f | 850 | py | Python | 2016/1/b.py | WilliamHayward/Advent-Of-Code | fdcc3677632dc79d96233f98527a1db2e41704dd | [
"MIT"
] | null | null | null | 2016/1/b.py | WilliamHayward/Advent-Of-Code | fdcc3677632dc79d96233f98527a1db2e41704dd | [
"MIT"
] | null | null | null | 2016/1/b.py | WilliamHayward/Advent-Of-Code | fdcc3677632dc79d96233f98527a1db2e41704dd | [
"MIT"
] | null | null | null | directions = open('input', 'r').read().strip().split(', ')
x = 0
y = 0
facing = 0 #North
visited = {}
visited[(x, y)] = True
for direction in directions:
if direction[0] == 'R':
facing += 1
else:
facing -= 1
if facing < 0:
facing = 3
elif facing > 3:
facing = 0
count = int(direction[1:])
for i in range(count):
if facing == 0: # North
y += 1
elif facing == 1: # East
x += 1
elif facing == 2: # South
y -= 1
elif facing == 3: # West
x -= 1
position = (x, y)
if visited.get(position, False):
# Absolute values because direction isn't relevant, just distance
x = abs(x)
y = abs(y)
print(x + y)
exit()
visited[position] = True
| 22.368421 | 77 | 0.458824 | directions = open('input', 'r').read().strip().split(', ')
x = 0
y = 0
facing = 0  # 0 = North, 1 = East, 2 = South, 3 = West
visited = {}
visited[(x, y)] = True
for direction in directions:
    # Each token is a turn letter followed by a step count, e.g. "R2".
    if direction[0] == 'R':
        facing += 1
    else:
        facing -= 1
    # Wrap the facing value into the 0..3 range.
    if facing < 0:
        facing = 3
    elif facing > 3:
        facing = 0
    count = int(direction[1:])
    # Step one block at a time so every intermediate position is checked.
    for i in range(count):
        if facing == 0:  # North
            y += 1
        elif facing == 1:  # East
            x += 1
        elif facing == 2:  # South
            y -= 1
        elif facing == 3:  # West
            x -= 1
        position = (x, y)
        if visited.get(position, False):
            # First location visited twice: print its Manhattan distance.
            # Absolute values because direction isn't relevant, just distance
            x = abs(x)
            y = abs(y)
            print(x + y)
            exit()
        visited[position] = True
86434d75753f7cb9ab26d0db01b9cca8cf89f27c | 71,882 | py | Python | infoblox_netmri/api/broker/v2_3_0/routing_area_member_broker.py | infobloxopen/infoblox_netmri | aa1c744df7e439dbe163bb9edd165e4e85a9771b | [
"Apache-2.0"
] | 12 | 2016-02-19T12:37:54.000Z | 2022-03-04T20:11:08.000Z | infoblox_netmri/api/broker/v2_3_0/routing_area_member_broker.py | azinfoblox/infoblox-netmri | 02372c5231e2677ab6299cb659a73c9a41b4b0f4 | [
"Apache-2.0"
] | 18 | 2015-11-12T18:37:00.000Z | 2021-05-19T07:59:55.000Z | infoblox_netmri/api/broker/v2_3_0/routing_area_member_broker.py | azinfoblox/infoblox-netmri | 02372c5231e2677ab6299cb659a73c9a41b4b0f4 | [
"Apache-2.0"
] | 18 | 2016-01-07T12:04:34.000Z | 2022-03-31T11:05:41.000Z | from ..broker import Broker
| 56.644602 | 765 | 0.635903 | from ..broker import Broker
class RoutingAreaMemberBroker(Broker):
controller = "routing_area_members"
def show(self, **kwargs):
"""Shows the details for the specified routing area member.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param RoutingAreaMemberID: The internal NetMRI identifier for this routing area membership.
:type RoutingAreaMemberID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of routing area member methods. The listed methods will be called on each routing area member returned and included in the output. Available methods are: data_source, device, interface, routing_area, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, routing_area.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return routing_area_member: The routing area member identified by the specified RoutingAreaMemberID.
:rtype routing_area_member: RoutingAreaMember
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def index(self, **kwargs):
"""Lists the available routing area members. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which this routing area membership was collected.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which this routing area membership was collected.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param RoutingAreaID: The internal NetMRI identifier for the routing area or autonomous system associated with this membership.
:type RoutingAreaID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RoutingAreaID: The internal NetMRI identifier for the routing area or autonomous system associated with this membership.
:type RoutingAreaID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param RoutingAreaMemberID: The internal NetMRI identifier for this routing area membership.
:type RoutingAreaMemberID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RoutingAreaMemberID: The internal NetMRI identifier for this routing area membership.
:type RoutingAreaMemberID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the routing area members as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of routing area member methods. The listed methods will be called on each routing area member returned and included in the output. Available methods are: data_source, device, interface, routing_area, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, routing_area.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` RoutingAreaMemberID
:param sort: The data field(s) to use for sorting the output. Default is RoutingAreaMemberID. Valid values are RoutingAreaMemberID, RoutingAreaMemberStartTime, RoutingAreaMemberEndTime, RoutingAreaMemberChangedCols, RoutingAreaMemberTimestamp, RoutingAreaMemberSource, DataSourceID, DeviceID, RoutingAreaID, OspfAuthType, OspfImportAsExtern, OspfSpfRunsDelta, OspfAreaBdrRtrCount, OspfAsBdrRtrCount, OspfAreaLsaCount, OspfAreaLsaCksumSum, OspfAreaSummaryInd.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each RoutingAreaMember. Valid values are RoutingAreaMemberID, RoutingAreaMemberStartTime, RoutingAreaMemberEndTime, RoutingAreaMemberChangedCols, RoutingAreaMemberTimestamp, RoutingAreaMemberSource, DataSourceID, DeviceID, RoutingAreaID, OspfAuthType, OspfImportAsExtern, OspfSpfRunsDelta, OspfAreaBdrRtrCount, OspfAsBdrRtrCount, OspfAreaLsaCount, OspfAreaLsaCksumSum, OspfAreaSummaryInd. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return routing_area_members: An array of the RoutingAreaMember objects that match the specified input criteria.
:rtype routing_area_members: Array of RoutingAreaMember
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
    def search(self, **kwargs):
        """Lists the available routing area members matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.

            **Inputs**

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
            :type DataSourceID: Integer

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
            :type DataSourceID: Array of Integer

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param DeviceID: The internal NetMRI identifier for the device from which this routing area membership was collected.
            :type DeviceID: Integer

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param DeviceID: The internal NetMRI identifier for the device from which this routing area membership was collected.
            :type DeviceID: Array of Integer

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param OspfAreaBdrRtrCount: The total number of area border routers reachable within this area. This is initially zero, and is calculated in each SPF Pass.
            :type OspfAreaBdrRtrCount: Integer

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param OspfAreaBdrRtrCount: The total number of area border routers reachable within this area. This is initially zero, and is calculated in each SPF Pass.
            :type OspfAreaBdrRtrCount: Array of Integer

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param OspfAreaLsaCksumSum: The sum of the link-state advertisements' LS checksums contained in this area's link-state database on this router. This sum excludes external (LS type 5) link-state advertisements. The sum can be used to determine if there has been a change in a router's link state database, and to compare the link-state database of two routers.
            :type OspfAreaLsaCksumSum: Integer

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param OspfAreaLsaCksumSum: The sum of the link-state advertisements' LS checksums contained in this area's link-state database on this router. This sum excludes external (LS type 5) link-state advertisements. The sum can be used to determine if there has been a change in a router's link state database, and to compare the link-state database of two routers.
            :type OspfAreaLsaCksumSum: Array of Integer

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param OspfAreaLsaCount: The total number of link-state advertisements in this area's link-state database on this router, excluding AS External LSA's.
            :type OspfAreaLsaCount: Integer

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param OspfAreaLsaCount: The total number of link-state advertisements in this area's link-state database on this router, excluding AS External LSA's.
            :type OspfAreaLsaCount: Array of Integer

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param OspfAreaSummaryInd: Indicates how the router handles import of summary LSAs into stub areas. It has no effect on other areas.
                If true, then the router will both summarize and propagate summary LSAs into the stub area.
                Otherwise, the router will neither originate nor propagate summary LSAs. It will rely entirely on its default route.
            :type OspfAreaSummaryInd: Boolean

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param OspfAreaSummaryInd: Indicates how the router handles import of summary LSAs into stub areas. It has no effect on other areas.
                If true, then the router will both summarize and propagate summary LSAs into the stub area.
                Otherwise, the router will neither originate nor propagate summary LSAs. It will rely entirely on its default route.
            :type OspfAreaSummaryInd: Array of Boolean

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param OspfAsBdrRtrCount: The total number of Autonomous System border routers reachable within this area, according to this router. This is initially zero, and is calculated in each SPF Pass.
            :type OspfAsBdrRtrCount: Integer

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param OspfAsBdrRtrCount: The total number of Autonomous System border routers reachable within this area, according to this router. This is initially zero, and is calculated in each SPF Pass.
            :type OspfAsBdrRtrCount: Array of Integer

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param OspfAuthType: The type of authentication configured on this area for this router.
            :type OspfAuthType: String

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param OspfAuthType: The type of authentication configured on this area for this router.
            :type OspfAuthType: Array of String

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param OspfImportAsExtern: The type of this OSPF area, according to this router. (importExternal for a standard are, noImportExternal for a stub, and importNssa for a not-so-stubby area).
            :type OspfImportAsExtern: String

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param OspfImportAsExtern: The type of this OSPF area, according to this router. (importExternal for a standard are, noImportExternal for a stub, and importNssa for a not-so-stubby area).
            :type OspfImportAsExtern: Array of String

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param OspfSpfRunsDelta: The number of times that the intra-area route table has been calculated using this area's link-state database (typically using Dijkstra's algorithm), since the last time NetMRI polled the device.
            :type OspfSpfRunsDelta: Integer

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param OspfSpfRunsDelta: The number of times that the intra-area route table has been calculated using this area's link-state database (typically using Dijkstra's algorithm), since the last time NetMRI polled the device.
            :type OspfSpfRunsDelta: Array of Integer

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param RoutingAreaID: The internal NetMRI identifier for the routing area or autonomous system associated with this membership.
            :type RoutingAreaID: Integer

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param RoutingAreaID: The internal NetMRI identifier for the routing area or autonomous system associated with this membership.
            :type RoutingAreaID: Array of Integer

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param RoutingAreaMemberChangedCols: The fields that changed between this revision of the record and the previous revision.
            :type RoutingAreaMemberChangedCols: String

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param RoutingAreaMemberChangedCols: The fields that changed between this revision of the record and the previous revision.
            :type RoutingAreaMemberChangedCols: Array of String

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param RoutingAreaMemberEndTime: The ending effective time of this revision of this record, or empty if still in effect.
            :type RoutingAreaMemberEndTime: DateTime

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param RoutingAreaMemberEndTime: The ending effective time of this revision of this record, or empty if still in effect.
            :type RoutingAreaMemberEndTime: Array of DateTime

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param RoutingAreaMemberID: The internal NetMRI identifier for this routing area membership.
            :type RoutingAreaMemberID: Integer

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param RoutingAreaMemberID: The internal NetMRI identifier for this routing area membership.
            :type RoutingAreaMemberID: Array of Integer

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param RoutingAreaMemberSource: Internal tracking information for NetMRI algorithms.
            :type RoutingAreaMemberSource: String

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param RoutingAreaMemberSource: Internal tracking information for NetMRI algorithms.
            :type RoutingAreaMemberSource: Array of String

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param RoutingAreaMemberStartTime: The starting effective time of this revision of the record.
            :type RoutingAreaMemberStartTime: DateTime

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param RoutingAreaMemberStartTime: The starting effective time of this revision of the record.
            :type RoutingAreaMemberStartTime: Array of DateTime

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param RoutingAreaMemberTimestamp: The date and time this record was collected or calculated.
            :type RoutingAreaMemberTimestamp: DateTime

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param RoutingAreaMemberTimestamp: The date and time this record was collected or calculated.
            :type RoutingAreaMemberTimestamp: Array of DateTime

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
            :type DeviceGroupID: Array of Integer

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param timestamp: The data returned will represent the routing area members as of this date and time. If omitted, the result will indicate the most recently collected data.
            :type timestamp: DateTime

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param methods: A list of routing area member methods. The listed methods will be called on each routing area member returned and included in the output. Available methods are: data_source, device, interface, routing_area, infradevice.
            :type methods: Array of String

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, routing_area.
            :type include: Array of String

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` 0

            :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
            :type start: Integer

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` 1000

            :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
            :type limit: Integer

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` RoutingAreaMemberID

            :param sort: The data field(s) to use for sorting the output. Default is RoutingAreaMemberID. Valid values are RoutingAreaMemberID, RoutingAreaMemberStartTime, RoutingAreaMemberEndTime, RoutingAreaMemberChangedCols, RoutingAreaMemberTimestamp, RoutingAreaMemberSource, DataSourceID, DeviceID, RoutingAreaID, OspfAuthType, OspfImportAsExtern, OspfSpfRunsDelta, OspfAreaBdrRtrCount, OspfAsBdrRtrCount, OspfAreaLsaCount, OspfAreaLsaCksumSum, OspfAreaSummaryInd.
            :type sort: Array of String

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` asc

            :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
            :type dir: Array of String

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param select: The list of attributes to return for each RoutingAreaMember. Valid values are RoutingAreaMemberID, RoutingAreaMemberStartTime, RoutingAreaMemberEndTime, RoutingAreaMemberChangedCols, RoutingAreaMemberTimestamp, RoutingAreaMemberSource, DataSourceID, DeviceID, RoutingAreaID, OspfAuthType, OspfImportAsExtern, OspfSpfRunsDelta, OspfAreaBdrRtrCount, OspfAsBdrRtrCount, OspfAreaLsaCount, OspfAreaLsaCksumSum, OspfAreaSummaryInd. If empty or omitted, all attributes will be returned.
            :type select: Array

            | ``api version min:`` 2.8
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
            :type goto_field: String

            | ``api version min:`` 2.8
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
            :type goto_value: String

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param query: This value will be matched against routing area members, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DataSourceID, DeviceID, OspfAreaBdrRtrCount, OspfAreaLsaCksumSum, OspfAreaLsaCount, OspfAreaSummaryInd, OspfAsBdrRtrCount, OspfAuthType, OspfImportAsExtern, OspfSpfRunsDelta, RoutingAreaID, RoutingAreaMemberChangedCols, RoutingAreaMemberEndTime, RoutingAreaMemberID, RoutingAreaMemberSource, RoutingAreaMemberStartTime, RoutingAreaMemberTimestamp.
            :type query: String

            | ``api version min:`` 2.3
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
            :type xml_filter: String

            **Outputs**

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :return routing_area_members: An array of the RoutingAreaMember objects that match the specified input criteria.
            :rtype routing_area_members: Array of RoutingAreaMember
            """

        # Thin delegation: all filtering/sorting/paging options in **kwargs are
        # forwarded unmodified to the generic list-request dispatcher, which
        # resolves the fully-qualified API method name for this broker.
        return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available routing area members matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DataSourceID, DeviceID, OspfAreaBdrRtrCount, OspfAreaLsaCksumSum, OspfAreaLsaCount, OspfAreaSummaryInd, OspfAsBdrRtrCount, OspfAuthType, OspfImportAsExtern, OspfSpfRunsDelta, RoutingAreaID, RoutingAreaMemberChangedCols, RoutingAreaMemberEndTime, RoutingAreaMemberID, RoutingAreaMemberSource, RoutingAreaMemberStartTime, RoutingAreaMemberTimestamp.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device from which this routing area membership was collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_OspfAreaBdrRtrCount: The operator to apply to the field OspfAreaBdrRtrCount. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. OspfAreaBdrRtrCount: The total number of area border routers reachable within this area. This is initially zero, and is calculated in each SPF Pass. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_OspfAreaBdrRtrCount: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_OspfAreaBdrRtrCount: If op_OspfAreaBdrRtrCount is specified, the field named in this input will be compared to the value in OspfAreaBdrRtrCount using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_OspfAreaBdrRtrCount must be specified if op_OspfAreaBdrRtrCount is specified.
:type val_f_OspfAreaBdrRtrCount: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_OspfAreaBdrRtrCount: If op_OspfAreaBdrRtrCount is specified, this value will be compared to the value in OspfAreaBdrRtrCount using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_OspfAreaBdrRtrCount must be specified if op_OspfAreaBdrRtrCount is specified.
:type val_c_OspfAreaBdrRtrCount: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_OspfAreaLsaCksumSum: The operator to apply to the field OspfAreaLsaCksumSum. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. OspfAreaLsaCksumSum: The sum of the link-state advertisements' LS checksums contained in this area's link-state database on this router. This sum excludes external (LS type 5) link-state advertisements. The sum can be used to determine if there has been a change in a router's link state database, and to compare the link-state database of two routers. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_OspfAreaLsaCksumSum: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_OspfAreaLsaCksumSum: If op_OspfAreaLsaCksumSum is specified, the field named in this input will be compared to the value in OspfAreaLsaCksumSum using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_OspfAreaLsaCksumSum must be specified if op_OspfAreaLsaCksumSum is specified.
:type val_f_OspfAreaLsaCksumSum: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_OspfAreaLsaCksumSum: If op_OspfAreaLsaCksumSum is specified, this value will be compared to the value in OspfAreaLsaCksumSum using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_OspfAreaLsaCksumSum must be specified if op_OspfAreaLsaCksumSum is specified.
:type val_c_OspfAreaLsaCksumSum: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_OspfAreaLsaCount: The operator to apply to the field OspfAreaLsaCount. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. OspfAreaLsaCount: The total number of link-state advertisements in this area's link-state database on this router, excluding AS External LSA's. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_OspfAreaLsaCount: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_OspfAreaLsaCount: If op_OspfAreaLsaCount is specified, the field named in this input will be compared to the value in OspfAreaLsaCount using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_OspfAreaLsaCount must be specified if op_OspfAreaLsaCount is specified.
:type val_f_OspfAreaLsaCount: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_OspfAreaLsaCount: If op_OspfAreaLsaCount is specified, this value will be compared to the value in OspfAreaLsaCount using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_OspfAreaLsaCount must be specified if op_OspfAreaLsaCount is specified.
:type val_c_OspfAreaLsaCount: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_OspfAreaSummaryInd: The operator to apply to the field OspfAreaSummaryInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. OspfAreaSummaryInd: Indicates how the router handles import of summary LSAs into stub areas. It has no effect on other areas.
If true, then the router will both summarize and propagate summary LSAs into the stub area.
Otherwise, the router will neither originate nor propagate summary LSAs. It will rely entirely on its default route. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_OspfAreaSummaryInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_OspfAreaSummaryInd: If op_OspfAreaSummaryInd is specified, the field named in this input will be compared to the value in OspfAreaSummaryInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_OspfAreaSummaryInd must be specified if op_OspfAreaSummaryInd is specified.
:type val_f_OspfAreaSummaryInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_OspfAreaSummaryInd: If op_OspfAreaSummaryInd is specified, this value will be compared to the value in OspfAreaSummaryInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_OspfAreaSummaryInd must be specified if op_OspfAreaSummaryInd is specified.
:type val_c_OspfAreaSummaryInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_OspfAsBdrRtrCount: The operator to apply to the field OspfAsBdrRtrCount. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. OspfAsBdrRtrCount: The total number of Autonomous System border routers reachable within this area, according to this router. This is initially zero, and is calculated in each SPF Pass. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_OspfAsBdrRtrCount: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_OspfAsBdrRtrCount: If op_OspfAsBdrRtrCount is specified, the field named in this input will be compared to the value in OspfAsBdrRtrCount using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_OspfAsBdrRtrCount must be specified if op_OspfAsBdrRtrCount is specified.
:type val_f_OspfAsBdrRtrCount: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_OspfAsBdrRtrCount: If op_OspfAsBdrRtrCount is specified, this value will be compared to the value in OspfAsBdrRtrCount using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_OspfAsBdrRtrCount must be specified if op_OspfAsBdrRtrCount is specified.
:type val_c_OspfAsBdrRtrCount: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_OspfAuthType: The operator to apply to the field OspfAuthType. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. OspfAuthType: The type of authentication configured on this area for this router. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_OspfAuthType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_OspfAuthType: If op_OspfAuthType is specified, the field named in this input will be compared to the value in OspfAuthType using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_OspfAuthType must be specified if op_OspfAuthType is specified.
:type val_f_OspfAuthType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_OspfAuthType: If op_OspfAuthType is specified, this value will be compared to the value in OspfAuthType using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_OspfAuthType must be specified if op_OspfAuthType is specified.
:type val_c_OspfAuthType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_OspfImportAsExtern: The operator to apply to the field OspfImportAsExtern. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. OspfImportAsExtern: The type of this OSPF area, according to this router. (importExternal for a standard are, noImportExternal for a stub, and importNssa for a not-so-stubby area). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_OspfImportAsExtern: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_OspfImportAsExtern: If op_OspfImportAsExtern is specified, the field named in this input will be compared to the value in OspfImportAsExtern using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_OspfImportAsExtern must be specified if op_OspfImportAsExtern is specified.
:type val_f_OspfImportAsExtern: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_OspfImportAsExtern: If op_OspfImportAsExtern is specified, this value will be compared to the value in OspfImportAsExtern using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_OspfImportAsExtern must be specified if op_OspfImportAsExtern is specified.
:type val_c_OspfImportAsExtern: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_OspfSpfRunsDelta: The operator to apply to the field OspfSpfRunsDelta. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. OspfSpfRunsDelta: The number of times that the intra-area route table has been calculated using this area's link-state database (typically using Dijkstra's algorithm), since the last time NetMRI polled the device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_OspfSpfRunsDelta: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_OspfSpfRunsDelta: If op_OspfSpfRunsDelta is specified, the field named in this input will be compared to the value in OspfSpfRunsDelta using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_OspfSpfRunsDelta must be specified if op_OspfSpfRunsDelta is specified.
:type val_f_OspfSpfRunsDelta: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_OspfSpfRunsDelta: If op_OspfSpfRunsDelta is specified, this value will be compared to the value in OspfSpfRunsDelta using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_OspfSpfRunsDelta must be specified if op_OspfSpfRunsDelta is specified.
:type val_c_OspfSpfRunsDelta: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RoutingAreaID: The operator to apply to the field RoutingAreaID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RoutingAreaID: The internal NetMRI identifier for the routing area or autonomous system associated with this membership. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_RoutingAreaID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RoutingAreaID: If op_RoutingAreaID is specified, the field named in this input will be compared to the value in RoutingAreaID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RoutingAreaID must be specified if op_RoutingAreaID is specified.
:type val_f_RoutingAreaID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RoutingAreaID: If op_RoutingAreaID is specified, this value will be compared to the value in RoutingAreaID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RoutingAreaID must be specified if op_RoutingAreaID is specified.
:type val_c_RoutingAreaID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RoutingAreaMemberChangedCols: The operator to apply to the field RoutingAreaMemberChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RoutingAreaMemberChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_RoutingAreaMemberChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RoutingAreaMemberChangedCols: If op_RoutingAreaMemberChangedCols is specified, the field named in this input will be compared to the value in RoutingAreaMemberChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RoutingAreaMemberChangedCols must be specified if op_RoutingAreaMemberChangedCols is specified.
:type val_f_RoutingAreaMemberChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RoutingAreaMemberChangedCols: If op_RoutingAreaMemberChangedCols is specified, this value will be compared to the value in RoutingAreaMemberChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RoutingAreaMemberChangedCols must be specified if op_RoutingAreaMemberChangedCols is specified.
:type val_c_RoutingAreaMemberChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RoutingAreaMemberEndTime: The operator to apply to the field RoutingAreaMemberEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RoutingAreaMemberEndTime: The ending effective time of this revision of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_RoutingAreaMemberEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RoutingAreaMemberEndTime: If op_RoutingAreaMemberEndTime is specified, the field named in this input will be compared to the value in RoutingAreaMemberEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RoutingAreaMemberEndTime must be specified if op_RoutingAreaMemberEndTime is specified.
:type val_f_RoutingAreaMemberEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RoutingAreaMemberEndTime: If op_RoutingAreaMemberEndTime is specified, this value will be compared to the value in RoutingAreaMemberEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RoutingAreaMemberEndTime must be specified if op_RoutingAreaMemberEndTime is specified.
:type val_c_RoutingAreaMemberEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RoutingAreaMemberID: The operator to apply to the field RoutingAreaMemberID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RoutingAreaMemberID: The internal NetMRI identifier for this routing area membership. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_RoutingAreaMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RoutingAreaMemberID: If op_RoutingAreaMemberID is specified, the field named in this input will be compared to the value in RoutingAreaMemberID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RoutingAreaMemberID must be specified if op_RoutingAreaMemberID is specified.
:type val_f_RoutingAreaMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RoutingAreaMemberID: If op_RoutingAreaMemberID is specified, this value will be compared to the value in RoutingAreaMemberID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RoutingAreaMemberID must be specified if op_RoutingAreaMemberID is specified.
:type val_c_RoutingAreaMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RoutingAreaMemberSource: The operator to apply to the field RoutingAreaMemberSource. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RoutingAreaMemberSource: Internal tracking information for NetMRI algorithms. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_RoutingAreaMemberSource: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RoutingAreaMemberSource: If op_RoutingAreaMemberSource is specified, the field named in this input will be compared to the value in RoutingAreaMemberSource using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RoutingAreaMemberSource must be specified if op_RoutingAreaMemberSource is specified.
:type val_f_RoutingAreaMemberSource: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RoutingAreaMemberSource: If op_RoutingAreaMemberSource is specified, this value will be compared to the value in RoutingAreaMemberSource using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RoutingAreaMemberSource must be specified if op_RoutingAreaMemberSource is specified.
:type val_c_RoutingAreaMemberSource: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RoutingAreaMemberStartTime: The operator to apply to the field RoutingAreaMemberStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RoutingAreaMemberStartTime: The starting effective time of this revision of the record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_RoutingAreaMemberStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RoutingAreaMemberStartTime: If op_RoutingAreaMemberStartTime is specified, the field named in this input will be compared to the value in RoutingAreaMemberStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RoutingAreaMemberStartTime must be specified if op_RoutingAreaMemberStartTime is specified.
:type val_f_RoutingAreaMemberStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RoutingAreaMemberStartTime: If op_RoutingAreaMemberStartTime is specified, this value will be compared to the value in RoutingAreaMemberStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RoutingAreaMemberStartTime must be specified if op_RoutingAreaMemberStartTime is specified.
:type val_c_RoutingAreaMemberStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RoutingAreaMemberTimestamp: The operator to apply to the field RoutingAreaMemberTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RoutingAreaMemberTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_RoutingAreaMemberTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RoutingAreaMemberTimestamp: If op_RoutingAreaMemberTimestamp is specified, the field named in this input will be compared to the value in RoutingAreaMemberTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RoutingAreaMemberTimestamp must be specified if op_RoutingAreaMemberTimestamp is specified.
:type val_f_RoutingAreaMemberTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RoutingAreaMemberTimestamp: If op_RoutingAreaMemberTimestamp is specified, this value will be compared to the value in RoutingAreaMemberTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RoutingAreaMemberTimestamp must be specified if op_RoutingAreaMemberTimestamp is specified.
:type val_c_RoutingAreaMemberTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the routing area members as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of routing area member methods. The listed methods will be called on each routing area member returned and included in the output. Available methods are: data_source, device, interface, routing_area, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, routing_area.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` RoutingAreaMemberID
:param sort: The data field(s) to use for sorting the output. Default is RoutingAreaMemberID. Valid values are RoutingAreaMemberID, RoutingAreaMemberStartTime, RoutingAreaMemberEndTime, RoutingAreaMemberChangedCols, RoutingAreaMemberTimestamp, RoutingAreaMemberSource, DataSourceID, DeviceID, RoutingAreaID, OspfAuthType, OspfImportAsExtern, OspfSpfRunsDelta, OspfAreaBdrRtrCount, OspfAsBdrRtrCount, OspfAreaLsaCount, OspfAreaLsaCksumSum, OspfAreaSummaryInd.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each RoutingAreaMember. Valid values are RoutingAreaMemberID, RoutingAreaMemberStartTime, RoutingAreaMemberEndTime, RoutingAreaMemberChangedCols, RoutingAreaMemberTimestamp, RoutingAreaMemberSource, DataSourceID, DeviceID, RoutingAreaID, OspfAuthType, OspfImportAsExtern, OspfSpfRunsDelta, OspfAreaBdrRtrCount, OspfAsBdrRtrCount, OspfAreaLsaCount, OspfAreaLsaCksumSum, OspfAreaSummaryInd. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return routing_area_members: An array of the RoutingAreaMember objects that match the specified input criteria.
:rtype routing_area_members: Array of RoutingAreaMember
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def routing_area(self, **kwargs):
    """Return the routing area or autonomous system associated with this membership.

    :param RoutingAreaMemberID: The internal NetMRI identifier for this routing
        area membership (required).
    :type RoutingAreaMemberID: Integer
    :return: The RoutingArea object for this membership.
    :rtype: RoutingArea
    """
    # Resolve the fully-qualified API method name, then issue the request
    # with the caller-supplied filter arguments.
    full_name = self._get_method_fullname("routing_area")
    return self.api_request(full_name, kwargs)
def data_source(self, **kwargs):
    """Return the NetMRI collector that collected this data record.

    :param RoutingAreaMemberID: The internal NetMRI identifier for this routing
        area membership (required).
    :type RoutingAreaMemberID: Integer
    :return: The DataSource object that collected this record.
    :rtype: DataSource
    """
    # Build the qualified endpoint name before dispatching the API call.
    full_name = self._get_method_fullname("data_source")
    return self.api_request(full_name, kwargs)
def interface(self, **kwargs):
    """Return the interface used to participate in this routing area.

    :param RoutingAreaMemberID: The internal NetMRI identifier for this routing
        area membership (required).
    :type RoutingAreaMemberID: Integer
    :return: The Interface object for this membership.
    :rtype: Interface
    """
    # Two-step dispatch: resolve the endpoint name, then perform the request.
    full_name = self._get_method_fullname("interface")
    return self.api_request(full_name, kwargs)
def infradevice(self, **kwargs):
    """Return the device from which this routing area membership was collected.

    :param RoutingAreaMemberID: The internal NetMRI identifier for this routing
        area membership (required).
    :type RoutingAreaMemberID: Integer
    :return: The InfraDevice object that sourced this membership record.
    :rtype: InfraDevice
    """
    # Qualify the endpoint name first, then forward the caller's arguments.
    full_name = self._get_method_fullname("infradevice")
    return self.api_request(full_name, kwargs)
def device(self, **kwargs):
    """Return the device from which this routing area membership was collected.

    :param RoutingAreaMemberID: The internal NetMRI identifier for this routing
        area membership (required).
    :type RoutingAreaMemberID: Integer
    :return device: The DeviceConfig object that sourced this membership record.
    :rtype device: DeviceConfig
    """
    # Look up the fully-qualified method name, then issue the API request.
    full_name = self._get_method_fullname("device")
    return self.api_request(full_name, kwargs)
| 0 | 71,830 | 23 |