hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
38790fc9c1e1af653614fcb057c058b3e9e09917 | 3,398 | py | Python | molsysmt/api_forms/api_openmm_GromacsGroFile.py | uibcdf/MolModMTs | 4f6b6f671a9fa3e73008d1e9c48686d5f20a6573 | [
"MIT"
] | null | null | null | molsysmt/api_forms/api_openmm_GromacsGroFile.py | uibcdf/MolModMTs | 4f6b6f671a9fa3e73008d1e9c48686d5f20a6573 | [
"MIT"
] | null | null | null | molsysmt/api_forms/api_openmm_GromacsGroFile.py | uibcdf/MolModMTs | 4f6b6f671a9fa3e73008d1e9c48686d5f20a6573 | [
"MIT"
] | null | null | null | from molsysmt._private.exceptions import *
from molsysmt.item.openmm_GromacsGroFile.is_openmm_GromacsGroFile import is_openmm_GromacsGroFile as is_form
from molsysmt.item.openmm_GromacsGroFile.extract import extract
from molsysmt.item.openmm_GromacsGroFile.add import add
from molsysmt.item.openmm_GromacsGroFile.append_structures import append_structures
from molsysmt.item.openmm_GromacsGroFile.get import *
from molsysmt.item.openmm_GromacsGroFile.set import *
# Registry metadata for the openmm.GromacsGroFile form used by MolSysMT's
# api_forms dispatch layer.
form_name='openmm.GromacsGroFile'
form_type='class'
form_info=["",""]

# Capability table queried by the generic get/set machinery: True means the
# attribute can be served by this form, False means it is unavailable here.
form_attributes = {

    # atom-level attributes
    'atom_index' : True,
    'atom_id' : True,
    'atom_name' : True,
    'atom_type' : True,

    # bond-level attributes
    'bond_index' : True,
    'bond_id' : True,
    'bond_name' : True,
    'bond_type' : True,
    'bond_order' : True,

    # group-level attributes
    'group_index' : True,
    'group_id' : True,
    'group_name' : True,
    'group_type' : True,

    # component-level attributes (not provided by this form)
    'component_index' : False,
    'component_id' : False,
    'component_name' : False,
    'component_type' : False,

    # molecule-level attributes
    'molecule_index' : True,
    'molecule_id' : True,
    'molecule_name' : True,
    'molecule_type' : True,

    # chain-level attributes
    'chain_index' : True,
    'chain_id' : True,
    'chain_name' : True,
    'chain_type' : True,

    # entity-level attributes (not provided by this form)
    'entity_index' : False,
    'entity_id' : False,
    'entity_name' : False,
    'entity_type' : False,

    # structural / simulation attributes
    'coordinates' : True,
    'velocities' : False,
    'box' : True,
    'time' : False,
    'step' : False,
    'forcefield_parameters' : False,
    'forcefield' : False,
    'temperature' : False,
    'pressure' : False,
    'integrator' : False,
    'damping' : False,

}
def to_openmm_Topology(item, molecular_system, atom_indices='all', structure_indices='all'):
    """Convert an openmm.GromacsGroFile item into an openmm.app.Topology.

    ``structure_indices`` is accepted for API uniformity with the other
    converters but is not forwarded by this one.
    """
    from molsysmt.item.openmm_GromacsGroFile import to_openmm_Topology as _convert
    return _convert(item, atom_indices=atom_indices, check=False)
def to_openmm_Modeller(item, molecular_system, atom_indices='all', structure_indices='all'):
    """Convert an openmm.GromacsGroFile item into an openmm.app.Modeller.

    ``structure_indices`` is accepted for API uniformity with the other
    converters but is not forwarded by this one.
    """
    from molsysmt.item.openmm_GromacsGroFile import to_openmm_Modeller as _convert
    return _convert(item, atom_indices=atom_indices, check=False)
def to_molsysmt_MolSys(item, molecular_system, atom_indices='all', structure_indices='all'):
    """Convert an openmm.GromacsGroFile item into a molsysmt.MolSys."""
    from molsysmt.item.openmm_GromacsGroFile import to_molsysmt_MolSys as _convert
    return _convert(item, atom_indices=atom_indices,
                    structure_indices=structure_indices, check=False)
def to_molsysmt_Topology(item, molecular_system, atom_indices='all', structure_indices='all'):
    """Convert an openmm.GromacsGroFile item into a molsysmt.Topology."""
    from molsysmt.item.openmm_GromacsGroFile import to_molsysmt_Topology as _convert
    return _convert(item, atom_indices=atom_indices,
                    structure_indices=structure_indices, check=False)
def to_molsysmt_Structures(item, molecular_system, atom_indices='all', structure_indices='all'):
    """Convert an openmm.GromacsGroFile item into a molsysmt.Structures."""
    from molsysmt.item.openmm_GromacsGroFile import to_molsysmt_Structures as _convert
    return _convert(item, atom_indices=atom_indices,
                    structure_indices=structure_indices, check=False)
| 31.757009 | 142 | 0.759859 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 570 | 0.167746 |
387914e6bb3ad3988784eba28689e73be40bad2b | 523 | py | Python | simple_permutations.py | mutazag/nlpia | 14e70396b118605745c148f5e573246844687c1f | [
"MIT"
] | null | null | null | simple_permutations.py | mutazag/nlpia | 14e70396b118605745c148f5e573246844687c1f | [
"MIT"
] | null | null | null | simple_permutations.py | mutazag/nlpia | 14e70396b118605745c148f5e573246844687c1f | [
"MIT"
] | null | null | null | #%%
# Scratch, notebook-style script (the "# %%" markers are cell separators).
# The bare expressions below echo their value in a notebook; when run as a
# plain script they are computed and discarded.
from itertools import permutations, product
# %%
# Cartesian-product examples over small alphabets.
A =['a','b']
Z = ['x','z']
[p for p in product('ab', range(3))]
[p for p in product(A,Z)]
[p for p in product(A, repeat=3)]
# %%
# All 3-word orderings of the greeting, joined back into phrases.
msg = 'Good Morning Rosa!'
msgList = msg.split()
[' '.join(p) for p in permutations(msgList,3)]
# %%
# complex phrase
# Count the distinct words, then compute the factorial of that count
# (arange(1, n+1).prod() == n!) — the number of orderings of those words.
s = """Find textbooks with titles containing 'NLP',
or 'natural' and 'language', or
'computational' and 'linguistics'."""
s_length = len(set(s.split()))
import numpy as np
np.arange(1,s_length+1).prod()
# %%
| 19.37037 | 51 | 0.619503 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 200 | 0.382409 |
38795101ca4281688e60964eabe22f2626c23b79 | 510 | py | Python | olea/packages/ip2loc/ip2loc.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | [
"Apache-2.0"
] | 2 | 2020-06-18T03:25:52.000Z | 2020-06-18T07:33:45.000Z | olea/packages/ip2loc/ip2loc.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | [
"Apache-2.0"
] | 15 | 2021-01-28T07:11:04.000Z | 2021-05-24T07:11:37.000Z | olea/packages/ip2loc/ip2loc.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | [
"Apache-2.0"
] | null | null | null | __all__ = ['IP2Loc']
import IP2Location
from .update_ipdb import download
class IP2Loc():
    """Thin wrapper around IP2Location with Flask-style deferred app init."""

    def __init__(self, app=None):
        # The database handle is created lazily by init_app().
        self.ip2loc = None
        if app:
            self.init_app(app)

    def init_app(self, app):
        """Open the IP database configured on ``app``, downloading it first
        if the file is missing."""
        db_path = app.config['IP2LOC_IPDB_PATH']
        if not db_path.exists():
            download(db_path, app.config['IP2LOC_DOWNLOAD_TOKEN'])
        self.ip2loc = IP2Location.IP2Location(db_path, 'SHARED_MEMORY')

    def get_city(self, ip):
        """Return the city record for ``ip`` from the loaded database."""
        return self.ip2loc.get_city(ip)
| 23.181818 | 68 | 0.631373 | 431 | 0.845098 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.12549 |
38796eed3ff31562e41f2d173de7660b64b06992 | 521 | py | Python | tests/BaseCase.py | YaroslavChyhryn/SchoolAPI | 6b5eb4e1faf6b962561109fc227057ad0f8d4d92 | [
"MIT"
] | null | null | null | tests/BaseCase.py | YaroslavChyhryn/SchoolAPI | 6b5eb4e1faf6b962561109fc227057ad0f8d4d92 | [
"MIT"
] | null | null | null | tests/BaseCase.py | YaroslavChyhryn/SchoolAPI | 6b5eb4e1faf6b962561109fc227057ad0f8d4d92 | [
"MIT"
] | null | null | null | import unittest
from school_api.app import create_app
from school_api.db import create_tables, drop_tables
from school_api.data_generator import test_db
class BaseCase(unittest.TestCase):
    """Shared test base: one app per class, fresh schema plus seeded test
    data before every test, tables dropped afterwards."""

    @classmethod
    def setUpClass(cls):
        # A single Flask app in 'test' configuration shared by the class.
        cls.app = create_app('test')

    def setUp(self):
        # Rebuild the schema and reseed data so every test starts clean.
        drop_tables(self.app)
        create_tables(self.app)
        test_db(self.app)
        with self.app.app_context():
            self.client = self.app.test_client()

    def tearDown(self):
        # Leave no tables behind between tests.
        drop_tables(self.app)
| 23.681818 | 52 | 0.685221 | 365 | 0.700576 | 0 | 0 | 74 | 0.142035 | 0 | 0 | 6 | 0.011516 |
387bfe5524cac7d7cb21e866fa774cc3212626e2 | 238 | py | Python | msgflow/logging.py | noriyukipy/smilechat | a9c0ef93c35b2a1f3e9d1700391ae865544adfbc | [
"MIT"
] | 5 | 2021-01-01T12:34:23.000Z | 2022-03-08T13:02:11.000Z | msgflow/logging.py | noriyukipy/smilechat | a9c0ef93c35b2a1f3e9d1700391ae865544adfbc | [
"MIT"
] | null | null | null | msgflow/logging.py | noriyukipy/smilechat | a9c0ef93c35b2a1f3e9d1700391ae865544adfbc | [
"MIT"
] | 2 | 2020-09-20T10:41:51.000Z | 2020-11-09T06:15:32.000Z | import json
import datetime
def print_json_log(logger_, level_, message_):
    """Emit a structured JSON record via ``logger_`` at level ``level_``.

    The record carries the level, the message, and the current local
    timestamp; ``level_`` must name a method on the logger (e.g. "info").
    """
    payload = {
        "level": level_,
        "message": message_,
        "time": str(datetime.datetime.now()),
    }
    log_method = getattr(logger_, level_)
    log_method(json.dumps(payload))
| 26.444444 | 88 | 0.710084 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.092437 |
387c70f8ffc34a57405e03583c11907f465d14b4 | 560 | py | Python | tests/test_nace2.py | sayari-analytics/pyisic | 42ed46f5bc446a0bbc0edf30b64bc4ab939dd033 | [
"MIT"
] | 3 | 2021-11-18T15:32:38.000Z | 2022-02-28T19:16:14.000Z | tests/test_nace2.py | sayari-analytics/pyisic | 42ed46f5bc446a0bbc0edf30b64bc4ab939dd033 | [
"MIT"
] | 18 | 2021-06-28T19:17:49.000Z | 2022-03-23T20:20:18.000Z | tests/test_nace2.py | sayari-analytics/pyisic | 42ed46f5bc446a0bbc0edf30b64bc4ab939dd033 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from pyisic import NACE2_to_ISIC4
from pyisic.types import Standards
@pytest.mark.parametrize(
    "code,expected",
    [
        ("DOESNT EXIST", set()),
        ("A", {(Standards.ISIC4, "A")}),
        ("01", {(Standards.ISIC4, "01")}),
        ("01.1", {(Standards.ISIC4, "011")}),
        ("01.11", {(Standards.ISIC4, "0111")}),
    ],
)
def test_naics2017_to_isic4_concordance(code: str, expected: set):
    """Test NACE2 to ISIC4 sample concordances.

    Fix: the docstring previously said "NAICS2017", but the mapping
    exercised here is NACE2_to_ISIC4. The function name is kept unchanged
    so pytest selection by name keeps working.
    """
    assert NACE2_to_ISIC4.concordant(code) == expected
| 26.666667 | 66 | 0.608929 | 0 | 0 | 0 | 0 | 449 | 0.801786 | 0 | 0 | 140 | 0.25 |
387ea40c8cd79f5bdddf23799a108f4ee84c715c | 391 | py | Python | JiaLu/learn/list_training9.py | 13022108937/homework | 05b3c0535532766b286976b15245ed1f925da8c5 | [
"Apache-2.0"
] | null | null | null | JiaLu/learn/list_training9.py | 13022108937/homework | 05b3c0535532766b286976b15245ed1f925da8c5 | [
"Apache-2.0"
] | null | null | null | JiaLu/learn/list_training9.py | 13022108937/homework | 05b3c0535532766b286976b15245ed1f925da8c5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
def bubble_search_func(data_list):
    """Sort ``data_list`` in place, ascending (bubble sort).

    Note: despite the historical name, this is a sort, not a search; the
    name is kept so existing callers keep working. Adjacent out-of-order
    pairs are swapped on repeated passes, and an early exit stops as soon
    as a full pass makes no swap (O(n) on already-sorted input).
    """
    cnt_num_all = len(data_list)
    for i in range(cnt_num_all - 1):
        swapped = False
        for j in range(1, cnt_num_all - i):
            if data_list[j - 1] > data_list[j]:
                data_list[j - 1], data_list[j] = data_list[j], data_list[j - 1]
                swapped = True
        if not swapped:
            # No swap in a whole pass: the list is sorted, stop early.
            break

data_list = [54, 25, 93, 17, 77, 31, 44, 55, 20, 10]
bubble_search_func(data_list)
print(data_list)
| 20.578947 | 71 | 0.639386 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.053708 |
387eb64186d9be38a6ef7eb92362e60714c12f89 | 981 | py | Python | src/custom_user_app/urls.py | JackCX777/user_polls_2 | fa8fe9ad4c1fa36b4ea5bb402b3d485852a98d3b | [
"BSD-3-Clause"
] | null | null | null | src/custom_user_app/urls.py | JackCX777/user_polls_2 | fa8fe9ad4c1fa36b4ea5bb402b3d485852a98d3b | [
"BSD-3-Clause"
] | null | null | null | src/custom_user_app/urls.py | JackCX777/user_polls_2 | fa8fe9ad4c1fa36b4ea5bb402b3d485852a98d3b | [
"BSD-3-Clause"
] | null | null | null | from django.urls import path
from custom_user_app.views import (CustomUserLoginView,
CustomUserLogoutView,
CustomUserCreationView,
CustomUserUpdateView,
CustomUserPasswordChangeView,
CustomUserPasswordChangeDoneView)
# Routes for the custom user app: login/logout, registration, profile
# editing, and the password-change flow, the latter keyed by profile id.
urlpatterns = [
    path('login/', CustomUserLoginView.as_view(), name='user_login'),
    path('logout/', CustomUserLogoutView.as_view(), name='user_logout'),
    path('registration/', CustomUserCreationView.as_view(), name='user_registration'),
    path('profile/<int:profile_id>', CustomUserUpdateView.as_view(), name='user_profile'),
    path('password_change/<int:profile_id>', CustomUserPasswordChangeView.as_view(), name='user_password_change'),
    path('password_change_done/<int:profile_id>', CustomUserPasswordChangeDoneView.as_view(),
         name='password_change_done'),
]
| 51.631579 | 114 | 0.643221 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 233 | 0.237513 |
387ed07ba8ec4acd4791f1af86f446b18d2578f8 | 2,691 | py | Python | src/utils/preprocess_dataset.py | FedericoBottoni/household-poverty-classifier | 7357cc6a6c08e9cf76cdd79a04cce32a5982fa85 | [
"MIT"
] | null | null | null | src/utils/preprocess_dataset.py | FedericoBottoni/household-poverty-classifier | 7357cc6a6c08e9cf76cdd79a04cce32a5982fa85 | [
"MIT"
] | null | null | null | src/utils/preprocess_dataset.py | FedericoBottoni/household-poverty-classifier | 7357cc6a6c08e9cf76cdd79a04cce32a5982fa85 | [
"MIT"
] | null | null | null | import numpy as np
import csv as csv
from clean_data import clean_data
from join_columns import join_columns
from fix_decimals import add_int, cut_decimals
def preprocess_dataset():
    """Preprocess both CSV splits, first without and then with the
    one-hot-group encoding (same call order as before: train/test plain,
    then train/test encoded)."""
    for encode in (False, True):
        for split in ('train', 'test'):
            preprocess_data(split, encode)
def preprocess_data(data_name, encode_features):
    """Load data/raw_<data_name>.csv, clean it, optionally merge the one-hot
    column groups, trim decimals, and export to data/<data_name>[_enc].csv
    (semicolon-separated)."""
    name = data_name
    raw = list()
    with open("./data/raw_" + data_name + ".csv") as f:
        raw_reader = csv.reader(f, delimiter=",")
        for row in raw_reader:
            raw.append(row)
    # raw[0] is the header row; everything is kept as strings in one array.
    raw = np.array(raw)
    raw = clean_data(raw)
    if encode_features:
        # Each join_columns call appears to collapse a group of one-hot
        # indicator columns into a single column named by the 4th argument,
        # using the int list as the value encoding and the optional dict for
        # "other" columns — confirm against join_columns' definition.
        raw = join_columns(raw, ["sanitario1", "sanitario2", "sanitario3", "sanitario5", "sanitario6"], ["c","c","c","c","o1"], "sanitario", [1,2,3,4], {"o1":"sanioth"})
        raw = join_columns(raw, ["energcocinar1", "energcocinar2", "energcocinar3", "energcocinar4"], ["c","c","c","c"], "energcocinar", [1,4,2,3])
        raw = join_columns(raw, ["elimbasu1", "elimbasu2", "elimbasu3", "elimbasu4", "elimbasu6"], ["c","c","c","c","o1"], "elimbasu", [4,3,2,1], {"o1":"elimoth"})
        #raw = np.delete(raw, np.where(raw[0,:] == "elimbasu5")[0][0], axis=1) #this column has been removed inside the clean_data function since it has 0 mean and 0 variance
        raw = join_columns(raw, ["epared1", "epared2", "epared3"], ["c","c","c"], "epared", [1,2,3])
        raw = join_columns(raw, ["etecho1", "etecho2", "etecho3"], ["c","c","c"], "etecho", [1,2,3])
        raw = join_columns(raw, ["eviv1", "eviv2", "eviv3"], ["c","c","c"], "eviv", [1,2,3])
        raw = join_columns(raw, ["female", "male"], ["c","c"], "gender", [0,1])
        raw = join_columns(raw, ["parentesco1", "parentesco2", "parentesco3", "parentesco4", "parentesco5", "parentesco6", "parentesco7", "parentesco8", "parentesco9", "parentesco10", "parentesco11", "parentesco12"], ["c","c","c","c","c","c","c","c","c","c","c","c"], "parentesco", [1,2,3,4,5,6,7,8,9,10,11,12])
        raw = join_columns(raw, ["instlevel1", "instlevel2", "instlevel3", "instlevel4", "instlevel5", "instlevel6", "instlevel7", "instlevel8", "instlevel9"], ["c","c","c","c","c","c","c","c","c"], "instlevel", [1,2,3,4,5,6,7,8,9])
        raw = join_columns(raw, ["tipovivi1", "tipovivi2", "tipovivi3", "tipovivi4", "tipovivi5"], ["c","c","c","c","o1"], "tipovivi", [1,2,3,4], {"o1":"tipooth"})
        raw = join_columns(raw, ["area2", "area1"], ["c","c"], "area", [0,1])
        # Encoded exports get a distinct file name suffix.
        name = name + '_enc'
    raw = add_int(raw, 0)
    raw = cut_decimals(raw, 2)
    #saving new dataset
    print('exporting ' + name + '.csv')
    np.savetxt('./data/' + name + '.csv', raw, delimiter=';', fmt='%s')
387f0112c992c1eb1d347a52c37faadb884c7c51 | 1,691 | py | Python | helpme/migrations/0003_auto_20200901_2025.py | renderbox/django-help-me | 6efdaf715d2770305a7187c008354e0f784f9f5b | [
"MIT"
] | 1 | 2020-09-30T22:21:02.000Z | 2020-09-30T22:21:02.000Z | helpme/migrations/0003_auto_20200901_2025.py | renderbox/django-help-me | 6efdaf715d2770305a7187c008354e0f784f9f5b | [
"MIT"
] | 8 | 2020-09-11T00:50:57.000Z | 2022-03-30T22:10:45.000Z | helpme/migrations/0003_auto_20200901_2025.py | renderbox/django-help-me | 6efdaf715d2770305a7187c008354e0f784f9f5b | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2020-09-01 20:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema change: adds site-exclusion and global flags to
    Category and Question, links Ticket to an optional Question, and adjusts
    the `sites` many-to-many relations' related names / blank settings."""

    dependencies = [
        ('sites', '0002_alter_domain_unique'),
        ('helpme', '0002_category_question'),
    ]

    operations = [
        migrations.AddField(
            model_name='category',
            name='excluded_sites',
            field=models.ManyToManyField(blank=True, related_name='excluded_categories', to='sites.Site'),
        ),
        migrations.AddField(
            model_name='category',
            name='global_category',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='question',
            name='excluded_sites',
            field=models.ManyToManyField(blank=True, related_name='excluded_questions', to='sites.Site'),
        ),
        migrations.AddField(
            model_name='question',
            name='global_question',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='ticket',
            name='question',
            # SET_NULL requires null=True: deleting a Question keeps tickets.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='helpme.question'),
        ),
        migrations.AlterField(
            model_name='category',
            name='sites',
            field=models.ManyToManyField(related_name='categories', to='sites.Site'),
        ),
        migrations.AlterField(
            model_name='question',
            name='sites',
            field=models.ManyToManyField(blank=True, related_name='questions', to='sites.Site'),
        ),
    ]
| 33.156863 | 127 | 0.595506 | 1,567 | 0.926671 | 0 | 0 | 0 | 0 | 0 | 0 | 397 | 0.234772 |
388094937263ef4639ae196c69b959037b68702e | 3,799 | py | Python | lib/surface/anthos/auth/login.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/anthos/auth/login.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/surface/anthos/auth/login.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Authenticate clusters using the Anthos client.."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.anthos import anthoscli_backend
from googlecloudsdk.command_lib.anthos import flags
from googlecloudsdk.command_lib.anthos.common import kube_flags
from googlecloudsdk.command_lib.anthos.common import messages
from googlecloudsdk.core import log
class Login(base.BinaryBackedCommand):
  """Authenticate clusters using the Anthos client."""

  detailed_help = {
      'EXAMPLES':
          """
          To add credentials to default kubeconfig file:

            $ {command} --cluster=testcluster --login-config=kubectl-anthos-config.yaml

          To add credentials to custom kubeconfig file:

            $ {command} --cluster=testcluster --login-config=kubectl-anthos-config.yaml --kubeconfig=my.kubeconfig

          To generate the commands without executing them:

            $ {command} --cluster=testcluster --login-config=kubectl-anthos-config.yaml --dry-run
          """,
  }

  @staticmethod
  def Args(parser):
    # Register all flags this command accepts on the argparse parser.
    kube_flags.GetKubeConfigFlag(
        'Specifies the destination kubeconfig file '
        'where credentials will be stored.').AddToParser(parser)
    flags.GetUserFlag().AddToParser(parser)
    flags.GetClusterFlag().AddToParser(parser)
    flags.GetLoginConfigFlag().AddToParser(parser)
    flags.GetLoginConfigCertFlag().AddToParser(parser)
    flags.GetDryRunFlag('Print out the generated kubectl commands '
                        'but do not execute them.').AddToParser(parser)
    flags.GetSetPreferredAuthenticationFlag().AddToParser(parser)

  def Run(self, args):
    # Delegate the actual login to the anthos auth binary wrapper.
    command_executor = anthoscli_backend.AnthosAuthWrapper()
    cluster = args.CLUSTER

    # Get Default Path if flag not provided.
    login_config = args.login_config or command_executor.default_config_path

    # Get contents of config, parsing either URL or File.
    login_config, config_contents, is_url = anthoscli_backend.GetFileOrURL(
        login_config, args.login_config_cert)

    # Get Preferred Auth Method and handle prompting.
    force_update = args.set_preferred_auth
    authmethod, ldapuser, ldappass = anthoscli_backend.GetPreferredAuthForCluster(
        cluster=cluster,
        login_config=login_config,
        config_contents=config_contents,
        force_update=force_update,
        is_url=is_url)

    # Log and execute binary command with flags.
    log.status.Print(messages.LOGIN_CONFIG_MESSAGE)
    response = command_executor(
        command='login',
        cluster=cluster,
        kube_config=args.kubeconfig,
        user=args.user,
        login_config=login_config,
        login_config_cert=args.login_config_cert,
        dry_run=args.dry_run,
        show_exec_error=args.show_exec_error,
        ldap_user=ldapuser,
        ldap_pass=ldappass,
        preferred_auth=authmethod,
        env=anthoscli_backend.GetEnvArgsForCommand(
            extra_vars={'GCLOUD_AUTH_PLUGIN': 'true'}))
    # When no cluster was given, the handler lists clusters instead.
    return anthoscli_backend.LoginResponseHandler(
        response, list_clusters_only=(cluster is None))
| 38.373737 | 113 | 0.733351 | 2,690 | 0.708081 | 0 | 0 | 588 | 0.154778 | 0 | 0 | 1,570 | 0.413267 |
38823e27450525f05ac5a168826d916d3ea60ed9 | 382 | py | Python | projects/migrations/0005_alter_location_location.py | Gomax-07/gallery | 934b667d79d9a98e43648864a420cc559e9456e6 | [
"Unlicense"
] | null | null | null | projects/migrations/0005_alter_location_location.py | Gomax-07/gallery | 934b667d79d9a98e43648864a420cc559e9456e6 | [
"Unlicense"
] | null | null | null | projects/migrations/0005_alter_location_location.py | Gomax-07/gallery | 934b667d79d9a98e43648864a420cc559e9456e6 | [
"Unlicense"
] | null | null | null | # Generated by Django 3.2.7 on 2021-09-07 02:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: caps Location.location at 150 characters."""

    dependencies = [
        ('projects', '0004_location'),
    ]

    operations = [
        migrations.AlterField(
            model_name='location',
            name='location',
            field=models.CharField(max_length=150),
        ),
    ]
| 20.105263 | 51 | 0.594241 | 289 | 0.756545 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.240838 |
3883883e7eb9a4441c27a1740a372b5eec7a6a64 | 5,491 | py | Python | src/napari_mahotas_image_processing/_function.py | haesleinhuepf/napari-mahotas-image-processing | d6c19a27b1e5acb994aa1e698692394e73f783a3 | [
"BSL-1.0",
"BSD-3-Clause"
] | 2 | 2021-11-27T02:02:25.000Z | 2021-11-28T09:55:47.000Z | src/napari_mahotas_image_processing/_function.py | haesleinhuepf/napari-mahotas-image-processing | d6c19a27b1e5acb994aa1e698692394e73f783a3 | [
"BSL-1.0",
"BSD-3-Clause"
] | null | null | null | src/napari_mahotas_image_processing/_function.py | haesleinhuepf/napari-mahotas-image-processing | d6c19a27b1e5acb994aa1e698692394e73f783a3 | [
"BSL-1.0",
"BSD-3-Clause"
] | null | null | null | import numpy as np
from napari_plugin_engine import napari_hook_implementation
from napari_tools_menu import register_function
from napari_time_slicer import time_slicer, slice_by_slice
import napari
from napari.types import ImageData, LabelsData
@napari_hook_implementation
def napari_experimental_provide_function():
    # Advertise this module's processing functions to napari's
    # experimental function-provider hook.
    return [
        gaussian_blur,
        threshold_otsu,
        connected_component_labeling,
        sobel_edge_detector,
        binary_fill_holes,
        seeded_watershed,
        split_touching_objects,
        euclidean_distance_map
    ]
@register_function(menu="Filtering / noise removal > Gaussian (n-mahotas)")
@time_slicer
def gaussian_blur(image:ImageData, sigma: float = 1, viewer: napari.Viewer = None) -> ImageData:
    """
    Blur an image with a Gaussian kernel of width ``sigma``.

    See also
    --------
    ..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.gaussian_filter
    """
    from mahotas import gaussian_filter
    return gaussian_filter(image, sigma)
def _8bit(image):
    """Rescale ``image`` so its maximum maps to 255 and cast to uint8."""
    normalized = image / image.max()
    return (normalized * 255).astype(np.uint8)
@register_function(menu="Segmentation / binarization > Threshold (Otsu et al 1979, n-mahotas)")
@time_slicer
def threshold_otsu(image:ImageData, viewer: napari.Viewer = None) -> LabelsData:
    """
    Binarize an image with Otsu's automatic threshold (after rescaling
    the input to 8 bit).

    See also
    --------
    ..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.otsu
    """
    import mahotas as mh
    scaled = _8bit(image)
    threshold = mh.otsu(scaled)
    return scaled > threshold
@register_function(menu="Segmentation / labeling > Connected component labeling (n-mahotas)")
@time_slicer
def connected_component_labeling(binary_image: LabelsData, viewer: napari.Viewer = None) -> LabelsData:
    """
    Label connected regions in a binary image

    See also
    --------
    ..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.label
    """
    # Bug fix: mahotas is never imported at module level, so the bare
    # reference to ``mh`` raised a NameError at call time. Import it
    # locally, matching the style of the sibling functions in this module.
    import mahotas as mh
    labeled, nr_objects = mh.label(binary_image)
    return labeled
@register_function(menu="Filtering / edge enhancement > Sobel edge detection (slice-by-slice, n-mahotas)")
@time_slicer
def sobel_edge_detector(image:ImageData, viewer: napari.Viewer = None) -> ImageData:
    """
    Enhance edges with the Sobel operator (filter output only, no
    thresholding).

    See also
    --------
    ..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.sobel
    """
    import mahotas as mh
    filtered = mh.sobel(image, just_filter=True)
    return filtered
@register_function(menu="Segmentation post-processing > Binary fill holes (slice_by_slice, n-mahotas)")
@slice_by_slice
@time_slicer
def binary_fill_holes(binary_image:LabelsData, viewer: napari.Viewer = None) -> LabelsData:
    """
    Close holes in a binary image, processed slice by slice.

    See also
    --------
    ..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.close_holes
    """
    import mahotas as mh
    filled = mh.close_holes(binary_image)
    return filled
@register_function(menu="Segmentation / labeling > Seeded watershed (n-mahotas)")
@time_slicer
def seeded_watershed(image:ImageData, labeled_seeds:LabelsData, viewer: napari.Viewer = None) -> LabelsData:
    """
    Label every pixel by flooding intensity valleys of ``image`` starting
    from the regions given in ``labeled_seeds``.

    See also
    --------
    ..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.cwatershed
    """
    import mahotas as mh
    return mh.cwatershed(image, labeled_seeds)
@register_function(menu="Measurement > Euclidean distance map (n-mahotas)")
@time_slicer
def euclidean_distance_map(binary_image:LabelsData, viewer: napari.Viewer = None) -> LabelsData:
    """
    Compute a Euclidean distance map: every non-zero pixel is replaced by
    its distance to the nearest zero pixel.

    See also
    --------
    ..[0] https://en.wikipedia.org/wiki/Distance_transform
    """
    import mahotas as mh
    distance_image = mh.distance(binary_image)
    return distance_image
def _sobel_3d(image):
    """Convolve ``image`` with a 3D Laplacian-style kernel (center -6,
    six face neighbours +1) as a 3D stand-in for 2D Sobel edge detection."""
    from scipy import ndimage as ndi
    kernel = np.zeros((3, 3, 3), dtype=int)
    kernel[1, 1, 1] = -6
    kernel[0, 1, 1] = kernel[2, 1, 1] = 1
    kernel[1, 0, 1] = kernel[1, 2, 1] = 1
    kernel[1, 1, 0] = kernel[1, 1, 2] = 1
    return ndi.convolve(image, kernel)
@register_function(menu="Segmentation post-processing > Split touching objects (n-mahotas)")
@time_slicer
def split_touching_objects(binary:LabelsData, sigma:float=3.5, viewer: napari.Viewer = None) -> LabelsData:
    """
    Takes a binary image and draws cuts in the objects similar to the ImageJ watershed algorithm.

    ``sigma`` controls how strongly the distance map is smoothed before its
    regional maxima are taken as watershed seeds (larger sigma merges
    nearby maxima, producing fewer cuts).

    See also
    --------
    .. [0] https://imagej.nih.gov/ij/docs/menus/process.html#watershed
    """
    import mahotas as mh
    binary = _8bit(np.asarray(binary))

    # typical watershed pipeline: distance transform -> Gaussian smoothing
    # -> regional maxima as seed markers -> watershed on the inverted map
    distance = mh.distance(binary)
    blurred_distance = mh.gaussian_filter(distance, sigma=sigma)
    fp = np.ones((3,) * binary.ndim)
    markers, num_labels = mh.label(mh.regmax(blurred_distance, Bc=fp))
    labels = mh.cwatershed(-blurred_distance, markers)

    # identify label-cutting edges: boundaries of the watershed label image
    # that are NOT also boundaries of the original mask lie inside objects;
    # the XNOR keeps everything except those internal cut lines
    if len(binary.shape) == 2:
        edges = mh.sobel(labels, just_filter=True)
        edges2 = mh.sobel(binary, just_filter=True)
    else:  # assuming 3D
        edges = _sobel_3d(labels)
        edges2 = _sobel_3d(binary)

    almost = np.logical_not(np.logical_xor(edges != 0, edges2 != 0)) * binary
    # morphological opening, then binarize the result
    return mh.open(almost) != 0
| 30.848315 | 116 | 0.676744 | 0 | 0 | 0 | 0 | 4,760 | 0.866873 | 0 | 0 | 2,106 | 0.383537 |
38838a2148cd8410cf38dde80c33588255de0106 | 487 | py | Python | CoquoBot/order_manager.py | Josef212/CoquoBot | adb9744b04454a4591237937dfb2c9f00da30077 | [
"MIT"
] | null | null | null | CoquoBot/order_manager.py | Josef212/CoquoBot | adb9744b04454a4591237937dfb2c9f00da30077 | [
"MIT"
] | null | null | null | CoquoBot/order_manager.py | Josef212/CoquoBot | adb9744b04454a4591237937dfb2c9f00da30077 | [
"MIT"
] | null | null | null | from order import Order
class OrderManager:
    """Keeps one Order per chat, creating each order on first access."""

    def __init__(self):
        # chat/conversation id -> Order
        self.orders = {}

    def user_has_any_order(self, chat_id: int, user: str) -> bool:
        """Delegate to the chat's Order to ask whether ``user`` has one."""
        return self.get_order(chat_id).user_has_any_order(user)

    def get_order(self, id: int) -> Order:
        """Return the Order for ``id``, creating it on first use."""
        existing = self.orders.get(id)
        if existing is None:
            existing = Order()
            self.orders[id] = existing
        return existing

    def reset_order(self, id: int) -> None:
        """Reset the Order for ``id`` (creating it first if missing)."""
        self.get_order(id).reset()
388501d208ae63f4dc2d1e7114cc8996d14643dd | 2,576 | py | Python | tests/binary/run.py | learnflexswitch/pyangbind | 7b39fec6806b516c442f920a8396d2e1fa9c36b1 | [
"Apache-2.0"
] | 1 | 2021-07-15T18:12:28.000Z | 2021-07-15T18:12:28.000Z | tests/binary/run.py | ktbyers/pyangbind | 39f9f8d842c66dde784c45369ea7b280f375401a | [
"Apache-2.0"
] | null | null | null | tests/binary/run.py | ktbyers/pyangbind | 39f9f8d842c66dde784c45369ea7b280f375401a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os, sys, getopt
TESTNAME = "binary"

# Generate pyangbind bindings for TESTNAME.yang in this directory and
# exercise the generated binary (bitarray-backed) leaves.
def main():
  """Run the binary-leaf regression test.

  Exits 127 on bad command-line options. Pass -k/--keepfiles to keep the
  generated bindings.py after the run. Requires PYANGPATH and
  PYANGBINDPATH in the environment.
  """
  try:
    opts, args = getopt.getopt(sys.argv[1:], "k", ["keepfiles"])
  except getopt.GetoptError as e:
    # print() as a function works under both Python 2 and 3; the original
    # "print str(e)" statement form was Python 2 only (syntax error on 3).
    print(str(e))
    sys.exit(127)

  k = False
  for o, a in opts:
    if o in ["-k", "--keepfiles"]:
      k = True

  # os.environ.get with a default is equivalent to (and simpler than) the
  # original "value if value is not None else False" conditionals.
  pyangpath = os.environ.get('PYANGPATH', False)
  pyangbindpath = os.environ.get('PYANGBINDPATH', False)
  assert not pyangpath == False, "could not find path to pyang"
  assert not pyangbindpath == False, "could not resolve pyangbind directory"

  this_dir = os.path.dirname(os.path.realpath(__file__))
  # Invoke pyang with the pyangbind plugin to produce bindings.py here.
  os.system("%s --plugindir %s -f pybind -o %s/bindings.py %s/%s.yang"
            % (pyangpath, pyangbindpath, this_dir, this_dir, TESTNAME))

  from bindings import binary as b
  from bitarray import bitarray

  t = b()

  # The container must expose all three binary leaves.
  for i in ["b1", "b2", "b3"]:
    assert hasattr(t.container, i), \
        "element did not exist in container (%s)" % i

  # (candidate value, expected-settable?, ...) tuples for leaf b1.
  for value in [("01110", True, [False, True, True, True, False],),
                ({"42": 42}, True, [True]),
               ]:
    passed = True
    try:
      t.container.b1 = value[0]
    except Exception:
      # narrowed from a bare except: so SystemExit/KeyboardInterrupt propagate
      passed = False
    assert passed == value[1], "could incorrectly set b1 to %s" % value[0]

  # b2 declares a default of "0100": _default reports it, while the unset
  # leaf compares equal to an empty bitarray and is not flagged changed.
  assert t.container.b2._default == bitarray("0100"), \
      "Default for leaf b2 was not set correctly (%s != %s)" \
      % (t.container.b2._default, bitarray("0100"))
  assert t.container.b2 == bitarray(), \
      "Value of bitarray was not null when checking b2 (%s != %s)" \
      % (t.container.b2, bitarray())
  assert t.container.b2._changed() == False, \
      "Unset bitarray specified changed when was default (%s != False)" \
      % (t.container.b2._changed())

  t.container.b2 = bitarray("010")
  assert t.container.b2 == bitarray('010'), \
      "Bitarray not successfuly set (%s != %s)" % (t.container.b2, bitarray('010'))
  assert t.container.b2._changed() == True, \
      "Bitarray value not flagged as changed (%s != %s)" % (t.container.b2._changed(), True)

  # b3 is length-restricted: one- and two-bit values accepted, three rejected.
  for v in [("0", True), ("01", True), ("010", False)]:
    try:
      t.container.b3 = v[0]
      passed = True
    except ValueError:
      passed = False
    assert passed == v[1], \
        "limited length binary incorrectly set to %s (%s != %s)" \
        % (v[0], v[1], passed)

  if not k:
    # Remove the generated bindings unless --keepfiles was requested.
    os.system("/bin/rm %s/bindings.py" % this_dir)
    os.system("/bin/rm %s/bindings.pyc" % this_dir)

if __name__ == '__main__':
  main()
| 33.454545 | 130 | 0.615295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 793 | 0.307842 |
38859de8b56eff173435e0d0952d25f450ef8119 | 1,024 | py | Python | ScoutingBox/Box/admin.py | JoannaNitek/ScoutingBox | a49d12abad5aec4bccdbb8efc627f3403c4e9b4c | [
"MIT"
] | null | null | null | ScoutingBox/Box/admin.py | JoannaNitek/ScoutingBox | a49d12abad5aec4bccdbb8efc627f3403c4e9b4c | [
"MIT"
] | 9 | 2019-08-06T02:08:30.000Z | 2022-02-10T08:48:19.000Z | ScoutingBox/Box/admin.py | JoannaNitek/ScoutingBox | a49d12abad5aec4bccdbb8efc627f3403c4e9b4c | [
"MIT"
] | 2 | 2019-09-16T18:45:08.000Z | 2019-09-18T17:00:42.000Z | from django.contrib import admin
# Register your models here.
from Box.models import Player, ObservationList, Comments, ObservationForm
class PlayerAdmin(admin.ModelAdmin):
    """Admin list-view configuration for Player records."""
    list_display = ['first_name', 'last_name', 'year_of_birth', 'club',
                    'position', 'status', 'mail', 'phone', 'agent']


class ObservationListAdmin(admin.ModelAdmin):
    """Admin list-view configuration for ObservationList records."""
    list_display = ['date', 'match', 'city', 'country', 'scout']


class CommentsAdmin(admin.ModelAdmin):
    """Admin list-view configuration for Comments records."""
    list_display = ['comment', 'player', 'date']


class ObservationFormAdmin(admin.ModelAdmin):
    """Admin list-view configuration for ObservationForm records."""
    list_display = ['scout', 'player', 'observation', 'first_desc',
                    'second_desc', 'third_desc', 'fourth_desc', 'fifth_desc', 'sixth_desc',
                    'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'eleven']


# Register each model with its admin configuration.
# Fix: the original register() calls ended with stray trailing commas, which
# wrapped each statement in a pointless 1-tuple.
admin.site.register(Player, PlayerAdmin)
admin.site.register(ObservationList, ObservationListAdmin)
admin.site.register(Comments, CommentsAdmin)
admin.site.register(ObservationForm, ObservationFormAdmin)
3885d8be7539352dd2a853eb75507e09353f7e06 | 992 | py | Python | activity_1/src/testTimeExecution.py | lucasguesserts/MO824A-combinatorial-optimization | a88569e4496c0ed4f89a4e8bac7ab8f42f6cb7d4 | [
"MIT"
] | null | null | null | activity_1/src/testTimeExecution.py | lucasguesserts/MO824A-combinatorial-optimization | a88569e4496c0ed4f89a4e8bac7ab8f42f6cb7d4 | [
"MIT"
] | null | null | null | activity_1/src/testTimeExecution.py | lucasguesserts/MO824A-combinatorial-optimization | a88569e4496c0ed4f89a4e8bac7ab8f42f6cb7d4 | [
"MIT"
] | null | null | null | from datetime import datetime
from CompanyProblemSolver import CompanyProblemSolver
class TimedSolution:
    """Solve the company problem for a given size J, recording wall-clock time.

    Attributes mirror the solver's reported statistics plus the measured
    execution time in seconds.
    """

    def __init__(self, J):
        started = datetime.now()
        solver = CompanyProblemSolver(J, displayProgress=True)
        finished = datetime.now()

        self.duration = float((finished - started).total_seconds())
        self.J = J
        self.numberOfVariables = solver.numberOfVariables
        self.numberOfConstraints = solver.numberOfConstraints
        self.cost = solver.cost

    def __str__(self):
        """Render the run statistics as a dict-style summary string."""
        summary = {
            "problem size (J)": self.J,
            "number of variables": self.numberOfVariables,
            "number of constraints": self.numberOfConstraints,
            "cost": self.cost,
            "execution time [s]": self.duration,
        }
        return repr(summary)
if __name__ == "__main__":
    # Benchmark the solver at problem sizes 100 and 200.
    for size in range(100, 200 + 1, 100):
        print(TimedSolution(size))
| 33.066667 | 70 | 0.638105 | 749 | 0.75504 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.09879 |
38883a0e498b070f2e3d5997d8486a1d81c565d7 | 2,528 | py | Python | app/repository/model.py | maestro-server/data-app | cde6479cc84fe410220b34742772d5017571e3d3 | [
"Apache-2.0"
] | null | null | null | app/repository/model.py | maestro-server/data-app | cde6479cc84fe410220b34742772d5017571e3d3 | [
"Apache-2.0"
] | 1 | 2019-11-21T17:06:31.000Z | 2019-11-21T17:06:31.000Z | app/repository/model.py | maestro-server/data-app | cde6479cc84fe410220b34742772d5017571e3d3 | [
"Apache-2.0"
] | null | null | null | import datetime
import re
from app import db
from bson.objectid import ObjectId
from pymongo import InsertOne, UpdateOne
from pymongo.errors import BulkWriteError
from app.error.factoryInvalid import FactoryInvalid
class Model(object):
    """Thin MongoDB repository wrapper around a single collection.

    The collection name defaults to the (sub)class name, lower-cased; an
    explicit ``name`` overrides it. An optional ``id`` scopes the instance
    to one document for ``get``/``update``.
    """

    def __init__(self, id=None, name=None):
        if name is None:
            name = self.__class__.__name__.lower()
        self.col = db[name]
        self.__id = id

    def getAll(self, filter=None, limit=10, skip=0):
        """Return up to ``limit`` documents matching ``filter`` after ``skip``.

        Fix: the former mutable default argument (``filter={}``) was replaced
        by ``None`` to avoid cross-call shared state; behavior is unchanged.
        """
        if filter is None:
            filter = {}
        result = self.col.find(filter) \
            .limit(limit) \
            .skip(skip)
        return list(result)

    def count(self, filter=None):
        """Count documents matching ``filter`` (empty query by default).

        NOTE(review): ``Collection.count`` is deprecated/removed in modern
        pymongo; switch to ``count_documents`` when the driver is upgraded.
        """
        if filter is None:
            filter = {}
        return self.col.count(filter)

    def get(self):
        """Fetch the document identified by the id given at construction."""
        return self.col.find_one(Model.makeObjectId(self.__id))

    def update(self, data):
        """Apply a ``$set`` update to the scoped document.

        Returns a 422 invalid-response payload when no id was set.
        """
        if not self.__id:
            return FactoryInvalid.responseInvalid(
                {'msg': 'Id not setted'},
                422)

        setUpdatedData = {'$set': data}
        result = self.col.update_one(Model.makeObjectId(self.__id), setUpdatedData)
        return result.raw_result

    def updateMany(self, filters, data):
        """Apply a ``$set`` update to every document matching ``filters``."""
        setUpdatedData = {'$set': data}
        result = self.col.update_many(filters, setUpdatedData)
        return result.raw_result

    def batch_process(self, data):
        """Bulk insert/upsert a sequence of ``{'filter': ..., 'data': ...}`` items.

        Items with a truthy filter become upserting ``UpdateOne`` requests
        stamped with ``updated_at``; the rest become ``InsertOne`` requests
        stamped with both ``created_at`` and ``updated_at``. The batch runs
        unordered; bulk-write failures are logged and re-raised.
        """
        requests = []
        for item in data:
            obj = {**Model.makeDateAt(key='updated_at'), **item['data']}

            if item['filter']:
                args = Model.reservedWordMongo(obj)
                cal = UpdateOne(item['filter'], args, upsert=True)
            else:
                obj = {**Model.makeDateAt(key='created_at'), **obj}
                cal = InsertOne(obj)

            requests.append(cal)

        try:
            result = self.col.bulk_write(requests, ordered=False)
        except BulkWriteError as bwe:
            print(bwe.details)
            raise

        return result.bulk_api_result

    @staticmethod
    def makeDateAt(key):
        """Return ``{key: <current UTC timestamp>}``."""
        return {key: datetime.datetime.utcnow()}

    @staticmethod
    def reservedWordMongo(obj):
        """Split ``obj`` into a Mongo update document.

        Keys beginning with ``$`` are kept as top-level operators; every
        other non-None value is grouped under ``$set``. ``None`` values are
        dropped entirely.
        """
        filter = {'$set': {}}
        for key, item in obj.items():
            if item is not None:
                if re.match(r"\$", key):
                    filter[key] = item
                else:
                    filter['$set'][key] = item
        return filter

    @staticmethod
    def makeObjectId(id):
        """Return ``{'_id': ObjectId(id)}``, or ``None`` for a falsy id."""
        if id:
            return {'_id': Model.castObjectId(id)}

    @staticmethod
    def castObjectId(id):
        """Convert a string id into a bson ``ObjectId``."""
        return ObjectId(id)
| 27.478261 | 83 | 0.567642 | 2,310 | 0.913766 | 0 | 0 | 577 | 0.228244 | 0 | 0 | 100 | 0.039557 |
38888f481bd14c8f69369fc17765ba947e3ae62e | 9,087 | py | Python | python_lib/mitxgraders/comparers/linear_comparer.py | haharay/python_lib | 8acfc634ceb1943da5163c81b79bad126b27212f | [
"MIT"
] | 17 | 2018-06-20T19:38:13.000Z | 2021-12-31T19:52:52.000Z | python_lib/mitxgraders/comparers/linear_comparer.py | haharay/python_lib | 8acfc634ceb1943da5163c81b79bad126b27212f | [
"MIT"
] | 282 | 2017-11-07T13:34:03.000Z | 2022-03-26T04:25:20.000Z | python_lib/mitxgraders/comparers/linear_comparer.py | haharay/python_lib | 8acfc634ceb1943da5163c81b79bad126b27212f | [
"MIT"
] | 7 | 2018-06-05T23:27:00.000Z | 2022-03-26T08:02:50.000Z | from __future__ import print_function, division, absolute_import, unicode_literals
from numbers import Number
import numpy as np
from voluptuous import Schema, Required, Any, Range
from mitxgraders.comparers.baseclasses import CorrelatedComparer
from mitxgraders.helpers.calc.mathfuncs import is_nearly_zero
from mitxgraders.helpers.validatorfuncs import text_string
from mitxgraders.exceptions import ConfigError
def get_linear_fit_error(x, y):
    """Total residual error of the least-squares line y = a*x + b.

    When ``x`` is constant the design matrix is rank deficient, so the
    error of the best constant-offset fit is returned instead (which is
    zero when ``y`` is constant too).

    Arguments:
        x, y: flat numpy arrays of equal length.
    """
    design = np.column_stack([x, np.ones(len(x))])
    _, residuals, rank, _ = np.linalg.lstsq(design, y, rcond=-1)

    if rank == 1:
        # Constant x: fall back to fitting only an offset.
        return get_offset_fit_error(x, y)

    return np.sqrt(residuals.item())
def get_proportional_fit_error(x, y):
    """Total residual error of the least-squares proportional fit y = a*x.

    The fit has no intercept term, so a constant nonzero ``x`` paired with a
    constant ``y`` is treated as perfectly proportional.

    Arguments:
        x, y: flat numpy arrays of equal length.
    """
    design = np.reshape(x, (-1, 1))
    _, residuals, _, _ = np.linalg.lstsq(design, y, rcond=-1)
    return np.sqrt(residuals.item())
def get_offset_fit_error(x, y):
    """Total residual error of the least-squares fit y = x + b (unit slope).

    The optimal offset is the mean of ``y - x``; the returned value is the
    Euclidean norm of the remaining deviations.

    Arguments:
        x, y: flat numpy arrays of equal length.
    """
    offset = np.mean(y - x)
    deviations = x + offset - y
    return np.sqrt(sum(np.square(deviations)))
def get_equals_fit_error(x, y):
    """Euclidean distance between two compatible sample arrays.

    Arguments:
        x, y: compatible numpy arrays.
    """
    deviations = x - y
    return np.sqrt(sum(np.square(deviations)))
class LinearComparer(CorrelatedComparer):
    """
    Used to check that there is a linear relationship between student's input
    and the expected answer.

    The general linear relationship is expected = a * student + b. The comparer
    can check for four subtypes:
        equals: (a, b) = (1, 0)
        proportional: b = 0
        offset: a = 1
        linear: neither a nor b fixed

    Configuration
    =============

    The first four configuration keys determine the amount of partial credit
    given for a specific type of linear relationship. If set to None, the
    relationship is not checked.
        equals (None | number): defaults to 1.0
        proportional (None | number): defaults to 0.5
        offset (None | number): defaults to None
        linear (None | number): defaults to None

    The remaining configuration keys specify a feedback message to be given
    in each case:
        equals_msg (str): defaults to ''
        proportional_msg (str): defaults to 'The submitted answer differs from
            an expected answer by a constant factor.'
        offset_msg (str): defaults to ''
        linear_msg (str): defaults to ''

    NOTE:
        LinearComparer can be used with MatrixGrader, but the linear
        relationship must be the same for all entries. Essentially, this means
        we test for
            expected_array = scalar_a * student_array + scalar_b * ONES
        where ONES is a matrix of all ones.

        The ONES offset works as expected for vectors, but is probably not what
        you want for matrices.
    """

    # Partial-credit weight (or None to disable) and feedback message for
    # each of the four relationship subtypes.
    schema_config = Schema({
        Required('equals', default=1.0): Any(None, Range(0, 1)),
        Required('proportional', default=0.5): Any(None, Range(0, 1)),
        Required('offset', default=None): Any(None, Range(0, 1)),
        Required('linear', default=None): Any(None, Range(0, 1)),
        Required('equals_msg', default=''): text_string,
        Required('proportional_msg', default=(
            'The submitted answer differs from an expected answer by a '
            'constant factor.'
        )): text_string,
        Required('offset_msg', default=''): text_string,
        Required('linear_msg', default=''): text_string,
    })

    all_modes = ('equals', 'proportional', 'offset', 'linear')
    # Modes that remain meaningful when either side is (nearly) zero;
    # 'proportional'/'linear' are degenerate in that case.
    zero_compatible_modes = ('equals', 'offset')

    def __init__(self, config=None, **kwargs):
        super(LinearComparer, self).__init__(config, **kwargs)
        # Only modes whose credit is not None are ever evaluated.
        self.modes = tuple(mode for mode in self.all_modes if self.config[mode] is not None)

    # Maps each mode to the module-level residual-error function used for it.
    error_calculators = {
        'equals': get_equals_fit_error,
        'proportional': get_proportional_fit_error,
        'offset': get_offset_fit_error,
        'linear': get_linear_fit_error,
    }

    @staticmethod
    def check_comparing_zero(comparer_params_evals, student_evals, tolerance):
        """
        Check whether student input is nearly zero, or author input is exactly zero
        """
        student_zero = all([
            is_nearly_zero(x, tolerance, reference=y)
            for x, y in zip(student_evals, comparer_params_evals)
        ])
        # Each comparer_params_evals entry is a 1-element list, hence [x].
        expected_zero = all(np.all(x == 0.0) for [x] in comparer_params_evals)
        return student_zero or expected_zero

    def get_valid_modes(self, is_comparing_zero):
        """
        Returns a copy of self.modes, first removing 'proportional' and 'linear'
        when is_comparing_zero is truthy.
        """
        if is_comparing_zero:
            return tuple(mode for mode in self.modes
                         if mode in self.zero_compatible_modes)
        return self.modes

    def __call__(self, comparer_params_evals, student_evals, utils):
        # Norm of all student samples; used as the reference scale when
        # deciding whether a fit error counts as zero.
        student_evals_norm = np.linalg.norm(student_evals)
        # Validate student input shape...only needed for MatrixGrader
        if hasattr(utils, 'validate_shape'):
            # in numpy, scalars have empty tuples as their shapes
            expected_0 = comparer_params_evals[0][0]
            scalar_expected = isinstance(expected_0, Number)
            shape = tuple() if scalar_expected else expected_0.shape
            utils.validate_shape(student_evals[0], shape)

        # Raise an error if there is less than 3 samples
        if len(student_evals) < 3:
            msg = 'Cannot perform linear comparison with less than 3 samples'
            raise ConfigError(msg)

        is_comparing_zero = self.check_comparing_zero(comparer_params_evals,
                                                      student_evals, utils.tolerance)
        filtered_modes = self.get_valid_modes(is_comparing_zero)

        # Get the result for each mode
        # flatten in case individual evals are arrays (as in MatrixGrader)
        student = np.array(student_evals).flatten()
        expected = np.array(comparer_params_evals).flatten()
        errors = [self.error_calculators[mode](student, expected) for mode in filtered_modes]
        # Full credit+message for a mode only when its fit error is nearly
        # zero relative to the student's overall magnitude.
        results = [
            {'grade_decimal': self.config[mode], 'msg': self.config[mode+'_msg']}
            if is_nearly_zero(error, utils.tolerance, reference=student_evals_norm)
            else
            {'grade_decimal': 0, 'msg': ''}
            for mode, error in zip(filtered_modes, errors)
        ]

        # Get the best result using max.
        # For a list of pairs, max compares by 1st index and uses 2nd to break ties
        key = lambda result: (result['grade_decimal'], result['msg'])
        return max(results, key=key)
| 36.939024 | 99 | 0.630241 | 5,472 | 0.602179 | 0 | 0 | 484 | 0.053263 | 0 | 0 | 5,109 | 0.562232 |
38898650b0417419ec2e1d168eb7f6f230735290 | 642 | py | Python | 2017/day8-2.py | alvaropp/AdventOfCode2017 | 2827dcc18ecb9ad59a1a5fe11e469f31bafb74ad | [
"MIT"
] | null | null | null | 2017/day8-2.py | alvaropp/AdventOfCode2017 | 2827dcc18ecb9ad59a1a5fe11e469f31bafb74ad | [
"MIT"
] | null | null | null | 2017/day8-2.py | alvaropp/AdventOfCode2017 | 2827dcc18ecb9ad59a1a5fe11e469f31bafb74ad | [
"MIT"
] | null | null | null | filename = "day8.txt"
# Instruction keyword -> augmented-assignment operator used when executing it.
ops = {"inc": "+=", "dec": "-="}

# Initialise registers to zero
regs = {}
with open(filename) as f:
    for line in f.readlines():
        data = line.split(' ')
        reg = data[0]
        if reg not in regs:
            regs[reg] = 0

# Follow the instructions
# maxReg tracks the highest value held by any register at any point
# during execution (the puzzle's part-2 answer).
maxReg = 0
with open(filename) as f:
    for line in f.readlines():
        reg, op, value, _, condReg, condOp, condValue = line.split(' ')
        # SECURITY NOTE: eval/exec run arbitrary code from the input file.
        # Acceptable only because day8.txt is a trusted, local puzzle input.
        if eval(str(regs[condReg])+condOp+condValue):
            exec("regs['{}']".format(reg) + ops[op] + value)
            if regs[reg] > maxReg:
                maxReg = regs[reg]

print("Result = ", maxReg)
| 25.68 | 71 | 0.543614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.174455 |
388a33a4c640a949c0d9f3e5677661be8943cc55 | 4,755 | py | Python | tests/components/unifi/test_services.py | rahulsinghsss/core | 1156275db4e53a556ef58bb2038ae7d8ad103556 | [
"Apache-2.0"
] | 1 | 2021-12-30T09:37:48.000Z | 2021-12-30T09:37:48.000Z | tests/components/unifi/test_services.py | rahulsinghsss/core | 1156275db4e53a556ef58bb2038ae7d8ad103556 | [
"Apache-2.0"
] | 18 | 2021-11-03T06:21:46.000Z | 2022-03-31T06:21:15.000Z | tests/components/unifi/test_services.py | rahulsinghsss/core | 1156275db4e53a556ef58bb2038ae7d8ad103556 | [
"Apache-2.0"
] | 1 | 2021-12-30T09:37:53.000Z | 2021-12-30T09:37:53.000Z | """deCONZ service tests."""
from unittest.mock import Mock, patch
from homeassistant.components.unifi.const import DOMAIN as UNIFI_DOMAIN
from homeassistant.components.unifi.services import (
SERVICE_REMOVE_CLIENTS,
UNIFI_SERVICES,
async_setup_services,
async_unload_services,
)
from .test_controller import setup_unifi_integration
async def test_service_setup(hass):
    """Verify service setup works."""
    # Nothing is registered before setup runs.
    assert UNIFI_SERVICES not in hass.data

    target = "homeassistant.core.ServiceRegistry.async_register"
    with patch(target, return_value=Mock(True)) as register_mock:
        await async_setup_services(hass)

    # Setup flags the services as loaded and registers exactly one service.
    assert hass.data[UNIFI_SERVICES] is True
    assert register_mock.call_count == 1
async def test_service_setup_already_registered(hass):
    """Make sure that services are only registered once."""
    # Simulate a previous registration.
    hass.data[UNIFI_SERVICES] = True

    target = "homeassistant.core.ServiceRegistry.async_register"
    with patch(target, return_value=Mock(True)) as register_mock:
        await async_setup_services(hass)

    register_mock.assert_not_called()
async def test_service_unload(hass):
    """Verify service unload works."""
    # Services are currently marked as loaded.
    hass.data[UNIFI_SERVICES] = True

    target = "homeassistant.core.ServiceRegistry.async_remove"
    with patch(target, return_value=Mock(True)) as remove_mock:
        await async_unload_services(hass)

    # Unload clears the loaded flag and removes exactly one service.
    assert hass.data[UNIFI_SERVICES] is False
    assert remove_mock.call_count == 1
async def test_service_unload_not_registered(hass):
    """Make sure that services can only be unloaded once."""
    target = "homeassistant.core.ServiceRegistry.async_remove"
    with patch(target, return_value=Mock(True)) as remove_mock:
        await async_unload_services(hass)

    # Nothing was registered, so nothing is flagged or removed.
    assert UNIFI_SERVICES not in hass.data
    remove_mock.assert_not_called()
async def test_remove_clients(hass, aioclient_mock):
    """Verify removing different variations of clients work."""
    clients = [
        {
            # Short-lived client with no protecting attribute -- the only
            # one expected in the removal request asserted below.
            "first_seen": 100,
            "last_seen": 500,
            "mac": "00:00:00:00:00:01",
        },
        {
            # Seen over a longer period -- not removed.
            "first_seen": 100,
            "last_seen": 1100,
            "mac": "00:00:00:00:00:02",
        },
        {
            # Has a fixed IP -- not removed.
            "first_seen": 100,
            "last_seen": 500,
            "fixed_ip": "1.2.3.4",
            "mac": "00:00:00:00:00:03",
        },
        {
            # Has a hostname -- not removed.
            "first_seen": 100,
            "last_seen": 500,
            "hostname": "hostname",
            "mac": "00:00:00:00:00:04",
        },
        {
            # Has a name -- not removed.
            "first_seen": 100,
            "last_seen": 500,
            "name": "name",
            "mac": "00:00:00:00:00:05",
        },
    ]
    config_entry = await setup_unifi_integration(
        hass, aioclient_mock, clients_all_response=clients
    )
    controller = hass.data[UNIFI_DOMAIN][config_entry.entry_id]

    # Drop the setup traffic so only the service call's request is captured.
    aioclient_mock.clear_requests()
    aioclient_mock.post(
        f"https://{controller.host}:1234/api/s/{controller.site}/cmd/stamgr",
    )

    await hass.services.async_call(UNIFI_DOMAIN, SERVICE_REMOVE_CLIENTS, blocking=True)
    # mock_calls[0][2] is the JSON payload of the first captured request.
    assert aioclient_mock.mock_calls[0][2] == {
        "cmd": "forget-sta",
        "macs": ["00:00:00:00:00:01"],
    }
async def test_remove_clients_controller_unavailable(hass, aioclient_mock):
    """Verify no call is made if controller is unavailable."""
    clients = [
        {
            # Would normally be eligible for removal (see test_remove_clients).
            "first_seen": 100,
            "last_seen": 500,
            "mac": "00:00:00:00:00:01",
        }
    ]
    config_entry = await setup_unifi_integration(
        hass, aioclient_mock, clients_all_response=clients
    )
    controller = hass.data[UNIFI_DOMAIN][config_entry.entry_id]
    # Mark the controller offline; the service should then skip the request.
    controller.available = False

    # Drop the setup traffic so only the service call's requests are counted.
    aioclient_mock.clear_requests()
    aioclient_mock.post(
        f"https://{controller.host}:1234/api/s/{controller.site}/cmd/stamgr",
    )

    await hass.services.async_call(UNIFI_DOMAIN, SERVICE_REMOVE_CLIENTS, blocking=True)
    assert aioclient_mock.call_count == 0
async def test_remove_clients_no_call_on_empty_list(hass, aioclient_mock):
    """Verify no call is made if no fitting client has been added to the list."""
    clients = [
        {
            # Long-lived client -- not eligible, so the removal list is empty.
            "first_seen": 100,
            "last_seen": 1100,
            "mac": "00:00:00:00:00:01",
        }
    ]
    config_entry = await setup_unifi_integration(
        hass, aioclient_mock, clients_all_response=clients
    )
    controller = hass.data[UNIFI_DOMAIN][config_entry.entry_id]

    # Drop the setup traffic so only the service call's requests are counted.
    aioclient_mock.clear_requests()
    aioclient_mock.post(
        f"https://{controller.host}:1234/api/s/{controller.site}/cmd/stamgr",
    )

    await hass.services.async_call(UNIFI_DOMAIN, SERVICE_REMOVE_CLIENTS, blocking=True)
    assert aioclient_mock.call_count == 0
| 31.282895 | 87 | 0.646477 | 0 | 0 | 0 | 0 | 0 | 0 | 4,383 | 0.921767 | 1,225 | 0.257624 |
388a3e1c967bd69504b07a1ff1bfdb07f5722281 | 632 | py | Python | {{cookiecutter.directory_name}}/config/settings/development.py | ragnarok22/cookiecutter-django | 082196dde5ad932bf99bee138dc80de8c3823e03 | [
"Apache-2.0"
] | 2 | 2021-07-23T18:58:49.000Z | 2022-02-23T18:44:40.000Z | {{cookiecutter.directory_name}}/config/settings/development.py | ragnarok22/cookiecutter-django | 082196dde5ad932bf99bee138dc80de8c3823e03 | [
"Apache-2.0"
] | null | null | null | {{cookiecutter.directory_name}}/config/settings/development.py | ragnarok22/cookiecutter-django | 082196dde5ad932bf99bee138dc80de8c3823e03 | [
"Apache-2.0"
] | null | null | null | """
This is the settings file that you use when you're working on the project locally.
Local development-specific settings include DEBUG mode, log level, and activation of developer tools like django-debug-toolbar.
"""
from .base import *

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# SECURITY WARNING: keep the secret key used in production secret!
# Hard-coding a key here is tolerable only because these settings are for
# local development; production must supply its own secret.
SECRET_KEY = 'qov#ce&bl3z8@ymehv1byt^beru%el-0wjo%e#1q8#og6331ik'

# Accept requests for any host name; safe only on a local machine.
ALLOWED_HOSTS = ['*']

# Uploaded media is stored inside the project tree during development.
# (os and BASE_DIR come from the base settings via the star import.)
MEDIA_ROOT = os.path.join(BASE_DIR, '{{cookiecutter.directory_name}}', 'media')

# email settings
# Print outgoing e-mail to the console instead of actually sending it.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
| 31.6 | 118 | 0.764241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 499 | 0.789557 |
388b168ea670204a5943c2c7112d806beb501cfa | 3,094 | py | Python | analysis_scripts/prose_CCLE_PE2_evidence.py | bwbio/PROSE | 1622396e76e0e293ccff85786d1a5974c4fc3c94 | [
"MIT"
] | null | null | null | analysis_scripts/prose_CCLE_PE2_evidence.py | bwbio/PROSE | 1622396e76e0e293ccff85786d1a5974c4fc3c94 | [
"MIT"
] | null | null | null | analysis_scripts/prose_CCLE_PE2_evidence.py | bwbio/PROSE | 1622396e76e0e293ccff85786d1a5974c4fc3c94 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 25 05:21:45 2021
@author: bw98j
"""
import prose as pgx
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import seaborn as sns
import numpy as np
import itertools
import glob
import os
from tqdm import tqdm
import scipy.stats
import gtfparse
import itertools
from pylab import *
import collections
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import quantile_transform
import pickle
import re
#plot parameters
# Global matplotlib styling: Arial for both mathtext and body text.
plt.rcParams['mathtext.fontset'] = 'custom'
plt.rcParams['mathtext.it'] = 'Arial:italic'
plt.rcParams['mathtext.rm'] = 'Arial'
plt.rc('font',family='arial',size=40)
plt.rc('hatch',linewidth = 2.0)

#%% Read formatted matrices
# PROSE score matrix for CCLE cell lines, one row per cell line.
score = pd.read_csv('ccle/ccle_prose_formatted.tsv.gz', sep='\t').drop_duplicates('cell_line')

#%%
# Build {protein-existence level -> set of protein accessions} from the
# neXtProt export files; the PE label is the 4th '_'-separated field of
# each file name.
pe=glob.glob('databases/nextprot*')
pedict = {}
for i in pe:
    # NOTE(review): the name `pe` (the file list being iterated) is rebound
    # to the PE label here. Iteration is unaffected because the iterator
    # holds the original list, but the rebinding is easy to misread — as is
    # the reuse of `i` inside the comprehension below.
    pe = i.split('_')[3]
    df = pd.read_csv(i)
    pro = [i.split('_')[-1] for i in df.values.T[0]]
    pedict[pe] = set(pro)

#%%
# Rank plot of mean PROSE scores for PE2 proteins across cell lines.
score = score.sort_values(by='tissue')
df = score[list(score.columns.intersection(pedict['PE2']))]
data = df.mean().sort_values(ascending=True)
fig, ax = plt.subplots(figsize=[10,9])
g = sns.scatterplot(y=df.mean().sort_values(ascending=True),
                    x=df.mean().sort_values(ascending=True).rank(),
                    )
sns.despine()
plt.ylabel('Mean PROSE score', labelpad=10)
plt.xlabel('rank')
# Column headers for the protein/score table drawn outside the axes
# (coordinates > 1 in axes space).
plt.text(s='Protein',x=1.4,y=.95,size=30,ha='right',transform = ax.transAxes, weight='bold')
plt.text(s='Score',x=1.45,y=.95,size=30,ha='left',transform = ax.transAxes,weight='bold')

# Annotate the ten highest-scoring PE2 proteins beside the plot.
highscore = df.mean().sort_values(ascending=False)[:10]
for p,m,i in zip(highscore.index, round(highscore,3),range(len(highscore))):
    print(p,m)
    plt.text(s=m,x=1.45,y=.85-i*0.08, ha='left',
             size=30,transform = ax.transAxes)
    plt.text(s=p,x=1.4,y=.85-i*0.08, ha='right',
             size=30,transform = ax.transAxes)

plt.savefig('plots/CCLE_PE2_rank.png',
            format='png', dpi=600, bbox_inches='tight')
data.to_csv('source_data/Fig S4a (PE2 rank plot).csv')

#%%
# Heatmap of the top-10 PE2 proteins across all cell lines (no clustering;
# rows/columns keep their existing order).
cmap = sns.diverging_palette(9, 255, as_cmap=True)
g = sns.clustermap(data=df[highscore.index].T,
                   cmap=cmap,
                   vmin=0,vmax=2,center=0,
                   figsize=[12,10],
                   xticklabels=False,yticklabels=True,
                   dendrogram_ratio=0.1,
                   row_cluster=False,col_cluster=False,
                   cbar_kws={"orientation": "horizontal", 'aspect':50},
                   )
ax = g.ax_heatmap
ax.set_xlabel('cell lines',size=40,labelpad=10)
# Reposition the horizontal colorbar below the heatmap and label it.
g.cax.set_position([.45, -0.08, .3, .02])
ax.text(x=0.3,y=-0.2,s='PROSE score',ha='center',size=40, transform = ax.transAxes)
g.savefig('plots/CCLE_PE2_heatmap.png',
          format='png', dpi=600, bbox_inches='tight')

df[highscore.index].T.to_csv('source_data/Fig S4b (PE2 matrix).csv')
388c31bf17059d83c5f152ced39c6c1dfbd21371 | 428 | py | Python | python/play.py | banza-group/2048 | 90dcabf2b04e8b6ffba24e3a93d4dc5b16c5605b | [
"MIT"
] | 2 | 2019-03-08T03:42:31.000Z | 2019-03-08T03:42:34.000Z | python/play.py | banza-group/2048 | 90dcabf2b04e8b6ffba24e3a93d4dc5b16c5605b | [
"MIT"
] | 1 | 2019-03-15T19:30:12.000Z | 2019-03-15T19:30:12.000Z | python/play.py | banza-group/2048 | 90dcabf2b04e8b6ffba24e3a93d4dc5b16c5605b | [
"MIT"
] | 1 | 2020-10-26T01:29:22.000Z | 2020-10-26T01:29:22.000Z | """Play the game."""
import engine
import numpy as np
# Set up a 4x4 board and the interactive prompt text.
board = engine.createBoard(4)
message1 = "Use the keys to move (L)eft (R)ight (U)p (D)own."
message2 = "Press (Q) to quit:"

# Main game loop: show the board, read a move, apply it; quit on Q.
while True:
    print(np.array(engine.showBoard(board)))
    choice = str(input("{} {} ".format(message1, message2))).upper()
    if choice == "Q":
        break
    engine.move(choice, board)
| 23.777778 | 61 | 0.63785 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.235981 |
388c4200cdb6866a189312d1ba76f6d3bb459c3a | 758 | py | Python | pyorient/ogm/commands.py | spy7/pyorient | ac2547287f9299f4eec350666da3b19797872f20 | [
"Apache-2.0"
] | 142 | 2015-01-12T06:34:59.000Z | 2022-01-19T10:34:30.000Z | pyorient/ogm/commands.py | spy7/pyorient | ac2547287f9299f4eec350666da3b19797872f20 | [
"Apache-2.0"
] | 238 | 2015-01-04T21:05:41.000Z | 2021-04-12T17:45:53.000Z | pyorient/ogm/commands.py | spy7/pyorient | ac2547287f9299f4eec350666da3b19797872f20 | [
"Apache-2.0"
] | 107 | 2015-01-03T03:33:17.000Z | 2021-12-07T16:48:48.000Z | from ..utils import to_str
class VertexCommand(object):
def __init__(self, command_text):
self.command_text = command_text
def __str__(self):
return to_str(self.__unicode__())
def __unicode__(self):
return u'{}'.format(self.command_text)
class CreateEdgeCommand(object):
def __init__(self, command_text):
self.command_text = command_text
self.retries = None
def __str__(self):
return to_str(self.__unicode__())
def __unicode__(self):
if self.retries:
return u'{} RETRY {}'.format(self.command_text, self.retries)
else:
return u'{}'.format(self.command_text)
def retry(self, retries):
self.retries = retries
return self
| 25.266667 | 73 | 0.637203 | 727 | 0.959103 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.031662 |
388c951866762cef5e383f255fb30261476df70e | 2,431 | py | Python | source/GetDatasetInformation.py | san-harsh/PyImageRoi | fc95d48f33e3dcde308a027f1f0dc5ee6d9a3919 | [
"MIT"
] | 10 | 2018-01-29T18:56:17.000Z | 2021-06-04T09:34:17.000Z | source/GetDatasetInformation.py | san-harsh/PyImageRoi | fc95d48f33e3dcde308a027f1f0dc5ee6d9a3919 | [
"MIT"
] | 1 | 2018-01-29T19:09:11.000Z | 2018-01-30T02:20:25.000Z | source/GetDatasetInformation.py | san-harsh/PyImageRoi | fc95d48f33e3dcde308a027f1f0dc5ee6d9a3919 | [
"MIT"
] | 5 | 2017-07-14T16:22:40.000Z | 2021-06-10T07:14:54.000Z | import argparse
import os
import glob
import pandas as pd
from libraryTools import imageRegionOfInterest
#filename,width,height,class,xmin,ymin,xmax,ymax
#20170730_132530-(F00000).jpeg,576,1024,sinaleira,221,396,246,437
valid_images = [".jpg",".gif",".png",".tga",".jpeg"]
def run(image_path, classNameList=None, searchSubdir=False):
    """Scan image_path for annotated images and print summary statistics.

    Args:
        image_path: root folder containing the images to inspect.
        classNameList: optional list of class names (kept for CLI
            compatibility; defaults to ["someclass"]). Fix: the former
            mutable default argument (a list literal) was replaced by None
            to avoid cross-call shared state.
        searchSubdir: when True, recurse into sub-folders as well.
    """
    global classes_qtd
    global images_total_qtd
    global images_without_classes_qtd
    global xml_list

    if classNameList is None:
        classNameList = ["someclass"]

    # Reset the module-level accumulators that searchFolder() fills in.
    classes_qtd = []
    images_total_qtd = 0
    images_without_classes_qtd = 0
    xml_list = []

    searchFolder(image_path, classNameList, searchSubdir)

    print()
    print('Total Images: ', images_total_qtd)
    print('Images without classes: ', images_without_classes_qtd)
    print('Classes: ')
    for q in classes_qtd:
        print(q)
def searchFolder(image_path, classNameList, searchSubdir):
    """Walk image_path, counting annotated images into the module globals.

    classNameList is only passed through to recursive calls; the actual
    per-image counting uses the numeric class index stored in each box.
    """
    global valid_images
    global classes_qtd
    global images_total_qtd
    global images_without_classes_qtd
    global xml_list
    print("Folder", image_path)
    obj = imageRegionOfInterest(image_path)
    for filename in os.listdir(image_path):
        if searchSubdir and os.path.isdir(os.path.join(image_path, filename)):
            searchFolder(os.path.join(image_path, filename), classNameList, searchSubdir)
        # NOTE(review): after recursing into a directory, execution still
        # falls through to splitext() on the directory name; it is filtered
        # out by the extension check below, but the control flow is subtle.
        name, ext = os.path.splitext(filename)
        if ext.lower() not in valid_images:
            continue
        print(filename)
        images_total_qtd = images_total_qtd + 1
        obj.setFileImage(filename)
        points = obj.loadBoxFromTxt()
        if len(points)>0:
            for point in points:
                # point[4] holds the class index; grow the per-class counter
                # list on demand before incrementing.
                iclass = int(point[4])
                while len(classes_qtd) < iclass+1:
                    classes_qtd.append(0)
                classes_qtd[iclass] = classes_qtd[iclass] + 1
        else:
            images_without_classes_qtd = images_without_classes_qtd + 1
    return
#=============================================================================
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--path", required=True, help="images path")
ap.add_argument('-className', nargs='*', help='class name list (0..9 positions, max 10), e.g. -classes dog cat')
ap.add_argument('-s', '--subdir', action='store_true', help="Search sub folders")
args = vars(ap.parse_args())
# args["className"] is None when -className is omitted; run() tolerates that.
run(args["path"], args["className"], args["subdir"])
388e1ba13f01555ab5ef97f17338085142e9b93d | 22,968 | py | Python | AShareData/data_source/WindData.py | TMG-TheMoneyGame/AShareData | 2c4fded364c987e4f1ec77fafbb55f75ace1264d | [
"MIT"
] | null | null | null | AShareData/data_source/WindData.py | TMG-TheMoneyGame/AShareData | 2c4fded364c987e4f1ec77fafbb55f75ace1264d | [
"MIT"
] | null | null | null | AShareData/data_source/WindData.py | TMG-TheMoneyGame/AShareData | 2c4fded364c987e4f1ec77fafbb55f75ace1264d | [
"MIT"
] | null | null | null | import datetime as dt
from functools import cached_property
from typing import Dict, List, Sequence, Union
import numpy as np
import pandas as pd
import WindPy
from tqdm import tqdm
from .DataSource import DataSource
from .. import config, constants, DateUtils, utils
from ..DBInterface import DBInterface
from ..Tickers import ConvertibleBondTickers, ETFOptionTickers, ETFTickers, FutureTickers, IndexOptionTickers, \
StockTickers
class WindWrapper(object):
"""Wind Wrapper to make wind API easier to use"""
def __init__(self):
self._w = None
def __enter__(self):
self.connect()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.disconnect()
def connect(self):
with utils.NullPrinter():
self._w = WindPy.w
self._w.start()
def disconnect(self):
if self._w:
self._w.close()
def is_connected(self):
return self._w.isconnected()
@staticmethod
def _api_error(api_data):
if isinstance(api_data, tuple):
error_code = api_data[0]
has_data = True
else:
error_code = api_data.ErrorCode
data = api_data.Data
has_data = any(data)
if (error_code != 0) or (not has_data):
raise ValueError(f"Failed to get data, ErrorCode: {error_code}, Error Message: {api_data[1].iloc[0, 0]}")
@staticmethod
def _standardize_date(date: DateUtils.DateType = None):
if not date:
date = dt.date.today()
if isinstance(date, (dt.date, dt.datetime)):
date = date.strftime('%Y-%m-%d')
return date
@staticmethod
def _to_df(out: WindPy.w.WindData) -> Union[pd.Series, pd.DataFrame]:
times = DateUtils.date_type2datetime(out.Times)
df = pd.DataFrame(out.Data).T
if len(out.Times) > 1:
df.index = times
if len(out.Fields) >= len(out.Codes):
df.columns = out.Fields
df['ID'] = out.Codes[0]
df.set_index('ID', append=True, inplace=True)
else:
df.columns = out.Codes
df = df.stack()
df.name = out.Fields[0]
else:
df.index = out.Codes
df.columns = out.Fields
df['DateTime'] = times[0]
df = df.set_index(['DateTime'], append=True).swaplevel()
df.index.names = ['DateTime', 'ID']
if isinstance(df, pd.DataFrame) and (df.shape[1] == 1):
df = df.iloc[:, 0]
return df
# wrapped functions
def wsd(self, codes: Union[str, List[str]], fields: Union[str, List[str]],
begin_time: Union[str, dt.datetime] = None,
end_time: Union[str, dt.datetime] = None,
options: str = None, **kwargs) -> Union[pd.Series, pd.DataFrame]:
data = self._w.wsd(codes, fields, begin_time, end_time, options, **kwargs)
self._api_error(data)
return self._to_df(data)
@DateUtils.dtlize_input_dates
def wss(self, codes: Union[str, List[str]], fields: Union[str, List[str]], options: str = '',
date: DateUtils.DateType = None, **kwargs) -> pd.DataFrame:
if date:
options = f'tradeDate={date.strftime("%Y%m%d")};' + options
data = self._w.wss(codes, fields, options, usedf=True, **kwargs)
self._api_error(data)
ret_data = data[1]
if date:
ret_data.index.names = ['ID']
ret_data['DateTime'] = date
ret_data = ret_data.reset_index().set_index(['DateTime', 'ID'])
return ret_data
def wsi(self, codes: Union[str, List[str]], fields: Union[str, List[str]],
        begin_time: Union[str, dt.datetime] = None,
        end_time: Union[str, dt.datetime] = None,
        options: str = None) -> pd.DataFrame:
    """Fetch intraday minute-bar data via WindPy ``w.wsi`` and return the
    result DataFrame. Raises via :meth:`_api_error` on API failure."""
    response = self._w.wsi(codes, fields, begin_time, end_time, options, usedf=True)
    self._api_error(response)
    return response[1]
def wst(self, codes: Union[str, List[str]], fields: Union[str, List[str]],
        begin_time: Union[str, dt.datetime] = None,
        end_time: Union[str, dt.datetime] = None,
        options: str = None, **kwargs) -> pd.DataFrame:
    """Fetch intraday tick data via WindPy ``w.wst`` and return the result
    DataFrame. Raises via :meth:`_api_error` on API failure.

    Bug fix: this wrapper previously delegated to ``self._w.wsi`` (minute
    bars) - a copy-paste error; it now calls ``self._w.wst`` as its name
    and the sibling wrappers imply.
    """
    data = self._w.wst(codes, fields, begin_time, end_time, options, usedf=True, **kwargs)
    self._api_error(data)
    return data[1]
def wset(self, table_name: str, options: str = '', **kwargs) -> pd.DataFrame:
    """Fetch a data set via WindPy ``w.wset``, rename ``date``/``wind_code``
    to ``DateTime``/``ID``, and index by whichever of those columns exist."""
    response = self._w.wset(table_name, options, usedf=True, **kwargs)
    self._api_error(response)
    table = response[1]
    table.rename({'date': 'DateTime', 'wind_code': 'ID'}, axis=1, inplace=True)
    index_cols = sorted({'DateTime', 'ID'}.intersection(table.columns))
    if index_cols:
        table.set_index(index_cols, drop=True, inplace=True)
    return table
def wsq(self, codes: Union[str, List[str]], fields: Union[str, List[str]]) -> pd.DataFrame:
    """Fetch real-time snapshot quotes via WindPy ``w.wsq`` and return the
    result DataFrame. Raises via :meth:`_api_error` on API failure."""
    response = self._w.wsq(codes, fields, usedf=True)
    self._api_error(response)
    return response[1]
# outright functions
def get_index_constitute(self, date: DateUtils.DateType = None,
                         index: str = '000300.SH') -> pd.DataFrame:
    """Return the constituents of *index* as of *date* (default: today).

    Bug fix: the default used to be ``dt.date.today()`` evaluated once at
    import time, so a long-running process silently kept querying a stale
    date. The default is now resolved at call time; explicit callers are
    unaffected.
    """
    if date is None:
        date = dt.date.today()
    date = DateUtils.date_type2datetime(date)
    return self.wset('indexconstituent', date=date, windcode=index)
class WindData(DataSource):
    """Wind data source: downloads market data via WindPy and writes it to the
    project database. Usable as a context manager (connects/disconnects the
    underlying Wind session)."""

    def __init__(self, db_interface: DBInterface = None, param_json_loc: str = None):
        super().__init__(db_interface)
        # Column-renaming parameters per table, loaded from wind_param.json.
        self._factor_param = utils.load_param('wind_param.json', param_json_loc)
        self.w = WindWrapper()

    def __enter__(self):
        self.w.connect()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.w.disconnect()

    def connect(self):
        """Connect the underlying Wind terminal session."""
        self.w.connect()

    @cached_property
    def stock_list(self) -> StockTickers:
        return StockTickers(self.db_interface)

    @cached_property
    def future_list(self) -> FutureTickers:
        return FutureTickers(self.db_interface)

    @cached_property
    def option_list(self) -> IndexOptionTickers:
        return IndexOptionTickers(self.db_interface)

    @cached_property
    def stock_index_option_list(self) -> IndexOptionTickers:
        # NOTE(review): identical to `option_list`; looks like a distinct ticker
        # class may have been intended here - confirm against the tickers module.
        return IndexOptionTickers(self.db_interface)

    @cached_property
    def etf_option_list(self) -> ETFOptionTickers:
        return ETFOptionTickers(self.db_interface)

    @cached_property
    def etf_list(self):
        return ETFTickers(self.db_interface)

    @cached_property
    def convertible_bond_list(self):
        return ConvertibleBondTickers(self.db_interface)

    #######################################
    # stock funcs
    #######################################
    def get_stock_daily_data(self, date: DateUtils.DateType) -> None:
        """Download daily stock quotes for *date* and write them to the
        database (no return value). Fields: open/high/low/close/volume/amount.

        :param date: trade date
        :return: None
        """
        table_name = '股票日行情'
        renaming_dict = self._factor_param[table_name]
        price_df = self.w.wss(self.stock_list.ticker(date), list(renaming_dict.keys()), date=date,
                              options='priceAdj=U;cycle=D;unit=1')
        price_df.rename(renaming_dict, axis=1, inplace=True)
        self.db_interface.update_df(price_df, table_name)

    def get_stock_rt_price(self):
        """Snapshot the latest price of every listed stock and replace the
        real-time price table with it."""
        tickers = self.stock_list.ticker(dt.date.today())
        storage = []
        # Wind caps per-request size; query in chunks of 3000 tickers.
        for ticker in utils.chunk_list(tickers, 3000):
            storage.append(self.w.wsq(ticker, 'rt_latest'))
        data = pd.concat(storage)
        data.index.names = ['ID']
        data.columns = ['最新价']
        data['DateTime'] = dt.datetime.now()
        data.set_index('DateTime', append=True, inplace=True)
        # Real-time table holds only the latest snapshot: purge, then insert.
        self.db_interface.purge_table('股票最新价')
        self.db_interface.insert_df(data, '股票最新价')

    def update_stock_daily_data(self):
        """Download daily stock quotes for every trading day missing from the DB."""
        table_name = '股票日行情'
        start_date = self._check_db_timestamp(table_name, dt.date(1990, 12, 10))
        dates = self.calendar.select_dates(start_date, dt.date.today(), inclusive=(False, True))
        with tqdm(dates) as pbar:
            for date in dates:
                pbar.set_description(f'下载{date}的{table_name}')
                self.get_stock_daily_data(date)
                pbar.update()

    def get_stock_minute_data(self, date: dt.datetime):
        """Download minute bars for *date* (08:00-16:00) and insert them."""
        table_name = '股票分钟行情'
        replace_dict = self._factor_param[table_name]
        start_time = dt.datetime.combine(date.date(), dt.time(hour=8))
        end_time = dt.datetime.combine(date.date(), dt.time(hour=16))
        storage = []
        for section in utils.chunk_list(self.stock_list.ticker(date), 100):
            partial_data = self.w.wsi(section, "open,high,low,close,volume,amt", start_time, end_time, "")
            storage.append(partial_data.dropna())
        data = pd.concat(storage)
        data.set_index('windcode', append=True, inplace=True)
        data.index.names = ['DateTime', 'ID']
        data.rename(replace_dict, axis=1, inplace=True)
        self.db_interface.insert_df(data, table_name)

    def update_minutes_data(self) -> None:
        """Update script for per-minute stock quotes (last ~3 years by default)."""
        table_name = '股票分钟行情'
        latest = self._check_db_timestamp(table_name, dt.datetime.today() - dt.timedelta(days=365 * 3))
        date_range = self.calendar.select_dates(latest.date(), dt.date.today(), inclusive=(False, True))
        with tqdm(date_range) as pbar:
            for date in date_range:
                pbar.set_description(f'更新{date}的{table_name}')
                self.get_stock_minute_data(date)
                pbar.update()

    def update_stock_adj_factor(self):
        """Update the adjustment-factor sparse table."""
        def data_func(ticker: str, date: DateUtils.DateType) -> pd.Series:
            data = self.w.wsd(ticker, 'adjfactor', date, date)
            data.name = '复权因子'
            return data

        self.sparse_data_template('复权因子', data_func)

    def update_stock_units(self):
        """Update the share-count sparse tables (float, free-float, total, A-share total)."""
        # float A-shares
        def float_a_func(ticker: str, date: DateUtils.DateType) -> pd.Series:
            data = self.w.wsd(ticker, "float_a_shares", date, date, "unit=1")
            data.name = 'A股流通股本'
            return data

        self.sparse_data_template('A股流通股本', float_a_func)

        # free-float shares
        def free_float_a_func(ticker: str, date: DateUtils.DateType) -> pd.Series:
            data = self.w.wsd(ticker, "free_float_shares", date, date, "unit=1")
            data.name = '自由流通股本'
            return data

        self.sparse_data_template('自由流通股本', free_float_a_func)

        # total shares
        def total_share_func(ticker: str, date: DateUtils.DateType) -> pd.Series:
            data = self.w.wsd(ticker, "total_shares", date, date, "unit=1")
            data.name = '总股本'
            return data

        self.sparse_data_template('总股本', total_share_func)

        # total A-shares
        def total_a_share_func(ticker: str, date: DateUtils.DateType) -> pd.Series:
            data = self.w.wsd(ticker, "share_totala", date, date, "unit=1")
            data.name = 'A股总股本'
            return data

        self.sparse_data_template('A股总股本', total_a_share_func)

    def _update_industry(self, provider: str) -> None:
        """Update industry classification data.

        :param provider: industry classification provider
        """
        def _get_industry_data(ticker: Union[str, List[str]], date: dt.datetime) -> pd.Series:
            wind_data = self.w.wsd(ticker, f'industry_{constants.INDUSTRY_DATA_PROVIDER_CODE_DICT[provider]}',
                                   date, date, industryType=constants.INDUSTRY_LEVEL[provider])
            wind_data.name = f'{provider}行业'
            # Strip trailing level-III/IV suffixes (Latin or Roman-numeral glyphs).
            wind_data = wind_data.str.replace('III|Ⅲ|IV|Ⅳ$', '', regex=True)
            return wind_data

        table_name = f'{provider}行业'
        query_date = self.calendar.yesterday()
        latest = self.db_interface.get_latest_timestamp(table_name)
        if latest is None:
            # Table is empty: seed it with a full snapshot at the provider's start date.
            latest = DateUtils.date_type2datetime(constants.INDUSTRY_START_DATE[provider])
            initial_data = self.w.wss(self.stock_list.ticker(latest),
                                      f'industry_{constants.INDUSTRY_DATA_PROVIDER_CODE_DICT[provider]}',
                                      date=latest).dropna()
            self.db_interface.insert_df(initial_data, table_name)
        else:
            initial_data = self.db_interface.read_table(table_name).groupby('ID').tail(1)
        new_data = _get_industry_data(ticker=self.stock_list.ticker(), date=query_date).dropna()

        # A ticker's history cannot start before the provider's coverage does.
        default_start_date = self.stock_list.list_date()
        for ticker, date in default_start_date.items():
            if date < constants.INDUSTRY_START_DATE[provider]:
                default_start_date[ticker] = constants.INDUSTRY_START_DATE[provider]

        self.sparse_data_queryer(_get_industry_data, initial_data, new_data, f'更新{table_name}',
                                 default_start_date=default_start_date)

    def update_industry(self) -> None:
        """Update industry classifications for every configured provider."""
        for provider in constants.INDUSTRY_DATA_PROVIDER:
            self._update_industry(provider)

    def update_pause_stock_info(self):
        """Download trading-suspension records in 20-day chunks and insert them,
        filtering out transient intraday price-triggered halts."""
        table_name = '股票停牌'
        start_date = self._check_db_timestamp(table_name, dt.date(1990, 12, 10)) + dt.timedelta(days=1)
        end_date = self.calendar.yesterday()
        chunks = self.calendar.split_to_chunks(start_date, end_date, 20)
        renaming_dict = self._factor_param[table_name]
        with tqdm(chunks) as pbar:
            pbar.set_description('下载股票停牌数据')
            for range_start, range_end in chunks:
                start_date_str = range_start.strftime("%Y%m%d")
                end_date_str = range_end.strftime("%Y%m%d")
                pbar.set_postfix_str(f'{start_date_str} - {end_date_str}')
                data = self.w.wset("tradesuspend",
                                   f'startdate={start_date_str};enddate={end_date_str};field=date,wind_code,suspend_type,suspend_reason')
                data.rename(renaming_dict, axis=1, inplace=True)
                # Drop intraday halts caused by price moves and other intraday reasons.
                ind1 = (data['停牌类型'] == '盘中停牌') & (data['停牌原因'].str.startswith('股票价格'))
                ind2 = (data['停牌原因'].str.startswith('盘中'))
                data = data.loc[(~ind1) & (~ind2), :]
                self.db_interface.insert_df(data, table_name)
                pbar.update()

    #######################################
    # convertible bond funcs
    #######################################
    def update_convertible_bond_daily_data(self):
        """Download daily convertible-bond quotes for every missing trading day."""
        table_name = '可转债日行情'
        # Reuses the stock-quote column mapping (same OHLCV fields).
        renaming_dict = self._factor_param['股票日行情']
        start_date = self._check_db_timestamp(table_name, dt.datetime(1993, 2, 9))
        dates = self.calendar.select_dates(start_date, dt.date.today(), inclusive=(False, True))
        with tqdm(dates) as pbar:
            for date in dates:
                pbar.set_description(f'下载{date}的{table_name}')
                tickers = self.convertible_bond_list.ticker(date)
                if tickers:
                    data = self.w.wss(tickers, "open, high, low, close, volume, amt",
                                      date=date, options='priceAdj=U;cycle=D')
                    data.rename(renaming_dict, axis=1, inplace=True)
                    self.db_interface.insert_df(data, table_name)
                pbar.update()

    #######################################
    # future funcs
    #######################################
    def update_future_daily_data(self):
        """Download daily futures quotes for every missing trading day."""
        contract_daily_table_name = '期货日行情'
        start_date = self.db_interface.get_latest_timestamp(contract_daily_table_name)
        dates = self.calendar.select_dates(start_date, dt.date.today(), inclusive=(False, True))
        with tqdm(dates) as pbar:
            for date in dates:
                pbar.set_description(f'下载{date}的{contract_daily_table_name}')
                data = self.w.wss(self.future_list.ticker(date), "open, high, low, close, settle, volume, amt, oi",
                                  date=date, options='priceAdj=U;cycle=D')
                data.rename(self._factor_param[contract_daily_table_name], axis=1, inplace=True)
                self.db_interface.insert_df(data, contract_daily_table_name)
                pbar.update()

    #######################################
    # option funcs
    #######################################
    def get_stock_option_daily_data(self, date: dt.datetime) -> None:
        """Download daily option quotes (prices + Greeks) for *date* and insert them."""
        contract_daily_table_name = '期权日行情'
        tickers = self.etf_option_list.ticker(date) + self.option_list.ticker(date)
        data = self.w.wss(tickers,
                          "high,open,low,close,volume,amt,oi,delta,gamma,vega,theta,rho",
                          date=date, priceAdj='U', cycle='D')
        data.rename(self._factor_param[contract_daily_table_name], axis=1, inplace=True)
        self.db_interface.insert_df(data, contract_daily_table_name)

    def update_stock_option_daily_data(self) -> None:
        """Download daily option quotes for every missing trading day."""
        contract_daily_table_name = '期权日行情'
        start_date = self._check_db_timestamp(contract_daily_table_name, dt.datetime(2015, 2, 8))
        dates = self.calendar.select_dates(start_date, dt.date.today(), inclusive=(False, True))
        with tqdm(dates) as pbar:
            for date in dates:
                pbar.set_description(f'下载{date}的{contract_daily_table_name}')
                self.get_stock_option_daily_data(date)
                pbar.update()

    #######################################
    # index funcs
    #######################################
    def update_target_stock_index_daily(self) -> None:
        """Download daily quotes and valuation metrics for the tracked stock indexes."""
        table_name = '指数日行情'
        start_date = self.db_interface.get_latest_timestamp(table_name)
        dates = self.calendar.select_dates(start_date, dt.date.today(), inclusive=(False, True))
        indexes = list(constants.STOCK_INDEXES.values())
        with tqdm(dates) as pbar:
            for date in dates:
                pbar.set_description(f'下载{date}的{table_name}')
                indicators = "open,low,high,close,volume,amt,mkt_cap_ard,total_shares,float_a_shares,free_float_shares,pe_ttm"
                data = self.w.wss(indexes, indicators, date=date, priceAdj='U', cycle='D')
                # Bug fix: the second rename previously received the *set*
                # {'ID', 'IndexCode'}; pandas requires a dict-like or callable
                # mapper, so that call could never work. Use the intended
                # 'ID' -> 'IndexCode' mapping.
                # NOTE(review): 'ID' is an index level (not a column) after wss;
                # confirm whether the index level should be renamed instead.
                data = data.rename(self._factor_param[table_name], axis=1).rename({'ID': 'IndexCode'}, axis=1)
                self.db_interface.insert_df(data, table_name)
                pbar.update()

    #######################################
    # helper funcs
    #######################################
    def sparse_data_queryer(self, data_func, start_series: pd.Series = None, end_series: pd.Series = None,
                            desc: str = '', default_start_date: Union[Dict, DateUtils.DateType] = None):
        """Align the stored sparse series against a fresh snapshot and, for each
        ticker whose value changed, binary-search the change date via
        :meth:`_binary_data_queryer` and persist the updates."""
        # Align both series over the union of tickers.
        start_ticker = [] if start_series.empty else start_series.index.get_level_values('ID')
        all_ticker = sorted(list(set(start_ticker) | set(end_series.index.get_level_values('ID'))))
        tmp = start_series.reset_index().set_index('ID').reindex(all_ticker)
        start_series = tmp.reset_index().set_index(['DateTime', 'ID']).iloc[:, 0]

        end_index = pd.MultiIndex.from_product([[end_series.index.get_level_values('DateTime')[0]], all_ticker],
                                               names=['DateTime', 'ID'])
        end_series = end_series.reindex(end_index)

        # Drop tickers missing on both sides.
        # NOTE(review): boolean `.loc[ind, :]` on a Series relies on pandas
        # tolerating a second indexer - confirm under the pinned pandas version.
        ind = np.logical_not(start_series.isnull().values & end_series.isnull().values)
        start_series = start_series.loc[ind, :]
        end_series = end_series.loc[ind, :]

        # Keep only tickers whose value actually changed (float: tolerance 1e-4).
        if start_series.dtype == 'float64':
            ind = np.abs(start_series.values - end_series.values) > 0.0001
            ind = ind | start_series.isnull().values | end_series.isnull().values
            ind = ind & (start_series.values != 0)
        else:
            ind = (start_series.values != end_series.values)
        start_series = start_series.loc[ind]
        end_series = end_series.loc[ind, :]

        with tqdm(start_series) as pbar:
            for i in range(start_series.shape[0]):
                new_val = end_series.iloc[i:i + 1]
                old_val = start_series.iloc[i:i + 1]
                # NOTE(review): np.isnan on the DateTime level assumes missing
                # entries surface as float NaN (not NaT/object) - confirm.
                if np.isnan(old_val.index.get_level_values('DateTime').values[0]):
                    # Ticker has no stored history: seed it at its default start date.
                    ticker = old_val.index.get_level_values('ID').values[0]
                    if isinstance(default_start_date, dict):
                        index_date = default_start_date[ticker]
                    else:
                        index_date = DateUtils.date_type2datetime(default_start_date)
                    old_val = data_func(ticker=ticker, date=index_date.date())
                    self.db_interface.update_df(old_val.to_frame(), old_val.name)
                pbar.set_description(f'{desc}: {new_val.index.get_level_values("ID").values[0]}')
                self._binary_data_queryer(data_func, old_val, new_val)
                pbar.update(1)

    def _binary_data_queryer(self, data_func, start_data: pd.Series, end_data: pd.Series) -> None:
        """Recursively bisect the date range to locate when a sparse value
        changed, persisting the new value once the range narrows to adjacent
        trading days."""
        if start_data.dtype == 'float64':
            if all(start_data.notnull()) and all(end_data.notnull()) and abs(
                    start_data.values[0] - end_data.values[0]) < 0.001:
                is_diff = False
            else:
                is_diff = True
        else:
            is_diff = start_data.values[0] != end_data.values[0]
        if is_diff:
            start_date = start_data.index.get_level_values('DateTime')[0]
            end_date = end_data.index.get_level_values('DateTime')[0]
            if self.calendar.days_count(start_date, end_date) < 2:
                # Adjacent trading days: the change happened at end_date - store it.
                self.db_interface.update_df(end_data.to_frame(), end_data.name)
            else:
                ticker = end_data.index.get_level_values('ID')[0]
                mid_date = self.calendar.middle(start_date, end_date)
                mid_data = data_func(ticker=ticker, date=mid_date)
                self._binary_data_queryer(data_func, start_data, mid_data)
                self._binary_data_queryer(data_func, mid_data, end_data)

    def sparse_data_template(self, table_name: str, data_func, ticker: Sequence[str] = None,
                             default_start_date: Union[Dict, DateUtils.DateType] = None):
        """Common driver for updating a sparse table: read the latest stored
        value per ticker, fetch a fresh snapshot, and reconcile the two."""
        if default_start_date is None:
            default_start_date = self.stock_list.list_date()
        if ticker is None:
            ticker = self.stock_list.all_ticker()
        current_data = self.db_interface.read_table(table_name).groupby('ID').tail(1)
        current_data = current_data.loc[current_data.index.get_level_values('ID').isin(ticker), :]
        end_date = self.calendar.yesterday()
        new_data = data_func(ticker=ticker, date=end_date)
        new_data.name = table_name
        self.sparse_data_queryer(data_func, current_data, new_data, f'更新{table_name}',
                                 default_start_date=default_start_date)

    @classmethod
    def from_config(cls, config_loc: str):
        """Build a WindData instance from a configuration file."""
        db_interface = config.generate_db_interface_from_config(config_loc)
        return cls(db_interface)
| 42.930841 | 137 | 0.602229 | 23,014 | 0.981073 | 0 | 0 | 3,151 | 0.134325 | 0 | 0 | 3,135 | 0.133643 |
388f74e37b3771101d7c6d1684ce571a957ae9fc | 12,874 | py | Python | benchmark/obd/evaluate_off_policy_estimators.py | isabella232/pyIEOE | bc2ab396a38984dec57a50dd2dae4dd726d5eb3b | [
"MIT"
] | 8 | 2021-08-31T09:06:01.000Z | 2022-01-20T01:13:03.000Z | benchmark/obd/evaluate_off_policy_estimators.py | isabella232/pyIEOE | bc2ab396a38984dec57a50dd2dae4dd726d5eb3b | [
"MIT"
] | null | null | null | benchmark/obd/evaluate_off_policy_estimators.py | isabella232/pyIEOE | bc2ab396a38984dec57a50dd2dae4dd726d5eb3b | [
"MIT"
] | 1 | 2022-03-25T16:57:50.000Z | 2022-03-25T16:57:50.000Z | # Copyright (c) 2021 Sony Group Corporation and Hanjuku-kaso Co., Ltd. All Rights Reserved.
#
# This software is released under the MIT License.
# http://opensource.org/licenses/mit-license.php
import argparse
from distutils.util import strtobool
from pathlib import Path
import pickle
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings(action="ignore", category=ConvergenceWarning)
import numpy as np
from pandas import DataFrame
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier as RandomForest
from sklearn.ensemble import HistGradientBoostingClassifier as LightGBM
from sklearn.model_selection import RandomizedSearchCV
from obp.dataset import OpenBanditDataset
from obp.policy import Random, BernoulliTS
from obp.ope import (
InverseProbabilityWeightingTuning,
SelfNormalizedInverseProbabilityWeighting,
DirectMethod,
DoublyRobustTuning,
SelfNormalizedDoublyRobust,
SwitchDoublyRobustTuning,
DoublyRobustWithShrinkageTuning,
)
from pyieoe.evaluator import InterpretableOPEEvaluator
# hyperparameter space for the OPE estimators themselves
from conf import ope_estimator_hyperparams
# hyperparameter space for the regression model used in model dependent OPE estimators
from conf import ope_regression_uniform_hyperparams
from conf import ope_regression_rscv_hyperparams
# compared ope estimators
# All tuning-based estimators share the same candidate grid (tau_lambda).
ope_estimators = [
    InverseProbabilityWeightingTuning(
        lambdas=ope_estimator_hyperparams.tau_lambda, estimator_name="IPWps"
    ),
    SelfNormalizedInverseProbabilityWeighting(estimator_name="SNIPW"),
    DirectMethod(estimator_name="DM"),
    DoublyRobustTuning(
        lambdas=ope_estimator_hyperparams.tau_lambda, estimator_name="DRps"
    ),
    SelfNormalizedDoublyRobust(estimator_name="SNDR"),
    SwitchDoublyRobustTuning(
        taus=ope_estimator_hyperparams.tau_lambda, estimator_name="Switch-DR"
    ),
    DoublyRobustWithShrinkageTuning(
        lambdas=ope_estimator_hyperparams.tau_lambda, estimator_name="DRos"
    ),
]
# Hyperparameter search space per model-dependent estimator, keyed by the
# class-level estimator name (IPW-family estimators need none).
ope_estimator_hyperparams_ = {
    DirectMethod.estimator_name: ope_estimator_hyperparams.dm_param,
    DoublyRobustTuning.estimator_name: ope_estimator_hyperparams.dr_param,
    SelfNormalizedDoublyRobust.estimator_name: ope_estimator_hyperparams.sndr_param,
    SwitchDoublyRobustTuning.estimator_name: ope_estimator_hyperparams.switch_dr_param,
    DoublyRobustWithShrinkageTuning.estimator_name: ope_estimator_hyperparams.dros_param,
}
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="evaluate off-policy estimators with multi-class classification data."
)
parser.add_argument(
"--n_seeds",
type=int,
default=1000,
help="number of seeds used in the experiment.",
)
parser.add_argument(
"--use_random_search",
type=strtobool,
default=False,
help="whether to use random search for hyperparamter selection or not, otherwise uniform sampling is used",
)
parser.add_argument(
"--use_estimated_pscore",
type=strtobool,
default=False,
help="whether to use estimated pscore or not, otherwise ground-truth pscore is used",
)
parser.add_argument(
"--au_cdf_threshold",
type=float,
default=0.001,
help="threshold (the maximum error allowed, z_max) for AU-CDF",
)
parser.add_argument(
"--cvar_alpha",
type=int,
default=70,
help="the percentile used for calculating CVaR, should be in (0, 100)",
)
parser.add_argument(
"--campaign",
type=str,
default="men",
choices=["all", "men", "women"],
help="campaign name, men, women, or all.",
)
parser.add_argument(
"--is_full_obd",
type=strtobool,
default=False,
help="wheather to use the full size obd or not",
)
parser.add_argument(
"--sample_size",
type=int,
default=100000,
help="(maximum) sample size for dataset to be used in the experiment (should be more than 10000)",
)
parser.add_argument("--random_state", type=int, default=12345)
args = parser.parse_args()
print(args)
# configurations
n_seeds = args.n_seeds
use_random_search = args.use_random_search
use_estimated_pscore = args.use_estimated_pscore
au_cdf_threshold = args.au_cdf_threshold
cvar_alpha = args.cvar_alpha
campaign = args.campaign
obd_path = Path("./open_bandit_dataset/") if args.is_full_obd else None
sample_size = args.sample_size
random_state = args.random_state
np.random.seed(random_state)
# assertion
assert 0 < au_cdf_threshold
assert 0 < cvar_alpha < 100
assert 10000 <= sample_size
print("initializing experimental condition..")
# load dataset
dataset_ur = OpenBanditDataset(
behavior_policy="random", campaign=campaign, data_path=obd_path
)
dataset_ts = OpenBanditDataset(
behavior_policy="bts", campaign=campaign, data_path=obd_path
)
# obtain logged bandit feedback generated by the behavior policy
bandit_feedback_ur = dataset_ur.obtain_batch_bandit_feedback()
bandit_feedback_ts = dataset_ts.obtain_batch_bandit_feedback()
bandit_feedbacks = [bandit_feedback_ur, bandit_feedback_ts]
# define sample size to use
sample_size = min(
[sample_size, bandit_feedback_ur["n_rounds"], bandit_feedback_ts["n_rounds"]]
)
# obtain the ground-truth policy value
ground_truth_ur = OpenBanditDataset.calc_on_policy_policy_value_estimate(
behavior_policy="random", campaign=campaign, data_path=obd_path
)
ground_truth_ts = OpenBanditDataset.calc_on_policy_policy_value_estimate(
behavior_policy="bts", campaign=campaign, data_path=obd_path
)
# define policies
policy_ur = Random(
n_actions=dataset_ur.n_actions,
len_list=dataset_ur.len_list,
random_state=random_state,
)
policy_ts = BernoulliTS(
n_actions=dataset_ts.n_actions,
len_list=dataset_ts.len_list,
random_state=random_state,
is_zozotown_prior=True,
campaign=campaign,
)
# obtain action choice probabilities
action_dist_ur = policy_ur.compute_batch_action_dist(n_rounds=1000000)
action_dist_ts = policy_ts.compute_batch_action_dist(n_rounds=1000000)
# define evaluation policies
evaluation_policies = [
(ground_truth_ts, action_dist_ts),
(ground_truth_ur, action_dist_ur),
]
# regression models used in ope estimators
if use_random_search:
logistic_regression = RandomizedSearchCV(
LogisticRegression(),
ope_regression_rscv_hyperparams.logistic_regression_param,
random_state=random_state,
n_iter=5,
)
random_forest = RandomizedSearchCV(
RandomForest(),
ope_regression_rscv_hyperparams.random_forest_param,
random_state=random_state,
n_iter=5,
)
lightgbm = RandomizedSearchCV(
LightGBM(),
ope_regression_rscv_hyperparams.lightgbm_param,
random_state=random_state,
n_iter=5,
)
regression_models = [
logistic_regression,
random_forest,
lightgbm,
]
else: # uniform sampling
regression_models = [
LogisticRegression,
RandomForest,
LightGBM,
]
regression_model_hyperparams = {
LogisticRegression: ope_regression_uniform_hyperparams.logistic_regression_param,
RandomForest: ope_regression_uniform_hyperparams.random_forest_param,
LightGBM: ope_regression_uniform_hyperparams.lightgbm_param,
}
# initializing class
if use_estimated_pscore:
if use_random_search:
evaluator = InterpretableOPEEvaluator(
random_states=np.arange(n_seeds),
bandit_feedbacks=bandit_feedbacks,
evaluation_policies=evaluation_policies,
ope_estimators=ope_estimators,
ope_estimator_hyperparams=ope_estimator_hyperparams_,
regression_models=regression_models,
pscore_estimators=regression_models,
)
else: # uniform sampling
evaluator = InterpretableOPEEvaluator(
random_states=np.arange(n_seeds),
bandit_feedbacks=bandit_feedbacks,
evaluation_policies=evaluation_policies,
ope_estimators=ope_estimators,
ope_estimator_hyperparams=ope_estimator_hyperparams_,
regression_models=regression_models,
regression_model_hyperparams=regression_model_hyperparams,
pscore_estimators=regression_models,
pscore_estimator_hyperparams=regression_model_hyperparams,
)
else: # ground-truth pscore
if use_random_search:
evaluator = InterpretableOPEEvaluator(
random_states=np.arange(n_seeds),
bandit_feedbacks=bandit_feedbacks,
evaluation_policies=evaluation_policies,
ope_estimators=ope_estimators,
ope_estimator_hyperparams=ope_estimator_hyperparams_,
regression_models=regression_models,
)
else: # uniform sampling
evaluator = InterpretableOPEEvaluator(
random_states=np.arange(n_seeds),
bandit_feedbacks=bandit_feedbacks,
evaluation_policies=evaluation_policies,
ope_estimators=ope_estimators,
ope_estimator_hyperparams=ope_estimator_hyperparams_,
regression_models=regression_models,
regression_model_hyperparams=regression_model_hyperparams,
)
# estimate policy values
print("started experiment")
policy_value = evaluator.estimate_policy_value(sample_size=sample_size)
# calculate statistics
print("calculating statistics of estimators' performance..")
au_cdf = evaluator.calculate_au_cdf_score(threshold=au_cdf_threshold)
au_cdf_scaled = evaluator.calculate_au_cdf_score(
threshold=au_cdf_threshold, scale=True
)
cvar = evaluator.calculate_cvar_score(alpha=cvar_alpha)
cvar_scaled = evaluator.calculate_cvar_score(alpha=cvar_alpha, scale=True)
std = evaluator.calculate_variance(std=True)
std_scaled = evaluator.calculate_variance(scale=True, std=True)
mean = evaluator.calculate_mean()
mean_scaled = evaluator.calculate_mean(scale=True)
# rscv/uniform, estimated/ground-truth pscore option
if use_random_search:
if use_estimated_pscore:
option = "rscv_pscore_estimate"
else:
option = "rscv_pscore_true"
else:
if use_estimated_pscore:
option = "uniform_pscore_estimate"
else:
option = "uniform_pscore_true"
# save results of the evaluation of off-policy estimators in './logs/(option)' directory.
log_path = Path("./logs/" + option)
log_path.mkdir(exist_ok=True, parents=True)
print("the results will be saved in", log_path)
# save evaluator in order to change au_cdf_threshold and cvar_alpha afterwhile
f = open(log_path / "evaluator.pickle", "wb")
pickle.dump(evaluator, f)
f.close()
# save au_cdf
au_cdf_df = DataFrame()
au_cdf_df["estimator"] = list(au_cdf.keys())
au_cdf_df["AU-CDF"] = list(au_cdf.values())
au_cdf_df["AU-CDF(scaled)"] = list(au_cdf_scaled.values())
au_cdf_df.to_csv(
log_path / f"au_cdf_of_ope_estimators_threshold_{au_cdf_threshold}.csv"
)
# save cvar
cvar_df = DataFrame()
cvar_df["estimator"] = list(cvar.keys())
cvar_df["CVaR"] = list(cvar.values())
cvar_df["CVaR(scaled)"] = list(cvar_scaled.values())
cvar_df.to_csv(log_path / f"cvar_of_ope_estimators_alpha_{cvar_alpha}.csv")
# save variance
std_df = DataFrame()
std_df["estimator"] = list(std.keys())
std_df["std"] = list(std.values())
std_df["std(scaled)"] = list(std_scaled.values())
std_df.to_csv(log_path / "std_of_ope_estimators.csv")
# save mean
mean_df = DataFrame()
mean_df["estimator"] = list(mean.keys())
mean_df["mean"] = list(mean.values())
mean_df["mean(scaled)"] = list(mean_scaled.values())
mean_df.to_csv(log_path / "mean_of_ope_estimators.csv")
# printout result
print(au_cdf_df)
print(cvar_df)
print(std_df)
# save cdf plot
evaluator.visualize_cdf_aggregate(
fig_dir=log_path, fig_name="cdf_full.png", font_size=16
)
| 37.208092 | 115 | 0.696675 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,542 | 0.197452 |
388fccf224243f2ac97b177fee7c0abcccef5267 | 856 | py | Python | lab5/script/loadimg.py | rum2mojito/osdi2020 | d17a6b56e1fd25a8c5b0ccec5a897e0c1118365b | [
"MIT"
] | null | null | null | lab5/script/loadimg.py | rum2mojito/osdi2020 | d17a6b56e1fd25a8c5b0ccec5a897e0c1118365b | [
"MIT"
] | null | null | null | lab5/script/loadimg.py | rum2mojito/osdi2020 | d17a6b56e1fd25a8c5b0ccec5a897e0c1118365b | [
"MIT"
] | null | null | null | # coding=utf-8
import serial
import time
import os
KERNEL_PATH = './kernel9.img'


def serial_w(content):
    """Write *content* to the global serial port `ser`, then pause 1s so the
    receiver can drain its buffer.

    Bug fix: pySerial 3.x on Python 3 requires a bytes-like payload; the
    callers pass plain `str` commands (e.g. 'loadimg\\r'), which raised a
    TypeError. Encode str input here; bytes payloads (the kernel image) are
    written unchanged.

    NOTE(review): relies on `ser` being created in the __main__ block before
    the first call - confirm no other entry point imports this module.
    """
    if isinstance(content, str):
        content = content.encode()
    ser.write(content)
    time.sleep(1)
# Candidate serial devices: a pty (for QEMU testing) and real USB-serial hardware.
port1 = '/dev/pts/4'
port2 = '/dev/ttyUSB0'

if __name__ == "__main__":
    # Open the serial link to the bootloader and stream the kernel image.
    ser = serial.Serial(port=port1, baudrate=115200)
    with open(KERNEL_PATH, 'rb') as kernel_f:
        # cmd
        serial_w('loadimg\r')
        # addr (load address, hex, as expected by the bootloader)
        serial_w('90000\r')
        # kernel size
        k_size = os.path.getsize(KERNEL_PATH)
        serial_w(str(k_size) + '\r')
        # Send the whole image in one write; chunked transfer was abandoned
        # (dead commented-out code removed), and the redundant explicit
        # kernel_f.close() inside the `with` block and the unused
        # `kernel_size` variable were dropped.
        words = kernel_f.read()
        serial_w(words)
        serial_w('F\r')
        print('hi')
| 20.380952 | 52 | 0.551402 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.273364 |
38901046ae19be71310fe2a5d909316397231ac1 | 1,196 | py | Python | weather.py | Qu4ndo/Weather_API | 1405c69d796d0c2df47f71e829b99ecb48857921 | [
"MIT"
] | null | null | null | weather.py | Qu4ndo/Weather_API | 1405c69d796d0c2df47f71e829b99ecb48857921 | [
"MIT"
] | null | null | null | weather.py | Qu4ndo/Weather_API | 1405c69d796d0c2df47f71e829b99ecb48857921 | [
"MIT"
] | null | null | null | import configparser
import requests
import json
def get_callback():
    """Query OpenWeatherMap for the configured city and return the parsed
    JSON payload (a dict).

    Reads `API_key` and `city_name` from the [Basic-Configuration] section of
    config.txt.
    """
    # read the config.txt
    # Bug fix: the original passed an anonymous open() result to read_file()
    # and never closed it, leaking the file handle; use a `with` block.
    config = configparser.ConfigParser()
    with open(r'config.txt') as config_file:
        config.read_file(config_file)
    API_key = config.get('Basic-Configuration', 'API_key')
    city_name = config.get('Basic-Configuration', 'city_name')

    # API
    base_url = "http://api.openweathermap.org/data/2.5/weather?"
    Final_url = base_url + "q=" + city_name + "&appid=" + API_key

    # API call
    return requests.get(Final_url).json()
def convert_kelvin(ktemp):
    """Return *ktemp* (Kelvin) expressed in degrees Celsius."""
    absolute_zero_offset = 273.15
    celsius = ktemp - absolute_zero_offset
    return celsius
def convert_kmh(wind_ms):
    """Return a wind speed given in m/s converted to km/h."""
    ms_to_kmh_factor = 3.6
    return wind_ms * ms_to_kmh_factor
def values():
    """Fetch the current weather and return (temp_celsius, humidity, wind_kmh)."""
    # Pull the raw payload once, then convert the pieces we report.
    payload = get_callback()
    main_section = payload["main"]
    temp_celsius = convert_kelvin(main_section["temp"])
    humidity = main_section["humidity"]
    wind_kmh = convert_kmh(payload["wind"]["speed"])
    return temp_celsius, humidity, wind_kmh
if __name__=="__main__":
temp, humidity, wind = values()
# Print Data
print(int(temp), "C°")
print(humidity, "%")
print(int(wind), "km/h")
| 22.566038 | 65 | 0.658027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 366 | 0.305764 |
3891d2a8a93288efa2d7669ef1a5641d27143d72 | 19,970 | py | Python | src/documentos/tools/repair.py | TroyWilliams3687/documentos | 4a47feb5db5b03dd4ce8d2f45a13ff0a87987303 | [
"MIT"
] | null | null | null | src/documentos/tools/repair.py | TroyWilliams3687/documentos | 4a47feb5db5b03dd4ce8d2f45a13ff0a87987303 | [
"MIT"
] | null | null | null | src/documentos/tools/repair.py | TroyWilliams3687/documentos | 4a47feb5db5b03dd4ce8d2f45a13ff0a87987303 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# -----------
# SPDX-License-Identifier: MIT
# Copyright (c) 2021 Troy Williams
# uuid : 633f2088-bbe3-11eb-b9c2-33be0bb8451e
# author: Troy Williams
# email : troy.williams@bluebill.net
# date : 2021-05-23
# -----------
"""
The `repair` command has access to tools that can repair various
problems that could occur.
- bad-links
- relative links that don't point to the correct file
- section attributes
- ATX headers that are missing links
--dry-run
"""
# ------------
# System Modules - Included with Python
import hashlib
from pathlib import Path
from datetime import datetime
from difflib import get_close_matches
# ------------
# 3rd Party - From pip
import click
from rich.console import Console
console = Console()
# ------------
# Custom Modules
from ..documentos.common import (
relative_path,
search,
)
from ..documentos.document import (
MarkdownDocument,
search as md_search,
document_lookup,
)
from ..documentos.markdown_classifiers import MarkdownAttributeSyntax
# -------------
def find_broken_urls(
parent=None,
links=None,
):
"""
Examine the relative links for the MarkdownDocument object and
return a list contain links that don't have matches on the file
system.
Can work for images or relative links pointing to markdown files.
# Parameters
parent:Path
- The path of the parent folder to resolve links
links:list(tuple)
- A list of tuples containing:
- line number (0 based)
- dict
- 'url' - The URL portion of the markdown link
- The `url` key is the required and is the URL of the
relative link
# Return
a list of tuples that contains the problem link and line number.
item:
- line number (0 based)
- dict
- 'url' - The URL portion of the markdown link
"""
problems = []
for rurl in links:
# we only want the URL, not any section anchors
left, _, _ = rurl[1]["url"].partition("#")
file = parent.joinpath(left).resolve()
if not file.exists():
problems.append(rurl)
return problems
def classify_broken_urls(
    lookup=None,
    broken_urls=None,
):
    """
    Bucket the broken URLs by how confidently they can be repaired.

    # Parameters

    lookup:dict
        - A dictionary keyed by file name mapped to the list of
          MarkdownDocument objects sharing that name.

    broken_urls:list
        - The output of `find_broken_urls`: tuples of
          (line number, url dict).

    # Return

    A dictionary with the keys:

    - no_matches - no exact or close file name match exists
    - exact_match - exactly one file carries the referenced name
    - exact_matches - several files carry the referenced name
    - suggestions - no exact match, but close names were found

    Each key maps to a list of (broken url tuple, candidate list).
    """
    classified = {
        "no_matches": [],
        "suggestions": [],
        "exact_match": [],
        "exact_matches": [],
    }

    for broken in broken_urls:
        _, url_info = broken

        # Strip any '#' section anchor and keep only the file name.
        path_part, _, _ = url_info["url"].partition("#")
        name = Path(path_part).name

        if name in lookup:
            candidates = list(lookup[name])
            bucket = "exact_match" if len(candidates) == 1 else "exact_matches"
            classified[bucket].append((broken, candidates))
            continue

        # https://docs.python.org/3/library/difflib.html#difflib.get_close_matches
        # No direct hit - look for similarly named files instead.
        close_names = get_close_matches(name, lookup.keys(), cutoff=0.8)
        if close_names:
            candidates = [doc for close in close_names for doc in lookup[close]]
            classified["suggestions"].append((broken, candidates))
        else:
            # Neither a file match nor any suggestion - a dead end :(
            classified["no_matches"].append((broken, []))

    return classified
def display_classified_url(results, root=None):
    """
    Print every problem link of every document along with its
    candidate replacement files.

    # Parameters

    results:list
        - A list of (MarkdownDocument, problems) tuples where each
          problem is a ((line, url dict), matches) pair.

    root:Path
        - The path to the root of the document folder
    """
    for document, defects in results:
        rel_name = document.filename.relative_to(root)
        for (line_no, link), candidates in defects:
            console.print(f"File: {rel_name}")
            console.print(f'Line: {line_no} -> `{link["full"]}`')
            for position, candidate in enumerate(candidates, start=1):
                console.print(f"{position}. -> {candidate.filename.relative_to(root)}")
            console.print("")
def write_corrected_url(
    md=None,
    problems=None,
    root=None,
    dry_run=False,
):
    """
    Replace the broken URLs of *md* with their unique match and write
    the corrected document back to disk.

    # Parameters

    md:MarkdownDocument
        - The document we need to correct the URLs

    problems:list(dict, list)
        - dict - this is the same dict that was in the broken_urls list
        - list - the list of Path objects that match or are similar;
          only the first element is used and it may be either a
          MarkdownDocument or a plain Path.

    root:Path
        - The path to the root of the document folder

    dry_run:bool
        - When True the corrections are only printed; nothing is
          written to disk.
        - Default - False
    """
    console.print(f"File: {md.filename.relative_to(root)}")

    for defect, matches in problems:
        line, url = defect

        # Normalize the first candidate to a Path before building the
        # replacement URL.
        match = (
            matches[0].filename
            if isinstance(matches[0], MarkdownDocument)
            else matches[0]
        )  # assume pathlib.Path

        # Rebuild the URL relative to the folder holding this document.
        new_url = relative_path(
            md.filename.parent,
            match.parent,
        ).joinpath(match.name)

        # Replace only the path portion, keeping any `#` anchor intact.
        left, _, _ = url["url"].partition("#")
        new_line = md.contents[line].replace(left, str(new_url))

        console.print(f"Line: {line} - Replacing `{left}` -> `{new_url}`")

        md.contents[line] = new_line

    if dry_run:
        console.print("------DRY-RUN------")
    else:
        # NOTE(review): lines are written back verbatim - they appear
        # to keep their trailing newline; confirm in MarkdownDocument.
        with md.filename.open("w", encoding="utf-8") as fo:
            for line in md.contents:
                fo.write(line)
        console.print("Changes written...")
def display_and_fix_issues(results, root=None, dry_run=False):
    """
    Report every classified URL problem and automatically repair the
    ones that have a single exact match on the file system.

    # Parameters

    results:dict
        - The output of `classify_broken_urls`, aggregated per
          document: each key ('no_matches', 'suggestions',
          'exact_match', 'exact_matches') maps to a list of
          (MarkdownDocument, problems) tuples.

    root:Path
        - The path to the root of the document folder

    dry_run:bool
        - If true, display the repairs without writing any file.
        - Default - False
    """
    messages = {
        "no_matches": [
            "NO MATCHES",
            "The following files had no matches or any close matches within the system.",
        ],
        "suggestions": [
            "SUGGESTIONS",
            "The following files did not have any exact matches within the system but they had some close matches.",
        ],
        "exact_matches": [
            "EXACT MATCHES",
            "The following files have multiple exact matches within the system.",
        ],
        "exact_match": [
            "EXACT MATCHES",
            "The following files have a single, exact match within the system.",
        ],
    }

    # Display the files that had problems we can't repair automatically
    for key in (k for k in messages.keys() if k != "exact_match"):
        if results[key]:
            console.print("-" * 6)
            for msg in messages[key]:
                console.print(msg)
            console.print("")
            display_classified_url(results[key], root=root)

    # Display and repair the files we can fix
    key = "exact_match"
    if results[key]:
        console.print("-" * 6)
        for msg in messages[key]:
            console.print(msg)
        console.print("")
        for item in results[key]:
            md, problems = item
            write_corrected_url(
                md,
                problems,
                root=root,
                dry_run=dry_run,
            )
            console.print("")
        # NOTE(review): the summary below only prints on --dry-run;
        # confirm that this is intended.
        if dry_run:
            console.print(f"Exact Matches - {len(results[key])} files corrected!")
        console.print("-" * 6)
def find_missing_header_attributes(
    files=None,
    root=None,
    display_problems=False,
):
    """
    Scan the given documents for ATX headers without a section
    attribute.

    # Parameters

    files:list(MarkdownDocument)
        - The documents to search for missing header attributes.

    root:Path
        - The path to the root of the document folder

    display_problems:bool
        - If true, print each problem as it is found.
        - Default - False

    # Return

    A dictionary keyed by the MarkdownDocument objects that have
    missing attributes, mapped to the list of offending headers as
    (line number, header text) tuples.
    """
    syntax_rule = MarkdownAttributeSyntax()
    defects = {}

    for document in files:
        # document.headers -> {depth (1-6): [(line number, text), ...]}
        missing = [
            header
            for headers in document.headers.values()
            for header in headers
            if not syntax_rule.match(header[1])
        ]

        if display_problems:
            for line_no, text in missing:
                console.print(
                    f"MISSING ATTRIBUTE: `{document.filename.relative_to(root)}` - Line: {line_no} - `{text}`"
                )

        if missing:
            defects[document] = missing

    return defects
def repair_header_issues(
    issues,
    root=None,
    dry_run=False,
):
    """
    Add a unique section attribute to every ATX header that is missing
    one and write the modified documents back to disk.

    # Parameters

    issues:dict
        - A dictionary keyed by the MarkdownDocument object with header
          issues. It is mapped to a list of tuples (line number, header
          text)

    root:Path
        - The path to the root of the document folder

    dry_run:bool
        - If true, it will not write changes
        - Default - False
    """
    for md, problems in issues.items():
        console.print(f"File: {md.filename.relative_to(root)}")

        # Hash the file name/path with SHA256 and keep the first 10 hex
        # digits - just enough to make the section anchors unique when
        # the documents are merged into a single PDF. 10 hex digits are
        # 40 bits; by the birthday paradox roughly a million hashes are
        # needed before a collision reaches ~50% probability:
        # - https://gnugat.github.io/2018/06/15/short-identifier.html
        # - https://preshing.com/20110504/hash-collision-probabilities/
        # - https://en.wikipedia.org/wiki/Birthday_attack#Mathematics
        file_hash = (
            hashlib.sha256(str(md.filename).encode("utf-8")).hexdigest()[:10].lower()
        )

        # split the hash up into something easier to understand -
        # `xxx-xxx-xxxx`
        file_id = f"{file_hash[:3]}-{file_hash[3:6]}-{file_hash[6:]}"

        for i, item in enumerate(problems):
            line, _ = item
            section_attribute = f"{{#sec:{file_id}_{i}}}"

            new_line = md.contents[line].rstrip() + " " + section_attribute

            console.print(f"Line: {line} - Added Section Attribute: `{new_line}`")

            # BUG FIX: `rstrip()` also removed the line's trailing
            # newline. Since the contents are written back verbatim
            # below (no newline is added by `fo.write`), the patched
            # header used to be glued to the following line. Restore
            # the newline after the section attribute.
            md.contents[line] = new_line + "\n"

        console.print("")

        if dry_run:
            console.print("------DRY-RUN------")
        else:
            with md.filename.open("w", encoding="utf-8") as fo:
                # renamed from `line` to avoid shadowing the loop
                # variable above
                for out_line in md.contents:
                    fo.write(out_line)
            console.print("Changes written...")
@click.group("repair")
@click.option(
    "--dry-run",
    is_flag=True,
    help="List the changes that would be made without actually making any.",
)
@click.pass_context
def repair(*args, **kwargs):
    """
    \b
    Repair certain things within the Markdown documents. This will
    provide tools to deal with validation issues.
    # Usage
    $ docs --config=./en/config.common.yaml repair --dry-run links
    $ docs --config=./en/config.common.yaml repair links
    $ docs --config=./en/config.common.yaml repair --dry-run images
    $ docs --config=./en/config.common.yaml repair images
    $ docs --config=./en/config.common.yaml repair --dry-run headers --list
    $ docs --config=./en/config.common.yaml repair --dry-run headers
    $ docs --config=./en/config.common.yaml repair headers
    """
    # Extract the configuration file from the click context
    config = args[0].obj["cfg"]

    # Idiom fix: `dict.get` replaces the manual
    # `kwargs["dry_run"] if "dry_run" in kwargs else False` test.
    config["dry_run"] = kwargs.get("dry_run", False)

    # ----------------
    # Find all of the markdown files and lst files
    console.print("Searching for Markdown files...")
    config["md_files"] = md_search(root=config["documents.path"])
    console.print(f'{len(config["md_files"])} Markdown files were found...')
    console.print("")

    # `config` is mutated in place; the re-assignment keeps the intent
    # explicit for the sub-commands reading the context.
    args[0].obj["cfg"] = config
@repair.command("links")
@click.pass_context
def links(*args, **kwargs):
    """
    \b
    Examine all of the Markdown documents in the configuration folder.
    Determine if there are relative links that have a problem and
    attempt to fix them.
    - Only looks at Markdown Links of the form `[text](url)`
    - Only examines relative links
    - If it finds the correct file, and there is only one it can correct
    the link. If the link could be pointing to multiple files, it
    will not correct, but offer the suggestion of potential matches
    # Usage
    $ docs --config=./en/config.common.yaml repair --dry-run links
    """
    # Extract the configuration file from the click context
    config = args[0].obj["cfg"]

    build_start_time = datetime.now()

    # ------
    # Validate Markdown Files
    console.print("Processing Markdown File Links...")
    console.print("")

    # Map each file name to the documents that carry that name.
    lookup = document_lookup(config["md_files"])

    results = {
        "no_matches": [],
        "suggestions": [],
        "exact_match": [],
        "exact_matches": [],
    }

    # Classify the broken links of every document and aggregate the
    # non-empty buckets per document.
    for md in config["md_files"]:
        sorted_broken_urls = classify_broken_urls(
            lookup=lookup,
            broken_urls=find_broken_urls(
                md.filename.parent,
                md.relative_links(),
            ),
        )
        for key in results:
            if sorted_broken_urls[key]:
                results[key].append((md, sorted_broken_urls[key]))

    # Report everything; repair only the single-exact-match cases.
    display_and_fix_issues(
        results, root=config["documents.path"], dry_run=config["dry_run"]
    )

    console.print("")
    console.print("-" * 6)

    build_end_time = datetime.now()
    console.print(f"Started - {build_start_time}")
    console.print(f"Finished - {build_end_time}")
    console.print(f"Elapsed: {build_end_time - build_start_time}")
@repair.command("images")
@click.pass_context
def images(*args, **kwargs):
    """
    \b
    Examine the MarkdownDocument objects for broken relative image links
    and attempt to repair them.
    # Usage
    $ docs --config=./en/config.common.yaml repair --dry-run images
    $ docs --config=./en/config.common.yaml repair images
    """
    # Extract the configuration file from the click context
    config = args[0].obj["cfg"]

    build_start_time = datetime.now()

    # --------
    # Find the images
    images = list(
        search(
            root=config["documents.path"],
            extensions=(".png", ".gif", ".jpg", ".jpeg"),
        )
    )
    console.print(f"{len(images)} images were found...")
    console.print("")

    # 1. create a reverse look for the image names to their file paths
    reverse_image_lookup = {}
    for img in images:
        reverse_image_lookup.setdefault(img.name, []).append(img)

    results = {
        "no_matches": [],
        "suggestions": [],
        "exact_match": [],
        "exact_matches": [],
    }

    # Classify each document's broken image links into the buckets
    # above, keeping only the non-empty buckets per document.
    for md in config["md_files"]:
        sorted_broken_urls = classify_broken_urls(
            lookup=reverse_image_lookup,
            broken_urls=find_broken_urls(
                md.filename.parent,
                md.image_links(),
            ),
        )
        for key in results:
            if sorted_broken_urls[key]:
                results[key].append((md, sorted_broken_urls[key]))

    # Report everything; repair only the single-exact-match cases.
    display_and_fix_issues(
        results, root=config["documents.path"], dry_run=config["dry_run"]
    )

    # ----------
    console.print("")
    console.print("-" * 6)

    build_end_time = datetime.now()
    console.print(f"Started - {build_start_time}")
    console.print(f"Finished - {build_end_time}")
    console.print(f"Elapsed: {build_end_time - build_start_time}")
@repair.command("headers")
@click.option(
    "--list",
    is_flag=True,
    help="List the problem files as they are encountered.",
)
@click.pass_context
def headers(*args, **kwargs):
    """
    \b
    Examine all the MarkdownDocument objects for ATX headers that do not
    have a proper section attribute set. It can automatically add a
    section attribute.
    # Usage
    $ docs --config=./en/config.common.yaml repair --dry-run headers --list
    $ docs --config=./en/config.common.yaml repair headers
    """
    # Extract the configuration file from the click context
    config = args[0].obj["cfg"]

    build_start_time = datetime.now()

    # ----------
    console.print("Searching for missing header attributes...")
    console.print("")

    # `--list` controls whether each defect is echoed while scanning.
    problems = find_missing_header_attributes(
        files=config["md_files"],
        root=config["documents.path"],
        display_problems=kwargs["list"],
    )

    if len(problems) > 0:
        console.print("-" * 6)
        console.print(
            f'{len(problems)}/{len(config["md_files"])} files have missing attributes.'
        )

        # -----------
        # Add missing header section attributes
        repair_header_issues(
            problems, root=config["documents.path"], dry_run=config["dry_run"]
        )

    # ----------
    console.print("")
    console.print("-" * 6)

    build_end_time = datetime.now()
    console.print(f"Started - {build_start_time}")
    console.print(f"Finished - {build_end_time}")
    console.print(f"Elapsed: {build_end_time - build_start_time}")
| 26.733601 | 116 | 0.596645 | 0 | 0 | 0 | 0 | 6,556 | 0.328292 | 0 | 0 | 11,243 | 0.562994 |
3892505f0839bd49e611ef5e52c666213e0222fb | 29,432 | py | Python | connect/cli/plugins/product/export.py | vgrebenschikov/connect-cli | 5d2bffca8ed76060fd2337abf05ccee2d68c6e33 | [
"Apache-2.0"
] | null | null | null | connect/cli/plugins/product/export.py | vgrebenschikov/connect-cli | 5d2bffca8ed76060fd2337abf05ccee2d68c6e33 | [
"Apache-2.0"
] | null | null | null | connect/cli/plugins/product/export.py | vgrebenschikov/connect-cli | 5d2bffca8ed76060fd2337abf05ccee2d68c6e33 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# This file is part of the Ingram Micro Cloud Blue Connect connect-cli.
# Copyright (c) 2019-2021 Ingram Micro. All Rights Reserved.
import os
import json
from datetime import datetime
from urllib import parse
import requests
from click import ClickException
from openpyxl import Workbook
from openpyxl.styles import Alignment, Font, PatternFill
from openpyxl.styles.colors import Color, WHITE
from openpyxl.utils import quote_sheetname
from openpyxl.worksheet.datavalidation import DataValidation
from tqdm import trange
from connect.cli.core.constants import DEFAULT_BAR_FORMAT
from connect.cli.core.http import (
format_http_status,
handle_http_error,
)
from connect.cli.plugins.product.constants import PARAM_TYPES
from connect.cli.plugins.product.utils import (
get_col_headers_by_ws_type,
get_col_limit_by_ws_type,
get_json_object_for_param,
)
from connect.client import ClientError, ConnectClient, R
def _setup_cover_sheet(ws, product, location, client, media_path):
    """
    Fill the 'General Information' cover worksheet with the product
    master data and a dropdown of assignable categories.

    # Parameters

    ws:Worksheet
        - The cover worksheet to fill.
    product:dict
        - The product representation obtained from Connect.
    location:str
        - Base URL used to resolve the product icon.
    client:ConnectClient
        - Client used to retrieve the available categories.
    media_path:str
        - Folder where the product icon is stored.
    """
    ws.title = 'General Information'
    ws.column_dimensions['A'].width = 50
    ws.column_dimensions['B'].width = 180
    ws.merge_cells('A1:B1')

    # Banner cell.
    cell = ws['A1']
    cell.fill = PatternFill('solid', start_color=Color('1565C0'))
    cell.font = Font(sz=24, color=WHITE)
    cell.alignment = Alignment(horizontal='center', vertical='center')
    cell.value = 'Product information'

    for i in range(3, 13):
        ws[f'A{i}'].font = Font(sz=12)
        ws[f'B{i}'].font = Font(sz=12)

    ws['A3'].value = 'Account ID'
    ws['B3'].value = product['owner']['id']
    ws['A4'].value = 'Account Name'
    ws['B4'].value = product['owner']['name']
    ws['A5'].value = 'Product ID'
    ws['B5'].value = product['id']
    ws['A6'].value = 'Product Name'
    ws['B6'].value = product['name']
    ws['A7'].value = 'Export datetime'
    ws['B7'].value = datetime.now().isoformat()
    ws['A8'].value = 'Product Category'
    ws['B8'].value = product['category']['name']
    ws['A9'].value = 'Product Icon file name'
    ws['A9'].font = Font(sz=14)
    ws['B9'].value = f'{product["id"]}.{product["icon"].split(".")[-1]}'

    # Store the icon next to the workbook so it can be re-imported.
    _dump_image(
        f'{location}{product["icon"]}',
        f'{product["id"]}.{product["icon"].split(".")[-1]}',
        media_path,
    )

    ws['A10'].value = 'Product Short Description'
    ws['A10'].alignment = Alignment(
        horizontal='left',
        vertical='top',
    )
    ws['B10'].value = product['short_description']
    ws['B10'].alignment = Alignment(
        wrap_text=True,
    )
    ws['A11'].value = 'Product Detailed Description'
    ws['A11'].alignment = Alignment(
        horizontal='left',
        vertical='top',
    )
    ws['B11'].value = product['detailed_description']
    ws['B11'].alignment = Alignment(
        wrap_text=True,
    )
    ws['A12'].value = 'Embedding description'
    ws['B12'].value = product['customer_ui_settings']['description']
    ws['B12'].alignment = Alignment(
        wrap_text=True,
    )
    ws['A13'].value = 'Embedding getting started'
    ws['B13'].value = product['customer_ui_settings']['getting_started']
    ws['B13'].alignment = Alignment(
        wrap_text=True,
    )

    # Dropdown source data for the category cell (B8), parked in a far
    # column of this sheet.
    categories = client.categories.all()
    unassignable_cat = ['Cloud Services', 'All Categories']
    categories_list = [
        cat['name'] for cat in categories if cat['name'] not in unassignable_cat
    ]
    ws['AA1'].value = 'Categories'
    cat_row_idx = 2
    for cat in categories_list:
        ws[f'AA{cat_row_idx}'].value = cat
        cat_row_idx += 1

    # BUG FIX: names are written in rows 2 .. len(categories_list) + 1
    # (row 1 holds the 'Categories' label), so the range must end at
    # len(categories_list) + 1. It previously ended one row short,
    # silently dropping the last category from the dropdown.
    categories_validation = DataValidation(
        type='list',
        formula1=(
            f'{quote_sheetname("General Information")}'
            f'!$AA$2:$AA${len(categories_list) + 1}'
        ),
        allow_blank=False,
    )
    ws.add_data_validation(categories_validation)
    categories_validation.add('B8')
def _dump_image(image_location, image_name, media_path):
    """
    Download the image at *image_location* and store it as *image_name*
    inside the *media_path* folder.

    # Raises

    ClickException when the server does not answer with HTTP 200.
    """
    # BUG FIX: `requests.get` has no default timeout, so an
    # unresponsive media server used to hang the export forever.
    image = requests.get(image_location, timeout=60)
    if image.status_code == 200:
        with open(os.path.join(media_path, image_name), 'wb') as f:
            f.write(image.content)
    else:
        raise ClickException(f"Error obtaining image from {image_location}")
def _setup_ws_header(ws, ws_type=None):  # noqa: CCR001
    """
    Paint the header row of *ws*: grey fill, column titles and widths.

    # Parameters

    ws:Worksheet
        - The worksheet whose first row is initialized.
    ws_type:str
        - One of the known worksheet types ('items' when omitted); it
          drives both the column titles and the number of columns.
    """
    ws_type = ws_type or 'items'

    grey_fill = PatternFill('solid', Color('d3d3d3'))
    headers = get_col_headers_by_ws_type(ws_type)
    last_column = get_col_limit_by_ws_type(ws_type)

    # Columns that need to be wider than the default of 25 characters.
    wide_columns = {
        'params': {'JSON Properties': 100},
        'capabilities': {'Capability': 50},
        'static_links': {'Url': 100},
        'templates': {'Content': 100, 'Title': 50},
    }.get(ws_type, {})

    header_cells = ws['A1': f'{last_column}1']
    for cell in header_cells[0]:
        dimensions = ws.column_dimensions[cell.column_letter]
        dimensions.width = 25
        dimensions.auto_size = True
        cell.fill = grey_fill
        cell.value = headers[cell.column_letter]
        if cell.value in wide_columns:
            dimensions.width = wide_columns[cell.value]
def _calculate_commitment(item):
period = item.get('period')
if not period:
return '-'
commitment = item.get('commitment')
if not commitment:
return '-'
count = commitment['count']
if count == 1:
return '-'
multiplier = commitment['multiplier']
if multiplier == 'billing_period':
if period == 'monthly':
years = count // 12
return '{quantity} year{plural}'.format(
quantity=years,
plural='s' if years > 1 else '',
)
else:
return '{years} years'.format(
years=count,
)
# One-time
return '-'
def _fill_param_row(ws, row_idx, param):
    """
    Write one parameter to row *row_idx* of the parameters worksheet.
    """
    top_left = Alignment(horizontal='left', vertical='top')
    constraints = param['constraints']
    events = param['events']

    # Columns 1-11: plain values, all top-left aligned.
    plain_values = (
        param['id'],
        param['name'],
        '-',
        param['title'],
        param['description'],
        param['phase'],
        param['scope'],
        param['type'],
        constraints['required'] if constraints['required'] else '-',
        constraints['unique'] if constraints['unique'] else '-',
        constraints['hidden'] if constraints['hidden'] else '-',
    )
    for column, value in enumerate(plain_values, start=1):
        ws.cell(row_idx, column, value=value).alignment = top_left

    # Column 12: the JSON representation wraps inside the cell.
    ws.cell(
        row_idx, 12,
        value=get_json_object_for_param(param),
    ).alignment = Alignment(
        wrap_text=True,
    )

    # Columns 13-14: audit trail timestamps.
    ws.cell(row_idx, 13, value=events['created']['at']).alignment = top_left
    ws.cell(row_idx, 14, value=events.get('updated', {}).get('at')).alignment = top_left
def _fill_media_row(ws, row_idx, media, location, product, media_path):
    """
    Write one media asset to row *row_idx* and download its thumbnail.

    The *product* argument is unused but kept for signature
    compatibility with callers.
    """
    thumbnail_name = f'{media["id"]}.{media["thumbnail"].split(".")[-1]}'

    ws.cell(row_idx, 1, value=media['position'])
    ws.cell(row_idx, 2, value=media['id'])
    ws.cell(row_idx, 3, value='-')
    ws.cell(row_idx, 4, value=media['type'])
    ws.cell(row_idx, 5, value=thumbnail_name)

    # Store the thumbnail next to the workbook so it can be re-imported.
    _dump_image(
        f'{location}{media["thumbnail"]}',
        thumbnail_name,
        media_path,
    )

    # Only videos carry an external URL; images are referenced by file name.
    ws.cell(row_idx, 6, value='-' if media['type'] == 'image' else media['url'])
def _fill_template_row(ws, row_idx, template):
    """
    Write one template to row *row_idx* of the templates worksheet.
    """
    top_left = Alignment(horizontal='left', vertical='top')

    ws.cell(row_idx, 1, value=template['id']).alignment = top_left
    ws.cell(row_idx, 2, value=template['title']).alignment = top_left
    ws.cell(row_idx, 3, value='-').alignment = top_left
    ws.cell(row_idx, 4, value=template['scope']).alignment = top_left
    # Templates without an explicit type are fulfillment templates.
    ws.cell(
        row_idx, 5, value=template.get('type', 'fulfillment'),
    ).alignment = top_left
    # The body may span several lines - let it wrap inside the cell.
    ws.cell(row_idx, 6, value=template['body']).alignment = Alignment(
        wrap_text=True,
    )
    ws.cell(row_idx, 7, value=template['events']['created']['at']).alignment = top_left
    ws.cell(
        row_idx, 8, value=template['events'].get('updated', {}).get('at'),
    ).alignment = top_left
def _fill_action_row(ws, row_idx, action):
ws.cell(row_idx, 1, value=action['id'])
ws.cell(row_idx, 2, value=action['action'])
ws.cell(row_idx, 3, value='-')
ws.cell(row_idx, 4, value=action['name'])
ws.cell(row_idx, 5, value=action['title'])
ws.cell(row_idx, 6, value=action['description'])
ws.cell(row_idx, 7, value=action['scope'])
ws.cell(row_idx, 8, value=action['events']['created']['at'])
ws.cell(row_idx, 9, value=action['events'].get('updated', {}).get('at'))
def _fill_configuration_row(ws, row_idx, configuration, conf_id):
ws.cell(row_idx, 1, value=conf_id)
ws.cell(row_idx, 2, value=configuration['parameter']['id'])
ws.cell(row_idx, 3, value=configuration['parameter']['scope'])
ws.cell(row_idx, 4, value='-')
ws.cell(row_idx, 5, value=configuration['item']['id'] if 'item' in configuration else '-')
ws.cell(row_idx, 6, value=configuration['item']['name'] if 'item' in configuration else '-')
ws.cell(row_idx, 7, value=configuration['marketplace']['id'] if 'marketplace' in configuration else '-')
ws.cell(row_idx, 8,
value=configuration['marketplace']['name'] if 'marketplace' in configuration else '-')
if 'structured_value' in configuration:
value = configuration['structured_value']
value = json.dumps(value, indent=4, sort_keys=True)
ws.cell(row_idx, 9, value=value).alignment = Alignment(wrap_text=True)
elif 'value' in configuration:
ws.cell(row_idx, 9, value=configuration['value'])
else:
ws.cell(row_idx, 9, value='-')
def _fill_item_row(ws, row_idx, item):
    """
    Write one product item to row *row_idx* of the items worksheet.
    """
    # Normalize the billing period for display, e.g. 'years_2' -> '2 years'.
    period = item.get('period', 'monthly')
    if period.startswith('years_'):
        period = f'{period.rsplit("_")[-1]} years'

    values = (
        item['id'],
        item['mpn'],
        '-',
        item['display_name'],
        item['description'],
        item['type'],
        item['precision'],
        item['unit']['unit'],
        period,
        _calculate_commitment(item),
        item['status'],
        item['events']['created']['at'],
        item['events'].get('updated', {}).get('at'),
    )
    for column, value in enumerate(values, start=1):
        ws.cell(row_idx, column, value=value)
def _calculate_configuration_id(configuration):
conf_id = configuration['parameter']['id']
if 'item' in configuration and 'id' in configuration['item']:
conf_id = f'{conf_id}#{configuration["item"]["id"]}'
else:
conf_id = f'{conf_id}#'
if 'marketplace' in configuration and 'id' in configuration['marketplace']:
conf_id = f'{conf_id}#{configuration["marketplace"]["id"]}'
else:
conf_id = f'{conf_id}#'
return conf_id
def _dump_actions(ws, client, product_id, silent):
    """
    Fill the 'Actions' worksheet with every action of the product.

    # Parameters

    ws:Worksheet
        - The worksheet to fill.
    client:ConnectClient
        - Client used to retrieve the product actions.
    product_id:str
        - Id of the product being exported.
    silent:bool
        - Hide the progress bar when True.
    """
    _setup_ws_header(ws, 'actions')
    row_idx = 2
    actions = client.products[product_id].actions.all()
    count = actions.count()

    # Dropdowns for the import verb (column C) and the scope (column G).
    action_validation = DataValidation(
        type='list',
        formula1='"-,create,update,delete"',
        allow_blank=False,
    )
    scope_validation = DataValidation(
        type='list',
        formula1='"asset,tier1,tier2"',
        allow_blank=False,
    )
    if count > 0:
        ws.add_data_validation(action_validation)
        ws.add_data_validation(scope_validation)
        progress = trange(0, count, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
        for action in actions:
            progress.set_description(f'Processing action {action["id"]}')
            progress.update(1)
            _fill_action_row(ws, row_idx, action)
            action_validation.add(f'C{row_idx}')
            scope_validation.add(f'G{row_idx}')
            row_idx += 1
        progress.close()
    print()
def _dump_configuration(ws, client, product_id, silent):
    """
    Fill the 'Configuration' worksheet with every parameter
    configuration of the product.

    # Parameters

    ws:Worksheet
        - The worksheet to fill.
    client:ConnectClient
        - Client used to retrieve the configurations.
    product_id:str
        - Id of the product being exported.
    silent:bool
        - Hide the progress bar when True.
    """
    _setup_ws_header(ws, 'configurations')

    configurations = client.products[product_id].configurations.all()
    total = configurations.count()
    if total == 0:
        return

    # Dropdown for the import verb (column D).
    action_validation = DataValidation(
        type='list',
        formula1='"-,update,delete"',
        allow_blank=False,
    )
    ws.add_data_validation(action_validation)

    progress = trange(0, total, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
    for row_idx, configuration in enumerate(configurations, start=2):
        conf_id = _calculate_configuration_id(configuration)
        progress.set_description(f'Processing parameter configuration {conf_id}')
        progress.update(1)
        _fill_configuration_row(ws, row_idx, configuration, conf_id)
        action_validation.add(f'D{row_idx}')
    progress.close()
    print()
def _dump_parameters(ws, client, product_id, param_type, silent):
    """
    Fill a parameters worksheet with the product parameters belonging
    to the given phase.

    # Parameters

    ws:Worksheet
        - The worksheet to fill.
    client:ConnectClient
        - Client used to retrieve the parameters.
    product_id:str
        - Id of the product being exported.
    param_type:str
        - The parameter phase to export (e.g. 'ordering',
          'fulfillment' or 'configuration').
    silent:bool
        - Hide the progress bar when True.
    """
    _setup_ws_header(ws, 'params')
    rql = R().phase.eq(param_type)
    row_idx = 2
    params = client.products[product_id].parameters.filter(rql)
    count = params.count()
    if count == 0:
        # Product without params is strange, but may exist
        return

    # Dropdowns for the editable columns of the worksheet.
    action_validation = DataValidation(
        type='list',
        formula1='"-,create,update,delete"',
        allow_blank=False,
    )
    type_validation = DataValidation(
        type='list',
        formula1='"{params}"'.format(
            params=','.join(PARAM_TYPES),
        ),
        allow_blank=False,
    )
    ordering_fulfillment_scope_validation = DataValidation(
        type='list',
        formula1='"asset,tier1,tier2"',
        allow_blank=False,
    )
    configuration_scope_validation = DataValidation(
        type='list',
        formula1='"product,marketplace,item,item_marketplace"',
        allow_blank=False,
    )
    bool_validation = DataValidation(
        type='list',
        formula1='"True,-"',
        allow_blank=False,
    )
    ws.add_data_validation(action_validation)
    ws.add_data_validation(type_validation)
    ws.add_data_validation(ordering_fulfillment_scope_validation)
    ws.add_data_validation(configuration_scope_validation)
    ws.add_data_validation(bool_validation)

    progress = trange(0, count, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
    for param in params:
        progress.set_description(f'Processing {param_type} parameter {param["id"]}')
        progress.update(1)
        _fill_param_row(ws, row_idx, param)
        action_validation.add(f'C{row_idx}')
        # Configuration-phase parameters use a different scope vocabulary.
        if param['phase'] == 'configuration':
            configuration_scope_validation.add(f'G{row_idx}')
        else:
            ordering_fulfillment_scope_validation.add(f'G{row_idx}')
        type_validation.add(f'H{row_idx}')
        bool_validation.add(f'I{row_idx}')
        bool_validation.add(f'J{row_idx}')
        bool_validation.add(f'K{row_idx}')
        row_idx += 1
    progress.close()
    print()
def _dump_media(ws, client, product_id, silent, media_location, media_path):
    """
    Fill the 'Media' worksheet with the product media assets and
    download their thumbnails next to the workbook.

    # Parameters

    ws:Worksheet
        - The worksheet to fill.
    client:ConnectClient
        - Client used to retrieve the product media.
    product_id:str
        - Id of the product being exported.
    silent:bool
        - Hide the progress bar when True.
    media_location:str
        - Base URL used to resolve the thumbnails.
    media_path:str
        - Folder where the thumbnails are stored.
    """
    _setup_ws_header(ws, 'media')
    row_idx = 2
    medias = client.products[product_id].media.all()
    count = medias.count()

    # Dropdowns for the import verb (column C) and media type (column D).
    action_validation = DataValidation(
        type='list',
        formula1='"-,create,update,delete"',
        allow_blank=False,
    )
    type_validation = DataValidation(
        type='list',
        formula1='"image,video"',
        allow_blank=False,
    )
    if count > 0:
        ws.add_data_validation(action_validation)
        ws.add_data_validation(type_validation)
        progress = trange(0, count, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
        for media in medias:
            progress.set_description(f'Processing media {media["id"]}')
            progress.update(1)
            _fill_media_row(ws, row_idx, media, media_location, product_id, media_path)
            action_validation.add(f'C{row_idx}')
            type_validation.add(f'D{row_idx}')
            row_idx += 1
        progress.close()
    print()
def _dump_external_static_links(ws, product, silent):
    """
    Fill the static links worksheet with the download and documentation
    links defined in the product customer UI settings.

    # Parameters

    ws:Worksheet
        - The worksheet to fill.
    product:dict
        - The product representation obtained from Connect.
    silent:bool
        - Hide the progress bar when True.
    """
    _setup_ws_header(ws, 'static_links')
    row_idx = 2
    count = len(product['customer_ui_settings']['download_links'])
    count = count + len(product['customer_ui_settings']['documents'])

    # Dropdowns for the import verb (column C) and link type (column A).
    action_validation = DataValidation(
        type='list',
        formula1='"-,create,delete"',
        allow_blank=False,
    )
    link_type = DataValidation(
        type='list',
        formula1='"Download,Documentation"',
        allow_blank=False,
    )
    if count > 0:
        ws.add_data_validation(action_validation)
        ws.add_data_validation(link_type)
        progress = trange(0, count, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
        progress.set_description("Processing static links")
        for link in product['customer_ui_settings']['download_links']:
            progress.update(1)
            ws.cell(row_idx, 1, value='Download')
            ws.cell(row_idx, 2, value=link['title'])
            ws.cell(row_idx, 3, value='-')
            ws.cell(row_idx, 4, value=link['url'])
            action_validation.add(f'C{row_idx}')
            link_type.add(f'A{row_idx}')
            row_idx += 1
        for link in product['customer_ui_settings']['documents']:
            progress.update(1)
            ws.cell(row_idx, 1, value='Documentation')
            ws.cell(row_idx, 2, value=link['title'])
            ws.cell(row_idx, 3, value='-')
            ws.cell(row_idx, 4, value=link['url'])
            action_validation.add(f'C{row_idx}')
            link_type.add(f'A{row_idx}')
            row_idx += 1
        progress.close()
    print()
def _dump_capabilities(ws, product, silent):  # noqa: CCR001
    """Fill the capabilities worksheet with the product's capability flags.

    Writes one capability per row (rows 2-10): column A is the label,
    column B the action cell ('-'/'update'), column C the current value.
    List validations constrain every editable cell.
    """
    _setup_ws_header(ws, 'capabilities')
    # Single-step progress bar: the sheet is produced in one pass.
    progress = trange(0, 1, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
    progress.set_description("Processing product capabilities")
    ppu = product['capabilities']['ppu']
    capabilities = product['capabilities']
    tiers = capabilities['tiers']
    # Dropdown validations for the editable cells.
    action_validation = DataValidation(
        type='list',
        formula1='"-,update"',
        allow_blank=False,
    )
    ppu_validation = DataValidation(
        type='list',
        formula1='"Disabled,QT,TR,PR"',
        allow_blank=False,
    )
    disabled_enabled = DataValidation(
        type='list',
        formula1='"Disabled,Enabled"',
        allow_blank=False,
    )
    tier_validation = DataValidation(
        type='list',
        formula1='"Disabled,1,2"',
        allow_blank=False,
    )
    ws.add_data_validation(action_validation)
    ws.add_data_validation(ppu_validation)
    ws.add_data_validation(disabled_enabled)
    ws.add_data_validation(tier_validation)
    ws['A2'].value = 'Pay-as-you-go support and schema'
    ws['B2'].value = '-'
    ws['C2'].value = (ppu['schema'] if ppu else 'Disabled')
    ppu_validation.add(ws['C2'])
    ws['A3'].value = 'Pay-as-you-go dynamic items support'
    ws['B3'].value = '-'
    ws['C3'].value = (
        'Enabled' if ppu and 'dynamic' in ppu and ppu['dynamic'] else 'Disabled'
    )
    disabled_enabled.add(ws['C3'])
    ws['A4'].value = 'Pay-as-you-go future charges support'
    ws['B4'].value = '-'
    ws['C4'].value = (
        'Enabled' if ppu and 'future' in ppu and ppu['future'] else 'Disabled'
    )
    disabled_enabled.add(ws['C4'])
    ws['A5'].value = 'Consumption reporting for Reservation Items'
    ws['B5'].value = '-'
    progress.update(1)
    progress.close()
    print()

    def _get_reporting_consumption(reservation_cap):
        # 'Enabled' when the reservation capability carries a truthy 'consumption' flag.
        if 'consumption' in reservation_cap and reservation_cap['consumption']:
            return 'Enabled'
        return 'Disabled'
    ws['C5'].value = _get_reporting_consumption(capabilities['reservation'])
    disabled_enabled.add(ws['C5'])
    ws['A6'].value = 'Dynamic Validation of the Draft Requests'
    ws['B6'].value = '-'

    def _get_dynamic_validation_draft(capabilities_cart):
        # Bug fix: read the flag from the parameter.  The original closed over
        # the outer ``capabilities['cart']`` and ignored its argument's value.
        if 'validation' in capabilities_cart and capabilities_cart['validation']:
            return 'Enabled'
        return 'Disabled'
    ws['C6'].value = _get_dynamic_validation_draft(capabilities['cart'])
    disabled_enabled.add(ws['C6'])
    ws['A7'].value = 'Dynamic Validation of the Inquiring Form'
    ws['B7'].value = '-'

    def _get_validation_inquiring(capabilities_inquiring):
        if 'validation' in capabilities_inquiring and capabilities_inquiring['validation']:
            return 'Enabled'
        return 'Disabled'
    ws['C7'].value = _get_validation_inquiring(capabilities['inquiring'])
    disabled_enabled.add(ws['C7'])
    ws['A8'].value = 'Reseller Authorization Level'
    ws['B8'].value = '-'

    def _get_reseller_authorization_level(tiers):
        if tiers and 'configs' in tiers and tiers['configs']:
            return tiers['configs']['level']
        return 'Disabled'
    ws['C8'].value = _get_reseller_authorization_level(tiers)
    tier_validation.add(ws['C8'])
    ws['A9'].value = 'Tier Accounts Sync'
    ws['B9'].value = '-'
    ws['C9'].value = (
        'Enabled' if tiers and 'updates' in tiers and tiers['updates'] else 'Disabled'
    )
    disabled_enabled.add(ws['C9'])
    ws['A10'].value = 'Administrative Hold'
    ws['B10'].value = '-'

    def _get_administrative_hold(capabilities):
        if 'hold' in capabilities['subscription'] and capabilities['subscription']['hold']:
            return 'Enabled'
        return 'Disabled'
    ws['C10'].value = _get_administrative_hold(capabilities)
    disabled_enabled.add(ws['C10'])
    # Every action cell (column B, rows 2-10) gets the '-'/'update' dropdown.
    # The original ended with a second progress.update(1) after close(); tqdm
    # ignores updates on a closed bar, so that dead call has been removed.
    for idx in range(2, 11):
        action_validation.add(f'B{idx}')
def _dump_templates(ws, client, product_id, silent):
    """Fill the templates worksheet with the product's templates.

    One row per template starting at row 2; columns C, D and E get list
    validations for action, scope and type respectively.
    """
    _setup_ws_header(ws, 'templates')
    # Column letter -> dropdown validation applied to that column per row.
    validations = {
        'C': DataValidation(type='list', formula1='"-,create,update,delete"', allow_blank=False),
        'D': DataValidation(type='list', formula1='"asset,tier1,tier2"', allow_blank=False),
        'E': DataValidation(type='list', formula1='"fulfillment,inquire"', allow_blank=False),
    }
    templates = client.products[product_id].templates.all()
    total = templates.count()
    if total > 0:
        for validation in validations.values():
            ws.add_data_validation(validation)
        progress = trange(0, total, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
        row = 2
        for template in templates:
            progress.set_description(f'Processing template {template["id"]}')
            progress.update(1)
            _fill_template_row(ws, row, template)
            for column, validation in validations.items():
                validation.add(f'{column}{row}')
            row += 1
        progress.close()
    print()
def _dump_items(ws, client, product_id, silent):
    """Fill the items worksheet with the product's items.

    Raises ClickException when the product has no items.  One row per item
    starting at row 2; columns C, F, G, I and J get list validations for
    action, type, precision, period and commitment respectively.
    """
    _setup_ws_header(ws, 'items')
    items = client.products[product_id].items.all()
    total = items.count()
    if total == 0:
        raise ClickException(f"The product {product_id} doesn't have items.")
    action_dv = DataValidation(
        type='list', formula1='"-,create,update,delete"', allow_blank=False,
    )
    type_dv = DataValidation(
        type='list', formula1='"reservation,ppu"', allow_blank=False,
    )
    period_dv = DataValidation(
        type='list', formula1='"onetime,monthly,yearly,2 years,3 years,4 years,5 years"', allow_blank=False,
    )
    precision_dv = DataValidation(
        type='list', formula1='"integer,decimal(1),decimal(2),decimal(4),decimal(8)"', allow_blank=False,
    )
    commitment_dv = DataValidation(
        type='list', formula1='"-,1 year,2 years,3 years,4 years,5 years"', allow_blank=False,
    )
    for dv in (action_dv, type_dv, period_dv, precision_dv, commitment_dv):
        ws.add_data_validation(dv)
    progress = trange(0, total, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
    row = 2
    # Each validation guards a fixed column of the item row.
    column_map = (
        (action_dv, 'C'),
        (type_dv, 'F'),
        (precision_dv, 'G'),
        (period_dv, 'I'),
        (commitment_dv, 'J'),
    )
    for item in items:
        progress.set_description(f'Processing item {item["id"]}')
        progress.update(1)
        _fill_item_row(ws, row, item)
        for dv, column in column_map:
            dv.add(f'{column}{row}')
        row += 1
    progress.close()
    print()
def dump_product(api_url, api_key, product_id, output_file, silent, output_path=None):  # noqa: CCR001
    """Export a Connect product to an Excel workbook plus downloaded media.

    Creates ``<output_path>/<product_id>/`` (and a ``media`` subfolder),
    fetches the product through the Connect API and writes one worksheet per
    section (capabilities, media, templates, items, parameters, actions,
    configuration).  Returns the path of the written .xlsx file.

    Raises ClickException when the output path is invalid, the path is
    occupied by a regular file, or the product does not exist (HTTP 404).
    """
    # Resolve the product directory: default to CWD, otherwise require that
    # the user-supplied base path already exists.
    if not output_path:
        output_path = os.path.join(os.getcwd(), product_id)
    else:
        if not os.path.exists(output_path):
            raise ClickException(
                "Output Path does not exist",
            )
        output_path = os.path.join(output_path, product_id)
    media_path = os.path.join(output_path, 'media')
    # Default workbook name is "<product_id>.xlsx" inside the product folder.
    if not output_file:
        output_file = os.path.join(output_path, f'{product_id}.xlsx')
    else:
        output_file = os.path.join(output_path, output_file)
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    elif not os.path.isdir(output_path):
        raise ClickException(
            "Exists a file with product name but a directory is expected, please rename it",
        )
    if not os.path.exists(media_path):
        os.mkdir(media_path)
    try:
        client = ConnectClient(
            api_key=api_key,
            endpoint=api_url,
            use_specs=False,
            max_retries=3,
        )
        product = client.products[product_id].get()
        wb = Workbook()
        # Media URLs in the product are relative; prefix them with the API host.
        connect_api_location = parse.urlparse(api_url)
        media_location = f'{connect_api_location.scheme}://{connect_api_location.netloc}'
        _setup_cover_sheet(
            wb.active,
            product,
            media_location,
            client,
            media_path,
        )
        # One worksheet per product section, in a fixed order.
        _dump_capabilities(wb.create_sheet('Capabilities'), product, silent)
        _dump_external_static_links(wb.create_sheet('Embedding Static Resources'), product, silent)
        _dump_media(
            wb.create_sheet('Media'),
            client,
            product_id,
            silent,
            media_location,
            media_path,
        )
        _dump_templates(wb.create_sheet('Templates'), client, product_id, silent)
        _dump_items(wb.create_sheet('Items'), client, product_id, silent)
        _dump_parameters(
            wb.create_sheet('Ordering Parameters'),
            client,
            product_id,
            'ordering',
            silent,
        )
        _dump_parameters(
            wb.create_sheet('Fulfillment Parameters'),
            client,
            product_id,
            'fulfillment',
            silent,
        )
        _dump_parameters(
            wb.create_sheet('Configuration Parameters'),
            client,
            product_id,
            'configuration',
            silent,
        )
        _dump_actions(wb.create_sheet('Actions'), client, product_id, silent)
        _dump_configuration(wb.create_sheet('Configuration'), client, product_id, silent)
        wb.save(output_file)
    except ClientError as error:
        # 404 becomes a user-facing CLI error; other API errors are delegated.
        status = format_http_status(error.status_code)
        if error.status_code == 404:
            raise ClickException(f'{status}: Product {product_id} not found.')
        handle_http_error(error)
    return output_file
| 32.557522 | 108 | 0.628364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,949 | 0.202127 |
3892d5674879bbcf71468a4b3b615df537552e19 | 729 | py | Python | HackTheVote/2020/fileshare/cleaner.py | mystickev/ctf-archives | 89e99a5cd5fb6b2923cad3fe1948d3ff78649b4e | [
"MIT"
] | 1 | 2021-11-02T20:53:58.000Z | 2021-11-02T20:53:58.000Z | HackTheVote/2020/fileshare/cleaner.py | ruhan-islam/ctf-archives | 8c2bf6a608c821314d1a1cfaa05a6cccef8e3103 | [
"MIT"
] | null | null | null | HackTheVote/2020/fileshare/cleaner.py | ruhan-islam/ctf-archives | 8c2bf6a608c821314d1a1cfaa05a6cccef8e3103 | [
"MIT"
] | 1 | 2021-12-19T11:06:24.000Z | 2021-12-19T11:06:24.000Z | import os, time, shutil
def get_used_dirs():
    """Return the set of /tmp/fileshare.* paths that are the cwd of a live process."""
    in_use = set()
    for entry in os.listdir("/proc"):
        # Only numeric /proc entries are process directories.
        if not entry.isnumeric():
            continue
        try:
            cwd = os.path.realpath("/proc/%s/cwd" % entry)
        except:
            # Process may have exited, or cwd may be unreadable; skip it.
            continue
        if cwd.startswith("/tmp/fileshare."):
            in_use.add(cwd)
    return in_use
# Daemon loop: every 5 seconds, tear down any /tmp/fileshare.* directory
# that is no longer the working directory of a running process.
while True:
    try:
        # All candidate sandbox directories currently present in /tmp.
        dirs = ["/tmp/"+d for d in os.listdir("/tmp") if d.startswith("fileshare.")]
        used = get_used_dirs()
        for d in dirs:
            if d not in used:
                try:
                    # Detach the sandbox's proc mount before deleting the tree,
                    # so rmtree does not descend into a live mount point.
                    os.system("umount %s/proc"%d)
                    shutil.rmtree(d)
                except:
                    # Best effort per directory; move on to the next one.
                    pass
    except:
        # Never let a transient error kill the cleanup daemon.
        pass
    time.sleep(5)
| 25.137931 | 84 | 0.463649 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.108368 |
38930feb943e3f9cbebdb5281ab3fef2c1edeeab | 1,441 | py | Python | scripts/dataset-preparation/get_image_urls.py | byewokko/guessing-game | ffca7f68836803e1a2049488227306ec0963e65b | [
"MIT"
] | 2 | 2020-09-04T21:15:00.000Z | 2020-09-25T12:20:33.000Z | scripts/dataset-preparation/get_image_urls.py | byewokko/guessing-game | ffca7f68836803e1a2049488227306ec0963e65b | [
"MIT"
] | null | null | null | scripts/dataset-preparation/get_image_urls.py | byewokko/guessing-game | ffca7f68836803e1a2049488227306ec0963e65b | [
"MIT"
] | null | null | null | import os
import requests
from nltk.corpus import wordnet as wn
# Output directory for per-synset URL lists, and the ImageNet API endpoint
# template that returns the image URLs of a given synset id (wnid).
urldir = "urls"
geturls = "http://www.image-net.org/api/text/imagenet.synset.geturls?wnid={wnid}"
if not os.path.isdir(urldir):
    os.makedirs(urldir)
# For each concept in base_concepts.txt: look up its noun synsets in WordNet,
# fetch the ImageNet URL list for each synset, and save the list of the
# synset that has the most image URLs.
with open("base_concepts.txt") as fin:
    for line in fin:
        # Concept lines may carry an "_<suffix>"; only the base word is looked up.
        concept = line.strip().split("_")[0]
        print("===", concept)
        syns = wn.synsets(concept, pos=wn.NOUN)
        available = []
        for synset in syns:
            category = synset.lexname().split(".")[-1]
            name = synset.name().split(".")[0]
            offset = synset.offset()
            # WordNet offset formatted as an ImageNet wnid, e.g. n01234567.
            wnid = f"n{offset:08d}"
            print(f"{wnid}.{category}.{name}")
            r = requests.get(geturls.format(wnid=wnid))
            # A response without any newline carries no URL list for this synset.
            if "\n" not in r.text:
                continue
            urls = r.text.split()
            # Skip synsets with fewer than 100 image URLs.
            if len(urls) < 100:
                continue
            filename = os.path.join(urldir, f"{wnid}.{category}.{name}.{len(urls)}.txt")
            available.append((filename, len(urls), urls))
        if not available:
            continue
        # Keep the synset with the largest URL count.
        available.sort(key=lambda x: x[1], reverse=True)
        filename, _, urls = available[0]
        print(f"BEST: (unknown)")  # NOTE(review): f-string has no placeholder; presumably meant to print the chosen filename — confirm
        with open(filename, "w", encoding="utf-8") as fout:
            for url in urls:
                try:
                    print(url, file=fout)
                except Exception as e:
                    # Some URLs contain characters the output encoding cannot write.
                    print(type(e), url)
| 33.511628 | 88 | 0.528799 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.158917 |
3895e3d273191a5891b94d3dce87893bc4fae4bc | 14,665 | py | Python | common/database.py | santoshpanna/Discord-Bot | 4757a5899dede946a8e409604d230ddc77626d41 | [
"MIT"
] | null | null | null | common/database.py | santoshpanna/Discord-Bot | 4757a5899dede946a8e409604d230ddc77626d41 | [
"MIT"
] | null | null | null | common/database.py | santoshpanna/Discord-Bot | 4757a5899dede946a8e409604d230ddc77626d41 | [
"MIT"
] | null | null | null | import time
from . import common
from datetime import datetime
from pymongo import MongoClient, ASCENDING, errors
from _datetime import timedelta
# TODO
# - More error handling
# - Refactor
class Database:
    """Data-access layer over the Discord bot's MongoDB database.

    A single ``MongoClient`` is created at construction time from the URI in
    the bot configuration.  Mutating methods return values from
    ``common.STATUS`` (e.g. ``SUCCESS``, ``FAIL.UPDATE``, ``REDUNDANT``)
    instead of raising, so callers branch on the returned status.
    """

    def __init__(self):
        # Connect using the URI stored in the bot's configuration file.
        config = common.getConfig()
        client = MongoClient(config['DATABASE']['uri'])
        self.db = client.discordbot
        # Outcome constants shared with the rest of the code base.
        self.STATUS = common.STATUS

    ''' Services Start '''
    def getService(self, service):
        """Return the service document named ``service``, or None."""
        return self.db.services.find_one({'name': service})

    def upsertService(self, data):
        """Create the service described by ``data`` or update it (matched by name)."""
        service = self.getService(data['name'])
        if service:
            data['date_updated'] = common.getDatetimeIST()
            count = self.db.services.update_one({'_id': service['_id']}, {'$set': data}).modified_count
            return self.STATUS.SUCCESS.UPDATED if count > 0 else self.STATUS.FAIL.UPDATE
        else:
            data['date_created'] = common.getDatetimeIST()
            data['date_updated'] = common.getDatetimeIST()
            try:
                status = self.db.services.insert_one(data).acknowledged
                return self.STATUS.SUCCESS.INSERTED if status else self.STATUS.FAIL.INSERT
            except errors.DuplicateKeyError:
                return self.STATUS.FAIL.DUPLICATE

    def getChannelByQuery(self, query):
        """Return one guild/channel mapping document matching ``query``, or None."""
        return self.db.guild_channel_mapping.find_one(query)
    ''' Service End '''

    ''' Steam Start '''
    def insertUserSteam(self, steam):
        """Register a Steam account (object exposing ``as_64`` and ``community_url``)."""
        data = {}
        data['steam64'] = steam.as_64
        data['url'] = steam.community_url
        data['date_created'] = common.getDatetimeIST()
        data['date_updated'] = common.getDatetimeIST()
        try:
            status = self.db.steam.insert_one(data).acknowledged
            return self.STATUS.SUCCESS if status else self.STATUS.FAIL.INSERT
        except errors.DuplicateKeyError:
            return self.STATUS.FAIL.DUPLICATE
    ''' Steam End '''

    ''' Status Start '''
    def getCountEstimates(self):
        """Return approximate document counts for the main collections."""
        data = {}
        data['gamedeals'] = self.db.gamedeals.estimated_document_count()
        data['cracks'] = self.db.crackwatch.count_documents({'type': 'crack'})
        data['repacks'] = self.db.crackwatch.count_documents({'type': 'repack'})
        data['prices'] = self.db.price_deal_mapping.estimated_document_count()
        data['members'] = self.db.members.estimated_document_count()
        data['services'] = self.db.services.estimated_document_count()
        data['guilds'] = self.db.guilds.estimated_document_count()
        return data

    def updateBotStartTime(self):
        """Record the bot start time in the singleton status document."""
        status = self.db.status.find_one()
        if status:
            count = self.db.status.update_one(
                {'_id': status['_id']},
                {'$set': {
                    'botStartTime': common.getDatetimeIST()
                }}
            ).modified_count
            return self.STATUS.SUCCESS if count > 0 else self.STATUS.FAIL.UPDATE
        else:
            status = self.db.status.insert_one({'botStartTime': common.getDatetimeIST()}).acknowledged
            return self.STATUS.SUCCESS if status else self.STATUS.FAIL.INSERT

    def getStatus(self):
        """Return the singleton status document, or None."""
        return self.db.status.find_one()
    ''' Status End '''

    ''' Guild Start '''
    def getGuildsByService(self, service):
        """Return a cursor over guilds that activated ``service``."""
        return self.db.guilds.find({"services_activated": service})

    def getChannelsByService(self, query):
        """Return a cursor over guild/channel mappings matching ``query``."""
        return self.db.guild_channel_mapping.find(query)

    def upsertGuidInfo(self, data):
        """Create or update (matched by ``data['id']``) a guild document."""
        # Find if the guild already exists in the database.
        guild = self.db.guilds.find_one({'id': data['id']})
        if guild:
            data['date_updated'] = common.getDatetimeIST()
            count = self.db.guilds.update_one({'_id': guild['_id']}, {'$set': data}).modified_count
            return self.STATUS.SUCCESS if count > 0 else self.STATUS.FAIL.UPDATE
        else:
            data['date_created'] = common.getDatetimeIST()
            data['date_updated'] = common.getDatetimeIST()
            try:
                status = self.db.guilds.insert_one(data).acknowledged
                return self.STATUS.SUCCESS if status else self.STATUS.FAIL.INSERT
            except errors.DuplicateKeyError:
                return self.STATUS.FAIL.DUPLICATE

    def createChannelMapping(self, data):
        """Subscribe a guild channel to a service; REDUNDANT if already subscribed."""
        service = self.db.services.find_one({'name': data['service_name']})
        mapping = self.db.guild_channel_mapping.find_one({'guild_id': data['guild_id'], 'channel_id': data['channel_id']})
        if not mapping:
            # No mapping yet for this channel: create one with a single service.
            insert = {}
            insert['guild_id'] = data['guild_id']
            insert['channel_id'] = data['channel_id']
            insert['channel_name'] = data['channel_name']
            insert['service_ids'] = []
            insert['service_ids'].append(str(service['_id']))
            insert['date_created'] = common.getDatetimeIST()
            insert['date_updated'] = common.getDatetimeIST()
            status = self.db.guild_channel_mapping.insert_one(insert).acknowledged
            return self.STATUS.SUCCESS if status else self.STATUS.FAIL.INSERT
        else:
            if str(service['_id']) in mapping['service_ids']:
                return self.STATUS.REDUNDANT
            else:
                # Append the service to the channel's subscription list.
                update = {}
                update['service_ids'] = mapping['service_ids']
                update['service_ids'].append(str(service['_id']))
                update['date_updated'] = common.getDatetimeIST()
                count = self.db.guild_channel_mapping.update_one({'_id': mapping['_id']}, {'$set': update}).modified_count
                return self.STATUS.SUCCESS if count > 0 else self.STATUS.FAIL.UPDATE

    def deleteChannelMapping(self, data):
        """Unsubscribe a guild channel from a service; drop the mapping if empty."""
        service = self.db.services.find_one({'name': data['service_name']})
        mapping = self.db.guild_channel_mapping.find_one({'guild_id': data['guild_id'], 'channel_id': data['channel_id'], 'service_ids': str(service['_id'])})
        if not mapping:
            return self.STATUS.FAIL.NOT_FOUND
        else:
            if len(mapping['service_ids']) == 1:
                # Last subscription: remove the whole mapping document.
                count = self.db.guild_channel_mapping.delete_one({'guild_id': mapping['guild_id'], 'channel_id': mapping['channel_id']}).deleted_count
                return self.STATUS.SUCCESS if count > 0 else self.STATUS.FAIL.DELETE
            else:
                update = {}
                update['service_ids'] = mapping['service_ids']
                update['service_ids'].remove(str(service['_id']))
                update['date_updated'] = common.getDatetimeIST()
                count = self.db.guild_channel_mapping.update_one({'_id': mapping['_id']}, {'$set': update}).modified_count
                return self.STATUS.SUCCESS if count > 0 else self.STATUS.FAIL.UPDATE
    ''' Guild End '''

    ''' Game Deals Start '''
    def getGameDeal(self, data):
        """Return a game deal by URL (``data`` is the URL or a dict with 'url')."""
        if isinstance(data, str):
            return self.db.gamedeals.find_one({'url': data})
        elif isinstance(data, dict) and 'url' in data:
            return self.db.gamedeals.find_one({'url': data['url']})
        else:
            return self.STATUS.FAIL.PARAMETER

    def upsertGameDeal(self, data):
        """Create or update (matched by URL) a game-deal document."""
        deal = self.getGameDeal(data['url'])
        if deal:
            data['date_updated'] = common.getDatetimeIST()
            count = self.db.gamedeals.update_one({'_id': deal['_id']}, {'$set': data}).modified_count
            return self.STATUS.SUCCESS.UPDATED if count > 0 else self.STATUS.FAIL.UPDATE
        else:
            data['date_created'] = common.getDatetimeIST()
            data['date_updated'] = common.getDatetimeIST()
            try:
                status = self.db.gamedeals.insert_one(data).acknowledged
                return self.STATUS.SUCCESS.INSERTED if status else self.STATUS.FAIL.INSERT
            except errors.DuplicateKeyError:
                return self.STATUS.FAIL.DUPLICATE

    def cleanGameDeal(self):
        """Delete game deals whose 'ttl' has passed."""
        count = self.db.gamedeals.delete_many({'ttl': {'$lte': common.getDatetimeIST()}}).deleted_count
        return self.STATUS.SUCCESS if count > 0 else self.STATUS.FAIL.DELETE
    ''' Game Deals End '''

    ''' Member Start '''
    def getMember(self, obj):
        """Return the member document for ``obj``, registering it when unknown.

        ``obj`` may be a Discord message-like object (exposing ``.author``),
        a member id as an int, or a numeric string.
        """
        # Resolve the member id.  The concrete types must be tested first:
        # the original ``isinstance(obj, object)`` check matched everything,
        # which made the str/int branches unreachable and crashed those
        # inputs on ``.author``.
        if isinstance(obj, str):
            member_id = int(obj)
        elif isinstance(obj, int):
            member_id = obj
        else:
            member_id = obj.author.id
        member = self.db.members.find_one({'id': member_id})
        if member:
            return member
        else:
            # Member is not registered yet: create a default record.
            data = {}
            data['id'] = member_id
            # Only message-like objects carry a display name.
            data['name'] = obj.author.name if hasattr(obj, 'author') else None
            data['priceTrackerLimit'] = 5
            data['isPremium'] = False
            data['date_created'] = common.getDatetimeIST()
            data['date_updated'] = common.getDatetimeIST()
            try:
                status = self.db.members.insert_one(data)
                return self.db.members.find_one({"_id": status.inserted_id}) if status.acknowledged else self.STATUS.FAIL.INSERT
            except errors.DuplicateKeyError:
                return self.STATUS.FAIL.DUPLICATE
    ''' Member End '''

    ''' Price Alert Start '''
    def insertPriceAlert(self, data):
        """Create a price-alert document with timestamps and an initial cooldown."""
        data['date_created'] = common.getDatetimeIST()
        data['date_updated'] = common.getDatetimeIST()
        data['cooldown'] = common.getDatetimeIST()
        try:
            status = self.db.price_deal_mapping.insert_one(data).acknowledged
            return self.STATUS.SUCCESS if status else self.STATUS.FAIL.INSERT
        except errors.DuplicateKeyError:
            return self.STATUS.FAIL.DUPLICATE

    def updatePriceAlerts(self, url, price):
        """Update the current price of every alert tracking ``url``.

        Pushes the per-alert cooldown 12 hours into the future so that
        notifications for the same deal are throttled.
        """
        status = self.db.price_deal_mapping.update_many(
            {'url': url},
            {'$set': {
                'current_price': price,
                'cooldown': common.getDatetimeIST() + timedelta(hours=12),
                'date_updated': common.getDatetimeIST()
            }}
        ).acknowledged
        return self.STATUS.SUCCESS if status else self.STATUS.FAIL.UPDATE

    def updatePriceAlert(self, data):
        """Update selected fields (alert_at / currency / cooldown) of one alert."""
        deal = self.db.price_deal_mapping.find_one({'member_id': data['member_id'], 'uuid': data['uuid']})
        if deal:
            update = None
            if 'alert_at' in data:
                update = self.db.price_deal_mapping.update_one({'_id': deal['_id']}, {'$set': {'alert_at': data['alert_at'], 'date_updated': common.getDatetimeIST()}}).acknowledged
            if 'currency' in data:
                update = self.db.price_deal_mapping.update_one({'_id': deal['_id']}, {'$set': {'currency': data['currency'], 'date_updated': common.getDatetimeIST()}}).acknowledged
            if 'cooldown' in data:
                update = self.db.price_deal_mapping.update_one({'_id': deal['_id']}, {'$set': {'cooldown': data['cooldown'], 'date_updated': common.getDatetimeIST()}}).acknowledged
            # Only the result of the last applied update is reported.
            return self.STATUS.SUCCESS if update else self.STATUS.FAIL.UPDATE
        return self.STATUS.FAIL.NOT_FOUND

    def deletePriceAlert(self, data):
        """Delete the alert identified by member_id + uuid."""
        count = self.db.price_deal_mapping.delete_one({'member_id': data['member_id'], 'uuid': data['uuid']}).deleted_count
        return self.STATUS.SUCCESS if count > 0 else self.STATUS.FAIL.DELETE

    def getPriceAlert(self, data=None):
        """Return price alerts: all, by member id, or paginated via limit/offset.

        Returns None implicitly for any other argument shape.
        """
        if not data:
            return self.db.price_deal_mapping.find()
        if isinstance(data, int):
            return self.db.price_deal_mapping.find({'member_id': data})
        elif isinstance(data, dict) and 'limit' in data and 'offset' in data:
            return self.db.price_deal_mapping.find().limit(data['limit']).skip(data['offset'])
    ''' Price Alert End '''

    ''' Crack Watch Start '''
    def getCrackwatch(self, data):
        """Return a crackwatch document by id (``data`` is the id or a dict with 'id')."""
        if isinstance(data, str):
            return self.db.crackwatch.find_one({'id': data})
        elif isinstance(data, dict) and 'id' in data:
            return self.db.crackwatch.find_one({'id': data['id']})
        else:
            return self.STATUS.FAIL.PARAMETER

    def upsertCrackwatch(self, data):
        """Create or update (matched by id) a crackwatch document."""
        crack = self.getCrackwatch(data)
        if crack:
            data['date_updated'] = common.getDatetimeIST()
            count = self.db.crackwatch.update_one({'_id': crack['_id']}, {'$set': data}).modified_count
            return self.STATUS.SUCCESS.UPDATED if count > 0 else self.STATUS.FAIL.UPDATE
        else:
            data['date_created'] = common.getDatetimeIST()
            data['date_updated'] = common.getDatetimeIST()
            try:
                status = self.db.crackwatch.insert_one(data).acknowledged
                return self.STATUS.SUCCESS.INSERTED if status else self.STATUS.FAIL.INSERT
            except errors.DuplicateKeyError:
                return self.STATUS.FAIL.DUPLICATE

    def cleanCrackwatch(self):
        """Delete crackwatch entries whose 'ttl' has passed."""
        count = self.db.crackwatch.delete_many({'ttl': {'$lte': common.getDatetimeIST()}}).deleted_count
        return self.STATUS.SUCCESS if count > 0 else self.STATUS.FAIL.DELETE
    ''' Crack Watch End '''

    ''' Patch Notes Start '''
    def getPatchnotes(self, data):
        """Return a patch-notes document for a service.

        When ``data`` carries an 'id', it is first prefixed with the service
        name (note: this mutates the caller's dict, and ``upsertPatchnotes``
        relies on the prefixed id being the one stored).  Without an 'id',
        the service's document with the smallest 'date' is returned.
        """
        if 'id' in data:
            data['id'] = data['service_name'] + str(data['id'])
            return self.db.patchnotes.find_one({'service_id': data['service_id'], 'id': data['id']})
        else:
            # Bug fix: ``find_one`` returns a document (or None), which has no
            # ``.sort`` method; the sort must be passed as an argument.
            return self.db.patchnotes.find_one(
                {'service_id': data['service_id']},
                sort=[('date', ASCENDING)],
            )

    def upsertPatchnotes(self, data):
        """Insert ``data`` as new patch notes; REDUNDANT when already present."""
        patch = self.getPatchnotes(data)
        if patch:
            return self.STATUS.REDUNDANT
        else:
            status = self.db.patchnotes.insert_one(data).acknowledged
            return self.STATUS.SUCCESS.INSERTED if status else self.STATUS.FAIL.INSERT
    ''' Patch Notes End '''
38960fd0dbb8b90644b976de856162ef1dd45d60 | 9,859 | py | Python | resources.py | luca-penasa/circle-craters | 62881f7fa7f032c8377dee130598ec7a93ccdae3 | [
"BSD-3-Clause"
] | 1 | 2021-02-01T13:59:29.000Z | 2021-02-01T13:59:29.000Z | resources.py | europlanet-gmap/circle-craters | 62881f7fa7f032c8377dee130598ec7a93ccdae3 | [
"BSD-3-Clause"
] | null | null | null | resources.py | europlanet-gmap/circle-craters | 62881f7fa7f032c8377dee130598ec7a93ccdae3 | [
"BSD-3-Clause"
] | 1 | 2020-10-21T13:50:34.000Z | 2020-10-21T13:50:34.000Z | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.15.0)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x01\x64\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x04\x00\x00\x00\x4a\x7e\xf5\x73\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x26\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x70\x9c\xba\x51\x3c\x00\x00\x00\x02\
\x62\x4b\x47\x44\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09\x70\x48\
\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\
\x00\x00\xdc\x49\x44\x41\x54\x38\xcb\xbd\x92\x41\x0e\x82\x30\x10\
\x45\x5f\x5c\xe0\x21\x24\x1e\xca\x04\x23\x91\x70\x26\x20\x26\xde\
\x41\xa3\x47\xc2\x34\xe0\x46\xbc\x00\x6c\x70\xa5\x9d\x99\x56\x97\
\xce\xac\xfe\xf4\xbf\x64\xfa\x5b\xf8\x77\x2d\x29\xb9\xe2\x98\x98\
\x70\x5c\x28\x58\xfe\xb2\xef\xb8\x33\x9b\xee\xd9\xc6\xcd\x0b\x8e\
\x81\xf9\xdd\x07\x16\x21\x20\xed\x0f\x2a\x6a\x06\x85\x04\xcb\x48\
\xfb\x0a\x80\x54\x21\x99\xbe\xaa\xdc\xbd\xfa\xcc\x1b\x31\xed\x48\
\x3c\x50\xaa\x8d\xeb\x28\x30\xb3\xf7\xc0\x55\x1d\x0c\xa4\x00\xac\
\x79\xaa\xf9\xd9\x03\xce\xa4\x32\xd0\xd0\x18\xfb\xcc\xcd\x03\xd3\
\xd7\x40\x65\x8f\x21\x60\xe3\xd4\x7a\xb4\x2b\xd9\x38\xad\x6e\x3d\
\x70\x89\xc6\x69\xf5\xc9\x03\x45\x34\x4e\xab\x73\xf9\x70\x7d\x24\
\x4e\xad\x9d\x7c\x38\xd8\x46\xe3\x94\x7a\x63\x7f\xd3\xe1\x67\xa4\
\x75\xec\x7b\x7f\x47\xaa\xd8\xf7\x06\xc8\xe8\x02\xb3\x0b\x97\x91\
\x95\xb0\xe7\xcc\x8d\x91\x91\x96\x13\xb9\xbe\xea\x3f\xea\x05\xa7\
\xf0\xfd\xeb\x14\xb8\xd5\x70\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x00\xda\
\x3c\
\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x20\x77\x69\x64\x74\x68\x3d\x22\x34\x38\
\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x34\x38\x22\x20\x76\x69\
\x65\x77\x42\x6f\x78\x3d\x22\x30\x20\x30\x20\x34\x38\x20\x34\x38\
\x22\x3e\x3c\x70\x61\x74\x68\x20\x64\x3d\x22\x4d\x32\x33\x2e\x39\
\x39\x20\x33\x37\x2e\x30\x38\x4c\x39\x2e\x32\x34\x20\x32\x35\x2e\
\x36\x31\x20\x36\x20\x32\x38\x2e\x31\x33\x6c\x31\x38\x20\x31\x34\
\x20\x31\x38\x2d\x31\x34\x2d\x33\x2e\x32\x36\x2d\x32\x2e\x35\x33\
\x2d\x31\x34\x2e\x37\x35\x20\x31\x31\x2e\x34\x38\x7a\x4d\x32\x34\
\x20\x33\x32\x6c\x31\x34\x2e\x37\x33\x2d\x31\x31\x2e\x34\x35\x4c\
\x34\x32\x20\x31\x38\x20\x32\x34\x20\x34\x20\x36\x20\x31\x38\x6c\
\x33\x2e\x32\x36\x20\x32\x2e\x35\x33\x4c\x32\x34\x20\x33\x32\x7a\
\x22\x2f\x3e\x3c\x2f\x73\x76\x67\x3e\
\x00\x00\x01\x58\
\x3c\
\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x20\x77\x69\x64\x74\x68\x3d\x22\x34\x38\
\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x34\x38\x22\x20\x76\x69\
\x65\x77\x42\x6f\x78\x3d\x22\x30\x20\x30\x20\x34\x38\x20\x34\x38\
\x22\x3e\x3c\x70\x61\x74\x68\x20\x64\x3d\x22\x4d\x34\x31\x2e\x30\
\x39\x20\x31\x30\x2e\x34\x35\x6c\x2d\x32\x2e\x37\x37\x2d\x33\x2e\
\x33\x36\x43\x33\x37\x2e\x37\x36\x20\x36\x2e\x34\x33\x20\x33\x36\
\x2e\x39\x33\x20\x36\x20\x33\x36\x20\x36\x48\x31\x32\x63\x2d\x2e\
\x39\x33\x20\x30\x2d\x31\x2e\x37\x36\x2e\x34\x33\x2d\x32\x2e\x33\
\x31\x20\x31\x2e\x30\x39\x6c\x2d\x32\x2e\x37\x37\x20\x33\x2e\x33\
\x36\x43\x36\x2e\x33\x34\x20\x31\x31\x2e\x31\x35\x20\x36\x20\x31\
\x32\x2e\x30\x33\x20\x36\x20\x31\x33\x76\x32\x35\x63\x30\x20\x32\
\x2e\x32\x31\x20\x31\x2e\x37\x39\x20\x34\x20\x34\x20\x34\x68\x32\
\x38\x63\x32\x2e\x32\x31\x20\x30\x20\x34\x2d\x31\x2e\x37\x39\x20\
\x34\x2d\x34\x56\x31\x33\x63\x30\x2d\x2e\x39\x37\x2d\x2e\x33\x34\
\x2d\x31\x2e\x38\x35\x2d\x2e\x39\x31\x2d\x32\x2e\x35\x35\x7a\x4d\
\x32\x34\x20\x33\x35\x4c\x31\x33\x20\x32\x34\x68\x37\x76\x2d\x34\
\x68\x38\x76\x34\x68\x37\x4c\x32\x34\x20\x33\x35\x7a\x4d\x31\x30\
\x2e\x32\x35\x20\x31\x30\x6c\x31\x2e\x36\x33\x2d\x32\x68\x32\x34\
\x6c\x31\x2e\x38\x37\x20\x32\x68\x2d\x32\x37\x2e\x35\x7a\x22\x2f\
\x3e\x3c\x2f\x73\x76\x67\x3e\
\x00\x00\x01\x58\
\x3c\
\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x20\x77\x69\x64\x74\x68\x3d\x22\x34\x38\
\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x34\x38\x22\x20\x76\x69\
\x65\x77\x42\x6f\x78\x3d\x22\x30\x20\x30\x20\x34\x38\x20\x34\x38\
\x22\x3e\x3c\x70\x61\x74\x68\x20\x64\x3d\x22\x4d\x33\x39\x2e\x36\
\x32\x20\x32\x39\x2e\x39\x38\x4c\x34\x32\x20\x32\x38\x2e\x31\x33\
\x6c\x2d\x32\x2e\x38\x35\x2d\x32\x2e\x38\x35\x2d\x32\x2e\x33\x38\
\x20\x31\x2e\x38\x35\x20\x32\x2e\x38\x35\x20\x32\x2e\x38\x35\x7a\
\x6d\x2d\x2e\x38\x39\x2d\x39\x2e\x34\x33\x4c\x34\x32\x20\x31\x38\
\x20\x32\x34\x20\x34\x6c\x2d\x35\x2e\x38\x33\x20\x34\x2e\x35\x33\
\x20\x31\x35\x2e\x37\x35\x20\x31\x35\x2e\x37\x35\x20\x34\x2e\x38\
\x31\x2d\x33\x2e\x37\x33\x7a\x4d\x36\x2e\x35\x35\x20\x32\x4c\x34\
\x20\x34\x2e\x35\x35\x6c\x38\x2e\x34\x34\x20\x38\x2e\x34\x34\x4c\
\x36\x20\x31\x38\x6c\x33\x2e\x32\x36\x20\x32\x2e\x35\x33\x4c\x32\
\x34\x20\x33\x32\x6c\x34\x2e\x31\x39\x2d\x33\x2e\x32\x36\x20\x32\
\x2e\x38\x35\x20\x32\x2e\x38\x35\x2d\x37\x2e\x30\x36\x20\x35\x2e\
\x34\x39\x4c\x39\x2e\x32\x34\x20\x32\x35\x2e\x36\x31\x20\x36\x20\
\x32\x38\x2e\x31\x33\x6c\x31\x38\x20\x31\x34\x20\x39\x2e\x38\x39\
\x2d\x37\x2e\x37\x4c\x34\x31\x2e\x34\x36\x20\x34\x32\x20\x34\x34\
\x20\x33\x39\x2e\x34\x35\x20\x36\x2e\x35\x35\x20\x32\x7a\x22\x2f\
\x3e\x3c\x2f\x73\x76\x67\x3e\
\x00\x00\x01\x13\
\x3c\
\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x20\x77\x69\x64\x74\x68\x3d\x22\x34\x38\
\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x34\x38\x22\x20\x76\x69\
\x65\x77\x42\x6f\x78\x3d\x22\x30\x20\x30\x20\x34\x38\x20\x34\x38\
\x22\x3e\x3c\x70\x61\x74\x68\x20\x64\x3d\x22\x4d\x32\x36\x20\x31\
\x34\x68\x2d\x34\x76\x38\x68\x2d\x38\x76\x34\x68\x38\x76\x38\x68\
\x34\x76\x2d\x38\x68\x38\x76\x2d\x34\x68\x2d\x38\x76\x2d\x38\x7a\
\x4d\x32\x34\x20\x34\x43\x31\x32\x2e\x39\x35\x20\x34\x20\x34\x20\
\x31\x32\x2e\x39\x35\x20\x34\x20\x32\x34\x73\x38\x2e\x39\x35\x20\
\x32\x30\x20\x32\x30\x20\x32\x30\x20\x32\x30\x2d\x38\x2e\x39\x35\
\x20\x32\x30\x2d\x32\x30\x53\x33\x35\x2e\x30\x35\x20\x34\x20\x32\
\x34\x20\x34\x7a\x6d\x30\x20\x33\x36\x63\x2d\x38\x2e\x38\x32\x20\
\x30\x2d\x31\x36\x2d\x37\x2e\x31\x38\x2d\x31\x36\x2d\x31\x36\x53\
\x31\x35\x2e\x31\x38\x20\x38\x20\x32\x34\x20\x38\x73\x31\x36\x20\
\x37\x2e\x31\x38\x20\x31\x36\x20\x31\x36\x2d\x37\x2e\x31\x38\x20\
\x31\x36\x2d\x31\x36\x20\x31\x36\x7a\x22\x2f\x3e\x3c\x2f\x73\x76\
\x67\x3e\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x0d\
\x0e\x99\xe8\x93\
\x00\x43\
\x00\x69\x00\x72\x00\x63\x00\x6c\x00\x65\x00\x43\x00\x72\x00\x61\x00\x74\x00\x65\x00\x72\x00\x73\
\x00\x05\
\x00\x6f\xa6\x53\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x73\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x12\
\x0b\x0d\xf4\x27\
\x00\x69\
\x00\x63\x00\x5f\x00\x6c\x00\x61\x00\x79\x00\x65\x00\x72\x00\x73\x00\x5f\x00\x34\x00\x38\x00\x70\x00\x78\x00\x2e\x00\x73\x00\x76\
\x00\x67\
\x00\x13\
\x0f\x34\x1f\x27\
\x00\x69\
\x00\x63\x00\x5f\x00\x61\x00\x72\x00\x63\x00\x68\x00\x69\x00\x76\x00\x65\x00\x5f\x00\x34\x00\x38\x00\x70\x00\x78\x00\x2e\x00\x73\
\x00\x76\x00\x67\
\x00\x18\
\x02\xe6\x9c\xa7\
\x00\x69\
\x00\x63\x00\x5f\x00\x6c\x00\x61\x00\x79\x00\x65\x00\x72\x00\x73\x00\x5f\x00\x63\x00\x6c\x00\x65\x00\x61\x00\x72\x00\x5f\x00\x34\
\x00\x38\x00\x70\x00\x78\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x1e\
\x0d\xce\xb3\x87\
\x00\x69\
\x00\x63\x00\x5f\x00\x61\x00\x64\x00\x64\x00\x5f\x00\x63\x00\x69\x00\x72\x00\x63\x00\x6c\x00\x65\x00\x5f\x00\x6f\x00\x75\x00\x74\
\x00\x6c\x00\x69\x00\x6e\x00\x65\x00\x5f\x00\x34\x00\x38\x00\x70\x00\x78\x00\x2e\x00\x73\x00\x76\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x02\x00\x00\x00\x03\
\x00\x00\x00\x34\x00\x02\x00\x00\x00\x04\x00\x00\x00\x05\
\x00\x00\x00\x44\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\xb0\x00\x00\x00\x00\x00\x01\x00\x00\x03\xa2\
\x00\x00\x00\x5a\x00\x00\x00\x00\x00\x01\x00\x00\x01\x68\
\x00\x00\x00\xe6\x00\x00\x00\x00\x00\x01\x00\x00\x04\xfe\
\x00\x00\x00\x84\x00\x00\x00\x00\x00\x01\x00\x00\x02\x46\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x02\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x34\x00\x02\x00\x00\x00\x04\x00\x00\x00\x05\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x44\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x75\x3f\xfb\xd6\x9f\
\x00\x00\x00\xb0\x00\x00\x00\x00\x00\x01\x00\x00\x03\xa2\
\x00\x00\x01\x75\x3f\xfb\xd6\x9f\
\x00\x00\x00\x5a\x00\x00\x00\x00\x00\x01\x00\x00\x01\x68\
\x00\x00\x01\x75\x3f\xfb\xd6\x9f\
\x00\x00\x00\xe6\x00\x00\x00\x00\x00\x01\x00\x00\x04\xfe\
\x00\x00\x01\x75\x3f\xfb\xd6\x9f\
\x00\x00\x00\x84\x00\x00\x00\x00\x00\x01\x00\x00\x02\x46\
\x00\x00\x01\x75\x3f\xfb\xd6\x9f\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
    """Register the embedded resource data with the Qt resource system."""
    QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    """Unregister the embedded resource data from the Qt resource system."""
    QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
# Register the resources as soon as this generated module is imported.
qInitResources()
| 46.947619 | 129 | 0.723299 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,234 | 0.936606 |
389677db37e2b64f293419c2cc65068b4e74938c | 6,422 | py | Python | analysis_util.py | googleinterns/invobs-data-assimilation | 36e0ff6319a596d99d6f4197bff0f00a38d299c4 | [
"Apache-2.0"
] | 16 | 2021-07-05T08:09:43.000Z | 2022-03-21T19:12:06.000Z | analysis_util.py | googleinterns/invobs-data-assimilation | 36e0ff6319a596d99d6f4197bff0f00a38d299c4 | [
"Apache-2.0"
] | null | null | null | analysis_util.py | googleinterns/invobs-data-assimilation | 36e0ff6319a596d99d6f4197bff0f00a38d299c4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import typing
import numpy as np
import jax.numpy as jnp
import xarray as xr
import seaborn as sns
from jax_cfd.data import xarray_utils as xru
import jax_cfd.base as cfd
from dynamical_system import Lorenz96, KolmogorovFlow
from util import jnp_to_aa_tuple, aa_tuple_to_jnp
# Shared plot palette (muted blue / red / yellow) used by analysis figures.
plot_colors = {
    'b': '#5A7D9F',
    'r': '#c23b22',
    'y': '#ffdb58',
}
def load_da_results(
    filenames: list,
    retained_variables: list,
    retained_attrs: list,
) -> xr.Dataset:
    """
    Loads data assimilation results for analysis.

    Args:
      filenames: list of files that contain the computed setups.
      retained_variables: variables to keep in the dataset for analysis.
      retained_attrs: attributes to keep in the dataset for analysis.
    Returns:
      Data assimilation data for analysis, combined along the new
      'init' and 'opt_space' dimensions.
    """
    ds_list = []
    # Collect coordinate labels in first-seen order. The previous set-based
    # collection produced an arbitrary, run-dependent coordinate ordering;
    # lists keep the result deterministic across runs.
    initialization_coords = []
    optspace_coords = []
    # get all data and extract relevant variables
    for fname in filenames:
        data = xr.open_dataset(fname)
        if data.attrs['da_init'] not in initialization_coords:
            initialization_coords.append(data.attrs['da_init'])
        if data.attrs['opt_space'] not in optspace_coords:
            optspace_coords.append(data.attrs['opt_space'])
        ds_list.append(data[retained_variables])
    # organize data in a nested (init x opt_space) grid for combine_nested
    num_init = len(initialization_coords)
    num_optspace = len(optspace_coords)
    ds_grid = np.empty((num_init, num_optspace), dtype=object)
    for ds in ds_list:
        i = initialization_coords.index(ds.attrs['da_init'])
        j = optspace_coords.index(ds.attrs['opt_space'])
        # keep only the requested attributes so combine_attrs='identical'
        # does not fail on bookkeeping attrs that differ per file
        ds.attrs = {attr: ds.attrs[attr] for attr in retained_attrs}
        ds_grid[i][j] = ds
    ds = (
        xr.combine_nested(
            ds_grid.tolist(),
            concat_dim=['init', 'opt_space'],
            combine_attrs='identical',
        )
        .assign_coords(
            {'init': initialization_coords, 'opt_space': optspace_coords},
        )
    )
    return ds
def compute_vorticity(ds: xr.Dataset, grid: cfd.grids.Grid) -> xr.Dataset:
    """
    Computes vorticity of a dataset containing Kolmogorov flow trajectories.

    Uses forward differences on the periodic grid (via roll), so no
    boundary handling is required.

    Args:
      ds: dataset containing variables with Kolmogorov flow trajectories.
      grid: grid over which to compute vorticity.
    Returns:
      Vorticity of the Kolmogorov flow trajectories.
    """
    ds = ds.assign_coords(xru.construct_coords(grid))
    dx = ds.x[1] - ds.x[0]
    dy = ds.y[1] - ds.y[0]
    # v=0 is the x-velocity component, v=1 the y-velocity component.
    u = ds.sel(v=0)
    v = ds.sel(v=1)
    dv_dx = (v.roll(x=-1, roll_coords=False) - v) / dx
    du_dy = (u.roll(y=-1, roll_coords=False) - u) / dy
    return dv_dx - du_dy
def integrate_kolmogorov_xr(
    dyn_sys: KolmogorovFlow,
    X0_da: xr.DataArray,
    n_steps: int,
) -> xr.DataArray:
    """
    Integrates Kolmogorov flow from and to an `xarray.DataArray`.

    Args:
      dyn_sys: Kolmogorov flow dynamical system.
      X0_da: initial states.
      n_steps: number of integration steps.
    Returns:
      Integrated trajectories, with a new time dimension 't' inserted
      just before the three trailing state dimensions.
    """
    states = jnp.asarray(X0_da.data)
    # The trailing three axes hold one flow state; any leading axes batch.
    state_shape = states.shape[-3:]
    batch_shape = states.shape[:-3]
    flat_states = states.reshape((-1,) + state_shape)
    trajectories = dyn_sys.batch_integrate(flat_states, n_steps, None, True)
    trajectories = trajectories.reshape(batch_shape + (n_steps,) + state_shape)
    out_dims = list(X0_da.dims)
    out_dims.insert(-3, 't')
    return xr.DataArray(trajectories, dims=out_dims, coords=X0_da.coords)
def compute_l1_error_kolmogorov(
    X: xr.Dataset,
    comparison_var: str,
    scale: float = 1,
) -> xr.Dataset:
    """
    Computes the scaled L1 error for Kolmogorov flow.

    Args:
      X: data to compute L1 error of.
      comparison_var: base variable to compute deviation from.
      scale: error scale.
    Returns:
      Scaled L1 error for every data type except `comparison_var`.
    """
    remaining = list(X.data_type.values)
    remaining.remove(comparison_var)
    reference = X.sel(data_type=comparison_var)
    # Sum absolute deviations over the spatial dimensions, then rescale.
    deviation = np.abs(X - reference).sum(dim=['x', 'y']) / scale
    return deviation.sel(data_type=remaining, drop=True)
def integrate_lorenz96_xr(
    dyn_sys: Lorenz96,
    X0_da: xr.DataArray,
    n_steps: int,
) -> xr.DataArray:
    """
    Integrates the Lorenz96 model from and to an `xarray.DataArray`.

    Args:
      dyn_sys: Lorenz96 dynamical system.
      X0_da: initial states.
      n_steps: number of integration steps.
    Returns:
      Integrated trajectories, with a new time dimension 't' inserted
      just before the trailing grid dimension.
    """
    states = X0_da.data
    # The last axis holds one Lorenz96 state; any leading axes batch.
    grid_size = states.shape[-1]
    batch_shape = states.shape[:-1]
    flat_states = states.reshape(-1, grid_size)
    flat_trajectories = dyn_sys.batch_integrate(flat_states, n_steps)
    trajectories = flat_trajectories.reshape(
        list(batch_shape) + [n_steps, grid_size])
    out_dims = list(X0_da.dims)
    out_dims.insert(-1, 't')
    return xr.DataArray(trajectories, dims=out_dims, coords=X0_da.coords)
def compute_l1_error_lorenz96(
    X: xr.Dataset,
    comparison_var: str,
    scale: float = 1,
) -> xr.Dataset:
    """
    Computes the scaled L1 error for the Lorenz96 model.

    Args:
      X: data to compute L1 error of.
      comparison_var: base variable to compute deviation from.
      scale: error scale.
    Returns:
      Scaled L1 error for every data type except `comparison_var`.
    """
    remaining = list(X.data_type.values)
    remaining.remove(comparison_var)
    reference = X.sel(data_type=comparison_var)
    # Sum absolute deviations over the spatial dimension, then rescale.
    deviation = np.abs(X - reference).sum(dim=['x']) / scale
    return deviation.sel(data_type=remaining, drop=True)
def adjust_row_labels(g: sns.FacetGrid, labels: list):
    """
    Replace the right-hand row labels of a seaborn FacetGrid `g` with
    `labels`, consumed in order.

    Note: `labels` is mutated (entries are popped from the front), one
    per axis that carries a row label.
    """
    for ax in g.axes.flat:
        if not ax.texts:
            continue
        # Seaborn stores the row label as the first text artist.
        old_label = ax.texts[0]
        x_pos, y_pos = old_label.get_unitless_position()
        ax.text(
            x_pos,
            y_pos,
            labels.pop(0),
            transform=ax.transAxes,
            va='center',
            rotation=-90,
        )
        # Remove the original label; the replacement was appended after it.
        ax.texts[0].remove()
38985d959385d3a2057ea5f7a76d2853b1c2d13c | 23,684 | py | Python | Model&Data/tf-VAEGAN/main.py | LiangjunFeng/Generative-Any-Shot-Learning | 693c4ab92f2eb04cc453c870782710a982f98e80 | [
"Apache-2.0"
] | null | null | null | Model&Data/tf-VAEGAN/main.py | LiangjunFeng/Generative-Any-Shot-Learning | 693c4ab92f2eb04cc453c870782710a982f98e80 | [
"Apache-2.0"
] | null | null | null | Model&Data/tf-VAEGAN/main.py | LiangjunFeng/Generative-Any-Shot-Learning | 693c4ab92f2eb04cc453c870782710a982f98e80 | [
"Apache-2.0"
] | null | null | null | import argparse
from train_images import run
# generalized ZSL
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 0 --generalized True > awa1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset SUN --few_train False --num_shots 0 --generalized True > sun.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset CUB --few_train False --num_shots 0 --generalized True > cub.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True > flo.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 0 --generalized True > awa2.log 2>&1 &
# naive feature
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 0 --generalized True --image_embedding res101_naive > awa2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset SUN --few_train False --num_shots 0 --generalized True --image_embedding res101_naive > sun.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset CUB --few_train False --num_shots 0 --generalized True --image_embedding res101_naive > cub.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_naive > flo.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset aPY --few_train False --num_shots 0 --generalized True --image_embedding res101_naive > apy.log 2>&1 &
# finetue feature
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 0 --generalized True --image_embedding res101_finetune > awa2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset SUN --few_train False --num_shots 0 --generalized True --image_embedding res101_finetune > sun.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset CUB --few_train False --num_shots 0 --generalized True --image_embedding res101_finetune > cub.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_finetune > flo.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset aPY --few_train False --num_shots 0 --generalized True --image_embedding res101_finetune > apy.log 2>&1 &
# reg feature
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_reg > flo.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset CUB --few_train False --num_shots 0 --generalized True --image_embedding res101_reg > cub.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset SUN --few_train False --num_shots 0 --generalized True --image_embedding res101_reg > sun.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 0 --generalized True --image_embedding res101_reg > awa2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset aPY --few_train False --num_shots 0 --generalized True --image_embedding res101_reg > apy.log 2>&1 &
# few shot
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 1 --generalized True --image_embedding res101_reg > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train False --num_shots 5 --generalized True --image_embedding res101_reg > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset FLO --few_train False --num_shots 10 --generalized True --image_embedding res101_reg > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train False --num_shots 20 --generalized True --image_embedding res101_reg > flo3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train True --num_shots 1 --generalized True --image_embedding res101_naive > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train True --num_shots 5 --generalized True --image_embedding res101_naive > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset FLO --few_train True --num_shots 10 --generalized True --image_embedding res101_naive > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train True --num_shots 20 --generalized True --image_embedding res101_naive > flo3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset CUB --few_train False --num_shots 1 --generalized True --image_embedding res101_reg > cub0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset CUB --few_train False --num_shots 5 --generalized True --image_embedding res101_reg > cub1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset CUB --few_train False --num_shots 10 --generalized True --image_embedding res101_reg > cub2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset CUB --few_train False --num_shots 20 --generalized True --image_embedding res101_reg > cub3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset CUB --few_train True --num_shots 1 --generalized True --image_embedding res101_naive > cub0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset CUB --few_train True --num_shots 5 --generalized True --image_embedding res101_naive > cub1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset CUB --few_train True --num_shots 10 --generalized True --image_embedding res101_naive > cub2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset CUB --few_train True --num_shots 20 --generalized True --image_embedding res101_naive > cub3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset SUN --few_train False --num_shots 1 --generalized True --image_embedding res101_reg > sun0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset SUN --few_train False --num_shots 5 --generalized True --image_embedding res101_reg > sun1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset SUN --few_train False --num_shots 10 --generalized True --image_embedding res101_reg > sun2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset SUN --few_train True --num_shots 1 --generalized True --image_embedding res101 > sun0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset SUN --few_train True --num_shots 5 --generalized True --image_embedding res101 > sun1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset SUN --few_train True --num_shots 10 --generalized True --image_embedding res101 > sun2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 1 --generalized True --image_embedding res101_naive > awa20.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 5 --generalized True --image_embedding res101_naive > awa21.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 10 --generalized True --image_embedding res101_naive > awa22.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 20 --generalized True --image_embedding res101_naive > awa23.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 1 --generalized True --image_embedding res101_naive > awa20.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 5 --generalized True --image_embedding res101_naive > awa21.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 10 --generalized True --image_embedding res101_naive > awa22.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 20 --generalized True --image_embedding res101_naive > awa23.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 1 --generalized True --image_embedding res101 > awa10.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 5 --generalized True --image_embedding res101 > awa11.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 10 --generalized True --image_embedding res101 > awa12.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 20 --generalized True --image_embedding res101 > awa13.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 1 --generalized True --image_embedding res101 > awa10.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 5 --generalized True --image_embedding res101 > awa11.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 10 --generalized True --image_embedding res101 > awa12.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 20 --generalized True --image_embedding res101 > awa13.log 2>&1 &
# few shot
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 1 --generalized True --image_embedding res101_reg > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train False --num_shots 5 --generalized True --image_embedding res101_reg > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset FLO --few_train False --num_shots 10 --generalized True --image_embedding res101_reg > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train False --num_shots 20 --generalized True --image_embedding res101_reg > flo3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train True --num_shots 1 --generalized True --image_embedding res101_naive > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train True --num_shots 5 --generalized True --image_embedding res101_naive > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset FLO --few_train True --num_shots 10 --generalized True --image_embedding res101_naive > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train True --num_shots 20 --generalized True --image_embedding res101_naive > flo3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset CUB --few_train False --num_shots 1 --generalized True --image_embedding res101_reg > cub0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset CUB --few_train False --num_shots 5 --generalized True --image_embedding res101_reg > cub1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset CUB --few_train False --num_shots 10 --generalized True --image_embedding res101_reg > cub2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset CUB --few_train False --num_shots 20 --generalized True --image_embedding res101_reg > cub3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset CUB --few_train True --num_shots 1 --generalized True --image_embedding res101_naive > cub0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset CUB --few_train True --num_shots 5 --generalized True --image_embedding res101_naive > cub1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset CUB --few_train True --num_shots 10 --generalized True --image_embedding res101_naive > cub2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset CUB --few_train True --num_shots 20 --generalized True --image_embedding res101_naive > cub3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset SUN --few_train False --num_shots 1 --generalized True --image_embedding res101_reg > sun0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset SUN --few_train False --num_shots 5 --generalized True --image_embedding res101_reg > sun1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset SUN --few_train False --num_shots 10 --generalized True --image_embedding res101_reg > sun2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset SUN --few_train True --num_shots 1 --generalized True --image_embedding res101 > sun0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset SUN --few_train True --num_shots 5 --generalized True --image_embedding res101 > sun1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset SUN --few_train True --num_shots 10 --generalized True --image_embedding res101 > sun2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 1 --generalized True --image_embedding res101_naive > awa20.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 5 --generalized True --image_embedding res101_naive > awa21.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 10 --generalized True --image_embedding res101_naive > awa22.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 20 --generalized True --image_embedding res101_naive > awa23.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 1 --generalized True --image_embedding res101_naive > awa20.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 5 --generalized True --image_embedding res101_naive > awa21.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 10 --generalized True --image_embedding res101_naive > awa22.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 20 --generalized True --image_embedding res101_naive > awa23.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 1 --generalized True --image_embedding res101 > awa10.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 5 --generalized True --image_embedding res101 > awa11.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 10 --generalized True --image_embedding res101 > awa12.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 20 --generalized True --image_embedding res101 > awa13.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 1 --generalized True --image_embedding res101 > awa10.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 5 --generalized True --image_embedding res101 > awa11.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 10 --generalized True --image_embedding res101 > awa12.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 20 --generalized True --image_embedding res101 > awa13.log 2>&1 &
# reg feature + att
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_reg --class_embedding att > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_reg --class_embedding att_naive > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_reg --class_embedding att_GRU > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_reg --class_embedding att_GRU_biased > flo3.log 2>&1 &
# few shot + class
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 1 --generalized True --image_embedding res101_reg --class_embedding att_GRU_biased > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train False --num_shots 5 --generalized True --image_embedding res101_reg --class_embedding att_GRU_biased > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset FLO --few_train False --num_shots 10 --generalized True --image_embedding res101_reg --class_embedding att_GRU_biased > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 20 --generalized True --image_embedding res101_reg --class_embedding att_GRU_biased > flo3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train True --num_shots 1 --generalized True --image_embedding res101_naive --class_embedding att_GRU_biased > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train True --num_shots 5 --generalized True --image_embedding res101_naive --class_embedding att_GRU_biased > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train True --num_shots 10 --generalized True --image_embedding res101_naive --class_embedding att_GRU_biased > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train True --num_shots 20 --generalized True --image_embedding res101_naive --class_embedding att_GRU_biased > flo3.log 2>&1 &
def str2bool(v):
    """Parse a command-line flag value into a bool.

    Accepts the usual truthy/falsy spellings case-insensitively and
    passes real booleans through unchanged (the standard argparse
    ``type=`` pattern, so a ``default=False`` never hits ``.lower()``).

    Args:
        v: raw argument value (str, or an already-parsed bool).
    Returns:
        The parsed boolean.
    Raises:
        argparse.ArgumentTypeError: if the value is not recognized.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
# Command-line interface: dataset selection plus the few/zero-shot and
# generalized-evaluation switches. Parsed at import time.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='FLO', help='FLO')
parser.add_argument('--few_train', default = False, type = str2bool, help='use few train samples')
parser.add_argument('--num_shots', type=int, default=5, help='the number of shots, if few_train, then num_shots is for train classes, else for test classes')
parser.add_argument('--generalized', default=False, type = str2bool, help='enable generalized zero-shot learning')
parser.add_argument('--image_embedding', default='res101', help='res101')
parser.add_argument('--class_embedding', default='att', help='att')
args = parser.parse_args()
class myArgs():
    """Hyper-parameter container for tf-VAEGAN training.

    Starts from generic defaults and then applies the per-dataset
    overrides (AWA1/AWA2, CUB, FLO, SUN, aPY).

    Args:
        args: parsed command-line namespace providing dataset, few_train,
            num_shots, generalized, image_embedding and class_embedding.
    """

    def __init__(self, args):
        # Options controlled from the command line.
        self.dataset = args.dataset
        self.few_train = args.few_train
        self.num_shots = args.num_shots
        self.generalized = args.generalized
        self.image_embedding = args.image_embedding
        self.class_embedding = args.class_embedding
        self.dataroot = "../data"
        # Generic defaults; each dataset branch below overrides a subset.
        self.syn_num = 100; self.preprocessing = False; self.standardization = False; self.workers = 8
        self.batch_size = 64; self.resSize = 2048; self.attSize = 1024; self.nz = 312; self.ngh = 4096
        self.ndh = 1024; self.nepoch = 2000; self.critic_iter = 5; self.lambda1 = 10; self.lambda2 = 10
        self.lr = 0.001; self.feed_lr = 0.0001; self.dec_lr = 0.0001; self.classifier_lr = 0.001
        self.beta1 = 0.5; self.cuda = True; self.encoded_noise = False; self.manualSeed = 0
        self.nclass_all = 200; self.validation = False; self.encoder_layer_sizes = [8192, 4096]
        self.decoder_layer_sizes = [4096, 8192]; self.gammaD = 1000; self.gammaG = 1000
        self.gammaG_D2 = 1000; self.gammaD2 = 1000; self.latent_size = 312; self.conditional = True
        self.a1 = 1.0; self.a2 = 1.0; self.recons_weight = 1.0; self.feedback_loop = 2
        self.freeze_dec = False
        if self.dataset in ["AWA1", "AWA2"]:
            self.gammaD = 10; self.gammaG = 10; self.encoded_noise = True
            self.manualSeed = 9182; self.preprocessing = True; self.cuda = True
            self.nepoch = 120; self.syn_num = 1800; self.ngh = 4096; self.ndh = 4096
            self.lambda1 = 10; self.critic_iter = 5; self.nclass_all = 50; self.batch_size = 64; self.nz = 85
            self.latent_size = 85; self.attSize = 85; self.resSize = 2048; self.lr = 0.00001; self.classifier_lr = 0.001
            self.recons_weight = 0.1; self.freeze_dec = True; self.feed_lr = 0.0001; self.dec_lr = 0.0001; self.feedback_loop = 2
            self.a1 = 0.01; self.a2 = 0.01
        elif self.dataset == "CUB":
            self.gammaD = 10; self.gammaG = 10; self.manualSeed = 3483; self.encoded_noise = True; self.preprocessing = True
            self.cuda = True; self.nepoch = 300; self.ngh = 4096
            self.ndh = 4096; self.lr = 0.0001; self.classifier_lr = 0.001; self.lambda1 = 10; self.critic_iter = 5
            self.nclass_all = 200; self.batch_size = 64; self.nz = 312; self.latent_size = 312; self.attSize = 312
            self.resSize = 2048; self.syn_num = 300; self.recons_weight = 0.01; self.a1 = 1; self.a2 = 1
            self.feed_lr = 0.00001; self.dec_lr = 0.0001; self.feedback_loop = 2
        elif self.dataset == "FLO":
            self.gammaD = 10; self.gammaG = 10; self.nclass_all = 102; self.latent_size = 1024; self.manualSeed = 806
            self.syn_num = 1200; self.preprocessing = True; self.nepoch = 500
            self.ngh = 4096; self.ndh = 4096; self.lambda1 = 10; self.critic_iter = 5; self.batch_size = 64
            self.nz = 1024; self.attSize = 1024; self.resSize = 2048; self.lr = 0.0001; self.classifier_lr = 0.001
            self.cuda = True; self.recons_weight = 0.01; self.feedback_loop = 2
            self.feed_lr = 0.00001; self.a1 = 0.5; self.a2 = 0.5; self.dec_lr = 0.0001
        elif self.dataset == "SUN":
            self.gammaD = 1; self.gammaG = 1; self.manualSeed = 4115; self.encoded_noise = True; self.preprocessing = True
            self.cuda = True; self.nepoch = 400
            self.ngh = 4096; self.ndh = 4096; self.lambda1 = 10; self.critic_iter = 5; self.batch_size = 64
            self.nz = 102; self.latent_size = 102; self.attSize = 102; self.lr = 0.001; self.classifier_lr = 0.0005
            self.syn_num = 400; self.nclass_all = 717; self.recons_weight = 0.01; self.a1 = 0.1; self.a2 = 0.01
            self.feedback_loop = 2; self.feed_lr = 0.0001
            if self.image_embedding == "res101_reg":
                # Fixed: the original wrote `self.self.lr = 0.0001`, which
                # raises AttributeError at runtime for SUN + res101_reg.
                self.lr = 0.0001; self.classifier_lr = 0.0001; self.recons_weight = 0.0001
        elif self.dataset == "aPY":
            self.gammaD = 10; self.gammaG = 10; self.nclass_all = 32; self.latent_size = 1024; self.manualSeed = 806
            self.syn_num = 1200; self.preprocessing = True; self.nepoch = 500
            self.ngh = 4096; self.ndh = 4096; self.lambda1 = 10; self.critic_iter = 5; self.batch_size = 64
            self.nz = 64; self.attSize = 64; self.resSize = 2048; self.lr = 0.0001; self.classifier_lr = 0.001
            self.cuda = True; self.recons_weight = 0.01; self.feedback_loop = 2
            self.feed_lr = 0.00001; self.a1 = 0.5; self.a2 = 0.5; self.dec_lr = 0.0001
# Build the full option set and derive the coupled hyper-parameters.
opt = myArgs(args)
# Keep both gradient-penalty weights identical.
opt.lambda2 = opt.lambda1
# Encoder consumes / decoder produces visual features of size resSize.
opt.encoder_layer_sizes[0] = opt.resSize
opt.decoder_layer_sizes[-1] = opt.resSize
# The latent code is sized like the attribute vector.
opt.latent_size = opt.attSize
# Fixed: labels and values were interleaved out of order in the original
# print ("a1: ", opt.a1, opt.a2, "a2: ", "feed_lr: ", opt.feed_lr).
print("lr: ", opt.lr, "classifier_lr: ", opt.classifier_lr,
      "recons_weight: ", opt.recons_weight, "a1: ", opt.a1,
      "a2: ", opt.a2, "feed_lr: ", opt.feed_lr)
run(opt)
| 92.515625 | 195 | 0.728973 | 4,565 | 0.192746 | 0 | 0 | 0 | 0 | 0 | 0 | 18,162 | 0.766847 |
389928928531253b490c07d0fc64099905fa3ddb | 13,514 | py | Python | src/charm.py | openstack-charmers/charm-ovn-central-operator | b64cd0ab974b4059c242c47a237d43b7872d9e1f | [
"Apache-2.0"
] | null | null | null | src/charm.py | openstack-charmers/charm-ovn-central-operator | b64cd0ab974b4059c242c47a237d43b7872d9e1f | [
"Apache-2.0"
] | null | null | null | src/charm.py | openstack-charmers/charm-ovn-central-operator | b64cd0ab974b4059c242c47a237d43b7872d9e1f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""OVN Central Operator Charm.
This charm provides OVN central services as part of an OpenStack deployment
"""
import ovn
import ovsdb as ch_ovsdb
import logging
from typing import List
import ops.charm
from ops.framework import StoredState
from ops.main import main
import advanced_sunbeam_openstack.charm as sunbeam_charm
import advanced_sunbeam_openstack.core as sunbeam_core
import advanced_sunbeam_openstack.relation_handlers as sunbeam_rhandlers
import advanced_sunbeam_openstack.config_contexts as sunbeam_ctxts
import advanced_sunbeam_openstack.ovn.container_handlers as ovn_chandlers
import advanced_sunbeam_openstack.ovn.config_contexts as ovn_ctxts
import advanced_sunbeam_openstack.ovn.relation_handlers as ovn_rhandlers
import charms.sunbeam_ovn_central_operator.v0.ovsdb as ovsdb
from charms.observability_libs.v0.kubernetes_service_patch \
import KubernetesServicePatch
logger = logging.getLogger(__name__)
# Workload container names used by the pebble handlers below.
OVN_SB_DB_CONTAINER = "ovn-sb-db-server"
OVN_NB_DB_CONTAINER = "ovn-nb-db-server"
OVN_NORTHD_CONTAINER = "ovn-northd"
# Containers that run an OVSDB server (ovn-northd excluded).
OVN_DB_CONTAINERS = [OVN_SB_DB_CONTAINER, OVN_NB_DB_CONTAINER]
class OVNNorthBPebbleHandler(ovn_chandlers.OVNPebbleHandler):
    """Pebble handler for the ovn-northd container."""

    @property
    def wrapper_script(self):
        """Path of the wrapper script that launches ovn-northd."""
        return '/root/ovn-northd-wrapper.sh'

    @property
    def service_description(self):
        """Human-readable name of the managed service."""
        return 'OVN Northd'

    def default_container_configs(self):
        """Container config files, plus the northd DB connection params."""
        configs = super().default_container_configs()
        configs.append(
            sunbeam_core.ContainerConfigFile(
                '/etc/ovn/ovn-northd-db-params.conf',
                'root',
                'root'))
        return configs
class OVNNorthBDBPebbleHandler(ovn_chandlers.OVNPebbleHandler):
    """Pebble handler for the Northbound ovsdb-server container."""

    @property
    def wrapper_script(self):
        """Path of the wrapper script that launches the NB DB server."""
        return '/root/ovn-nb-db-server-wrapper.sh'

    @property
    def service_description(self):
        """Human-readable name of the managed service."""
        return 'OVN North Bound DB'

    def default_container_configs(self):
        """Container config files, plus the NB cluster-join helper script."""
        configs = super().default_container_configs()
        configs.append(
            sunbeam_core.ContainerConfigFile(
                '/root/ovn-nb-cluster-join.sh',
                'root',
                'root'))
        return configs
class OVNSouthBDBPebbleHandler(ovn_chandlers.OVNPebbleHandler):
    """Pebble handler for the Southbound ovsdb-server container."""

    @property
    def wrapper_script(self):
        """Path of the wrapper script that launches the SB DB server."""
        return '/root/ovn-sb-db-server-wrapper.sh'

    @property
    def service_description(self):
        """Human-readable name of the managed service."""
        return 'OVN South Bound DB'

    def default_container_configs(self):
        """Container config files, plus the SB cluster-join helper script."""
        configs = super().default_container_configs()
        configs.append(
            sunbeam_core.ContainerConfigFile(
                '/root/ovn-sb-cluster-join.sh',
                'root',
                'root'))
        return configs
class OVNCentralOperatorCharm(sunbeam_charm.OSBaseOperatorCharm):
    """Charm for the OVN central services (NB/SB ovsdb servers, ovn-northd).

    The leader unit bootstraps both OVSDB clusters, configures the
    listeners and publishes the cluster ids over the peer relation;
    non-leader units wait for that data and then join the clusters.
    """

    _state = StoredState()

    def __init__(self, framework):
        super().__init__(framework)
        # Expose the NB (6643) and SB (6644) OVSDB ports on the
        # Kubernetes service so clients outside the pod can reach them.
        self.service_patcher = KubernetesServicePatch(
            self,
            [
                ('northbound', 6643),
                ('southbound', 6644),
            ]
        )

    def get_pebble_handlers(self):
        """Return the pebble handlers for the three OVN containers."""
        pebble_handlers = [
            OVNNorthBPebbleHandler(
                self,
                OVN_NORTHD_CONTAINER,
                'ovn-northd',
                self.container_configs,
                self.template_dir,
                self.openstack_release,
                self.configure_charm),
            OVNSouthBDBPebbleHandler(
                self,
                OVN_SB_DB_CONTAINER,
                'ovn-sb-db-server',
                self.container_configs,
                self.template_dir,
                self.openstack_release,
                self.configure_charm),
            OVNNorthBDBPebbleHandler(
                self,
                OVN_NB_DB_CONTAINER,
                'ovn-nb-db-server',
                self.container_configs,
                self.template_dir,
                self.openstack_release,
                self.configure_charm)]
        return pebble_handlers

    def get_relation_handlers(self, handlers=None) -> List[
            sunbeam_rhandlers.RelationHandler]:
        """Relation handlers for the service (peers and ovsdb-cms)."""
        handlers = handlers or []
        if self.can_add_handler('peers', handlers):
            self.peers = ovn_rhandlers.OVNDBClusterPeerHandler(
                self,
                'peers',
                self.configure_charm)
            handlers.append(self.peers)
        if self.can_add_handler('ovsdb-cms', handlers):
            self.ovsdb_cms = ovn_rhandlers.OVSDBCMSProvidesHandler(
                self,
                'ovsdb-cms',
                self.configure_charm)
            handlers.append(self.ovsdb_cms)
        handlers = super().get_relation_handlers(handlers)
        return handlers

    @property
    def config_contexts(self) -> List[sunbeam_ctxts.ConfigContext]:
        """Configuration contexts for the operator."""
        contexts = super().config_contexts
        contexts.append(
            ovn_ctxts.OVNDBConfigContext(self, "ovs_db"))
        return contexts

    def ovn_rundir(self):
        """Runtime directory holding the OVN control sockets."""
        return '/var/run/ovn'

    def get_pebble_executor(self, container_name):
        """Return a callable that runs a command in *container_name*.

        The returned callable takes the command as positional arguments,
        logs any stderr output and returns the command's stdout.
        """
        container = self.unit.get_container(
            container_name)

        def _run_via_pebble(*args):
            process = container.exec(list(args), timeout=5*60)
            out, warnings = process.wait_output()
            if warnings:
                for line in warnings.splitlines():
                    logger.warning('CMD Out: %s', line.strip())
            return out

        return _run_via_pebble

    def cluster_status(self, db, cmd_executor):
        """OVN version agnostic cluster_status helper.

        :param db: Database to operate on
        :type db: str
        :param cmd_executor: callable used to run cluster status commands
        :returns: Object describing the cluster status or None
        :rtype: Optional[ch_ovn.OVNClusterStatus]
        """
        try:
            # The charm will attempt to retrieve cluster status before OVN
            # is clustered and while units are paused, so we need to handle
            # errors from this call gracefully.
            return ovn.cluster_status(
                db,
                rundir=self.ovn_rundir(),
                cmd_executor=cmd_executor)
        except (ValueError) as e:
            logging.error('Unable to get cluster status, ovsdb-server '
                          'not ready yet?: {}'.format(e))
            return

    def configure_ovn_listener(self, db, port_map):
        """Create or update OVN listener configuration.

        Only the cluster leader performs the configuration; other units
        return without change.

        :param db: Database to operate on, 'nb' or 'sb'
        :type db: str
        :param port_map: Dictionary with port number and associated settings
        :type port_map: Dict[int,Dict[str,str]]
        :raises: ValueError
        """
        if db == 'nb':
            executor = self.get_pebble_executor(OVN_NB_DB_CONTAINER)
        elif db == 'sb':
            executor = self.get_pebble_executor(OVN_SB_DB_CONTAINER)
        else:
            # Fix: previously an unknown value fell through and the
            # unbound `executor` raised UnboundLocalError; raise the
            # documented ValueError instead.
            raise ValueError('Unknown database: {}'.format(db))
        status = self.cluster_status(
            'ovn{}_db'.format(db),
            cmd_executor=executor)
        if status and status.is_cluster_leader:
            logging.debug(
                'configure_ovn_listener is_cluster_leader {}'.format(db))
            connections = ch_ovsdb.SimpleOVSDB(
                'ovn-{}ctl'.format(db),
                cmd_executor=executor).connection
            for port, settings in port_map.items():
                logging.debug('port {} {}'.format(port, settings))
                # discover and create any non-existing listeners first
                for connection in connections.find(
                        'target="pssl:{}"'.format(port)):
                    logging.debug('Found port {}'.format(port))
                    break
                else:
                    logging.debug('Create port {}'.format(port))
                    executor(
                        'ovn-{}ctl'.format(db),
                        '--',
                        '--id=@connection',
                        'create', 'connection',
                        'target="pssl:{}"'.format(port),
                        '--',
                        'add', '{}_Global'.format(db.upper()),
                        '.', 'connections', '@connection')
                # set/update connection settings
                for connection in connections.find(
                        'target="pssl:{}"'.format(port)):
                    for k, v in settings.items():
                        logging.debug(
                            'set {} {} {}'
                            .format(str(connection['_uuid']), k, v))
                        connections.set(str(connection['_uuid']), k, v)

    def get_named_pebble_handlers(self, container_names):
        """Return the pebble handlers matching *container_names*."""
        # XXX Move to ASO
        return [
            h
            for h in self.pebble_handlers
            if h.container_name in container_names
        ]

    def configure_charm(self, event: ops.framework.EventBase) -> None:
        """Catchall handler to configure charm services.

        Renders configuration in all containers, then either bootstraps
        the clusters (leader) or joins the existing ones (non-leader).
        """
        if not self.unit.is_leader():
            # Non-leaders must wait until the leader has bootstrapped the
            # clusters and published the cluster ids.
            if not self.is_leader_ready():
                self.unit.status = ops.model.WaitingStatus(
                    "Waiting for leader to be ready")
                return
            missing_leader_data = [
                k for k in ['nb_cid', 'sb_cid']
                if not self.leader_get(k)]
            if missing_leader_data:
                logging.debug(f"missing {missing_leader_data} from leader")
                self.unit.status = ops.model.WaitingStatus(
                    "Waiting for data from leader")
                return
            logging.debug(
                "Remote leader is ready and has supplied all data needed")

        if not self.relation_handlers_ready():
            logging.debug("Aborting charm relations not ready")
            return

        # Render Config in all containers but init should *NOT* start
        # the service.
        for ph in self.pebble_handlers:
            if ph.pebble_ready:
                logging.debug(f"Running init for {ph.service_name}")
                ph.init_service(self.contexts())
            else:
                logging.debug(
                    f"Not running init for {ph.service_name},"
                    " container not ready")

        if self.unit.is_leader():
            # Start services in North/South containers on lead unit
            logging.debug("Starting services in DB containers")
            for ph in self.get_named_pebble_handlers(OVN_DB_CONTAINERS):
                ph.start_service()
            # Attempt to setup listeners etc
            self.configure_ovn()
            nb_status = self.cluster_status(
                'ovnnb_db',
                self.get_pebble_executor(OVN_NB_DB_CONTAINER))
            sb_status = self.cluster_status(
                'ovnsb_db',
                self.get_pebble_executor(OVN_SB_DB_CONTAINER))
            logging.debug("Telling peers leader is ready and cluster ids")
            self.set_leader_ready()
            self.leader_set({
                'nb_cid': str(nb_status.cluster_id),
                'sb_cid': str(sb_status.cluster_id),
            })
            self.unit.status = ops.model.ActiveStatus()
        else:
            # Join the clusters bootstrapped by the leader.
            logging.debug("Attempting to join OVN_Northbound cluster")
            container = self.unit.get_container(OVN_NB_DB_CONTAINER)
            process = container.exec(
                ['bash', '/root/ovn-nb-cluster-join.sh'], timeout=5*60)
            out, warnings = process.wait_output()
            if warnings:
                for line in warnings.splitlines():
                    logger.warning('CMD Out: %s', line.strip())
            logging.debug("Attempting to join OVN_Southbound cluster")
            container = self.unit.get_container(OVN_SB_DB_CONTAINER)
            process = container.exec(
                ['bash', '/root/ovn-sb-cluster-join.sh'], timeout=5*60)
            out, warnings = process.wait_output()
            if warnings:
                for line in warnings.splitlines():
                    logger.warning('CMD Out: %s', line.strip())
            logging.debug("Starting services in DB containers")
            for ph in self.get_named_pebble_handlers(OVN_DB_CONTAINERS):
                ph.start_service()
            # Attempt to setup listeners etc
            self.configure_ovn()
            self.unit.status = ops.model.ActiveStatus()

    def configure_ovn(self):
        """Configure the NB/SB ovsdb listeners from charm config."""
        inactivity_probe = int(
            self.config['ovsdb-server-inactivity-probe']) * 1000
        self.configure_ovn_listener(
            'nb', {
                self.ovsdb_cms.db_nb_port: {
                    'inactivity_probe': inactivity_probe,
                },
            })
        self.configure_ovn_listener(
            'sb', {
                self.ovsdb_cms.db_sb_port: {
                    'role': 'ovn-controller',
                    'inactivity_probe': inactivity_probe,
                },
            })
        self.configure_ovn_listener(
            'sb', {
                self.ovsdb_cms.db_sb_admin_port: {
                    'inactivity_probe': inactivity_probe,
                },
            })
class OVNCentralXenaOperatorCharm(OVNCentralOperatorCharm):
    # Release-specific subclass: pins the payload to OpenStack Xena.
    openstack_release = 'xena'
if __name__ == "__main__":
    # Entry point: hand control to the ops framework.
    # Note: use_juju_for_storage=True required per
    # https://github.com/canonical/operator/issues/506
    main(OVNCentralXenaOperatorCharm, use_juju_for_storage=True)
| 36.13369 | 76 | 0.576735 | 12,168 | 0.9004 | 0 | 0 | 778 | 0.05757 | 0 | 0 | 3,004 | 0.222288 |
3899d54508a0e95bd0473233c830cecd104741d0 | 9,101 | py | Python | main.py | tonymorony/trollbox_gui | 39fd0a60bbc9aed116c10be5f20d2d2539fb4f9f | [
"MIT"
] | 2 | 2018-11-02T15:42:33.000Z | 2018-11-18T00:51:46.000Z | main.py | tonymorony/trollbox_gui | 39fd0a60bbc9aed116c10be5f20d2d2539fb4f9f | [
"MIT"
] | null | null | null | main.py | tonymorony/trollbox_gui | 39fd0a60bbc9aed116c10be5f20d2d2539fb4f9f | [
"MIT"
] | null | null | null | from kivy.app import App
from kivy.config import Config
from kivy.uix.listview import ListItemButton
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.label import Label
from kivy.clock import Clock
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from functools import partial
from bitcoin.core import CoreMainParams
import bitcoin
# Config.set('graphics', 'width', '1366')
# Config.set('graphics', 'height', '768')
Config.set('kivy', 'window_icon', 'favicon.ico')
import rpclib
import chatlib
import bitcoinrpc
import ast
from bitcoin.wallet import P2PKHBitcoinAddress
from bitcoin.core import x
from datetime import datetime
class CoinParams(CoreMainParams):
    """Chain parameters so python-bitcoinlib builds addresses for this
    network instead of Bitcoin mainnet.

    NOTE(review): the magic bytes, port 7770 and base58 prefixes look
    like Komodo mainnet values — confirm against the target chain.
    """
    MESSAGE_START = b'\x24\xe9\x27\x64'  # p2p message magic bytes
    DEFAULT_PORT = 7770
    BASE58_PREFIXES = {'PUBKEY_ADDR': 60,
                       'SCRIPT_ADDR': 85,
                       'SECRET_KEY': 188}
class LoginPage(Screen):
    """Login screen: collects RPC credentials and opens a connection."""

    def verify_credentials(self):
        """Try to connect to the daemon with the entered credentials.

        On failure the input fields are cleared; on success the shared
        connection is stored on the running app and the UI switches to
        the "user" screen.
        """
        try:
            host = self.ids["rpcserver"].text
            user = self.ids["rpcuser"].text
            password = self.ids["rpcpassword"].text
            port = int(self.ids["port"].text)
            connection = rpclib.rpc_connect(user, password, host, port)
        except Exception as error:
            print(error)
            print("Not connected. Please check credentials")
            # TODO: show a popup instead, and keep the field contents
            for field in ("rpcserver", "rpcuser", "rpcpassword", "port"):
                self.ids[field].text = ''
        else:
            app = App.get_running_app()
            app.rpc_connection = connection
            app.is_connected = True
            self.manager.current = "user"
class UserPage(Screen):
    """Chat screen shown after a successful RPC login (see LoginPage)."""
    pass
class ScreenManagement(ScreenManager):
    """Top-level screen manager; behaviour is presumably defined in the
    kv layout file (not visible here)."""
    pass
class MessagesBoxLabel(Label):
    def update(self):
        # Show the currently selected room id.
        # NOTE(review): reads the class attribute directly rather than
        # App.get_running_app().active_room_id; both resolve to the same
        # value because the attribute is only ever assigned on the class.
        self.text = TrollboxCCApp.active_room_id
class RoomListItemButton(ListItemButton):
    def on_release(self):
        # setting active room id after room button release;
        # the last 64 characters of the button label are the room's
        # oracle txid (a 64-hex-character id).
        TrollboxCCApp.active_room_id = str(self.text[-64:])
#have to receive time delta for compatibility with kivy clock
class MessageUpdater(Widget):
    """Polls the active room's oracle and appends new messages to the UI."""

    def messages_checker(self, dt):
        """Clock callback: fetch and display messages newer than the last
        seen baton per publisher.

        :param dt: time delta supplied by the kivy Clock (unused).
        NOTE(review): heavy use of `break` makes the loop exit paths hard
        to follow; if `data_to_print` ends up empty and no exception is
        raised, the `while True` never breaks — confirm this cannot hang
        the Clock callback.
        """
        while True:
            if App.get_running_app().is_connected == False:
                break
            else:
                # getting oraclesinfo for active room
                oracles_info = rpclib.oracles_info(App.get_running_app().rpc_connection, App.get_running_app().active_room_id)
                if App.get_running_app().active_room_id == '':
                    print("Seems messages grabbing works")
                    break
                else:
                    # flushing it to not print previous messages
                    baton_returned = {}
                    # getting batons to print on each iteration
                    data_to_print = {}
                    # getting dictionary with current batontxid for each publisher
                    for entry in oracles_info["registered"]:
                        baton_returned[entry["publisher"]] = entry["batontxid"]
                    # updating batons for all publishers in app array
                    for publisher in baton_returned:
                        if publisher in App.get_running_app().current_baton:
                            # if publisher already here updating baton and adding it to print queue
                            if baton_returned[publisher] != App.get_running_app().current_baton[publisher]:
                                App.get_running_app().current_baton[publisher] = baton_returned[publisher]
                                try:
                                    data_to_print[publisher] = rpclib.oracles_samples(App.get_running_app().rpc_connection, App.get_running_app().active_room_id, baton_returned[publisher], "1")['samples'][0][0]
                                except IndexError:
                                    break
                            # if baton is the same as before there is nothing to update
                            # NOTE(review): this break stops scanning the
                            # remaining publishers as well — confirm intended.
                            else:
                                break
                        # if publisher not here adding it with latest baton and adding baton to print queue
                        else:
                            App.get_running_app().current_baton[publisher] = baton_returned[publisher]
                            try:
                                data_to_print[publisher] = rpclib.oracles_samples(App.get_running_app().rpc_connection, App.get_running_app().active_room_id, baton_returned[publisher], "1")['samples'][0][0]
                            except IndexError:
                                break
                    # finally printing messages
                    try:
                        for publisher in data_to_print:
                            # Sample payload is a stringified [timestamp, text] list.
                            message_list = ast.literal_eval(data_to_print[publisher].replace('\r','\\r').replace('\n','\\n'))
                            # Look up the publisher's nickname in the KV store.
                            kvsearch_result = rpclib.kvsearch(App.get_running_app().rpc_connection, publisher)
                            if 'value' in kvsearch_result:
                                # KV value layout: 88-char signature followed by the nickname.
                                addr = str(P2PKHBitcoinAddress.from_pubkey(x(publisher)))
                                signature = kvsearch_result['value'][:88]
                                value = kvsearch_result['value'][88:]
                                verifymessage_result = rpclib.verifymessage(App.get_running_app().rpc_connection, addr, signature, value)
                                if verifymessage_result:
                                    message_to_print = datetime.utcfromtimestamp(message_list[0]).strftime('%D %H:%M') + '[' + kvsearch_result['value'][88:] + '-' + publisher[0:10] + ']:' + message_list[1]
                                else:
                                    message_to_print = 'IMPROPER SIGNATURE' + datetime.utcfromtimestamp(message_list[0]).strftime('%D %H:%M') + '[' + kvsearch_result['value'][88:] + '-' + publisher[0:10] + ']:' + message_list[1]
                            else:
                                # No nickname registered: show the truncated pubkey only.
                                message_to_print = datetime.utcfromtimestamp(message_list[0]).strftime('%D %H:%M') + '[' + publisher[0:10] + ']:' + message_list[1]
                            App.get_running_app().messages.append(message_to_print)
                            App.get_running_app().root.ids.messagesview.adapter.data = App.get_running_app().messages
                            # NOTE(review): breaks after the first publisher — confirm intended.
                            break
                    except bitcoinrpc.authproxy.JSONRPCException as e:
                        print(App.get_running_app().active_room_id)
                        print(e)
                        break
class CreateRoomButton(Button):
    """Button that creates a new chat room backed by an oracle."""

    def create_room(self, room_name, room_description):
        """Create an "S"-type oracle for the room and broadcast it."""
        # Chat rooms are tagged with a "DCHAT " prefix in the oracle
        # description so they can be distinguished from other oracles.
        description = "DCHAT " + room_description
        try:
            raw_hex = rpclib.oracles_create(
                App.get_running_app().rpc_connection,
                room_name, description, "S")
            print(raw_hex)
        except Exception as error:
            print(error)
            return
        try:
            txid = rpclib.sendrawtransaction(
                App.get_running_app().rpc_connection, raw_hex["hex"])
            print(txid)
        except KeyError as error:
            # oracles_create returned an error payload without a "hex" key.
            print(error)
            print(raw_hex)
class CreateNicknameButton(Button):
    """Button that registers a chat nickname via the KV store."""

    def create_nickname(self, nickname, password):
        """Publish *nickname* (protected by *password*) and log the result."""
        result = chatlib.set_nickname(
            App.get_running_app().rpc_connection, nickname, password)
        print(result)
class SubscribeOnRoomButton(Button):
    """Button that funds a subscription to the active room."""

    def subscribe_room(self, utxos_amount):
        """Subscribe to the currently selected room with *utxos_amount* UTXOs."""
        app = App.get_running_app()
        chatlib.room_subscription(
            app.rpc_connection, str(app.active_room_id), utxos_amount)
class TrollboxCCApp(App):
    """Main Kivy application object holding the shared chat state."""

    title = "OraclesCC Trollbox"
    # Oracle txid of the currently selected room ('' = none selected).
    active_room_id = ''
    # Messages rendered in the message list view.
    messages = []
    # key: publisher, value: batontxid (last displayed per publisher)
    current_baton = {}
    # True once LoginPage has verified RPC credentials.
    is_connected = False
    #rpc_connection = None

    def get_rooms_list(self):
        # Returns '' until an RPC connection has been established.
        if App.get_running_app().is_connected == False:
            self.data = ''
        else:
            self.data = chatlib.get_chat_rooms(App.get_running_app().rpc_connection)
        return self.data

    def on_text(instance, value):
        # NOTE(review): first parameter plays the role of `self` here —
        # presumably bound as an event callback; confirm.
        print('The widget', instance, 'have:', value)

    def send_message(instance, inputid):
        # Publish the input field contents to the active room's oracle,
        # then clear the field.  NOTE(review): `instance` acts as `self`.
        new_message = chatlib.message_sending(App.get_running_app().rpc_connection, App.get_running_app().active_room_id, str(inputid.text))
        print(new_message)
        inputid.text = ''

    def callback_refresh_rooms(self, roomslist):
        roomslist.adapter.data = self.get_rooms_list()
        print("Room list succesfully refreshed")

    # checking selected chat room for new messages every 0.5 seconds
    # NOTE(review): scheduled at class-definition time rather than in
    # build()/on_start() — confirm this is intentional.
    message_updater = MessageUpdater()
    check_messages = Clock.schedule_interval(partial(MessageUpdater.messages_checker, message_updater), 0.5)
    check_messages()
if __name__ == "__main__":
    # Start the kivy event loop.
    TrollboxCCApp().run()
| 40.811659 | 224 | 0.600703 | 8,249 | 0.906384 | 0 | 0 | 0 | 0 | 0 | 0 | 1,399 | 0.153719 |
389b67fc68ad22caeff85063c1c4e146f3236d00 | 4,113 | py | Python | marinetrafficapi/vessel_data/VD02_vessel_particulars/models.py | arrrlo/marine-traffic-client-api | 1ac4b65010b1dc3f161940ee83815b341f9455ea | [
"MIT"
] | 15 | 2019-12-24T17:25:33.000Z | 2022-03-04T01:56:30.000Z | marinetrafficapi/vessel_data/VD02_vessel_particulars/models.py | arrrlo/marine-traffic-client-api | 1ac4b65010b1dc3f161940ee83815b341f9455ea | [
"MIT"
] | 27 | 2019-03-14T09:04:07.000Z | 2022-03-02T09:20:36.000Z | marinetrafficapi/vessel_data/VD02_vessel_particulars/models.py | arrrlo/marine-traffic-client-api | 1ac4b65010b1dc3f161940ee83815b341f9455ea | [
"MIT"
] | 3 | 2019-04-15T14:02:32.000Z | 2022-03-25T12:55:47.000Z | from marinetrafficapi.models import Model
from marinetrafficapi.fields import TextField, NumberField, RealNumberField
class VesselParticural(Model):
    """Get vessel particulars (including type, dimensions, ownership etc)."""

    # NOTE(review): "Particural" is a typo for "Particular"; renaming the
    # class would break existing imports, so the name is kept as-is.
    # Each field maps an API response key (index=) to a typed attribute.

    mmsi = NumberField(index='MMSI',
                       desc="Maritime Mobile Service Identity - \n"
                            "a nine-digit number sent in digital form \n"
                            "over a radio frequency that identifies \n"
                            "the vessel's transmitter station")

    imo = NumberField(index='IMO',
                      desc="International Maritime Organisation number - a \n"
                           "seven-digit number that uniquely identifies vessels")

    name = TextField(index='NAME',
                     desc="The Name of the subject vessel")

    build_place = TextField(index='PLACE_OF_BUILD',
                            desc="The place the subject vessel was built at")

    build_year = NumberField(index='BUILD',
                             desc="The year that the subject vessel was built")

    breadth_extreme = RealNumberField(index='BREADTH_EXTREME',
                                      desc="The extreme breadth (in metres) \n"
                                           "of the subject vessel")

    summer_dwt = NumberField(index='SUMMER_DWT',
                             desc="Deadweight - a measure (in metric tons) \n"
                                  "of how much weight a vessel can safely carry \n"
                                  "(excluding the vessel's own weight)")

    displacement_summer = NumberField(index='DISPLACEMENT_SUMMER',
                                      desc="Displacement - a measure of \n"
                                           "the vessel's weight")

    call_sign = TextField(index='CALLSIGN',
                          desc="A uniquely designated identifier for \n"
                               "the vessel's transmitter station")

    flag = TextField(index='FLAG',
                     desc="The flag of the subject vessel according \n"
                          "to AIS transmissions")

    draught = RealNumberField(index='DRAUGHT',
                              desc="The Draught (in metres x10) of the subject \n"
                                   "vessel according to the AIS transmissions")

    overall_length = RealNumberField(index='LENGTH_OVERALL',
                                     desc="The Overall Length (in metres) \n"
                                          "of the subject vessel")

    fuel_consumption = TextField(index='FUEL_CONSUMPTION',
                                 desc="The Fuel Consumption of the subject vessel")

    max_speed = RealNumberField(index='SPEED_MAX',
                                desc="The Maximum Operational Speed \n"
                                     "of the subject vessel")

    condition_speed = RealNumberField(index='SPEED_SERVICE',
                                      desc="The Speed that the vessel is \n"
                                           "designed to sail under certain \n"
                                           "conditions")

    wet_cargo_capacity = NumberField(index='LIQUID_OIL',
                                     desc="The Capacity (in cubic metres) \n"
                                          "of the wet cargo the vessel can carry")

    owner = TextField(index='OWNER',
                      desc="The Owning Company of the subject vessel \n"
                           "(null if the Owner and the Manager are the same)")

    manager = TextField(index='MANAGER',
                        desc="The Managing Company of the subject vessel \n"
                             "(null if the Owner and the Manager are the same)")

    vessel_type = TextField(index='VESSEL_TYPE',
                            desc="The specific type of the subject vessel")

    manager_owner = TextField(index='MANAGER_OWNER',
                              desc="The Owning/Managing Company (null \n"
                                   "if the Owner is different than the Manager)")
| 47.825581 | 83 | 0.518843 | 3,992 | 0.970581 | 0 | 0 | 0 | 0 | 0 | 0 | 1,791 | 0.435449 |
389cba084366f7307444b1fce740c72d5772c5a0 | 8,985 | py | Python | 01-tapsterbot/click-accuracy/ClickAutomation.py | AppTestBot/AppTestBot | 035e93e662753e50d7dcc38d6fd362933186983b | [
"Apache-2.0"
] | null | null | null | 01-tapsterbot/click-accuracy/ClickAutomation.py | AppTestBot/AppTestBot | 035e93e662753e50d7dcc38d6fd362933186983b | [
"Apache-2.0"
] | null | null | null | 01-tapsterbot/click-accuracy/ClickAutomation.py | AppTestBot/AppTestBot | 035e93e662753e50d7dcc38d6fd362933186983b | [
"Apache-2.0"
] | null | null | null | import os
import sys
import shutil
import csv
import subprocess
import xml.etree.ElementTree as ET
import random
import re
import time
sys.path.append('/home/kimsoohyun/00-Research/02-Graph/01-tapsterbot/dataSendTest')
import req
from change_axis_qhd import ChangeAxis as C1
FLAGS = None
def get_point(index, package_name):
    """Dump the device UI hierarchy and pick a clickable point.

    Keeps dumping until the UI has settled (the five most recent dumps
    report the same node count), saving each dump under
    ./dataset/00-xml/<package_name>/<index>.xml.

    :param index: sequence number of the generated event (names the dump)
    :param package_name: package of the app under test (dump sub-folder)
    :returns: (x, y) point from parse_xml_log (random on dump failure)
    """
    activity_list = list()
    # waiting for rendering end
    while True:
        # Keep only the last five observed node counts.
        if len(activity_list) > 5:
            activity_list.pop(0)
        if len(activity_list) ==5 and len(set(activity_list)) == 1:
            break
        #export XML log
        command = 'adb shell uiautomator dump /sdcard/{0}.xml'.format(index)
        dump_output = None
        try:
            dump_output = command_output(command)
        except subprocess.CalledProcessError:
            print("uiautomator dump error")
        # "UI hierchary" (sic) is the literal success prefix printed by
        # the uiautomator tool; anything else means the dump failed.
        if dump_output is not None and \
                not dump_output.startswith('UI hierchary dumped to:'):
            # Record a failed dump (count 0) and retry; the random point
            # assigned here is overwritten on the next successful pass.
            activity_list.append(0)
            point = (random.randrange(0, 1080),
                     random.randrange(0, 1920))
            continue
        #pull XML log
        command = 'adb pull /sdcard/{0}.xml ./dataset/00-xml/{1}/{0}.xml'.format(index, package_name)
        try:
            command_check(command)
        except subprocess.CalledProcessError:
            pass
        xml = './dataset/00-xml/{0}/{1}.xml'.format(package_name, index)
        size, point = parse_xml_log(xml)
        activity_list.append(size)
    return point
def check_binary(binaries):
    """Verify that every command in *binaries* is available on PATH.

    :param binaries: iterable of executable names (e.g. ['adb'])
    :raises FileNotFoundError: naming the first missing executable
    """
    for binary in binaries:
        if shutil.which(binary) is None:
            # Include the missing name so the failure is actionable
            # (previously the exception carried no message).
            raise FileNotFoundError(binary)
def check_dirs(dirs):
    """Create every directory in *dirs* (with parents) if it is missing."""
    for directory in dirs:
        os.makedirs(directory, exist_ok=True)
def terminate_env(pss):
    """Send SIGINT (kill -2) to each on-device process named in *pss*.

    Process ids are discovered by grepping ``adb shell ps``; names that
    match no running process are skipped.
    """
    for ps in pss:
        command = 'adb shell "ps | grep {0}"'.format(ps)
        try:
            output = command_output(command)
        except subprocess.CalledProcessError as e:
            # grep exits non-zero when nothing matches: not running.
            continue
        # NOTE(review): takes the first number on the matched line,
        # assumed to be the PID column — confirm for the device's ps.
        psnum = re.findall('\d+', output)[0]
        command = 'adb shell kill -2 {0}'.format(psnum)
        command_check(command)
def command_popen(command):
    """Start *command* in a shell without waiting; return the Popen handle."""
    return subprocess.Popen(command, shell=True)
def command_check(command):
    """Run *command* in a shell; raise CalledProcessError on non-zero exit."""
    return subprocess.check_call(command, shell=True)
def command_output(command):
    """Run *command* in a shell and return its stdout decoded as UTF-8."""
    raw = subprocess.check_output(command, shell=True)
    return raw.decode('utf-8')
def parse_xml_log(path):
    """Parse a uiautomator XML dump and pick a clickable point.

    :param path: path to the XML hierarchy dump
    :returns: tuple ``(size, point)`` where *size* is the number of nodes
        in the hierarchy and *point* is the centre of a randomly chosen
        clickable node's bounds; when there is no clickable node (or the
        bounds are malformed), a random point in a 1080x1920 screen.
    """
    tree = ET.parse(path)
    root = tree.getroot()
    size = 0
    bounds = list()
    for item in root.iter():
        size += 1
        if item.get('clickable') == 'true':
            bounds.append(item.get('bounds'))
    try:
        choose = random.choice(bounds)
        # bounds look like "[x1,y1][x2,y2]"; the centre is the midpoint
        # of the two corners.  (Fix: the previous code computed
        # x1 + x2/2, which is off-centre whenever x1 > 0.)
        axes = re.findall('\d+', choose)
        point = ((int(axes[0]) + int(axes[2])) / 2,
                 (int(axes[1]) + int(axes[3])) / 2)
    except (ValueError, IndexError):
        # No clickable node, or malformed bounds: random fallback.
        point = (random.randrange(0, 1080),
                 random.randrange(0, 1920))
    return size, point
def main(args):
    '''input: app_package_name
    output: csvfile
            (appname, send-axis, expect-bot-axis,
             clicked-axis, clicked-bot-axis, is success)

    1. Read the list of app packages.
    2. Launch each app through adb by package name.
    3. Repeat until the event count runs out:
        3-1. Find the centre of a clickable bound in the UI XML.
        3-2. Store values: clicked-axis, clicked_bot-axis.
        3-3. Send the data to the robot.
        3-4. Receive clicked-bot-axis back --> store it.
        3-5. Read the actual click list from adb --> store it.
             getevent -l /dev/input/event0 | grep "ABS_MT_POSITION"
             displayX = x * 1440 / 4096
             displayY = y * 2960 / 4096
        3-6. Manually check whether the click landed (is success).
        3-7. Save the CSV.
    '''
    binaries = ['adb']
    check_binary(binaries)
    # Display-to-robot coordinate mapper for a 1440x2960 screen.
    change_point = C1(1440, 2960, 40, 100, 695)
    dirs = ['./dataset/01-coordinate-csv',
            './dataset/00-xml']
    check_dirs(dirs)
    print('checked all binaries dirs')
    # Read the list of app packages.
    app_package_list = args.input
    event = args.event
    if not os.path.exists(app_package_list):
        raise Exception(' Need app_list.csv')
    app_list = list()
    with open(app_package_list, 'r') as f:
        reader = csv.DictReader(f)
        for row in reader:
            print(row['package_name'])
            app_list.append(row['package_name'])
    # Iterate over the apps.
    for package_name in app_list:
        dirs = ['./dataset/00-xml/'+package_name]
        check_dirs(dirs)
        # Remove stale dumps left on the device by previous runs.
        command = 'adb shell rm /sdcard/*.xml'
        try:
            command_check(command)
        except subprocess.CalledProcessError:
            pass
        # Launch the app through adb.
        command = 'adb shell monkey -p {0} -c android.intent.category.LAUNCHER 1'.format(package_name)
        try:
            command_check(command)
        except subprocess.CalledProcessError:
            pass
        for index in range(0, event):
            # Pick a clickable point from the XML dump.
            send_axis = get_point(index, package_name)
            send_bot_axis = (change_point.c_x(send_axis[0]), \
                             change_point.c_y(send_axis[1]))
            res = req.send_req(args.ip, \
                               send_bot_axis[0], \
                               send_bot_axis[1], \
                               package_name)
            # NOTE(review): "ABS_MT_POSTION" is misspelled (the docstring
            # says ABS_MT_POSITION), so this grep matches nothing; also
            # `getevent` streams indefinitely, so check_output may block
            # — confirm and fix before relying on `result`.
            command = 'adb shell getevent -l /dev/input/event0 | grep "ABS_MT_POSTION"'
            try:
                result = command_output(command)
            except subprocess.CalledProcessError:
                result = None
            print(result)
        #stop app
        for index in range(0, 5):
            command = 'adb shell input keyevent KEYCODE_BACK'
            try:
                command_check(command)
            except subprocess.CalledProcessError:
                pass
        command = 'adb shell am force-stop {0}'.format(package_name)
        try:
            command_check(command)
        except subprocess.CalledProcessError:
            pass
if __name__ == "__main__":
    import argparse

    # Command line interface for the click-accuracy experiment.
    parser = argparse.ArgumentParser(
        description='Mobile xml extractor')
    parser.add_argument('-i', '--input', type=str,
                        required=True,
                        help=('list of app package names to test'))
    parser.add_argument('-e', '--event', type=int,
                        default=10,
                        help=('the number of generated user event(default: 10)'))
    parser.add_argument('-p', '--ip', type=str,
                        required=True,
                        help=('input send ip address'))
    FLAGS, _ = parser.parse_known_args()
    main(FLAGS)
| 40.472973 | 104 | 0.427713 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,054 | 0.222511 |
389d5fdd271842afaa8eb2ba841ed09fa0b2bfa7 | 957 | py | Python | tools/export_fbx.py | SeijiEmery/unity_tools | cb401e6979b95c081a2ab3f944fc6e4419ccfd0e | [
"MIT"
] | null | null | null | tools/export_fbx.py | SeijiEmery/unity_tools | cb401e6979b95c081a2ab3f944fc6e4419ccfd0e | [
"MIT"
] | null | null | null | tools/export_fbx.py | SeijiEmery/unity_tools | cb401e6979b95c081a2ab3f944fc6e4419ccfd0e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import sys
from run_bpy_script import run_blender_script
def export_fbx(input_blend_file, output=None, **kwargs):
    """Export *input_blend_file* to FBX via a headless Blender script.

    Keyword arguments are forwarded to the Blender-side exporter;
    ``global_scale`` defaults to 1e-3 unless the caller overrides it.
    """
    options = dict(kwargs)
    options.setdefault('global_scale', 1e-3)
    run_blender_script('export_fbx.py',
                       blend_file=input_blend_file,
                       output=output,
                       **options)
if __name__ == '__main__':
    # split args, kwargs + use these to call a function:
    # "-x VAL" / "--x VAL" become kwargs, everything else positional.
    # NOTE(review): a trailing flag without a value raises IndexError.
    script_args = sys.argv[1:]
    args, kwargs = [], {}
    while len(script_args) > 0:
        arg = script_args[0]
        if arg.startswith('--') or arg.startswith('-'):
            kwargs[arg.lstrip('-')] = script_args[1]
            script_args = script_args[2:]
        else:
            args.append(arg)
            script_args = script_args[1:]
    print(args)
    print(kwargs)
    # export_fbx('../tests/export_fbx/input/test.blend', 'test.fbx')
    export_fbx(*args, **kwargs)
| 25.864865 | 68 | 0.615465 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.194357 |
389d63e90be0f0a03a327ac541d5fc56ba045954 | 4,389 | py | Python | g2pk/special.py | elbum/g2pK | c00fae07973b552a4e318f3c775c52bbb71c8196 | [
"Apache-2.0"
] | 136 | 2019-06-28T06:50:26.000Z | 2022-03-25T15:54:08.000Z | g2pk/special.py | elbum/g2pK | c00fae07973b552a4e318f3c775c52bbb71c8196 | [
"Apache-2.0"
] | 6 | 2020-08-30T05:46:46.000Z | 2022-02-07T02:00:53.000Z | g2pk/special.py | elbum/g2pK | c00fae07973b552a4e318f3c775c52bbb71c8196 | [
"Apache-2.0"
] | 30 | 2019-06-28T07:17:04.000Z | 2022-02-27T07:40:06.000Z | # -*- coding: utf-8 -*-
'''
Special rule for processing Hangul
https://github.com/kyubyong/g2pK
'''
import re
from g2pk.utils import gloss, get_rule_id2text
rule_id2text = get_rule_id2text()
############################ vowels ############################
def jyeo(inp, descriptive=False, verbose=False):
    """Rule 5.1: ㅕ after ㅈ/ㅉ/ㅊ is pronounced [ㅓ] (e.g. 져 -> 저)."""
    rule = rule_id2text["5.1"]
    # Applied unconditionally — treated as a general rule, not a
    # descriptive-only one (by kyubyong).
    out = re.sub("([ᄌᄍᄎ])ᅧ", r"\1ᅥ", inp)
    gloss(verbose, out, inp, rule)
    return out
def ye(inp, descriptive=False, verbose=False):
    """Rule 5.2: in descriptive mode, ㅖ after most consonants is
    pronounced [ㅔ].
    """
    rule = rule_id2text["5.2"]
    # In practice speakers pronounce 'ㅖ' as [ㅔ] except in 예, 녜, 셰, 쎼
    # (hence ᄋ/ᄂ/ᄉ/ᄊ are excluded from the class below).
    if descriptive:
        # Fix: the class previously contained the compatibility jamo
        # ㄹ (U+3139) instead of the choseong ᄅ (U+1105); decomposed
        # Hangul only contains choseong, so 례-type syllables were
        # never converted.
        out = re.sub("([ᄀᄁᄃᄄᄅᄆᄇᄈᄌᄍᄎᄏᄐᄑᄒ])ᅨ", r"\1ᅦ", inp)
    else:
        out = inp
    gloss(verbose, out, inp, rule)
    return out
def consonant_ui(inp, descriptive=False, verbose=False):
    """Rule 5.3: ㅢ after a consonant is pronounced [ㅣ] (e.g. 늬 -> 니)."""
    rule = rule_id2text["5.3"]
    out = re.sub("([ᄀᄁᄂᄃᄄᄅᄆᄇᄈᄉᄊᄌᄍᄎᄏᄐᄑᄒ])ᅴ", r"\1ᅵ", inp)
    gloss(verbose, out, inp, rule)
    return out
def josa_ui(inp, descriptive=False, verbose=False):
    """Rule 5.4.2: the particle 의 (tagged '/J') may be pronounced [ㅔ]."""
    rule = rule_id2text["5.4.2"]
    # In practice speakers very often pronounce the particle '의' as [ㅔ].
    if descriptive:
        out = re.sub("의/J", "에", inp)
    else:
        # Prescriptive mode: keep 의, only strip the particle tag.
        out = inp.replace("/J", "")
    gloss(verbose, out, inp, rule)
    return out
def vowel_ui(inp, descriptive=False, verbose=False):
    """Rule 5.4.1: non-word-initial 의 may be pronounced [ㅣ]."""
    rule = rule_id2text["5.4.1"]
    # In practice speakers very often pronounce 의 in non-initial
    # syllables as [ㅣ].
    if descriptive:
        out = re.sub("(\Sᄋ)ᅴ", r"\1ᅵ", inp)
    else:
        out = inp
    gloss(verbose, out, inp, rule)
    return out
def jamo(inp, descriptive=False, verbose=False):
    """Rule 16: finals in jamo letter names (e.g. 디귿이 -> [디그시])."""
    rule = rule_id2text["16"]
    out = inp
    # NOTE(review): these patterns match precomposed syllables 그/으
    # followed by a bare final jamo — assumes the input is only
    # partially decomposed at this stage; confirm against the pipeline.
    out = re.sub("([그])ᆮᄋ", r"\1ᄉ", out)
    out = re.sub("([으])[ᆽᆾᇀᇂ]ᄋ", r"\1ᄉ", out)
    out = re.sub("([으])[ᆿ]ᄋ", r"\1ᄀ", out)
    out = re.sub("([으])[ᇁ]ᄋ", r"\1ᄇ", out)
    gloss(verbose, out, inp, rule)
    return out
############################ 어간 받침 ############################
def rieulgiyeok(inp, descriptive=False, verbose=False):
    """Rule 11.1: stem-final ㄺ (tagged '/P') + ㄱ/ㄲ -> [ㄹㄲ]."""
    rule = rule_id2text["11.1"]
    out = inp
    out = re.sub("ᆰ/P([ᄀᄁ])", r"ᆯᄁ", out)
    gloss(verbose, out, inp, rule)
    return out
def rieulbieub(inp, descriptive=False, verbose=False):
    """Rule 25: after stem-final ㄼ/ㄾ (tagged '/P'), the suffix-initial
    lenis consonant is tensified (ㄱ->ㄲ, ㄷ->ㄸ, ㅅ->ㅆ, ㅈ->ㅉ).
    """
    rule = rule_id2text["25"]
    out = inp
    for lenis, fortis in (("ᄀ", "ᄁ"),
                          ("ᄃ", "ᄄ"),
                          ("ᄉ", "ᄊ"),
                          ("ᄌ", "ᄍ")):
        out = re.sub("([ᆲᆴ])/P" + lenis, r"\1" + fortis, out)
    gloss(verbose, out, inp, rule)
    return out
def verb_nieun(inp, descriptive=False, verbose=False):
    """Rule 24: after a verb stem ending in ㄴ/ㅁ (and clusters ㄵ/ㄻ,
    tagged '/P'), the suffix-initial lenis consonant is tensified.
    """
    rule = rule_id2text["24"]
    out = inp
    pairs = [ ("([ᆫᆷ])/Pᄀ", r"\1ᄁ"),
              ("([ᆫᆷ])/Pᄃ", r"\1ᄄ"),
              ("([ᆫᆷ])/Pᄉ", r"\1ᄊ"),
              ("([ᆫᆷ])/Pᄌ", r"\1ᄍ"),

              ("ᆬ/Pᄀ", "ᆫᄁ"),
              ("ᆬ/Pᄃ", "ᆫᄄ"),
              ("ᆬ/Pᄉ", "ᆫᄊ"),
              ("ᆬ/Pᄌ", "ᆫᄍ"),

              ("ᆱ/Pᄀ", "ᆷᄁ"),
              ("ᆱ/Pᄃ", "ᆷᄄ"),
              ("ᆱ/Pᄉ", "ᆷᄊ"),
              ("ᆱ/Pᄌ", "ᆷᄍ") ]
    for str1, str2 in pairs:
        out = re.sub(str1, str2, out)
    gloss(verbose, out, inp, rule)
    return out
def balb(inp, descriptive=False, verbose=False):
    """Rule 10.1: exceptional ㄼ clusters — 밟-, and 넓- in 넓죽/넓둥
    compounds — reduce to [ㅂ] before a consonant or at syllable end.
    """
    rule = rule_id2text["10.1"]
    out = inp

    # End of string, or any following jamo other than ᄋ/ᄒ.
    syllable_final_or_consonants = "($|[^ᄋᄒ])"

    # exceptions
    out = re.sub(f"(바)ᆲ({syllable_final_or_consonants})", r"\1ᆸ\2", out)
    out = re.sub(f"(너)ᆲ([ᄌᄍ]ᅮ|[ᄃᄄ]ᅮ)", r"\1ᆸ\2", out)
    gloss(verbose, out, inp, rule)
    return out
def palatalize(inp, descriptive=False, verbose=False):
    """Rule 17 (palatalization): ㄷ/ㅌ before 이/여 become ㅈ/ㅊ
    (e.g. 굳이 -> 구지); ㄷ + 히 also becomes ㅊ.
    """
    rule = rule_id2text["17"]
    out = inp
    for pattern, repl in (("ᆮᄋ([ᅵᅧ])", r"ᄌ\1"),
                          ("ᇀᄋ([ᅵᅧ])", r"ᄎ\1"),
                          ("ᆴᄋ([ᅵᅧ])", r"ᆯᄎ\1"),
                          ("ᆮᄒ([ᅵ])", r"ᄎ\1")):
        out = re.sub(pattern, repl, out)
    gloss(verbose, out, inp, rule)
    return out
def modifying_rieul(inp, descriptive=False, verbose=False):
    """Rule 27: after the adnominal ending -(으)ㄹ (tagged '/E'), the
    following lenis consonant is tensified; likewise for the fixed
    -ㄹ걸 / -ㄹ밖에 / -ㄹ수록 ... endings.
    """
    rule = rule_id2text["27"]
    out = inp
    pairs = [ ("ᆯ/E ᄀ", r"ᆯ ᄁ"),
              ("ᆯ/E ᄃ", r"ᆯ ᄄ"),
              ("ᆯ/E ᄇ", r"ᆯ ᄈ"),
              ("ᆯ/E ᄉ", r"ᆯ ᄊ"),
              ("ᆯ/E ᄌ", r"ᆯ ᄍ"),

              ("ᆯ걸", "ᆯ껄"),
              ("ᆯ밖에", "ᆯ빠께"),
              ("ᆯ세라", "ᆯ쎄라"),
              ("ᆯ수록", "ᆯ쑤록"),
              ("ᆯ지라도", "ᆯ찌라도"),
              ("ᆯ지언정", "ᆯ찌언정"),
              ("ᆯ진대", "ᆯ찐대") ]
    for str1, str2 in pairs:
        out = re.sub(str1, str2, out)
    gloss(verbose, out, inp, rule)
    return out
| 24.657303 | 73 | 0.501253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,936 | 0.377756 |
389e23e664ef6bec0dd218c1023187fec627c7da | 1,136 | py | Python | mi/dataset/driver/flord_l_wfp/sio/flord_l_wfp_sio_telemetered_driver.py | petercable/mi-dataset | d3c1607ea31af85fbba5719a31d4a60bf39f8dd3 | [
"BSD-2-Clause"
] | 1 | 2018-09-14T23:28:29.000Z | 2018-09-14T23:28:29.000Z | mi/dataset/driver/flord_l_wfp/sio/flord_l_wfp_sio_telemetered_driver.py | petercable/mi-dataset | d3c1607ea31af85fbba5719a31d4a60bf39f8dd3 | [
"BSD-2-Clause"
] | 33 | 2017-04-25T19:53:45.000Z | 2022-03-18T17:42:18.000Z | mi/dataset/driver/flord_l_wfp/sio/flord_l_wfp_sio_telemetered_driver.py | petercable/mi-dataset | d3c1607ea31af85fbba5719a31d4a60bf39f8dd3 | [
"BSD-2-Clause"
] | 31 | 2015-03-04T01:01:09.000Z | 2020-10-28T14:42:12.000Z | #!/usr/local/bin/python2.7
##
# OOIPLACEHOLDER
#
# Copyright 2014 Raytheon Co.
##
__author__ = 'Mark Worden'
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.flord_l_wfp_sio import FlordLWfpSioParser
from mi.core.versioning import version
@version("15.6.1")
def parse(unused, source_file_path, particle_data_handler):
    """Parse a FLORD-L WFP SIO telemetered file and publish its particles.

    Opens `source_file_path` in binary mode, runs the telemetered driver over
    it, and returns the (now populated) particle_data_handler.
    """
    with open(source_file_path, 'rb') as file_stream:
        telem_driver = FlordLWfpSioTelemeteredDriver(
            unused, file_stream, particle_data_handler)
        telem_driver.processFileStream()
    return particle_data_handler
class FlordLWfpSioTelemeteredDriver(SimpleDatasetDriver):
    """Telemetered dataset driver that builds a FLORD-L WFP SIO parser."""

    def _build_parser(self, stream_handle):
        """Return a FlordLWfpSioParser configured for the given stream."""
        config = {
            DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.flord_l_wfp_sio',
            DataSetDriverConfigKeys.PARTICLE_CLASS: 'FlordLWfpSioDataParticle',
        }
        return FlordLWfpSioParser(config, stream_handle, self._exception_callback)
| 27.047619 | 92 | 0.71831 | 497 | 0.4375 | 0 | 0 | 298 | 0.262324 | 0 | 0 | 162 | 0.142606 |
389e3359d1c51e16167fb066518dcce270c6cf1d | 1,150 | py | Python | qa_tool/models.py | pg-irc/pathways-backend | 05a8c4e750523d2d081b030a248c5444d1ed7992 | [
"BSD-3-Clause"
] | 12 | 2017-08-30T18:21:00.000Z | 2021-12-09T04:04:17.000Z | qa_tool/models.py | pg-irc/pathways-backend | 05a8c4e750523d2d081b030a248c5444d1ed7992 | [
"BSD-3-Clause"
] | 424 | 2017-08-08T18:32:14.000Z | 2022-03-30T21:42:51.000Z | qa_tool/models.py | pg-irc/pathways-backend | 05a8c4e750523d2d081b030a248c5444d1ed7992 | [
"BSD-3-Clause"
] | 7 | 2017-09-29T21:14:37.000Z | 2019-12-30T21:07:37.000Z | from django.contrib.gis.db import models
from common.models import (RequiredURLField,
OptionalTextField, RequiredCharField)
from human_services.locations.models import ServiceAtLocation
from search.models import Task
from users.models import User
class Algorithm(models.Model):
    """A search/ranking algorithm being evaluated, identified by URL and name."""
    url = RequiredURLField()
    name = RequiredCharField(max_length=200)
    notes = OptionalTextField()  # free-form remarks about the algorithm
    class Meta:
        # Deterministic default ordering by primary key.
        ordering = ['id']
class SearchLocation(models.Model):
    """A named geographic location used as the origin of a search."""
    name = OptionalTextField()
    # GeoDjango point; both name and point may be absent.
    point = models.PointField(blank=True, null=True)
    class Meta:
        ordering = ['id']
class RelevancyScore(models.Model):
    """A user's relevancy rating of a service-at-location result.

    Ties together the algorithm that produced the result, the topic and
    search location of the query, the rating user, and the rated
    service-at-location.
    """
    value = models.IntegerField()
    time_stamp = models.DateTimeField()
    # PROTECT on every FK: a recorded score blocks deletion of its referents
    # rather than being silently cascaded away.
    algorithm = models.ForeignKey(Algorithm, on_delete=models.PROTECT)
    topic = models.ForeignKey(Task, on_delete=models.PROTECT)
    search_location = models.ForeignKey(SearchLocation, on_delete=models.PROTECT)
    user = models.ForeignKey(User, on_delete=models.PROTECT)
    service_at_location = models.ForeignKey(
        ServiceAtLocation, on_delete=models.PROTECT)
    class Meta:
        # Single quotes for consistency with the sibling models' Meta.ordering.
        ordering = ['id']
389f1a7b583f28aabf6b2f6f6dbd7bcfd9b5dc58 | 3,485 | py | Python | ivi/tektronix/__init__.py | sacherjj/python-ivi | 6dd1ba93d65dc30a652a3a1b34c66921d94315e8 | [
"MIT"
] | 161 | 2015-01-23T17:43:01.000Z | 2022-03-29T14:42:42.000Z | ivi/tektronix/__init__.py | sacherjj/python-ivi | 6dd1ba93d65dc30a652a3a1b34c66921d94315e8 | [
"MIT"
] | 45 | 2015-01-15T13:35:04.000Z | 2021-06-03T01:58:55.000Z | ivi/tektronix/__init__.py | sacherjj/python-ivi | 6dd1ba93d65dc30a652a3a1b34c66921d94315e8 | [
"MIT"
] | 87 | 2015-01-31T10:55:23.000Z | 2022-03-17T08:18:47.000Z | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# Oscilloscopes
# DPO4000
from .tektronixDPO4032 import tektronixDPO4032
from .tektronixDPO4034 import tektronixDPO4034
from .tektronixDPO4054 import tektronixDPO4054
from .tektronixDPO4104 import tektronixDPO4104
# MSO4000
from .tektronixMSO4032 import tektronixMSO4032
from .tektronixMSO4034 import tektronixMSO4034
from .tektronixMSO4054 import tektronixMSO4054
from .tektronixMSO4104 import tektronixMSO4104
# DPO4000B
from .tektronixDPO4014B import tektronixDPO4014B
from .tektronixDPO4034B import tektronixDPO4034B
from .tektronixDPO4054B import tektronixDPO4054B
from .tektronixDPO4102B import tektronixDPO4102B
from .tektronixDPO4104B import tektronixDPO4104B
# MSO4000B
from .tektronixMSO4014B import tektronixMSO4014B
from .tektronixMSO4034B import tektronixMSO4034B
from .tektronixMSO4054B import tektronixMSO4054B
from .tektronixMSO4102B import tektronixMSO4102B
from .tektronixMSO4104B import tektronixMSO4104B
# MDO4000
from .tektronixMDO4054 import tektronixMDO4054
from .tektronixMDO4104 import tektronixMDO4104
# MDO4000B
from .tektronixMDO4014B import tektronixMDO4014B
from .tektronixMDO4034B import tektronixMDO4034B
from .tektronixMDO4054B import tektronixMDO4054B
from .tektronixMDO4104B import tektronixMDO4104B
# MDO3000
from .tektronixMDO3012 import tektronixMDO3012
from .tektronixMDO3014 import tektronixMDO3014
from .tektronixMDO3022 import tektronixMDO3022
from .tektronixMDO3024 import tektronixMDO3024
from .tektronixMDO3032 import tektronixMDO3032
from .tektronixMDO3034 import tektronixMDO3034
from .tektronixMDO3052 import tektronixMDO3052
from .tektronixMDO3054 import tektronixMDO3054
from .tektronixMDO3102 import tektronixMDO3102
from .tektronixMDO3104 import tektronixMDO3104
# Function Generators
from .tektronixAWG2005 import tektronixAWG2005
from .tektronixAWG2020 import tektronixAWG2020
from .tektronixAWG2021 import tektronixAWG2021
from .tektronixAWG2040 import tektronixAWG2040
from .tektronixAWG2041 import tektronixAWG2041
# Power Supplies
from .tektronixPS2520G import tektronixPS2520G
from .tektronixPS2521G import tektronixPS2521G
# Optical attenuators
from .tektronixOA5002 import tektronixOA5002
from .tektronixOA5012 import tektronixOA5012
from .tektronixOA5022 import tektronixOA5022
from .tektronixOA5032 import tektronixOA5032
# Current probe amplifiers
from .tektronixAM5030 import tektronixAM5030
| 39.157303 | 77 | 0.857389 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,287 | 0.369297 |
38a0b921f983b2d2b9365a4bb47c16ebd9b5348e | 449 | py | Python | wazimap_ng/boundaries/migrations/0007_auto_20200121_0907.py | arghyaiitb/wazimap-ng | 2a77860526d865b8fd0c22a2204f121fdb3b28a0 | [
"Apache-2.0"
] | 11 | 2019-12-31T20:27:22.000Z | 2022-03-10T03:55:38.000Z | wazimap_ng/boundaries/migrations/0007_auto_20200121_0907.py | arghyaiitb/wazimap-ng | 2a77860526d865b8fd0c22a2204f121fdb3b28a0 | [
"Apache-2.0"
] | 164 | 2020-02-06T15:02:22.000Z | 2022-03-30T22:42:00.000Z | wazimap_ng/boundaries/migrations/0007_auto_20200121_0907.py | arghyaiitb/wazimap-ng | 2a77860526d865b8fd0c22a2204f121fdb3b28a0 | [
"Apache-2.0"
] | 16 | 2020-01-03T20:30:24.000Z | 2022-01-11T11:05:15.000Z | # Generated by Django 2.2.8 on 2020-01-21 09:07
import django.contrib.gis.db.models.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('boundaries', '0006_worldborder'),
]
operations = [
migrations.AlterField(
model_name='worldborder',
name='mpoly',
field=django.contrib.gis.db.models.fields.PolygonField(srid=4326),
),
]
| 22.45 | 78 | 0.632517 | 321 | 0.714922 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.216036 |
38a129325d66bb189c82f953757e48c8f2208659 | 551 | py | Python | rldb/db/repo__openai_baselines_cbd21ef/algo__ppo2_mpi/entries.py | seungjaeryanlee/sotarl | 8c471c4666d6210c68f3cb468e439a2b168c785d | [
"MIT"
] | 45 | 2019-05-13T17:39:33.000Z | 2022-03-07T23:44:13.000Z | rldb/db/repo__openai_baselines_cbd21ef/algo__ppo2_mpi/entries.py | seungjaeryanlee/sotarl | 8c471c4666d6210c68f3cb468e439a2b168c785d | [
"MIT"
] | 2 | 2019-03-29T01:41:59.000Z | 2019-07-02T02:48:31.000Z | rldb/db/repo__openai_baselines_cbd21ef/algo__ppo2_mpi/entries.py | seungjaeryanlee/sotarl | 8c471c4666d6210c68f3cb468e439a2b168c785d | [
"MIT"
] | 2 | 2020-04-07T20:57:30.000Z | 2020-07-08T12:55:15.000Z | entries = [
{
'env-title': 'atari-enduro',
'score': 207.47,
},
{
'env-title': 'atari-space-invaders',
'score': 459.89,
},
{
'env-title': 'atari-qbert',
'score': 7184.73,
},
{
'env-title': 'atari-seaquest',
'score': 1383.38,
},
{
'env-title': 'atari-pong',
'score': 13.9,
},
{
'env-title': 'atari-beam-rider',
'score': 594.45,
},
{
'env-title': 'atari-breakout',
'score': 81.61,
},
]
| 17.774194 | 44 | 0.399274 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.430127 |
38a1aeec73b5c25381b82d560fdb3ca48a37c74c | 701 | py | Python | autoPyTorch/pipeline/components/setup/network_initializer/NoInit.py | ravinkohli/Auto-PyTorch | a1512d56d4db89133e895e85765e3b72afbfe157 | [
"Apache-2.0"
] | 1 | 2021-05-12T10:11:58.000Z | 2021-05-12T10:11:58.000Z | autoPyTorch/pipeline/components/setup/network_initializer/NoInit.py | maxpark/Auto-PyTorch | 06e67de5017b4cccad9398e24a3d9f0bd8176da3 | [
"Apache-2.0"
] | 34 | 2020-10-06T08:06:46.000Z | 2021-01-21T13:23:34.000Z | autoPyTorch/pipeline/components/setup/network_initializer/NoInit.py | maxpark/Auto-PyTorch | 06e67de5017b4cccad9398e24a3d9f0bd8176da3 | [
"Apache-2.0"
] | 1 | 2020-10-14T12:25:47.000Z | 2020-10-14T12:25:47.000Z | from typing import Callable
import torch
from autoPyTorch.pipeline.components.setup.network_initializer.base_network_initializer import (
BaseNetworkInitializerComponent
)
class NoInit(BaseNetworkInitializerComponent):
    """Initializer component that leaves weights and biases untouched."""

    def weights_init(self) -> Callable:
        """Return a no-op callable suitable for ``torch.nn.Module.apply``.

        The returned function intentionally does nothing, so each layer keeps
        the default initialization it was created with.
        """
        def no_op(module: torch.nn.Module) -> None:
            # Deliberately empty: keep the framework's default parameters.
            pass
        return no_op
| 28.04 | 96 | 0.707561 | 520 | 0.741797 | 0 | 0 | 0 | 0 | 0 | 0 | 316 | 0.450785 |
38a2a7377367a5e064f25d1941e049967ce7ff47 | 747 | py | Python | NSI/Chapitre 6/TP6.py | S-c-r-a-t-c-h-y/coding-projects | cad33aedb72720c3e3a37c7529e55abd3edb291a | [
"MIT"
] | null | null | null | NSI/Chapitre 6/TP6.py | S-c-r-a-t-c-h-y/coding-projects | cad33aedb72720c3e3a37c7529e55abd3edb291a | [
"MIT"
] | null | null | null | NSI/Chapitre 6/TP6.py | S-c-r-a-t-c-h-y/coding-projects | cad33aedb72720c3e3a37c7529e55abd3edb291a | [
"MIT"
] | null | null | null | from arbre_binaire import AB
from dessiner_arbre import dessiner
def hauteur(arbre):
    """Return the height of a binary tree (an empty tree has height 0)."""
    if arbre is None:
        return 0
    return 1 + max(hauteur(arbre.get_ag()), hauteur(arbre.get_ad()))
def taille(arbre):
    """Return the number of nodes of a binary tree (0 for an empty tree)."""
    if arbre is None:
        return 0
    return taille(arbre.get_ag()) + taille(arbre.get_ad()) + 1
# Sample trees exercising taille(); AB appears to take (value, left, right) —
# see arbre_binaire for the exact constructor signature.
arbre1 = None  # empty tree -> size 0
arbre2 = AB(1, AB(3), AB(2))
arbre3 = AB(1, AB(2), AB(2, AB(4)))
arbre4 = AB(1)  # single node
arbre5 = AB(1, AB(2))
# Print the size of each sample tree.
print(taille(arbre1))
print(taille(arbre2))
print(taille(arbre3))
print(taille(arbre4))
print(taille(arbre5))
| 18.675 | 60 | 0.637216 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.176707 |
38a405bed7f1802e2da0c53ab45dbec45d5bdb40 | 5,441 | py | Python | benchmarks/ltl_timed_transition_system/f3/timed_extending_bound.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/ltl_timed_transition_system/f3/timed_extending_bound.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/ltl_timed_transition_system/f3/timed_extending_bound.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z | from collections import Iterable
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, \
msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next
# Name of the symbolic time-elapse ("delta") variable, shared by the helpers below.
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
    """Declare the (current-state, next-state) constant pair for `name`.

    Names starting with '_' are rejected (assertion).
    """
    assert not name.startswith("_"), name
    curr = msat_make_constant(
        menv, msat_declare_function(menv, name, c_type))
    nxt = msat_make_constant(
        menv, msat_declare_function(menv, name_next(name), c_type))
    return curr, nxt
def make_enum(menv, v_name: str, enum_size: int):
    """Encode an `enum_size`-valued variable as ceil(log2(enum_size)) booleans.

    Returns (b_vars, vals, x_vals) where b_vars are the (curr, next) boolean
    constant pairs, and vals[i] / x_vals[i] are the current/next-state
    predicates asserting that the encoded value equals i.
    """
    bool_type = msat_get_bool_type(menv)
    num_bits = ceil(log(enum_size, 2))
    b_vars = []
    # One (current, next) boolean pair per bit of the encoding.
    for idx in range(num_bits):
        c_name = "{}{}".format(v_name, idx)
        b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
    vals = []
    x_vals = []
    for enum_val in range(enum_size):
        # Zero-padded binary string for this enum value.
        bit_val = format(enum_val, '0{}b'.format(num_bits))
        assert len(bit_val) == num_bits
        assert all(c in {'0', '1'} for c in bit_val)
        # For each bit (least-significant first, hence reversed), pick the
        # variable itself for a 1-bit and its negation for a 0-bit.
        assign = [b_vars[idx] if c == '1' else
                  (msat_make_not(menv, b_vars[idx][0]),
                   msat_make_not(menv, b_vars[idx][1]))
                  for idx, c in enumerate(reversed(bit_val))]
        # Conjoin all bit literals into a single equality predicate.
        pred = assign[0][0]
        x_pred = assign[0][1]
        for it in assign[1:]:
            pred = msat_make_and(menv, pred, it[0])
            x_pred = msat_make_and(menv, x_pred, it[1])
        vals.append(pred)
        x_vals.append(x_pred)
    assert len(vals) == enum_size
    assert len(x_vals) == enum_size
    return b_vars, vals, x_vals
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 - arg1 as arg0 + (-1 * arg1)."""
    minus_one = msat_make_number(menv, "-1")
    return msat_make_plus(menv, arg0,
                          msat_make_times(menv, arg1, minus_one))
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 < arg1 as not(arg0 >= arg1)."""
    return msat_make_not(menv, msat_make_geq(menv, arg0, arg1))
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    # arg0 >= arg1 expressed via the primitive `leq` with swapped operands.
    return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 > arg1 as not(arg0 <= arg1)."""
    return msat_make_not(menv, msat_make_leq(menv, arg0, arg1))
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 -> arg1 as (not arg0) | arg1."""
    return msat_make_or(menv, msat_make_not(menv, arg0), arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
    """Return the symbols that must diverge: only the time-elapse `delta`."""
    rational = msat_get_rational_type(menv)
    delta_const = msat_make_constant(
        menv, msat_declare_function(menv, delta_name, rational))
    return frozenset([delta_const])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
                                                   msat_term, msat_term):
    """Build the timed transition system and the LTL property F G dec_bound.

    Returns (curr2next map, init, trans, ltl): a clock `c` bounded above by an
    integer `bound` that may only grow, plus a flag `dec_bound` recording
    whether the bound was NOT increased on the last step.
    """
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    bool_type = msat_get_bool_type(menv)
    int_type = msat_get_integer_type(menv)
    real_type = msat_get_rational_type(menv)
    # State variables and their next-state counterparts.
    c, x_c = decl_consts(menv, "c", real_type)
    bound, x_bound = decl_consts(menv, "bound", int_type)
    delta, x_delta = decl_consts(menv, delta_name, real_type)
    dec_bound, x_dec_bound = decl_consts(menv, "dec_bound", bool_type)
    curr2next = {c: x_c, bound: x_bound, delta: x_delta, dec_bound: x_dec_bound}
    zero = msat_make_number(menv, "0")
    init = dec_bound
    # invar delta >= 0
    init = msat_make_and(menv, init, msat_make_geq(menv, delta, zero))
    trans = msat_make_geq(menv, x_delta, zero)
    # invar c <= bound
    init = msat_make_and(menv, init,
                         msat_make_leq(menv, c, bound))
    trans = msat_make_and(menv, trans,
                          msat_make_leq(menv, x_c, x_bound))
    # Timed step: delta > 0 -> (c' = c + delta & bound' = bound & dec_bound')
    lhs = msat_make_gt(menv, delta, zero)
    rhs = msat_make_and(menv,
                        msat_make_equal(menv, x_c,
                                        msat_make_plus(menv, c, delta)),
                        msat_make_and(menv, msat_make_equal(menv, x_bound, bound),
                                      x_dec_bound))
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, lhs, rhs))
    # Discrete step: delta = 0.
    disc_t = msat_make_equal(menv, delta, zero)
    # c' = c
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, disc_t,
                                         msat_make_equal(menv, x_c, c)))
    # c < bound -> (bound' = bound & dec_bound')
    lhs = msat_make_and(menv, disc_t, msat_make_lt(menv, c, bound))
    rhs = msat_make_and(menv, msat_make_equal(menv, x_bound, bound), x_dec_bound)
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, lhs, rhs))
    # bound' > bound -> !x_dec_bound (growing the bound resets the flag)
    lhs = msat_make_gt(menv, x_bound, bound)
    rhs = msat_make_not(menv, x_dec_bound)
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, lhs, rhs))
    # Property: F G dec_bound — eventually the bound stops being extended.
    ltl = enc.make_F(enc.make_G(dec_bound))
    return TermMap(curr2next), init, trans, ltl
| 37.784722 | 82 | 0.64547 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 255 | 0.046866 |
38a47549892bc58fb1a7bf130c9891fee8294b1b | 298 | py | Python | Python Examples/Example-FunctionWithNestedIFElse.py | crissyg/SCH-PRACTICE | 9f9042d26361a58c90e5d888e4bda30ada906bae | [
"MIT"
] | null | null | null | Python Examples/Example-FunctionWithNestedIFElse.py | crissyg/SCH-PRACTICE | 9f9042d26361a58c90e5d888e4bda30ada906bae | [
"MIT"
] | null | null | null | Python Examples/Example-FunctionWithNestedIFElse.py | crissyg/SCH-PRACTICE | 9f9042d26361a58c90e5d888e4bda30ada906bae | [
"MIT"
] | null | null | null | # Function with Nested IF Else
def printColor(value):
value = value.upper()
if (value == 'Y'):
print "yellow"
elif (value == 'B'):
print "blue"
elif (value == 'R'):
print "red"
else:
print "unknown"
printColor('r') # call function
| 21.285714 | 33 | 0.510067 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.291946 |
38a88169472e58b7b9fe7f0fe6d8780c412d1def | 299 | py | Python | estimators/__init__.py | eliberis/network-prop-estimator | 8a110652e11fa5484b715baf442f0efc4d281a15 | [
"MIT"
] | null | null | null | estimators/__init__.py | eliberis/network-prop-estimator | 8a110652e11fa5484b715baf442f0efc4d281a15 | [
"MIT"
] | null | null | null | estimators/__init__.py | eliberis/network-prop-estimator | 8a110652e11fa5484b715baf442f0efc4d281a15 | [
"MIT"
] | null | null | null | from estimators.weighted_edge_estimator import *
from estimators.weighted_node_estimator import *
from estimators.weighted_triangle_estimator import *
from estimators.formula_edge_estimator import *
from estimators.formula_node_estimator import *
from estimators.formula_triangle_estimator import *
| 42.714286 | 52 | 0.879599 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
38a9891c1ed5cf5b3c0c9127391a91763d0ff3d8 | 673 | py | Python | p3d/__init__.py | Derfies/p3d | 93ff52819d778643afe1dc1ccf296e99fc52c59d | [
"MIT"
] | null | null | null | p3d/__init__.py | Derfies/p3d | 93ff52819d778643afe1dc1ccf296e99fc52c59d | [
"MIT"
] | null | null | null | p3d/__init__.py | Derfies/p3d | 93ff52819d778643afe1dc1ccf296e99fc52c59d | [
"MIT"
] | null | null | null | from pandac.PandaModules import Vec3
P3D_VERSION = '0.1'
__version__ = P3D_VERSION
from constants import *
from functions import *
import commonUtils
from object import Object
from singleTask import SingleTask
from nodePathObject import NodePathObject
from pandaObject import *
from pandaBehaviour import PandaBehaviour
from pandaManager import PandaManager
from marquee import Marquee
from camera import *
from editorCamera import EditorCamera
from frameRate import FrameRate
from displayShading import DisplayShading
from mouse import *
from mousePicker import MousePicker
import geometry
try:
import wxPanda as wx
except:
print 'Failed to find wx module' | 21.03125 | 41 | 0.827637 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.046062 |
38abddcaea188dde02e036dee74516a15526f072 | 678 | py | Python | project/user/models/user.py | fv316/flask-template-project | 026459b299c7aa4d82c2b59b98e3c929b4786a78 | [
"MIT"
] | 9 | 2017-02-08T21:42:15.000Z | 2021-12-15T05:18:18.000Z | project/user/models/user.py | fv316/flask-template-project | 026459b299c7aa4d82c2b59b98e3c929b4786a78 | [
"MIT"
] | 10 | 2016-07-25T11:00:08.000Z | 2019-09-25T14:56:40.000Z | project/user/models/user.py | fv316/flask-template-project | 026459b299c7aa4d82c2b59b98e3c929b4786a78 | [
"MIT"
] | 7 | 2016-11-01T20:11:03.000Z | 2020-02-04T14:25:49.000Z | # !/usr/bin/python
# -*- coding: utf-8 -*-
from project.database import Base
from sqlalchemy.orm import relationship
from sqlalchemy import Column, Integer, String
from flask_login import UserMixin
from project.user.models.rbac_user_mixin import UserMixin as RBACUserMixin
class User(RBACUserMixin, UserMixin, Base):
    """Application account: SQLAlchemy model with Flask-Login and RBAC mixins."""
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    username = Column(String(64), unique=True)
    email = Column(String(128), unique=True)
    # NOTE(review): presumably stores a password hash, not plaintext —
    # confirm where this field is populated.
    password = Column(String(128))
    api_key = Column(String(128))
    # Many-to-many to Role through the 'user_roles' association table.
    roles = relationship('Role', secondary='user_roles')
    def __repr__(self):
        return '<User %r>' % self.username
| 29.478261 | 74 | 0.721239 | 402 | 0.59292 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.113569 |
38ad1f303439ed72643aaa239b597b2291139e70 | 1,477 | py | Python | test/convert_to_lemon.py | cltl/FrameNetNLTK | 96883447ff006a90becd24bcfdd96ac82d8ec677 | [
"Apache-2.0"
] | 1 | 2020-07-21T08:15:13.000Z | 2020-07-21T08:15:13.000Z | test/convert_to_lemon.py | cltl/FrameNetNLTK | 96883447ff006a90becd24bcfdd96ac82d8ec677 | [
"Apache-2.0"
] | 2 | 2020-07-14T09:15:34.000Z | 2021-03-31T20:00:29.000Z | test/convert_to_lemon.py | cltl/FrameNetNLTK | 96883447ff006a90becd24bcfdd96ac82d8ec677 | [
"Apache-2.0"
] | null | null | null | import sys
import os
sys.path.insert(0, '../..')
from nltk.corpus import framenet as fn
import FrameNetNLTK
from FrameNetNLTK import load, convert_to_lemon
# Load the test FrameNet lexicon from disk.
my_fn = load(folder='test_lexicon',
             verbose=2)
# Export it as Lemon/OntoLex Turtle ("dfn" namespace, language code 'nld', v0.1).
output_path = os.path.join(os.getcwd(),
                           'stats',
                           'dfn_0.1.ttl')
convert_to_lemon(lemon=FrameNetNLTK.lemon,
                 premon_nt_path=FrameNetNLTK.premon_nt,
                 ontolex=FrameNetNLTK.ontolex,
                 fn_pos_to_lexinfo=FrameNetNLTK.fn_pos_to_lexinfo,
                 your_fn=my_fn,
                 namespace='http://rdf.cltl.nl/dfn/',
                 namespace_prefix='dfn',
                 language='nld',
                 major_version=0,
                 minor_version=1,
                 output_path=output_path,
                 verbose=2)
# Same conversion for NLTK's bundled English FrameNet ("efn", 'eng', v1.7).
output_path = os.path.join(os.getcwd(),
                           'stats',
                           'efn_1.7.ttl')
convert_to_lemon(lemon=FrameNetNLTK.lemon,
                 premon_nt_path=FrameNetNLTK.premon_nt,
                 ontolex=FrameNetNLTK.ontolex,
                 fn_pos_to_lexinfo=FrameNetNLTK.fn_pos_to_lexinfo,
                 your_fn=fn,
                 namespace='http://rdf.cltl.nl/efn/',
                 namespace_prefix='efn',
                 language='eng',
                 major_version=1,
                 minor_version=7,
                 output_path=output_path,
                 verbose=5)
38adf5ebd6f269f17c1fa14e7dbf39222e45d753 | 1,362 | py | Python | python/dataProcessing/generatePlots.py | Maplenormandy/list-62x | c1731d0610fdf9e58cb2792d706e8904c549fbd6 | [
"MIT"
] | 1 | 2020-11-07T12:40:59.000Z | 2020-11-07T12:40:59.000Z | python/dataProcessing/generatePlots.py | Maplenormandy/list-62x | c1731d0610fdf9e58cb2792d706e8904c549fbd6 | [
"MIT"
] | null | null | null | python/dataProcessing/generatePlots.py | Maplenormandy/list-62x | c1731d0610fdf9e58cb2792d706e8904c549fbd6 | [
"MIT"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from statsmodels.stats.weightstats import ttost_paired
# NOTE: Python 2 print statements below; this script will not run under Python 3.
# NOTE(review): the file handle passed to read_csv is never closed;
# pd.read_csv('combined_data.csv') would avoid the leak.
data = pd.read_csv(open('combined_data.csv'))
# Normalize the two tracked-feature columns into Baseline/Experiment,
# depending on which condition (column 'Baseline') each row was recorded under.
for t in data.index:
    if int(data.loc[t, 'Baseline']) == 0:
        data.loc[t, 'STF Baseline'] = data.loc[t, 'Succesfully Tracked Features 0']
        data.loc[t, 'STF Experiment'] = data.loc[t, 'Succesfully Tracked Features 1']
    else:
        data.loc[t, 'STF Baseline'] = data.loc[t, 'Succesfully Tracked Features 1']
        data.loc[t, 'STF Experiment'] = data.loc[t, 'Succesfully Tracked Features 0']
# Paired two-one-sided equivalence test (TOST) on the bounds [0, 10000].
pvalue, stats1, stats2 = ttost_paired(data['STF Experiment'], data['STF Baseline'], 0, 10000)
print pvalue
print stats1
print stats2
# Scatter of per-trial values for both conditions.
plt.scatter(data.index, data['STF Baseline'], label='baseline')
plt.scatter(data.index, data['STF Experiment'], color="green", label='experiment')
plt.legend(loc='upper right')
plt.draw()
# Overlaid histograms on a shared binning from 0 to the overall maximum.
dataMax = max(data['STF Baseline'].max(), data['STF Experiment'].max())
bins = np.linspace(0, dataMax)
plt.figure()
plt.hist(data['STF Baseline'], alpha = 0.5, bins=bins, label="baseline")
plt.hist(data['STF Experiment'], alpha = 0.5, bins=bins, label="experiment")
plt.legend(loc='upper right')
plt.draw()
# Histogram of the paired differences (experiment - baseline).
plt.figure()
plt.hist(data['STF Experiment'] - data['STF Baseline'], bins=30, color="red")
plt.xlabel('Experiment - Baseline')
plt.show()
| 31.674419 | 93 | 0.696035 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 472 | 0.346549 |
38af66085e0a385bb524dc4be264dbe2d898daba | 1,829 | py | Python | zunzun/app/app.py | aprezcuba24/zunzun | cc294d9dfb84695be0ed1425cf946a0f4ea644a9 | [
"MIT"
] | null | null | null | zunzun/app/app.py | aprezcuba24/zunzun | cc294d9dfb84695be0ed1425cf946a0f4ea644a9 | [
"MIT"
] | null | null | null | zunzun/app/app.py | aprezcuba24/zunzun | cc294d9dfb84695be0ed1425cf946a0f4ea644a9 | [
"MIT"
] | null | null | null | import importlib
from zunzun import CommandRegister
from injector import inject, singleton
from click.core import Group
from zunzun import ListenerConnector
from zunzun import inspect
from pathlib import Path
@singleton
class App:
    """Base application: wires CLI commands and event listeners via injection.

    Subclasses override `name` (the CLI group name) and `listeners_config`
    (argument tuples forwarded to ListenerConnector.connect).
    """
    # CLI group name; empty by default, set by concrete apps.
    name = ""
    # Each entry is an argument tuple for listener_connector.connect().
    listeners_config: list = []
    @inject
    def __init__(
        self, command_register: CommandRegister, listener_connector: ListenerConnector
    ):
        self.command_register = command_register
        self.listener_connector = listener_connector
        self._register_listeners()
    def register_services(self, injector):
        """Hook for subclasses to bind services on the injector (no-op here)."""
        pass
    def get_commands(self):
        """Return a click Group populated from this app's commands module."""
        return self.command_register.add_commands(
            Group(self.name), self.get_or_create_module("commands", "core.commands")
        )
    def _register_listeners(self):
        # Connect every configured listener at construction time.
        for args in self.listeners_config:
            self.listener_connector.connect(*args)
    def get_config(self, name, default):
        """Return the config value for `name`; the base class always returns `default`."""
        return default
    def get_or_create_module(self, name, config_name=None):
        """Import a sibling sub-package of this app, creating it on disk if absent.

        If `config_name` is given, the package name may be overridden via
        get_config(). Ensures the folder and an empty __init__.py exist next
        to this class's source file, then imports the package relative to
        this app's module.
        """
        if config_name:
            name = self.get_config(config_name, name)
        file = inspect.getfile(self.__class__)
        parent = Path(file).parent
        folder = Path(f"{parent}/{name}")
        if not folder.is_dir():
            folder.mkdir()
        init_file = Path(f"{folder}/__init__.py")
        if not init_file.is_file():
            init_file.touch()
        return importlib.import_module(f"..{name}", self.__module__)
    @property
    def path(self):
        """Dotted package path of this app (its module path minus the last segment)."""
        dotted_path = str(self.__module__)
        dir_path, _ = dotted_path.rsplit(".", 1)
        return dir_path
    def get_module(self, module_name):
        """Import and return `module_name` from this app's package."""
        return importlib.import_module(self.get_module_name(module_name))
    def get_module_name(self, module_name):
        """Return the fully qualified dotted name for `module_name`."""
        return f"{self.path}.{module_name}"
| 29.5 | 86 | 0.668671 | 1,606 | 0.878075 | 0 | 0 | 1,617 | 0.88409 | 0 | 0 | 110 | 0.060142 |
38aff846c9fd73e215fb8964d1e02ff8c3aed61f | 775 | py | Python | scitbx/examples/principal_axes_of_inertia.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | scitbx/examples/principal_axes_of_inertia.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | scitbx/examples/principal_axes_of_inertia.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import division
from scitbx.math import principal_axes_of_inertia
from scitbx.array_family import flex
# NOTE: Python 2 print statements below; will not run under Python 3.
def run():
  # Hard-coded sample coordinates (13 points); units/origin not stated here.
  points = flex.vec3_double([
    ( 8.292, 1.817, 6.147),
    ( 9.159, 2.144, 7.299),
    (10.603, 2.331, 6.885),
    (11.041, 1.811, 5.855),
    ( 9.061, 1.065, 8.369),
    ( 7.665, 0.929, 8.902),
    ( 6.771, 0.021, 8.327),
    ( 7.210, 1.756, 9.920),
    ( 5.480, -0.094, 8.796),
    ( 5.904, 1.649, 10.416),
    ( 5.047, 0.729, 9.831),
    ( 3.766, 0.589, 10.291),
    (11.358, 2.999, 7.612)])
  # Compute and report the principal axes of inertia of the point set.
  pai = principal_axes_of_inertia(points=points)
  print pai.center_of_mass()
  print pai.inertia_tensor()
  es = pai.eigensystem()
  print list(es.values())
  print list(es.vectors())
if (__name__ == "__main__"):
  run()
| 26.724138 | 49 | 0.579355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.012903 |
38b0b6a64069394c466d60b288074902a86d98b8 | 8,071 | py | Python | ape.py | PhilRW/appdaemon-apps | 96d21f73d0cbb49a29799c1f7bf4c42d98b4be26 | [
"MIT"
] | 5 | 2019-01-30T17:23:30.000Z | 2021-12-27T20:46:13.000Z | ape.py | PhilRW/appdaemon-apps | 96d21f73d0cbb49a29799c1f7bf4c42d98b4be26 | [
"MIT"
] | 1 | 2019-10-02T20:52:45.000Z | 2019-10-02T20:52:45.000Z | ape.py | PhilRW/appdaemon-apps | 96d21f73d0cbb49a29799c1f7bf4c42d98b4be26 | [
"MIT"
] | null | null | null | import bisect
import calendar
import datetime
import pickle
import appdaemon.plugins.hass.hassapi as hass
class Event:
    """One observed state change: `entity` switched to state `new` at `dt`."""

    def __init__(self, dt, entity, new):
        self.dt = dt
        self.entity = entity
        self.new = new

    def __str__(self):
        return f"event({self.dt}, {self.entity}, {self.new})"

    def __repr__(self):
        return f"<event({self.dt}, {self.entity}, {self.new})>"
class Monkey(hass.Hass):
    """AppDaemon app that records entity activity while the home is occupied
    and replays it (per weekday) while unoccupied."""
    # Log level used for the verbose tracing calls throughout this app.
    DEBUG_LEVEL="DEBUG"
    def initialize(self):
        """AppDaemon entry point: load stored events and wire up callbacks.

        Requires `occupancy_state` and `entities` in self.args. Optional
        args: `events_db` (path of the event store, default
        /share/Monkey_events), `forget_event`, `exit_delay` (seconds).
        """
        self.log("initialize()", level=Monkey.DEBUG_LEVEL)
        self.log("args: {0}".format(self.args), level="INFO")
        if "occupancy_state" in self.args \
                and "entities" in self.args:
            self.events_db = "/share/Monkey_events"
            if "events_db" in self.args:
                self.events_db = self.args["events_db"]
            # self.load / self.len_d are defined elsewhere in this class —
            # presumably pickle-load and a dict-of-lists size helper; confirm.
            self.events = self.load(self.events_db)
            self.log("post event-load", level=Monkey.DEBUG_LEVEL)
            if self.events is None:
                self.log("No events pickle file found, starting from scratch.", level="WARNING")
                self.forget(None, None, None)
            self.log("{0} events loaded".format(self.len_d(self.events)), level="INFO")
            self.observations = []
            self.do_handles = []
            self.watching = None
            # React to occupancy changes and to every watched entity.
            self.listen_state(self.decide, self.args["occupancy_state"])
            if "forget_event" in self.args:
                self.listen_event(self.forget, self.args["forget_event"])
            for e in self.args["entities"]:
                self.listen_state(self.monkey_see, e)
            self.exit_delay = 60
            if "exit_delay" in self.args:
                self.exit_delay = int(self.args["exit_delay"])
            # Prime the state machine with the current occupancy value.
            os = self.get_state(self.args["occupancy_state"])
            self.decide(None, None, None, os, None)
        else:
            self.log("Missing required parameter(s). Cannot continue.", level="ERROR")
    def decide(self, entity, attribute, old, new, kwargs):
        """Occupancy callback: 'on' -> cancel replay timers and observe;
        'off' -> schedule start_doing after exit_delay seconds."""
        self.log("decide({0}, {1}, {2}, {3}, {4})".format(entity, attribute, old, new, kwargs), level=Monkey.DEBUG_LEVEL)
        if new == 'on':
            # cancel all scheduled "do" callbacks
            for h in self.do_handles:
                self.cancel_timer(h)
            self.log("cancelled {0} monkey_do handle(s)".format(len(self.do_handles)), level="INFO")
            self.do_handles = []
            # start observing
            self.watching = True
        elif new == 'off':
            # delay to start doing things until things have settled
            h = self.run_in(self.start_doing, self.exit_delay)
            self.do_handles.append(h)
        else:
            # Unexpected occupancy value (e.g. 'unavailable'): do nothing.
            self.log("{0} is {1}, nothing to see or do".format(self.args["occupancy_state"], new))
    def start_doing(self, kwargs):
        """Switch from observing to replaying: runs exit_delay seconds after
        occupancy turned off (unless cancelled by a new 'on')."""
        self.log("start_doing({0})".format(kwargs), level=Monkey.DEBUG_LEVEL)
        # stop observing
        self.watching = False
        # remember anything we may have seen
        self.remember()
        # schedule callbacks to replay what happened
        self.schedule_today(None)
        # Also re-run the scheduler every midnight while unoccupied.
        when = datetime.time(0, 0)
        h = self.run_daily(self.schedule_today, when)
        self.do_handles.append(h)
    def monkey_see(self, entity, attribute, old, new, kwargs):
        """Entity-state callback: while watching, buffer each real state
        change as an Event in self.observations."""
        self.log("monkey_see({0}, {1}, {2}, {3}, {4})".format(entity, attribute, old, new, kwargs), level=Monkey.DEBUG_LEVEL)
        if self.watching and new != old:
            self.log("appending event to observations...", level="INFO")
            e = Event(datetime.datetime.now(), entity, new)
            self.observations.append(e)
            self.log("...{0} observation(s)...".format(len(self.observations)), level="INFO")
            self.log("...{0}".format(self.observations), level=Monkey.DEBUG_LEVEL)
def monkey_do(self, kwargs):
self.log("do({0})".format(kwargs), level=Monkey.DEBUG_LEVEL)
evnt = kwargs["evnt"]
self.log("replaying {0}".format(evnt), level="INFO")
if evnt.new == "on":
self.turn_on(evnt.entity)
elif evnt.new == "off":
self.turn_off(evnt.entity)
else:
self.log("\"new\" was neither \"on\" nor \"off\": {0}".format(evnt.new), level="WARNING")
def remember(self):
self.log("remember()", level=Monkey.DEBUG_LEVEL)
self.log("{0} observations to remember...".format(len(self.observations)), level="INFO")
self.log("...{0}".format(self.observations), level=Monkey.DEBUG_LEVEL)
days = {}
for i in range(0, 7):
days[i] = []
for e in self.observations:
days[e.dt.weekday()].append(e)
self.log("observations as days: {0}".format(days), level=Monkey.DEBUG_LEVEL)
for i in range(0, 7):
try:
self.log("Remembering events from {0}...".format(calendar.day_name[i]), level="INFO")
left = bisect.bisect_left([e.dt.time() for e in self.events[i]], days[i][0].dt.time())
right = bisect.bisect_right([e.dt.time() for e in self.events[i]], days[i][-1].dt.time())
self.events[i] = self.events[i][:left] + days[i] + self.events[i][right:]
self.log("...{0} events for {1} now...".format(len(self.events), calendar.day_name[i]), level="INFO")
self.log("...{0}".format(self.events[i]), level=Monkey.DEBUG_LEVEL)
except IndexError:
self.log("...{0} has no events yet. Skipping.".format(calendar.day_name[i]), level="INFO")
self.save()
self.observations = []
    def schedule_today(self, kwargs):
        """Timer callback: schedule monkey_do() for each stored event of today's weekday.

        Events whose time of day is not at least ~5 seconds in the future are
        skipped. Every created handle is appended to do_handles so decide()
        can cancel pending replays.
        """
        self.log("schedule_today({0})".format(kwargs), level=Monkey.DEBUG_LEVEL)
        # Midnight of the current day, used to rebase stored times of day.
        today = datetime.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)
        scheduled_events = 0
        skipped_events = 0
        for e in self.events[today.weekday()]:
            # TODO: add randomness
            time = e.dt.time()
            dt = datetime.datetime.combine(today.date(), time)
            if dt > datetime.datetime.now() + datetime.timedelta(seconds=5):
                h = self.run_at(self.monkey_do, dt, evnt=e)
                self.do_handles.append(h)
                self.log("scheduled event for {0}: {1}".format(dt, e), level="INFO")
                scheduled_events += 1
            else:
                skipped_events += 1
                self.log("event occurs in past, skipping ({0})...".format(time), level="INFO")
        self.log("{0} events for today, {1} scheduled, {2} skipped".format(len(self.events[today.weekday()]), scheduled_events, skipped_events), level="INFO")
def forget(self, event_name, data, kwargs):
self.log("forget({0}, {1}, {2})".format(event_name, data, kwargs), level=Monkey.DEBUG_LEVEL)
self.events = {}
for i in range(0, 7):
self.events[i] = []
self.save()
    def save(self):
        """Pickle the per-weekday event history to '<events_db>.pkl'."""
        self.log(msg="save()", level=Monkey.DEBUG_LEVEL)
        self.log("saving {0} observations".format(self.len_d(self.events)), level="INFO")
        with open(self.events_db + '.pkl', 'wb') as f:
            pickle.dump(self.events, f, pickle.HIGHEST_PROTOCOL)
    def load(self, name):
        """Unpickle and return event history from '<name>.pkl'.

        Returns None when the file is missing or empty/corrupt (EOFError).
        NOTE(review): pickle.load can execute arbitrary code on an untrusted
        file; acceptable here since the file is written by save() itself.
        """
        self.log(msg="load({0})".format(name), level=Monkey.DEBUG_LEVEL)
        self.log("loading observations", level="INFO")
        try:
            with open(name + '.pkl', 'rb') as f:
                self.log("loaded", level=Monkey.DEBUG_LEVEL)
                return pickle.load(f)
        except FileNotFoundError as fnfe:
            self.log("file not found: {0}".format(fnfe), level="WARN")
            return None
        except EOFError as eofe:
            self.log("error opening file: {0}".format(eofe), level="ERROR")
            return None
@staticmethod
def len_d(d):
return sum([len(d[k]) for k in d.keys()])
    def terminate(self):
        """Teardown hook: persist any not-yet-saved observations via remember()."""
        self.log("terminate()", level=Monkey.DEBUG_LEVEL)
        self.remember()
| 37.365741 | 158 | 0.575022 | 7,958 | 0.985999 | 0 | 0 | 81 | 0.010036 | 0 | 0 | 1,580 | 0.195763 |
38b116711be814607ed2866ab771fa7d05349727 | 807 | py | Python | main/migrations/0013_game.py | AyushHazard/Samskritam | c5db8e712afe24737cacc6e6f3f27e3fcbe83e26 | [
"MIT"
] | null | null | null | main/migrations/0013_game.py | AyushHazard/Samskritam | c5db8e712afe24737cacc6e6f3f27e3fcbe83e26 | [
"MIT"
] | null | null | null | main/migrations/0013_game.py | AyushHazard/Samskritam | c5db8e712afe24737cacc6e6f3f27e3fcbe83e26 | [
"MIT"
] | 3 | 2021-01-05T18:40:57.000Z | 2021-05-14T07:56:20.000Z | # Generated by Django 3.1.2 on 2021-01-01 05:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.2): create the main.Game model with
    name/type, description, and optional image url/name fields."""

    dependencies = [
        ('main', '0012_attempted_contests'),
    ]
    operations = [
        migrations.CreateModel(
            name='Game',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('game_type', models.CharField(max_length=200)),
                ('description', models.TextField(default='-')),
                ('imageurl', models.TextField(blank=True, default='-', null=True)),
                ('imagename', models.TextField(blank=True, default='-', null=True)),
            ],
        ),
    ]
38b351b78225843bd3597a610a0f89e29687ff5d | 2,224 | py | Python | genome_designer/debug/2014_08_05_de_novo_on_dep_data_with_intervals.py | churchlab/millstone | ddb5d003a5b8a7675e5a56bafd5c432d9642b473 | [
"MIT"
] | 45 | 2015-09-30T14:55:33.000Z | 2021-06-28T02:33:30.000Z | genome_designer/debug/2014_08_05_de_novo_on_dep_data_with_intervals.py | churchlab/millstone | ddb5d003a5b8a7675e5a56bafd5c432d9642b473 | [
"MIT"
] | 261 | 2015-06-03T20:41:56.000Z | 2022-03-07T08:46:10.000Z | genome_designer/debug/2014_08_05_de_novo_on_dep_data_with_intervals.py | churchlab/millstone | ddb5d003a5b8a7675e5a56bafd5c432d9642b473 | [
"MIT"
] | 22 | 2015-06-04T20:43:10.000Z | 2022-02-27T08:27:34.000Z | """
Re-running de novo assembly, this time including reads that map to mobile elements.
"""
import os
import sys
# Setup Django environment.
sys.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__)), '../'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from Bio import SeqIO
from experimental.de_novo_assembly import run_velvet
from main.models import *
def identify_intervals(ag):
    """Return (start, end) intervals covering mobile elements plus the lon and
    clpX genes in record CP006698 of the alignment group's reference genbank.

    Each interval is padded by 150 bp on both sides, clamped to the genome
    bounds. Asserts exactly 48 intervals are found (dataset-specific sanity
    check).
    """
    # First identify intervals that map to mobile elements.
    genbank_filepath = get_dataset_with_type(ag.reference_genome,
            Dataset.TYPE.REFERENCE_GENOME_GENBANK).get_absolute_location()
    # Extract the proper genome record.
    genome_record = None
    with open(genbank_filepath) as input_fh:
        genome_record_list = SeqIO.parse(input_fh, 'genbank')
        for rec in genome_record_list:
            if rec.name == 'CP006698':
                genome_record = rec
    assert genome_record
    # Pick out the intervals we want:
    #     * mobile elements
    #     * lon gene
    intervals = []
    found_lon = False
    for f in genome_record.features:
        if f.type == 'mobile_element':
            intervals.append((f.location.start, f.location.end))
        if (f.type == 'gene' and 'gene' in f.qualifiers and
                f.qualifiers['gene'][0] in ['lon', 'clpX']):
            found_lon = True
            intervals.append((f.location.start, f.location.end))
    assert found_lon
    assert 48 == len(intervals)
    # Add buffer to each interval in case reads start before or after.
    buffer_size = 150
    def _add_buffer(i):
        # Clamp so buffered intervals stay within [0, genome length].
        return (
            max(i[0] - buffer_size, 0),
            min(i[1] + buffer_size, len(genome_record))
        )
    intervals = [_add_buffer(i) for i in intervals]
    return intervals
def main():
    """Re-run velvet de novo assembly for every sample alignment in the target
    AlignmentGroup, force-including reads mapped to the mobile-element /
    lon / clpX intervals."""
    ag = AlignmentGroup.objects.get(uid='edc74a3d')
    intervals = identify_intervals(ag)
    # Hoisted out of the loop: count() issues a DB query on every call.
    total = ag.experimentsampletoalignment_set.count()
    for idx, sa in enumerate(ag.experimentsampletoalignment_set.all()):
        # Single-argument print() produces identical output under Python 2
        # (where the original bare "print a, b" statement lived) and Python 3.
        print('{0} of {1}'.format(idx + 1, total))
        run_velvet(sa, force_include_reads_in_intervals=intervals,
                output_dir_name='velvet_mobile_lon_clpX', force_rerun=True)


if __name__ == '__main__':
    main()
| 30.054054 | 83 | 0.659173 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 495 | 0.222572 |
38b43f59bf0131f94f4000fe15af73705057fab7 | 288 | py | Python | P942.py | Muntaha-Islam0019/Leetcode-Solutions | 0bc56ce43a6d8ad10461b69078166a2a5b913e7f | [
"MIT"
] | null | null | null | P942.py | Muntaha-Islam0019/Leetcode-Solutions | 0bc56ce43a6d8ad10461b69078166a2a5b913e7f | [
"MIT"
] | null | null | null | P942.py | Muntaha-Islam0019/Leetcode-Solutions | 0bc56ce43a6d8ad10461b69078166a2a5b913e7f | [
"MIT"
] | null | null | null | class Solution:
def diStringMatch(self, S):
low,high=0,len(S)
ans=[]
for i in S:
if i=="I":
ans.append(low)
low+=1
else:
ans.append(high)
high-=1
return ans +[low]
| 22.153846 | 32 | 0.381944 | 287 | 0.996528 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.010417 |
38b594515a9bd74963aec29f7d6581b2994b7f2f | 155 | py | Python | controle/admin.py | jeremyrodrigues/auto-ambient-music | a8f622334f921741e0011ef305ac8e991f361d35 | [
"MIT"
] | null | null | null | controle/admin.py | jeremyrodrigues/auto-ambient-music | a8f622334f921741e0011ef305ac8e991f361d35 | [
"MIT"
] | null | null | null | controle/admin.py | jeremyrodrigues/auto-ambient-music | a8f622334f921741e0011ef305ac8e991f361d35 | [
"MIT"
] | null | null | null | from django.contrib import admin
from controle.models import Time, Music
# Register your models here.
admin.site.register(Time)
admin.site.register(Music) | 25.833333 | 39 | 0.812903 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.180645 |
38b68afd1f09515025017ea53a4f82fcb67e1ec1 | 3,802 | py | Python | src/fvm/scripts/Output.py | drm42/fvm-drm | c9b940e593034f1aa3020d63ff1e09ebef9c182a | [
"MIT"
] | null | null | null | src/fvm/scripts/Output.py | drm42/fvm-drm | c9b940e593034f1aa3020d63ff1e09ebef9c182a | [
"MIT"
] | null | null | null | src/fvm/scripts/Output.py | drm42/fvm-drm | c9b940e593034f1aa3020d63ff1e09ebef9c182a | [
"MIT"
] | null | null | null | import os
import pdb
import fvm.models_atyped_double as models
import fvm.exporters_atyped_double as exporters
class Output():
    """Collects FVM simulation results: appends per-timestep probe, force and
    voltage data to text files under outputDir, and writes VTK snapshots of
    the fluid and beam meshes.

    Args:
        outputDir: output directory (created if missing). NOTE(review): used
            as a plain string prefix, so it should end with a path separator.
        probeIndex: indices of deformation/velocity probe points.
        sim: simulation object exposing fields, meshes and scalar sums.
    """
    def __init__(self, outputDir, probeIndex, sim):
        if os.path.isdir(outputDir) == False:
            os.mkdir(outputDir)
        # Append mode so repeated runs extend the existing history files.
        self.defFile = open(outputDir + 'deformation.dat', 'a')
        self.forceFile = open(outputDir + 'force.dat', 'a')
        self.voltageFile = open(outputDir + 'voltage.dat', 'a')
        self.sim = sim
        self.probeIndex = probeIndex
        self.outputDir = outputDir
    def finish(self):
        """Close all three data files; call once at the end of the run."""
        self.defFile.close()
        self.forceFile.close()
        self.voltageFile.close()
    def writeData(self):
        """Append one time-step record to deformation, force and voltage files."""
        globalTime = self.sim.globalTime
        timeStep = self.sim.timeStep
        deformation = self.sim.deformation
        # NOTE(review): named maxDef but computed with min(); presumably the
        # largest downward (negative z) deflection — confirm.
        maxDef = deformation.min(axis = 0)
        self.defFile.write('%e\t%e\t%e\t' % (globalTime, timeStep,maxDef[2]))
        # z-deflection at each probe point.
        for i in range(0, len(self.probeIndex)):
            self.defFile.write('%e\t' % deformation[self.probeIndex[i]][2])
        self.defFile.write('\n')
        self.defFile.flush()
        vel = self.sim.velocity
        acc = self.sim.acceleration  # NOTE(review): currently unused below
        eForce = self.sim.elecForceSum
        fForce = self.sim.flowForceSum
        cForce = self.sim.contactForceSum
        self.forceFile.write('%e\t' % globalTime)
        # z-velocity at each probe point, then the three force sums.
        for i in range(0, len(self.probeIndex)):
            self.forceFile.write('%e\t' % vel[self.probeIndex[i]][2])
        self.forceFile.write('%e\t%e\t%e\n' % (eForce, fForce, cForce))
        self.forceFile.flush()
        voltage = self.sim.voltage
        self.voltageFile.write('%e\t%e\n' % (globalTime, voltage))
        self.voltageFile.flush()
    def saveFluidVTK(self, n):
        """Write fluid mesh snapshot n as 'fluid-<n>.vtk' with potential and
        field; velocity and pressure too when the flow model is enabled."""
        geomFields = self.sim.geomFields
        fluidMeshes = self.sim.fluidMeshes
        elecFields = self.sim.elecFields
        if self.sim.enableFlowModel:
            flowFields = self.sim.flowFields
        writer = exporters.VTKWriterA(geomFields,fluidMeshes,
                                      self.outputDir + "fluid-" + str(n) + ".vtk",
                                      "gen5_fluid",
                                      False,0)
        writer.init()
        writer.writeScalarField(elecFields.potential,"potential")
        writer.writeVectorField(elecFields.electric_field,"potentialgradient")
        if self.sim.enableFlowModel:
            writer.writeVectorField(flowFields.velocity,"velocity")
            writer.writeScalarField(flowFields.pressure, "pressure")
        writer.finish()
    def saveBeamVTK(self, n):
        """Write beam (plate) mesh snapshot n as 'beam-<n>.vtk' with
        deformation and force fields."""
        geomFields = self.sim.geomFields
        solidMeshes = self.sim.solidMeshes
        plateFields = self.sim.plateFields
        writer = exporters.VTKWriterA(geomFields,solidMeshes,
                                      self.outputDir + "beam-" + str(n) + ".vtk",
                                      "gen5_beam",
                                      False,0)
        writer.init()
        writer.writeVectorField(plateFields.deformation,"deformation")
        writer.writeScalarField(plateFields.force, "force")
        writer.finish()
    def saveBeamBoundaryVTK(self, n):
        """Write beam boundary mesh snapshot n as 'beamBoundary-<n>.vtk'
        (geometry only; field exports are commented out)."""
        geomFields = self.sim.geomFields
        solidBoundaryMeshes = self.sim.solidBoundaryMeshes
        writer3 = exporters.VTKWriterA(geomFields,solidBoundaryMeshes,
                                       self.outputDir + "beamBoundary-" + str(n) + ".vtk",
                                       "beam Boundary",
                                       False,0,True)
        writer3.init()
        #writer3.writeVectorField(flowFields.velocity,"velocity")
        #writer3.writeVectorField(flowFields.force,"flow_force")
        #writer3.writeVectorField(elecFields.force,"elec_force")
        writer3.finish()
| 38.795918 | 85 | 0.588638 | 3,685 | 0.969227 | 0 | 0 | 0 | 0 | 0 | 0 | 435 | 0.114413 |
38b765a30bc55c0417892d2304fc6cfeafcf844e | 2,663 | py | Python | forecasting/src/autogluon/forecasting/trainer/auto_trainer.py | sgdread/autogluon | fa95c72a07066dc5380fccf8bbce04b5c031fc68 | [
"Apache-2.0"
] | null | null | null | forecasting/src/autogluon/forecasting/trainer/auto_trainer.py | sgdread/autogluon | fa95c72a07066dc5380fccf8bbce04b5c031fc68 | [
"Apache-2.0"
] | null | null | null | forecasting/src/autogluon/forecasting/trainer/auto_trainer.py | sgdread/autogluon | fa95c72a07066dc5380fccf8bbce04b5c031fc68 | [
"Apache-2.0"
] | null | null | null | import logging
from typing import Dict, Union, Optional, Any
from ..models.presets import get_preset_models
from .abstract_trainer import AbstractForecastingTrainer, TimeSeriesDataFrame
logger = logging.getLogger(__name__)
class AutoForecastingTrainer(AbstractForecastingTrainer):
    """Trainer that constructs preset forecasting models and fits them."""

    def construct_model_templates(self, hyperparameters, **kwargs):
        """Return unfitted preset model objects for the given hyperparameters.

        ``kwargs`` may override ``path``, ``eval_metric`` and
        ``quantile_levels``; otherwise the trainer's own values are used.
        ``hyperparameter_tune`` (default False) is forwarded to the presets.
        """
        path = kwargs.pop("path", self.path)
        eval_metric = kwargs.pop("eval_metric", self.eval_metric)
        quantile_levels = kwargs.pop("quantile_levels", self.quantile_levels)
        hyperparameter_tune = kwargs.get("hyperparameter_tune", False)
        return get_preset_models(
            path=path,
            eval_metric=eval_metric,
            prediction_length=self.prediction_length,
            freq=self.freq,
            hyperparameters=hyperparameters,
            hyperparameter_tune=hyperparameter_tune,
            quantiles=quantile_levels,
            invalid_model_names=self._get_banned_model_names(),
        )

    # todo: implement cross-validation / holdout strategy
    # todo: including CVSplitter logic
    def fit(
        self,
        train_data: TimeSeriesDataFrame,
        hyperparameters: Union[str, Dict[Any, Dict]],
        val_data: Optional[TimeSeriesDataFrame] = None,
        hyperparameter_tune: bool = False,
        time_limit: float = None,
        infer_limit: float = None,  # todo: implement
    ):
        """
        Fit a set of forecasting models specified by the `hyperparameters`
        dictionary that maps model names to their specified hyperparameters.

        Parameters
        ----------
        train_data: TimeSeriesDataFrame
            Training data for fitting time series forecasting models.
        hyperparameters: str or Dict
            A dictionary mapping selected model names, model classes or model factory to hyperparameter
            settings. Model names should be present in `trainer.presets.DEFAULT_MODEL_NAMES`. Optionally,
            the user may provide one of "toy", "toy_hpo", "default", "default_hpo" to specify
            presets.
        val_data: TimeSeriesDataFrame
            Optional validation data set to report validation scores on.
        hyperparameter_tune
            Whether to perform hyperparameter tuning when learning individual models.
        time_limit
            Time limit for training
        infer_limit
            Time limit for inference
        """
        self._train_multi(
            train_data,
            val_data=val_data,
            hyperparameters=hyperparameters,
            hyperparameter_tune=hyperparameter_tune,
            time_limit=time_limit,
        )
| 39.746269 | 105 | 0.66917 | 2,435 | 0.914382 | 0 | 0 | 0 | 0 | 0 | 0 | 1,176 | 0.441607 |
38bb581927bfd74653d9371508053b5cbf15396a | 28 | py | Python | textbot/action.py | sparwow/textbot | cad7ad310da8af9c826e4c52f1a8f27ae90c1462 | [
"MIT"
] | null | null | null | textbot/action.py | sparwow/textbot | cad7ad310da8af9c826e4c52f1a8f27ae90c1462 | [
"MIT"
] | null | null | null | textbot/action.py | sparwow/textbot | cad7ad310da8af9c826e4c52f1a8f27ae90c1462 | [
"MIT"
] | null | null | null | class EmailAction:
pass
| 9.333333 | 18 | 0.714286 | 27 | 0.964286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
38bc9b8fe1468f99ebc3819879722d06bb84ef06 | 3,895 | py | Python | snippet/example/python/sqlalchemy-orm-model.py | yp2800/snippet | 054af596655007cbec81340bd166489e706fffe6 | [
"MIT"
] | 94 | 2016-09-22T09:13:19.000Z | 2022-03-30T07:35:35.000Z | snippet/example/python/sqlalchemy-orm-model.py | yp2800/snippet | 054af596655007cbec81340bd166489e706fffe6 | [
"MIT"
] | 1 | 2020-11-22T03:05:05.000Z | 2020-11-22T03:05:05.000Z | snippet/example/python/sqlalchemy-orm-model.py | yp2800/snippet | 054af596655007cbec81340bd166489e706fffe6 | [
"MIT"
] | 38 | 2017-06-11T22:03:04.000Z | 2022-03-10T07:46:39.000Z | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.orm import object_mapper
# Compatibility shim: prefer six.Iterator when six is installed; otherwise
# fall back to plain object on Python 3, or a minimal next()->__next__ bridge
# on Python 2.
try:
    import six
    Iterator = six.Iterator
except ImportError:
    import sys
    if sys.version_info[0] >= 3:
        Iterator = object
    else:
        class Iterator(object):
            # Python 2's iteration protocol calls next(); delegate to the
            # Python-3-style __next__ defined by subclasses.
            def next(self):
                return type(self).__next__(self)
class ModelBase(Iterator):
    """Base class for models.

    Provides dict-like access (item get/set, ``in``, ``get``, ``items``,
    ``keys``, ``update``, iteration over mapped columns) on top of a
    SQLAlchemy-mapped object.
    """
    __table_initialized__ = False

    def save(self, session):
        """Save this object."""
        # NOTE(boris-42): This part of code should be look like:
        #                       session.add(self)
        #                       session.flush()
        #                 But there is a bug in sqlalchemy and eventlet that
        #                 raises NoneType exception if there is no running
        #                 transaction and rollback is called. As long as
        #                 sqlalchemy has this bug we have to create transaction
        #                 explicitly.
        with session.begin(subtransactions=True):
            session.add(self)
            session.flush()

    def __repr__(self):
        """Return 'TableName(attr=value, ...)' built from items()."""
        attrs = ", ".join("%s=%s" % (k, v) for k, v in self.items())
        return "%s(%s)" % (self.__tablename__.title(), attrs)

    def __setitem__(self, key, value):
        setattr(self, key, value)

    def __getitem__(self, key):
        return getattr(self, key)

    def __contains__(self, key):
        # Don't use hasattr() because hasattr() catches any exception, not only
        # AttributeError. We want to passthrough SQLAlchemy exceptions
        # (ex: sqlalchemy.orm.exc.DetachedInstanceError).
        try:
            getattr(self, key)
        except AttributeError:
            return False
        else:
            return True

    def get(self, key, default=None):
        return getattr(self, key, default)

    def __iter__(self):
        columns = list(dict(object_mapper(self).columns).keys())
        return ModelIterator(self, iter(columns))

    def update(self, values):
        """Make the model object behave like a dict."""
        for k, v in values.items():
            setattr(self, k, v)

    def _as_dict(self):
        """Make the model object behave like a dict.

        Includes attributes from joins.
        """
        # Dict comprehensions replace the dict(genexp)/dict([list]) wrappers.
        local = {key: value for key, value in self}
        joined = {k: v for k, v in self.__dict__.items()
                  if not k.startswith('_')}
        local.update(joined)
        return local

    def items(self):
        """Make the model object behave like a dict."""
        return self._as_dict().items()

    def keys(self):
        """Make the model object behave like a dict."""
        return list(self._as_dict())
class ModelIterator(Iterator):
    """Iterates a model's mapped column names, yielding (name, value) pairs."""

    def __init__(self, model, columns):
        self.model = model
        self.i = columns

    def __iter__(self):
        return self

    # In Python 3, __next__() has replaced next(); the Iterator base class
    # bridges next() to this method on Python 2.
    def __next__(self):
        # StopIteration from the underlying column iterator ends iteration.
        name = next(self.i)
        return (name, getattr(self.model, name))
| 33.577586 | 84 | 0.610013 | 2,805 | 0.720154 | 0 | 0 | 0 | 0 | 0 | 0 | 1,835 | 0.471117 |
38bd28fcad4376d276bb778bd1eda275fd9ee34f | 4,417 | py | Python | boofuzz/boofuzz/connections/raw_l3_socket_connection.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | 2 | 2021-05-05T12:03:01.000Z | 2021-06-04T14:27:15.000Z | boofuzz/boofuzz/connections/raw_l3_socket_connection.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | null | null | null | boofuzz/boofuzz/connections/raw_l3_socket_connection.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | 2 | 2021-05-05T12:03:09.000Z | 2021-06-04T14:27:21.000Z | from __future__ import absolute_import
import errno
import socket
import sys
from future.utils import raise_
from boofuzz import exception
from boofuzz.connections import base_socket_connection
ETH_P_ALL = 0x0003 # Ethernet protocol: Every packet, see Linux if_ether.h docs for more details.
ETH_P_IP = 0x0800 # Ethernet protocol: Internet Protocol packet, see Linux <net/if_ether.h> docs for more details.
class RawL3SocketConnection(base_socket_connection.BaseSocketConnection):
    """BaseSocketConnection implementation for use with raw layer 3 sockets.

    Sends and receives layer 3 payloads over an AF_PACKET/SOCK_DGRAM socket;
    the kernel adds and strips the ethernet header.

    .. versionadded:: 0.2.0

    Args:
        interface (str): Interface to send and receive on.
        send_timeout (float): Seconds to wait for send before timing out. Default 5.0.
        recv_timeout (float): Seconds to wait for recv before timing out. Default 5.0.
        ethernet_proto (int): Ethernet protocol to bind to. Defaults to ETH_P_IP (0x0800).
        l2_dst (bytes): Layer2 destination address (e.g. MAC address). Default b'\xFF\xFF\xFF\xFF\xFF\xFF' (broadcast)
        packet_size (int): Maximum packet size (in bytes). Default 1500 if the underlying interface uses
            standard ethernet for layer 2. Otherwise, a different packet size may apply (e.g. Jumboframes,
            802.5 Token Ring, 802.11 wifi, ...) that must be specified.
    """

    def __init__(
        self,
        interface,
        send_timeout=5.0,
        recv_timeout=5.0,
        ethernet_proto=ETH_P_IP,
        l2_dst=b"\xff" * 6,
        packet_size=1500,
    ):
        super(RawL3SocketConnection, self).__init__(send_timeout, recv_timeout)

        self.interface = interface
        self.ethernet_proto = ethernet_proto
        self.l2_dst = l2_dst
        self.packet_size = packet_size

    def open(self):
        self._sock = socket.socket(socket.AF_PACKET, socket.SOCK_DGRAM, socket.htons(self.ethernet_proto))
        self._sock.bind((self.interface, self.ethernet_proto))

        super(RawL3SocketConnection, self).open()

    def recv(self, max_bytes):
        """
        Receives a packet from the raw socket. If max_bytes < packet_size, only the first max_bytes are returned and
        the rest of the packet is discarded. Otherwise, return the whole packet.

        Args:
            max_bytes (int): Maximum number of bytes to return. 0 to return the whole packet.

        Returns:
            Received data
        """
        data = b""

        try:
            data = self._sock.recv(self.packet_size)
            if 0 < max_bytes < self.packet_size:
                # BUG FIX: previously sliced with the nonexistent attribute
                # self._packet_size (AttributeError at runtime; __init__ sets
                # self.packet_size), and even with the correct name, slicing
                # to packet_size would never truncate. Truncate to max_bytes
                # as documented above.
                data = data[:max_bytes]
        except socket.timeout:
            data = b""
        except socket.error as e:
            if e.errno == errno.ECONNABORTED:
                raise_(
                    exception.BoofuzzTargetConnectionAborted(socket_errno=e.errno, socket_errmsg=e.strerror),
                    None,
                    sys.exc_info()[2],
                )
            elif e.errno in [errno.ECONNRESET, errno.ENETRESET, errno.ETIMEDOUT]:
                raise_(exception.BoofuzzTargetConnectionReset(), None, sys.exc_info()[2])
            elif e.errno == errno.EWOULDBLOCK:
                data = b""
            else:
                raise

        return data

    def send(self, data):
        """
        Send data to the target. Only valid after calling open!
        Data will be truncated to self.packet_size (Default: 1500 bytes).

        Args:
            data: Data to send.

        Returns:
            int: Number of bytes actually sent.
        """
        num_sent = 0

        data = data[: self.packet_size]

        try:
            num_sent = self._sock.sendto(data, (self.interface, self.ethernet_proto, 0, 0, self.l2_dst))
        except socket.error as e:
            if e.errno == errno.ECONNABORTED:
                raise_(
                    exception.BoofuzzTargetConnectionAborted(socket_errno=e.errno, socket_errmsg=e.strerror),
                    None,
                    sys.exc_info()[2],
                )
            elif e.errno in [errno.ECONNRESET, errno.ENETRESET, errno.ETIMEDOUT, errno.EPIPE]:
                raise_(exception.BoofuzzTargetConnectionReset(), None, sys.exc_info()[2])
            else:
                raise

        return num_sent

    @property
    def info(self):
        return "{0}, type 0x{1:04x}".format(self.interface, self.ethernet_proto)
| 34.779528 | 118 | 0.617161 | 4,001 | 0.905818 | 0 | 0 | 110 | 0.024904 | 0 | 0 | 1,699 | 0.38465 |
38bdb7392ff396c9dcaf7942ad720334e0d7365e | 4,143 | py | Python | django_google_dork/migrations/0001_initial.py | chgans/django-google-dork | c8735f2d2a9740844001cf4430263ea79827102f | [
"BSD-2-Clause"
] | 1 | 2019-07-21T02:32:03.000Z | 2019-07-21T02:32:03.000Z | django_google_dork/migrations/0001_initial.py | chgans/django-google-dork | c8735f2d2a9740844001cf4430263ea79827102f | [
"BSD-2-Clause"
] | null | null | null | django_google_dork/migrations/0001_initial.py | chgans/django-google-dork | c8735f2d2a9740844001cf4430263ea79827102f | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_google_dork.models
import model_utils.fields
import django.utils.timezone
class Migration(migrations.Migration):
    """Squashed initial migration (replaces 0001_initial, 0002_auto_20141116_1551
    and 0003_run_engine): creates Campaign, Dork, Result, Run and SearchEngine,
    plus uniqueness constraints and the enabled/engine fields."""

    replaces = [('django_google_dork', '0001_initial'), ('django_google_dork', '0002_auto_20141116_1551'), ('django_google_dork', '0003_run_engine')]

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Campaign',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
                ('name', django_google_dork.models.CampaignNameField(unique=True, max_length=32)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Dork',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
                ('query', django_google_dork.models.DorkQueryField(max_length=256)),
                ('campaign', models.ForeignKey(to='django_google_dork.Campaign')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Result',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=1024)),
                ('summary', models.TextField()),
                ('url', models.URLField(max_length=1024)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Run',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('dork', models.ForeignKey(to='django_google_dork.Dork')),
                ('result_set', models.ManyToManyField(to='django_google_dork.Result')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AlterUniqueTogether(
            name='result',
            unique_together=set([('title', 'summary', 'url')]),
        ),
        migrations.AlterUniqueTogether(
            name='dork',
            unique_together=set([('campaign', 'query')]),
        ),
        migrations.CreateModel(
            name='SearchEngine',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('hostname', models.CharField(unique=True, max_length=32)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='campaign',
            name='enabled',
            field=models.BooleanField(default=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='dork',
            name='enabled',
            field=models.BooleanField(default=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='run',
            name='engine',
            field=models.ForeignKey(default=None, to='django_google_dork.SearchEngine'),
            preserve_default=False,
        ),
    ]
| 39.457143 | 149 | 0.567946 | 3,946 | 0.95245 | 0 | 0 | 0 | 0 | 0 | 0 | 602 | 0.145305 |
38be80c430f81bba9147dbcca0e967396cdc2c5c | 5,314 | py | Python | model2.py | incredible-vision/show-and-tell | 0a10c2064c34dbc4a4097976870922f723ee4d63 | [
"MIT"
] | 8 | 2018-04-25T11:07:36.000Z | 2020-07-14T09:17:58.000Z | model2.py | incredible-vision/show-and-tell | 0a10c2064c34dbc4a4097976870922f723ee4d63 | [
"MIT"
] | null | null | null | model2.py | incredible-vision/show-and-tell | 0a10c2064c34dbc4a4097976870922f723ee4d63 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.models import vgg16
from torch.nn.utils.rnn import pack_padded_sequence
class ShowAttendTellModel(nn.Module):
    def __init__(self, hidden_size, context_size, vocab_size, embed_size, opt, feature_size=[196, 512]):
        # NOTE(review): mutable default argument; harmless as written because
        # feature_size is never mutated, but a tuple would be safer.
        super(ShowAttendTellModel, self).__init__()
        """ define encoder, use resnet50 for reproducing """
        # NOTE(review): the encoder built below is a truncated VGG16 feature
        # stack (features[:-3]), not ResNet-50 as the string above says.
        self.opt = opt
        self.encoder = vgg16(pretrained=True)
        self.encoder = nn.Sequential(*list(self.encoder.features)[:-3])
        self.finetune(allow=False)
        """ define weight parameters """
        # Attention parameters. NOTE(review): torch.FloatTensor(...) is
        # uninitialized memory; no explicit init is visible here — confirm
        # they are initialized elsewhere before training.
        self.image_att_w = nn.Parameter(torch.FloatTensor(feature_size[1], feature_size[1]))
        self.init_hidden = nn.Linear(feature_size[1], hidden_size, bias=True)
        self.init_memory = nn.Linear(feature_size[1], hidden_size, bias=True)
        self.weight_hh = nn.Linear(hidden_size, context_size)
        self.weight_att= nn.Parameter(torch.FloatTensor(feature_size[1], 1))
        """ define decoder, use lstm cell for reproducing """
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.lstmcell = nn.LSTMCell(hidden_size , hidden_size)
        """ define classifier """
        # Deep-output layers: project context and hidden state into embedding
        # space before the final vocabulary classifier.
        self.context2out= nn.Linear(context_size, embed_size)
        self.hidden2tout= nn.Linear(hidden_size, embed_size)
        self.dropout = nn.Dropout(p=0.5)
        self.classifier = nn.Linear(embed_size, vocab_size)
    def forward(self, images, captions, lengths):
        """Teacher-forced decoding pass over a length-sorted batch.

        Args:
            images: image batch fed to the CNN encoder.
            captions: padded caption index tensor (batch, max_len).
            lengths: caption lengths in descending order (required by
                pack_padded_sequence).
        Returns:
            Vocabulary logits concatenated over all valid time steps.
        """
        embeddings = self.embedding(captions)
        # Only batch_sizes (active batch size per time step) is used below;
        # the packed data itself is not consumed.
        packed, batch_sizes = pack_padded_sequence(embeddings, lengths, batch_first=True)
        """ put input data through cnn """
        features = self.encoder(images) # [batch, 512, 14, 14]
        features = features.view(features.size(0), features.size(1), -1).transpose(2, 1) # [batch, 196, 512]
        context_encode = torch.bmm(features, self.image_att_w.unsqueeze(0).expand(features.size(0), self.image_att_w.size(0), self.image_att_w.size(1))) # [batch, 196, 512]
        """ initialize hidden and memory unit"""
        hidden, c = self.init_lstm(features)
        alpha_list = []
        hiddens = []
        outputs = []
        # Step through time; the active prefix of the batch shrinks as
        # shorter captions run out (hence the [:batch_size] slices).
        for t, batch_size in enumerate(batch_sizes):
            embedding = embeddings[:batch_size, t, :]
            context, alpha = self.attention_layer(features[:batch_size], context_encode[:batch_size], hidden[:batch_size])
            rnn_input = torch.cat([embedding, context], dim=1)
            hidden, c = self.lstmcell(rnn_input, (hidden[:batch_size], c[:batch_size]))
            output = self.output_layer(context, hidden)
            alpha_list.append(alpha)
            hiddens.append(hidden)
            outputs.append(output)
        # alpha_list / hiddens are collected but not returned here.
        outputs = torch.cat(outputs, dim=0)
        return outputs
    def init_lstm(self, features):
        """Initialize the LSTM hidden and cell states from the mean image feature.

        features: [batch, 196, 512]; the mean over spatial positions is fed
        through the init_hidden / init_memory linear layers.
        """
        features_mean = features.mean(1).squeeze(1)
        h = self.init_hidden(features_mean)
        c = self.init_memory(features_mean)
        return h, c
    def attention_layer(self, features, context_encode, hidden):
        """Soft attention: score each spatial position against the hidden
        state, softmax into weights alpha, and pool the features.

        Returns:
            context: attention-pooled feature vector.
            alpha: attention weights over spatial positions.
        """
        h_att = F.tanh(context_encode + self.weight_hh(hidden).unsqueeze(1).expand_as(context_encode))
        out_att = torch.bmm(h_att, self.weight_att.unsqueeze(0).expand(h_att.size(0), self.weight_att.size(0), self.weight_att.size(1))).squeeze(2)
        alpha = F.softmax(out_att)
        # NOTE(review): pooling uses mean(1), which divides by the number of
        # positions; classic soft attention uses a weighted sum — confirm.
        context = (features * alpha.unsqueeze(2).expand_as(features)).mean(1).squeeze(1)
        return context, alpha
    def output_layer(self, context, hidden, prev=None):
        """Deep-output layer: project context and hidden state into embedding
        space, sum them, and classify over the vocabulary.

        prev is accepted but unused in the visible implementation.
        """
        context = self.context2out(context)
        hidden = self.hidden2tout(hidden)
        out = self.classifier(context + hidden)
        return out
def finetune(self, allow=False):
for param in self.encoder.parameters():
param.requires_grad = True if allow else False
    def sample(self, images, states):
        """Greedy decoding: sample up to 20 caption tokens per image.

        Args:
            images: image batch.
            states: (hidden, cell) initial LSTM state tuple.
        Returns:
            Squeezed tensor of sampled token ids, (batch_size, 20) before the
            squeeze.
        """
        # Start token embedding: assumes index 1 is the start symbol — TODO
        # confirm against the vocabulary. NOTE(review): .cuda() hard-codes GPU
        # execution.
        embeddings = self.embedding(Variable(torch.ones(images.size(0))).long().cuda())
        """Samples captions for given image features (Greedy search)."""
        sampled_ids = []
        features = self.encoder(images) # [batch, 512, 14, 14]
        features = features.view(features.size(0), features.size(1), -1).transpose(2, 1) # [batch, 196, 512]
        context_encode = torch.bmm(features, self.image_att_w.unsqueeze(0).expand(features.size(0), self.image_att_w.size(0), self.image_att_w.size(1))) # [batch, 196, 512]
        hidden , c = states
        for i in range(20): # maximum sampling length
            context, alpha = self.attention_layer(features, context_encode, hidden)
            # First step feeds the start embedding; later steps reuse the
            # rnn_input built at the end of the previous iteration.
            if i == 0:
                rnn_input = torch.cat([embeddings, context], dim=1)
            hidden, c = self.lstmcell(rnn_input, (hidden, c)) # (batch_size, 1, hidden_size)
            outputs = self.output_layer(context, hidden) # (batch_size, vocab_size)
            # Greedy choice: argmax over the vocabulary.
            predicted = outputs.max(1)[1]
            sampled_ids.append(predicted)
            embedding = self.embedding(predicted).squeeze(1)
            rnn_input = torch.cat([embedding, context], dim=1)
        sampled_ids = torch.cat(sampled_ids, 1) # (batch_size, 20)
        return sampled_ids.squeeze()
def sample_beam(self, images, state, beam_size):
"""""" | 46.614035 | 173 | 0.652804 | 5,092 | 0.958224 | 0 | 0 | 0 | 0 | 0 | 0 | 531 | 0.099925 |
38be9241383135c31416fcbdb7bbbe1661a5308b | 770 | py | Python | src/fruit_castle/hadwin/hadwin.py | brownboycodes/common-api-server | a3cf92395de31a3dd0c927003e7919d3c74c300f | [
"MIT"
] | 2 | 2021-11-15T06:04:00.000Z | 2021-12-30T11:45:34.000Z | src/fruit_castle/hadwin/hadwin.py | brownboycodes/common-api-server | a3cf92395de31a3dd0c927003e7919d3c74c300f | [
"MIT"
] | null | null | null | src/fruit_castle/hadwin/hadwin.py | brownboycodes/common-api-server | a3cf92395de31a3dd0c927003e7919d3c74c300f | [
"MIT"
] | null | null | null | from flask import Blueprint, abort, jsonify, render_template
from src.fruit_castle.hadwin.utilities import get_json_data
from .v1.version_1 import v1
from .v2.version_2 import v2
from .v3.version_3 import v3
# Parent blueprint for the HadWin API; versioned sub-APIs are mounted below.
hadwin = Blueprint('hadwin', __name__, url_prefix='/hadwin', static_url_path='/dist',
                   static_folder='../client/dist', template_folder='client')

hadwin.register_blueprint(v1)
hadwin.register_blueprint(v2)
hadwin.register_blueprint(v3)
@hadwin.route("/")
def hadwin_home():
    """Blueprint root: the dashboard is disabled, so always answer 401."""
    # return render_template("dashboard.html", py_sent_data="hadwin concept data")
    abort(401)
@hadwin.route('/app')
def hadwin_about_app():
    """Serve the static about-the-app metadata file as JSON."""
    retrieved_file_data = get_json_data(
        "src/data/hadwin/about_the_app.json")
    return jsonify(retrieved_file_data)
| 28.518519 | 85 | 0.755844 | 0 | 0 | 0 | 0 | 307 | 0.398701 | 0 | 0 | 171 | 0.222078 |
38bf08dd063a876b0519dcc7594eeaa4f9ce3eaf | 636 | py | Python | tests/formatting/catch_for_formatting_tests.py | friendly-traceback/friendly-traceback | 4f6785f14c271a4d6412ef19c140f9d380cdbcbf | [
"MIT"
] | 45 | 2021-07-06T03:30:20.000Z | 2022-03-16T17:30:58.000Z | tests/formatting/catch_for_formatting_tests.py | friendly-traceback/friendly-traceback | 4f6785f14c271a4d6412ef19c140f9d380cdbcbf | [
"MIT"
] | 110 | 2021-06-28T11:48:46.000Z | 2022-03-25T20:41:25.000Z | tests/formatting/catch_for_formatting_tests.py | friendly-traceback/friendly-traceback | 4f6785f14c271a4d6412ef19c140f9d380cdbcbf | [
"MIT"
] | 4 | 2021-07-05T20:56:39.000Z | 2021-11-11T20:24:34.000Z | import pytest
import friendly_traceback
from friendly_traceback.console_helpers import _get_info
from ..syntax_errors_formatting_cases import descriptions
friendly_traceback.set_lang("en")

# Keys into each expected-description dict in `descriptions`.
where = "parsing_error_source"
cause = "cause"
@pytest.mark.parametrize("filename", descriptions.keys())
def test_syntax_errors(filename):
    """Each syntax-error case module must produce the expected source
    location and cause text when explained by friendly-traceback."""
    expected = descriptions[filename]
    try:
        # Importing the case module triggers its deliberate SyntaxError.
        exec("from . import %s" % filename)
    except SyntaxError:
        friendly_traceback.explain_traceback(redirect="capture")
    info = _get_info()
    assert expected[where] == info[where]  # noqa
    assert expected[cause] in info[cause]  # noqa
| 27.652174 | 64 | 0.75 | 0 | 0 | 0 | 0 | 395 | 0.621069 | 0 | 0 | 82 | 0.128931 |
38bf528d15683cec4fc4c025265cae0fee582289 | 1,783 | py | Python | solutions/day17.py | rds504/AoC-2020 | 3901a22863ed4479a8cd02f2fa5ea55d5f1f5739 | [
"MIT"
] | null | null | null | solutions/day17.py | rds504/AoC-2020 | 3901a22863ed4479a8cd02f2fa5ea55d5f1f5739 | [
"MIT"
] | null | null | null | solutions/day17.py | rds504/AoC-2020 | 3901a22863ed4479a8cd02f2fa5ea55d5f1f5739 | [
"MIT"
] | null | null | null | from itertools import product
from tools.general import load_input_list
def get_new_active_range(current_active_set, dimensions):
    """Return, per dimension, the coordinate range to scan next cycle.

    The bounds always include zero (the accumulators start there) and
    extend one step beyond the extreme active coordinates on each axis.
    """
    lowest = [0] * dimensions
    highest = [0] * dimensions
    for point in current_active_set:
        for axis, coord in enumerate(point):
            lowest[axis] = min(lowest[axis], coord)
            highest[axis] = max(highest[axis], coord)
    return tuple(range(lo - 1, hi + 2) for lo, hi in zip(lowest, highest))
def count_active_neighbours(active_set, point):
    """Count active cells among the neighbours of *point* (itself excluded)."""
    neighbourhood = product(*(range(c - 1, c + 2) for c in point))
    return sum(1 for nbr in neighbourhood if nbr != point and nbr in active_set)
def new_state_is_active(active_set, point):
    """Apply the Conway-cube rules: an active cell survives with 2-3
    active neighbours; an inactive cell activates with exactly 3."""
    active_nbr = count_active_neighbours(active_set, point)
    if point in active_set:
        return 2 <= active_nbr <= 3
    return active_nbr == 3
def iterate_grid(initial_grid, dimensions, iterations):
    """Run *iterations* cycles of the automaton; return the active count.

    *initial_grid* is a 2-D grid of '#'/'.' cells embedded in the x/y
    plane of a *dimensions*-dimensional space (extra axes start at 0).
    """
    active = {
        tuple([x, y] + [0] * (dimensions - 2))
        for y, row in enumerate(initial_grid)
        for x, cube in enumerate(row)
        if cube == '#'
    }
    for _ in range(iterations):
        scan_ranges = get_new_active_range(active, dimensions)
        active = {
            point
            for point in product(*scan_ranges)
            if new_state_is_active(active, point)
        }
    return len(active)
starting_grid = [list(row) for row in load_input_list("day17.txt")]
print(f"Part 1 => {iterate_grid(starting_grid, 3, 6)}")
# Fix: the 4-dimensional run answers part 2, but the label said "Part 1".
print(f"Part 2 => {iterate_grid(starting_grid, 4, 6)}")
| 27.430769 | 81 | 0.637128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.061694 |
38bf5342401beda94ff276289636e79b34f8c426 | 1,318 | py | Python | app/endpoints/sillyusers/gen_user.py | kant/test-api | 2b2ab5b722dbf18cd99906b27fda356d02ae7a52 | [
"MIT"
] | 9 | 2019-05-22T08:46:01.000Z | 2021-12-10T06:44:56.000Z | app/endpoints/sillyusers/gen_user.py | kant/test-api | 2b2ab5b722dbf18cd99906b27fda356d02ae7a52 | [
"MIT"
] | 285 | 2019-09-03T00:52:39.000Z | 2022-02-13T02:13:59.000Z | app/endpoints/sillyusers/gen_user.py | kant/test-api | 2b2ab5b722dbf18cd99906b27fda356d02ae7a52 | [
"MIT"
] | 4 | 2019-09-19T18:14:09.000Z | 2020-12-15T18:35:07.000Z | # -*- coding: utf-8 -*-
import random
import uuid
import silly
def user_test_info():
    """Build a dictionary describing a randomly generated fake user.

    Uses the `silly` fake-data package for every text field; numeric and
    id fields come from random/uuid.  Intended for seeding test data.
    """
    user_id = str(uuid.uuid1())
    noun = silly.noun()
    suffix = random.randint(1, 10000)
    user_name = f"{noun}-{suffix}"
    given_name = silly.verb()
    family_name = noun
    secret = f"{silly.verb()}-{silly.noun()}"
    job_title = silly.title(capitalize=True)
    employer = silly.company(capitalize=True)
    street = silly.address(capitalize=True)
    town = silly.city(capitalize=True)
    nation = silly.country(capitalize=True)
    zip_code = silly.postal_code()
    mail = silly.email()
    phone_number = silly.phone_number()
    blurb = silly.paragraph(length=1)
    site = f"https://www.{silly.domain()}"
    return {
        "user_id": user_id,
        "user_name": user_name,
        "first_name": given_name,
        "last_name": family_name,
        "password": secret,
        "title": job_title,
        "company": employer,
        "address": street,
        "city": town,
        "country": nation,
        "postal": zip_code,
        "email": mail,
        "phone": phone_number,
        "website": site,
        "description": blurb,
        "is_active": random.choice([True, False]),
    }
| 28.652174 | 52 | 0.603187 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 259 | 0.19651 |
38c35f6af1e202d4be9125e181898d11876f48c9 | 759 | py | Python | applications/Corpus/controllers/MI.py | jolivaresc/corpus | 1d2f3885778c29cb56dd1447140376e3e7cd5831 | [
"BSD-3-Clause"
] | 1 | 2017-07-25T20:15:56.000Z | 2017-07-25T20:15:56.000Z | applications/Corpus/controllers/MI.py | jolivaresc/corpus | 1d2f3885778c29cb56dd1447140376e3e7cd5831 | [
"BSD-3-Clause"
] | null | null | null | applications/Corpus/controllers/MI.py | jolivaresc/corpus | 1d2f3885778c29cb56dd1447140376e3e7cd5831 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from numpy import log2
from pickle import load
"""
* Clase que se encarga de ver la información mutua que hay entre dos tokens
* sirve para determinar si es colocación o no
"""
class MI:
    """Pointwise mutual information between two tokens.

    Used to decide whether a pair of adjacent tokens forms a
    collocation: the higher the mutual information, the more likely
    the pair is a collocation.
    """

    def __init__(self):
        # Unigram and bigram frequency tables pickled by an earlier
        # corpus-processing step.  Fix: pickle files must be opened in
        # binary mode under Python 3 ('r' raised UnicodeDecodeError).
        self.words = load(open("./models/words.d", 'rb'))
        self.ngrams = load(open("./models/ngrams.d", "rb"))
        # NOTE: this attribute shadows the count() method below; after
        # __init__ runs, self.count is the total token count (an int).
        self.count = self.count()

    def count(self):
        """Return the total number of word tokens in the corpus."""
        return sum(self.words.values())

    def eval(self, str1, str2):
        """Return the pointwise mutual information of the bigram str1_str2.

        Returns 0 when the bigram or either unigram is unknown, when the
        corpus is empty, or when any probability is non-positive.
        """
        try:
            sup = float(self.ngrams[str1 + "_" + str2]) / float(self.count)
            inf = float(self.words[str1]) * float(self.words[str2])
            if inf <= 0 or sup <= 0:
                return 0
            inf = inf / (float(self.count) * float(self.count))
            return log2(sup / inf)
        except (KeyError, ZeroDivisionError):
            # Unknown token/bigram or empty corpus -> no evidence.
            # (Previously a bare `except:` that hid every error.)
            return 0
38c367440c32df5c349c8e3577e4146c8b52d7fb | 15,762 | py | Python | tests/test_cli.py | chrahunt/quicken | 2dd00a5f024d7b114b211aad8a2618ec8f101956 | [
"MIT"
] | 3 | 2019-11-12T17:56:08.000Z | 2022-03-12T03:43:10.000Z | tests/test_cli.py | chrahunt/quicken | 2dd00a5f024d7b114b211aad8a2618ec8f101956 | [
"MIT"
] | 47 | 2018-12-10T04:08:58.000Z | 2022-03-20T14:54:36.000Z | tests/test_cli.py | chrahunt/quicken | 2dd00a5f024d7b114b211aad8a2618ec8f101956 | [
"MIT"
] | 1 | 2019-11-12T17:55:17.000Z | 2019-11-12T17:55:17.000Z | import logging
import os
import subprocess
import sys
from contextlib import contextmanager
from pathlib import Path
from textwrap import dedent
import pytest
from quicken._internal.cli.cli import get_arg_parser, parse_file
from quicken._internal.constants import (
DEFAULT_IDLE_TIMEOUT,
ENV_IDLE_TIMEOUT,
ENV_LOG_FILE,
)
from .utils import (
captured_std_streams,
chdir,
env,
isolated_filesystem,
load_json,
local_module,
write_text,
)
from .utils.process import contained_children
from .utils.pytest import non_windows
from .utils.subprocess_helper import track_state
logger = logging.getLogger(__name__)
pytestmark = non_windows
@contextmanager
def sys_path(path):
    """Temporarily append *path* to sys.path, restoring the original on exit."""
    original = sys.path
    sys.path = [*original, path]
    try:
        yield
    finally:
        sys.path = original
def test_args_passthru():
    """`run` must keep everything after `--` as untouched passthrough args."""
    parser = get_arg_parser()
    args = parser.parse_args(["run", "--file", "./script.py", "--", "--help"])
    assert args.action == "run"
    assert args.file == "./script.py"
    assert args.args == ["--", "--help"]
# def test_args_module_passthru():
# _, args = parse_args(['-m', 'pytest', '--', '-s', '-ra'])
# assert args.m == 'pytest'
# assert args.args == ['-s', '-ra']
def test_file_args_passthru():
    """`stop --file foo` must populate the action and file attributes."""
    parser = get_arg_parser()
    args = parser.parse_args(["stop", "--file", "foo"])
    assert args.action == "stop"
    assert args.file == "foo"
def test_file_evaluation():
    """parse_file() must split a script so imports run in the prelude and
    the __main__ guard body runs in main()."""
    # Given a package hello with
    #
    #   hello/
    #     __init__.py
    #     foo.py
    #
    #   # hello/__init__.py
    #   foo = 1
    #
    #   # script.py
    #   from hello import foo
    #   import hello.foo
    #
    #   if __name__ == '__main__':
    #       print(foo)
    #
    # should print 1
    with local_module():
        module = Path("hello")
        module.mkdir()
        write_text(module / "__init__.py", "foo = 1")
        write_text(module / "foo.py", "")
        write_text(
            Path("script.py"),
            """
            from hello import foo
            import hello.foo

            if __name__ == '__main__':
                print(foo)
            """,
        )

        prelude, main = parse_file("script.py")
        prelude()
        with captured_std_streams() as (stdin, stdout, stderr):
            main()

        output = stdout.read()
        assert output == "1\n"
def pytest_exception_location(exc_info):
    """Return (filename, lineno) of the second traceback entry.

    pytest stores traceback line numbers zero-based, hence the +1.
    """
    frame = exc_info.traceback[1]
    return str(frame.path), frame.lineno + 1
def test_file_prelude_backtrace_line_numbering():
    """An exception raised in the prelude keeps its original file/line."""
    # Given a file `script.py` that raises an exception in its prelude
    # And the file is parsed
    # When the prelude section is executed
    # Then the backtrace should have the correct exception
    # And the line number should match the line in the file
    with isolated_filesystem():
        write_text(
            Path("script.py"),
            """\
            import os
            raise RuntimeError('example')

            if __name__ == '__main__':
                raise RuntimeError('example2')
            """,
        )

        prelude, main = parse_file("script.py")

        with pytest.raises(RuntimeError) as e:
            prelude()

        assert "example" in str(e)
        filename, lineno = pytest_exception_location(e)
        assert filename == str(Path("script.py").absolute())
        # The `raise` sits on line 2 of the written script.
        assert lineno == 2
def test_file_main_backtrace_line_numbering():
    """An exception raised in the main section keeps its original file/line."""
    # Given a file `script.py` that raises an exception in its main part
    # And the file is parsed
    # When the main section is executed
    # Then the backtrace should have the correct exception
    # And the line number should match the line in the file
    with isolated_filesystem():
        write_text(
            Path("script.py"),
            """\
            import os

            if __name__ == '__main__':
                os.getpid
                raise RuntimeError('example')
            """,
        )

        prelude, main = parse_file("script.py")
        prelude()

        with pytest.raises(RuntimeError) as e:
            main()

        filename, lineno = pytest_exception_location(e)
        assert filename == str(Path("script.py").absolute())
        # The `raise` sits on line 5 of the written script.
        assert lineno == 5
def test_python_sets_file_path_using_argument():
    """Baseline: plain `python <path>` reports __file__ exactly as passed."""
    # Given a script, a/script.py
    # And a symlink a/foo pointing to script.py
    # When python executes the target path from a given cwd
    # Then __file__ should equal the path exactly as passed on the command line
    with isolated_filesystem() as path:
        parent = path / "a"
        parent.mkdir()
        script = parent / "script.py"
        write_text(
            script,
            """
            print(__file__)
            """,
        )
        symlink = parent / "foo"
        symlink.symlink_to(script.name)

        # (cwd, argument) combinations covering relative/absolute and
        # direct/symlinked invocations.
        cases = [
            ["a", symlink.name],
            ["a", symlink],
            ["a", script.name],
            ["a", script],
            [".", f"a/{symlink.name}"],
            [".", symlink],
            [".", f"a/{script.name}"],
            [".", script],
        ]

        for cwd, file in cases:
            result = subprocess.run(
                [sys.executable, file], stdout=subprocess.PIPE, cwd=cwd
            )
            output = result.stdout.decode("utf-8").strip()
            assert output == str(file)
def test_file_path_set_absolute():
    """__file__ in both prelude and main must be the resolved absolute path."""
    # Given a file `script.py`
    # And the code is split into prelude and main
    # When executed with the results of parse_file
    # Then __file__ should be the full, resolved path to the file
    with isolated_filesystem() as path:
        script = path / "script.py"
        write_text(
            script,
            """
            print(__file__)

            if __name__ == '__main__':
                print(__file__)
            """,
        )

        prelude, main = parse_file(str(script))

        with captured_std_streams() as (stdin, stdout, stderr):
            prelude()

        assert stdout.read().strip() == str(script)

        with captured_std_streams() as (stdin, stdout, stderr):
            main()

        assert stdout.read().strip() == str(script)
def test_file_path_symlink_uses_resolved_path():
    """When parsed via a symlink, __file__ must be the resolved target path."""
    # Given a file `script.py`
    # And a symlink `foo` that points to it
    # When executed with the results of parse_file
    # Then __file__ should be the full, resolved path to the file
    with isolated_filesystem() as path:
        script = path / "script.py"
        write_text(
            script,
            """
            print(__file__)

            if __name__ == '__main__':
                print(__file__)
            """,
        )
        symlink = path / "foo"
        symlink.symlink_to(script.name)

        prelude, main = parse_file(str(script))

        with captured_std_streams() as (stdin, stdout, stderr):
            prelude()

        assert stdout.read().strip() == str(script)

        with captured_std_streams() as (stdin, stdout, stderr):
            main()

        assert stdout.read().strip() == str(script)
@pytest.fixture
def quicken_script(quicken_venv):
    """Prepend the quicken virtualenv's bin directory to PATH so the
    `quicken` CLI under test is the one that gets executed."""
    path = os.environ["PATH"]
    bin_dir = quicken_venv.path / "bin"
    with env(PATH=f"{bin_dir}:{path}"):
        yield
@pytest.fixture
def logged(log_file_path):
    """Point QUICKEN_LOG at the per-test log file for the test's duration."""
    with env(**{ENV_LOG_FILE: str(log_file_path.absolute())}):
        yield
def test_file_argv_set(quicken_script, logged):
    """sys.argv inside the script must be the script path plus its args."""
    # Given a file `script.py`
    # sys.argv should start with `script.py` and be followed by any
    # other arguments
    with isolated_filesystem():
        Path("script.py").write_text(
            dedent(
                """
                import sys

                if __name__ == '__main__':
                    print(sys.argv[0])
                    print(sys.argv[1])
                """
            )
        )

        args = ["hello"]
        with contained_children():
            result = subprocess.run(
                ["quicken", "run", "--file", "script.py", "hello"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )

        assert result.returncode == 0, f"process must succeed: {result}"
        assert result.stdout.decode("utf-8") == f"script.py\n{args[0]}\n"
def test_file_server_name_uses_absolute_resolved_path(quicken_script, logged):
    """One server must serve a script regardless of how its path is spelled
    (relative, absolute, via symlink, or from another cwd)."""
    # Given a file `a/script.py`
    # And a symlink `a/foo` pointing to `script.py`
    # And a server started from `a/script.py`
    # When `quicken -f a/script.py` is executed from `.`
    # And `quicken -f a/foo` is executed from `.`
    # And `quicken -f script.py` is executed from `a`
    # And `quicken -f foo` is executed from `a`
    # Then the same server should be used to handle all of them
    with isolated_filesystem():
        base_dir = Path("a")
        base_dir.mkdir()
        script = base_dir / "script.py"
        write_text(
            script,
            """
            import __test_helper__

            if __name__ == '__main__':
                __test_helper__.record()
            """,
        )
        symlink = base_dir / "foo"
        symlink.symlink_to(script.name)

        with contained_children():
            with track_state() as run1:
                result = subprocess.run(["quicken", "run", "--file", str(script)])

            assert result.returncode == 0
            run1.assert_unrelated_to_current_process()

            with track_state() as run2:
                result = subprocess.run(["quicken", "run", "--file", str(symlink)])

            assert result.returncode == 0
            run2.assert_same_parent_as(run1)

            with chdir("a"):
                with track_state() as run3:
                    result = subprocess.run(["quicken", "run", "--file", script.name])

                assert result.returncode == 0
                run3.assert_same_parent_as(run1)

                with track_state() as run4:
                    result = subprocess.run(["quicken", "run", "--file", symlink.name])

                assert result.returncode == 0
                run4.assert_same_parent_as(run1)
def test_file_path_symlink_modified(quicken_script, logged):
    """Touching the symlink's target script must cause a server reload."""
    # Given a file `script.py`
    # And a symlink `foo` that points to it
    # And the server is already up, having been executed via the symlink
    # And `script.py` is updated
    # When the script is executed again via the symlink
    # Then the server will be reloaded
    with isolated_filesystem():
        base_dir = Path("a")
        base_dir.mkdir()
        script = base_dir / "script.py"
        write_text(
            script,
            """
            import __test_helper__

            if __name__ == '__main__':
                __test_helper__.record()
            """,
        )
        symlink = base_dir / "foo"
        symlink.symlink_to(script.name)

        def update_file_mtime(path):
            # Bump mtime by one second so the server sees the file as changed.
            result = os.stat(path)
            new_times = (result.st_atime, result.st_mtime + 1)
            os.utime(path, new_times)

        with contained_children():
            with track_state() as run1:
                result = subprocess.run(["quicken", "run", "--file", str(symlink)])

            assert result.returncode == 0
            run1.assert_unrelated_to_current_process()

            update_file_mtime(script)

            with track_state() as run2:
                result = subprocess.run(["quicken", "run", "--file", str(symlink)])

            assert result.returncode == 0
            run2.assert_unrelated_to_current_process()
            # A reloaded server means run2 has a different parent than run1.
            run2.assert_unrelated_to(run1)
def test_default_idle_timeout_is_used_cli(quicken_script, logged):
    """Without QUICKEN_IDLE_TIMEOUT the server uses the default timeout."""
    # Given a script
    # And no QUICKEN_IDLE_TIMEOUT is set
    # When the server is started
    # Then it will have the default idle timeout
    with isolated_filesystem():
        script = Path("script.py")
        write_text(
            script,
            """
            import __test_helper__

            if __name__ == '__main__':
                __test_helper__.record()
            """,
        )

        with contained_children():
            with track_state() as run1:
                result = subprocess.run(["quicken", "run", "--file", str(script)])

            assert result.returncode == 0
            run1.assert_unrelated_to_current_process()

            result = subprocess.run(
                ["quicken", "status", "--json", "--file", str(script)],
                stdout=subprocess.PIPE,
            )
            assert result.returncode == 0
            stdout = result.stdout.decode("utf-8")
            server_state = load_json(stdout)
            assert server_state["status"] == "up"
            assert server_state["idle_timeout"] == DEFAULT_IDLE_TIMEOUT
def test_idle_timeout_is_used_cli(quicken_script, logged):
    """QUICKEN_IDLE_TIMEOUT must override the server's idle timeout."""
    # Given a script
    # And QUICKEN_IDLE_TIMEOUT is set (the old comment wrongly said "no ...")
    # When the server is started
    # Then it will have the specified idle timeout
    with isolated_filesystem():
        script = Path("script.py")
        write_text(
            script,
            """
            import __test_helper__

            if __name__ == '__main__':
                __test_helper__.record()
            """,
        )

        test_idle_timeout = 100
        with env(**{ENV_IDLE_TIMEOUT: str(test_idle_timeout)}):
            # (removed a leftover debug print of the env var)
            with contained_children():
                with track_state() as run1:
                    result = subprocess.run(["quicken", "run", "--file", str(script)])

                assert result.returncode == 0
                run1.assert_unrelated_to_current_process()

                result = subprocess.run(
                    ["quicken", "status", "--json", "--file", str(script)],
                    stdout=subprocess.PIPE,
                )
                assert result.returncode == 0
                stdout = result.stdout.decode("utf-8")
                server_state = load_json(stdout)
                assert server_state["status"] == "up"
                assert server_state["idle_timeout"] == test_idle_timeout
def test_log_file_unwritable_fails_fast_cli(quicken_script):
    """An unwritable QUICKEN_LOG path must fail fast with a clear error."""
    # Given a QUICKEN_LOG path pointing to a location that is not writable
    # When the CLI is executed
    # Then it should fail with a nonzero exit code and reasonable message
    with isolated_filesystem():
        script = Path("script.py")
        write_text(
            script,
            """
            if __name__ == '__main__':
                pass
            """,
        )

        log_file = Path("example.log")
        # Mode 000: nobody can write to the log file.
        log_file.touch(0o000, exist_ok=False)
        with env(**{ENV_LOG_FILE: str(log_file.absolute())}):
            with contained_children():
                result = subprocess.run(
                    ["quicken", "run", "--file", script], stderr=subprocess.PIPE
                )

        assert result.returncode == 2
        stderr = result.stderr.decode("utf-8")
        assert str(log_file.absolute()) in stderr
        assert "not writable" in stderr
def test_script_file_unreadable_fails_with_error(quicken_script):
    """An unreadable script must fail fast with a clear error."""
    # Given a script file that is not readable
    # When the CLI is executed
    # Then it should fail with a nonzero exit code and reasonable message
    with isolated_filesystem():
        script = Path("script.py")
        # Mode 000: nobody can read the script.
        script.touch(0o000, exist_ok=False)
        with contained_children():
            result = subprocess.run(
                ["quicken", "run", "--file", str(script)], stderr=subprocess.PIPE
            )

        assert result.returncode == 2
        stderr = result.stderr.decode("utf-8")
        assert str(script) in stderr
        assert "Cannot read" in stderr
| 28.815356 | 87 | 0.568646 | 0 | 0 | 440 | 0.027915 | 488 | 0.030961 | 0 | 0 | 5,196 | 0.329654 |
38c660adbca3d15d5ca02084209f151a1d111447 | 19,353 | py | Python | bmtk/simulator/popnet/popsimulator.py | hernando/bmtk | 57e6924819a74f41ed94a34f55e6ebed0525d037 | [
"BSD-3-Clause"
] | 1 | 2019-03-27T12:23:09.000Z | 2019-03-27T12:23:09.000Z | bmtk/simulator/popnet/popsimulator.py | hernando/bmtk | 57e6924819a74f41ed94a34f55e6ebed0525d037 | [
"BSD-3-Clause"
] | null | null | null | bmtk/simulator/popnet/popsimulator.py | hernando/bmtk | 57e6924819a74f41ed94a34f55e6ebed0525d037 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2017. Allen Institute. All rights reserved
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import logging
from six import string_types
from dipde.internals.internalpopulation import InternalPopulation
from dipde.internals.externalpopulation import ExternalPopulation
from dipde.internals.connection import Connection
import dipde
from bmtk.simulator.core.simulator import Simulator
from . import config as cfg
from . import utils as poputils
import bmtk.simulator.utils.simulation_inputs as inputs
from bmtk.utils.io import spike_trains, firing_rates
class PopSimulator(Simulator):
def __init__(self, graph, dt=0.0001, tstop=0.0, overwrite=True):
    """Drive a dipde simulation of the populations described by *graph*.

    :param graph: population graph with internal/external populations
    :param dt: integration time step, in seconds
    :param tstop: default simulation stop time, in seconds
    :param overwrite: accepted for API compatibility; not used here
    """
    self._graph = graph
    self._tstop = tstop
    self._dt = dt
    self._rates_file = None  # name of file where the output is saved

    self.__population_list = []  # list of all populations, internal and external
    # NOTE(review): __population_table and _rates are commented out below but
    # are still referenced by build_populations()/add_rates_nwb() -- those
    # methods will raise AttributeError until this is restored.
    #self.__population_table = {graph: {} for graph in self._graph.networks}  # population lookup by [network][id]
    self.__connection_list = []  # list of all connections

    self._dipde_network = None  # reference to dipde.Network object

    # diction of rates for every external network/pop_id. Prepopulate dictionary with populations whose rates
    # have already been manually set, otherwise they should use one of the add_rates_* function.
    #self._rates = {network: {pop.pop_id: pop.firing_rate for pop in self._graph.get_populations(network)
    #                         if not pop.is_internal and pop.is_firing_rate_set}
    #               for network in self._graph.networks}

    """
    for network in self._graph.networks:
        for pop in self._graph.get_populations(network):
            if pop.is_internal:
                dipde_pop = self.__create_internal_pop(pop)
            else:
                if pop.is_firing_rate_set:
                    rates = pop.firing_rate
    """
@property
def tstop(self):
    """Simulation stop time, in seconds."""
    return self._tstop

@tstop.setter
def tstop(self, value):
    self._tstop = value

@property
def dt(self):
    """Integration time step, in seconds."""
    return self._dt

@dt.setter
def dt(self, value):
    self._dt = value

@property
def rates_file(self):
    """Path of the file recorded firing rates are written to."""
    return self._rates_file

@rates_file.setter
def rates_file(self, value):
    self._rates_file = value

@property
def populations(self):
    """All dipde populations built so far (internal and external)."""
    return self.__population_list

@property
def connections(self):
    """All dipde connections built so far."""
    return self.__connection_list
def add_rates_nwb(self, network, nwb_file, trial, force=False):
    """Creates external population firing rates from an NWB file.

    Will iterate through a processing trial of an NWB file by assigning gids to the population each belongs to and
    taking the average firing rate.

    This should be done before calling build_cells(). If a population has already been assigned a firing rate an
    error will occur unless force=True.

    :param network: Name of network with external populations.
    :param nwb_file: NWB file with spike rates.
    :param trial: trial id in NWB file
    :param force: will overwrite existing firing rates
    """
    existing_rates = self._rates[network]  # TODO: validate network exists

    # Get all unset, external populations in a network.
    network_pops = self._graph.get_populations(network)
    selected_pops = []
    for pop in network_pops:
        if pop.is_internal:
            continue
        elif not force and pop.pop_id in existing_rates:
            print('Firing rate for {}/{} has already been set, skipping.'.format(network, pop.pop_id))
        else:
            selected_pops.append(pop)

    if selected_pops:
        # assign firing rates from NWB file
        # TODO:
        rates_dict = poputils.get_firing_rate_from_nwb(selected_pops, nwb_file, trial)
        self._rates[network].update(rates_dict)
def add_rate_hz(self, network, pop_id, rate, force=False):
    """Set the firing rate of an external population.

    This should be done before calling build_cells(). If a population has already been assigned a firing rate an
    error will occur unless force=True.

    :param network: name of network with wanted external population
    :param pop_id: name/id of external population
    :param rate: firing rate in Hz.
    :param force: will overwrite existing firing rates
    """
    self.__add_rates_validator(network, pop_id, force)
    self._rates[network][pop_id] = rate
def __add_rates_validator(self, network, pop_id, force):
    """Raise if (network, pop_id) is not an external population whose rate
    may be (re)assigned."""
    if network not in self._graph.networks:
        raise Exception('No network {} found in PopGraph.'.format(network))

    pop = self._graph.get_population(network, pop_id)
    if pop is None:
        raise Exception('No population with id {} found in {}.'.format(pop_id, network))

    if pop.is_internal:
        raise Exception('Population {} in {} is not an external population.'.format(pop_id, network))

    if not force and pop_id in self._rates[network]:
        raise Exception('The firing rate for {}/{} already set and force=False.'.format(network, pop_id))
def _get_rate(self, network, pop):
    """Gets the firing rate previously registered for a given population.

    :param network: name of the network the population belongs to
    :param pop: population object whose pop_id keys the rate table
    """
    return self._rates[network][pop.pop_id]
def build_populations(self):
    """Build dipde Population objects from graph nodes.

    To calculate external population firing rates, it first sees if a population's firing rate has been manually
    set in the graph. Otherwise it attempts to calculate the firing rate from the call to add_rate_hz,
    add_rates_nwb, etc. (which should be called first).
    """
    for network in self._graph.networks:
        for pop in self._graph.get_populations(network):
            if pop.is_internal:
                dipde_pop = self.__create_internal_pop(pop)
            else:
                dipde_pop = self.__create_external_pop(pop, self._get_rate(network, pop))

            self.__population_list.append(dipde_pop)
            # NOTE(review): __population_table is never initialised (its
            # assignment in __init__ is commented out) -- this line will
            # raise AttributeError as written.
            self.__population_table[network][pop.pop_id] = dipde_pop
def set_logging(self, log_file):
    """Route root-logger output to *log_file*, dropping console handlers."""
    # TODO: move this out of the function, put in io class
    if os.path.exists(log_file):
        os.remove(log_file)

    # get root logger
    logger = logging.getLogger()
    for h in list(logger.handlers):
        # remove existing handlers that will write to console.
        logger.removeHandler(h)

    # creates handler that writes to log_file (filemode='w' truncates).
    logging.basicConfig(filename=log_file, filemode='w', level=logging.DEBUG)
def set_external_connections(self, network_name):
    """Sets the external connections for populations in a given network.

    :param network_name: name of external network with External Populations to connect to internal pops.
    """
    for edge in self._graph.get_edges(network_name):
        # Get source and target populations
        src = edge.source
        source_pop = self.__population_table[src.network][src.pop_id]
        trg = edge.target
        target_pop = self.__population_table[trg.network][trg.pop_id]

        # build a connection.
        self.__connection_list.append(self.__create_connection(source_pop, target_pop, edge))
def set_recurrent_connections(self):
    """Initialize internal (recurrent) connections for every internal network."""
    for network in self._graph.internal_networks():
        for edge in self._graph.get_edges(network):
            src = edge.source
            source_pop = self.__population_table[src.network][src.pop_id]
            trg = edge.target
            target_pop = self.__population_table[trg.network][trg.pop_id]
            self.__connection_list.append(self.__create_connection(source_pop, target_pop, edge))
def run(self, tstop=None):
    """Assemble the dipde network and integrate it from t=0 to *tstop*.

    Rates of recorded internal populations are written to rates_file
    when the run completes.

    :param tstop: stop time in seconds; defaults to self.tstop
    """
    # TODO: Check if cells/connections need to be rebuilt.

    # Create the network from the graph's already-built populations/connections.
    dipde_pops = [p.dipde_obj for p in self._graph.populations]
    dipde_conns = [c.dipde_obj for c in self._graph.connections]
    self._dipde_network = dipde.Network(population_list=dipde_pops, connection_list=dipde_conns)

    if tstop is None:
        tstop = self.tstop

    print("running simulation...")
    self._dipde_network.run(t0=0.0, tf=tstop, dt=self.dt)
    # TODO: make record_rates optional?
    self.__record_rates()
    print("done simulation.")
def __create_internal_pop(self, params):
    """Build a dipde InternalPopulation from a node's model params."""
    # TODO: use getter methods directly in case arguments are not stored in dynamics params
    # pop = InternalPopulation(**params.dynamics_params)
    pop = InternalPopulation(**params.model_params)
    return pop

def __create_external_pop(self, params, rates):
    """Build a dipde ExternalPopulation firing at the given rate(s)."""
    pop = ExternalPopulation(rates, record=False)
    return pop

def __create_connection(self, source, target, params):
    """Build a dipde Connection between two dipde populations."""
    return Connection(source, target, nsyn=params.nsyns, delays=params.delay, weights=params.weight)

def __record_rates(self):
    """Write `pop_id time rate` rows for every recorded internal population."""
    with open(self._rates_file, 'w') as f:
        for pop in self._graph.internal_populations:
            if pop.record:
                for time, rate in zip(pop.dipde_obj.t_record, pop.dipde_obj.firing_rate_record):
                    f.write('{} {} {}\n'.format(pop.pop_id, time, rate))
'''
@classmethod
def from_config(cls, configure, graph):
# load the json file or object
if isinstance(configure, basestring):
config = cfg.from_json(configure, validate=True)
elif isinstance(configure, dict):
config = configure
else:
raise Exception('Could not convert {} (type "{}") to json.'.format(configure, type(configure)))
network = cls(graph)
if 'run' not in config:
raise Exception('Json file is missing "run" entry. Unable to build Bionetwork.')
run_dict = config['run']
# Create the output file
if 'output' in config:
out_dict = config['output']
rates_file = out_dict.get('rates_file', None)
if rates_file is not None:
# create directory if required
network.rates_file = rates_file
parent_dir = os.path.dirname(rates_file)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
if 'log_file' in out_dict:
log_file = out_dict['log_file']
network.set_logging(log_file)
# get network parameters
if 'duration' in run_dict:
network.duration = run_dict['duration']
if 'dt' in run_dict:
network.dt = run_dict['dt']
# TODO: need to get firing rates before building populations
if 'input' in config:
for netinput in config['input']:
if netinput['type'] == 'external_spikes' and netinput['format'] == 'nwb' and netinput['active']:
# Load external network spike trains from an NWB file.
print('Setting firing rates for {} from {}.'.format(netinput['source_nodes'], netinput['file']))
network.add_rates_nwb(netinput['source_nodes'], netinput['file'], netinput['trial'])
if netinput['type'] == 'pop_rate':
print('Setting {}/{} to fire at {} Hz.'.format(netinput['source_nodes'], netinput['pop_id'], netinput['rate']))
network.add_rate_hz(netinput['source_nodes'], netinput['pop_id'], netinput['rate'])
# TODO: take input as function with Population argument
# Build populations
print('Building Populations')
network.build_populations()
# Build recurrent connections
if run_dict['connect_internal']:
print('Building recurrention connections')
network.set_recurrent_connections()
# Build external connections. Set connection to default True and turn off only if explicitly stated.
# NOTE: It might be better to set to default off?!?! Need to dicuss what would be more intuitive for the users.
# TODO: ignore case of network name
external_network_settings = {name: True for name in graph.external_networks()}
if 'connect_external' in run_dict:
external_network_settings.update(run_dict['connect_external'])
for netname, connect in external_network_settings.items():
if connect:
print('Setting external connections for {}'.format(netname))
network.set_external_connections(netname)
return network
'''
    @classmethod
    def from_config(cls, configure, graph):
        """Build and wire up a network simulation from a configuration.

        Args:
            configure: Path to a json config file, or an already-parsed dict.
            graph: Network graph object supplying nodes, edges, node sets and
                an ``io`` logger.

        Returns:
            The constructed network instance, with cells, recurrent edges,
            inputs and output files configured.

        Raises:
            Exception: If ``configure`` is neither str nor dict, or if the
                config is missing its "run" section.
        """
        # load the json file or object
        if isinstance(configure, string_types):
            config = cfg.from_json(configure, validate=True)
        elif isinstance(configure, dict):
            config = configure
        else:
            raise Exception('Could not convert {} (type "{}") to json.'.format(configure, type(configure)))

        if 'run' not in config:
            raise Exception('Json file is missing "run" entry. Unable to build Bionetwork.')
        run_dict = config['run']

        # Get network parameters
        # step time (dt) is set in the kernel and should be passed
        overwrite = run_dict['overwrite_output_dir'] if 'overwrite_output_dir' in run_dict else True
        # NOTE(review): print_time and dt are assigned but never used below --
        # the constructor reads config.dt instead. Presumably run_dict['dt'] is
        # accessed only to fail fast when it is missing; confirm.
        print_time = run_dict['print_time'] if 'print_time' in run_dict else False
        dt = run_dict['dt']  # TODO: make sure dt exists
        tstop = float(config.tstop) / 1000.0  # presumably ms -> s; confirm units

        network = cls(graph, dt=config.dt, tstop=tstop, overwrite=overwrite)
        if 'output_dir' in config['output']:
            network.output_dir = config['output']['output_dir']
        # network.spikes_file = config['output']['spikes_ascii']

        if 'block_run' in run_dict and run_dict['block_run']:
            if 'block_size' not in run_dict:
                raise Exception('"block_run" is set to True but "block_size" not found.')
            network._block_size = run_dict['block_size']

        if 'duration' in run_dict:
            network.duration = run_dict['duration']

        graph.io.log_info('Building cells.')
        graph.build_nodes()

        graph.io.log_info('Building recurrent connections')
        graph.build_recurrent_edges()

        # Attach external stimulation: spike trains or firing rates per input.
        for sim_input in inputs.from_config(config):
            node_set = graph.get_node_set(sim_input.node_set)
            if sim_input.input_type == 'spikes':
                spikes = spike_trains.SpikesInput.load(name=sim_input.name, module=sim_input.module,
                                                       input_type=sim_input.input_type, params=sim_input.params)
                graph.io.log_info('Build virtual cell stimulations for {}'.format(sim_input.name))
                graph.add_spike_trains(spikes, node_set)
            else:
                graph.io.log_info('Build virtual cell stimulations for {}'.format(sim_input.name))
                rates = firing_rates.RatesInput(sim_input.params)
                graph.add_rates(rates, node_set)

        # Create the output file
        if 'output' in config:
            out_dict = config['output']
            rates_file = out_dict.get('rates_file', None)
            if rates_file is not None:
                # Relative paths are resolved against the configured output dir.
                rates_file = rates_file if os.path.isabs(rates_file) else os.path.join(config.output_dir, rates_file)
                # create directory if required
                network.rates_file = rates_file
                parent_dir = os.path.dirname(rates_file)
                if not os.path.exists(parent_dir):
                    os.makedirs(parent_dir)
            if 'log_file' in out_dict:
                log_file = out_dict['log_file']
                network.set_logging(log_file)

        # The commented-out/stringed-out code below is retained from an older
        # build flow (external connections + nwb spike inputs); it is dead code.
        # exit()
        # build the cells
        #io.log('Building cells')
        #network.build_cells()
        # Build internal connections
        #if run_dict['connect_internal']:
        #    io.log('Creating recurrent connections')
        #    network.set_recurrent_connections()
        # Build external connections. Set connection to default True and turn off only if explicitly stated.
        # NOTE: It might be better to set to default off?!?! Need to dicuss what would be more intuitive for the users.
        # TODO: ignore case of network name
        '''
        external_network_settings = {name: True for name in graph.external_networks()}
        if 'connect_external' in run_dict:
            external_network_settings.update(run_dict['connect_external'])
        for netname, connect in external_network_settings.items():
            if connect:
                io.log('Setting external connections for {}'.format(netname))
                network.set_external_connections(netname)
        # Build inputs
        if 'input' in config:
            for netinput in config['input']:
                if netinput['type'] == 'external_spikes' and netinput['format'] == 'nwb' and netinput['active']:
                    network.add_spikes_nwb(netinput['source_nodes'], netinput['file'], netinput['trial'])
        io.log_info('Adding stimulations')
        network.make_stims()
        '''
        graph.io.log_info('Network created.')
        return network
38c826bb3c6dbbd679effc96ae41e5e3bbce3014 | 4,899 | py | Python | magnrings.py | Qvapil/Electromagnetic_Fields_B_2020 | 9fe139ff6574582ef64861261b0ee8c98a481a63 | [
"MIT"
] | 3 | 2021-08-18T08:47:33.000Z | 2022-03-05T13:14:00.000Z | magnrings.py | Qvapil/Electromagnetic_Fields_B_2020 | 9fe139ff6574582ef64861261b0ee8c98a481a63 | [
"MIT"
] | null | null | null | magnrings.py | Qvapil/Electromagnetic_Fields_B_2020 | 9fe139ff6574582ef64861261b0ee8c98a481a63 | [
"MIT"
] | 1 | 2021-12-22T11:57:30.000Z | 2021-12-22T11:57:30.000Z | import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import math
# constants (axes in the plots below are labelled in metres)
d = 2    # ring centres sit at x = +/- d
h = 1    # ring centres sit at y = +/- h
a = 0.1  # presumably the ring radius; enters only through the a**2 dipole factor
I = 1    # drive current of each ring (scales all fields linearly)
# Axes: sampling grid for the three cross-section planes
N = 100  # samples per axis
xmin = 0
xmax = 4
xx = np.linspace(xmin, xmax, N)
ymin = 0
ymax = 4
yy = np.linspace(ymin, ymax, N)
zmin = -2
zmax = 2
zz = np.linspace(zmin, zmax, N)
# 2-D meshes, one pair per plotted plane.
X, Z = np.meshgrid(xx, zz)    # xz plane
XX, Y = np.meshgrid(xx, yy)   # xy plane
YY, ZZ = np.meshgrid(yy, zz)  # yz plane
# Distances from the observation point (x, y, z) to the four ring centres,
# which are located at (+/-d, +/-h, 0).
def _distance_to(x, y, z, cx, cy):
    """Euclidean distance from (x, y, z) to the point (cx, cy, 0)."""
    return np.sqrt((x - cx) ** 2 + (y - cy) ** 2 + z ** 2)

def R1(x, y, z):
    return _distance_to(x, y, z, d, h)

def R2(x, y, z):
    return _distance_to(x, y, z, d, -h)

def R3(x, y, z):
    return _distance_to(x, y, z, -d, h)

def R4(x, y, z):
    return _distance_to(x, y, z, -d, -h)
# Magnetic vector potential A (normalised by mu_0) of the four rings.
def Ax(x, y, z):
    """x-component of A/mu_0 at the point (x, y, z)."""
    acc = 0.0
    # Same four terms as the closed-form sum, with signs alternating in h.
    for dist, sign in ((R1, 1.0), (R2, -1.0), (R3, 1.0), (R4, -1.0)):
        acc = acc + sign / dist(x, y, z) ** 3
    return z * I * (a ** 2) * acc / 4

# A has no y-component anywhere on the sampled grid.
Ay = np.zeros((N, N))

def Az(x, y, z):
    """z-component of A/mu_0 at the point (x, y, z)."""
    acc = 0.0
    for dist, numer in ((R1, -(x - d)), (R2, x - d), (R3, -(x + d)), (R4, x + d)):
        acc = acc + numer / dist(x, y, z) ** 3
    return I * (a ** 2) * acc / 4
# Magnetic field H of the four rings (point-dipole approximation).
def Hx(x,y,z):
    # Four dipole cross-terms (y -/+ h)*(x -/+ d)/R**5, signs alternating in h.
    res=(y-h)*(x-d)/R1(x,y,z)**5-(y+h)*(x-d)/R2(x,y,z)**5+(y-h)*(x+d)/R3(x,y,z)**5-(y+h)*(x+d)/R4(x,y,z)**5
    return 3*I*(a**2)*res/4
def Hy(x,y,z):
    # One contribution per ring: (1/R**3) * (3*(y -/+ h)**2/R**2 - 1),
    # with the sign of the whole bracket flipped for the mirror rings.
    Hy1=1/(R1(x,y,z)**3)*(3*(y-h)**2/R1(x,y,z)**2-1)
    Hy2=1/(R2(x,y,z)**3)*(-3*(y+h)**2/R2(x,y,z)**2+1)
    Hy3=1/(R3(x,y,z)**3)*(3*(y-h)**2/R3(x,y,z)**2-1)
    Hy4=1/(R4(x,y,z)**3)*(-3*(y+h)**2/R4(x,y,z)**2+1)
    return I*(a**2)/4*(Hy1+Hy2+Hy3+Hy4)
# functions for current density on the yz plane (x = 0 cross-section)
def Ky_yz(y,z):
    # Distances from (0, y, z) to the four ring centres.
    r1=R1(0,y,z)
    r2=R2(0,y,z)
    r3=R3(0,y,z)
    r4=R4(0,y,z)
    return I*(a**2)/4*3*(-(y-h)*z/r1**5+(y+h)*z/r2**5-(y-h)*z/r3**5+(y+h)*z/r4**5)
def Kz_yz(y,z):
    r1=R1(0,y,z)
    r2=R2(0,y,z)
    r3=R3(0,y,z)
    r4=R4(0,y,z)
    # One term per ring: 3*(y -/+ h)**2/r**5 - 1/r**3, bracket sign flipped
    # for the mirror rings.
    term1=3*(y-h)**2/r1**5-1/r1**3
    term2=-3*(y+h)**2/r2**5+1/r2**3
    term3=3*(y-h)**2/r3**5-1/r3**3
    term4=-3*(y+h)**2/r4**5+1/r4**3
    return I*(a**2)/4*(term1+term2+term3+term4)
# functions for current density on the xz plane (y = 0 cross-section)
def Kx_xz(x,z):
    # Distances from (x, 0, z) to the four ring centres.
    r1=R1(x,0,z)
    r2=R2(x,0,z)
    r3=R3(x,0,z)
    r4=R4(x,0,z)
    # With y = 0 every term carries the same -3*h*z prefactor.
    return -I*(a**2)/4*3*h*z*(1/r1**5+1/r2**5+1/r3**5+1/r4**5)
def Kz_xz(x,z):
    r1=R1(x,0,z)
    r2=R2(x,0,z)
    r3=R3(x,0,z)
    r4=R4(x,0,z)
    return I*(a**2)/4*3*(h*(x-d)/r1**5+h*(x-d)/r2**5+h*(x+d)/r3**5+h*(x+d)/r4**5)
# PLOTS -- one figure per quantity/plane; colour encodes log10 of magnitude.
# streamplot of magnetic potential on xz plane
fig1, ax1 = plt.subplots()
p1=ax1.streamplot(X,Z,Ax(X,1,Z),Az(X,1,Z),color=np.log10(np.sqrt(Ax(X,1,Z)**2+Az(X,1,Z)**2)),cmap=cm.jet)
ax1.set_aspect('equal','box')
c1=fig1.colorbar(p1.lines)
c1.set_label('$log_{10}$|A/$μ_0$|')
ax1.set_title('Normalised Magnetic Potential A/$μ_0$ on xz plane for y=1')
ax1.set_xlabel('x(m)')
ax1.set_ylabel('z(m)')
# Disabled quiver variant of the plot above (needs a coarser grid, e.g. N=30).
# #quiver plot of magnetic potential on xz plane
# #use N=30
# Ax_norm=Ax(X,1,Z)/np.sqrt(Ax(X,1,Z)**2+Az(X,1,Z)**2)
# Az_norm=Az(X,1,Z)/np.sqrt(Ax(X,1,Z)**2+Az(X,1,Z)**2)
#
# fig2, ax2 = plt.subplots()
# plt.quiver(X,Z,Ax_norm,Az_norm)
# ax2.set_aspect('equal','box')
# ax2.set_title('Normalised Magnetic Potential A/$μ_0$ on xz plane for y=1')
# ax2.set_xlabel('x(m)')
# ax2.set_ylabel('z(m)')
# streamplot of magnetic potential on xy plane
fig3, ax3 = plt.subplots()
p3=ax3.streamplot(XX,Y,Ax(XX,Y,2),Ay,color=np.log10(np.sqrt(Ax(XX,Y,2)**2+Ay**2)),cmap=cm.jet)
ax3.set_aspect('equal','box')
c3=fig3.colorbar(p3.lines)
c3.set_label('$log_{10}$|A/$μ_0$|')
ax3.set_title('Normalised Magnetic Potential A/$μ_0$ on xy plane for z=2')
ax3.set_xlabel('x(m)')
ax3.set_ylabel('y(m)')
# Disabled quiver variant of the plot above (note: this one is the xy plane).
# #quiver plot of magnetic potential on xy plane
# #use N=30
# Ax_norm2=Ax(XX,Y,2)/np.sqrt(Ax(XX,Y,2)**2+Ay**2)
# Ay_norm=Ay/np.sqrt(Ax(XX,Y,2)**2+Ay**2)
#
# fig4, ax4 = plt.subplots()
# plt.quiver(XX,Y,Ax_norm2,Ay)
# ax4.set_aspect('equal','box')
# ax4.set_title('Normalised Magnetic Potential A/$μ_0$ on xy plane for z=2')
# ax4.set_xlabel('x(m)')
# ax4.set_ylabel('y(m)')
# streamplot of magnetic field on xy plane
fig5, ax5 = plt.subplots()
p5=ax5.streamplot(XX,Y,Hx(XX,Y,0),Hy(XX,Y,0),color=np.log10(np.sqrt(Hx(XX,Y,0)**2+Hy(XX,Y,0)**2)),cmap=cm.jet,density=1.2)
ax5.set_aspect('equal','box')
c5=fig5.colorbar(p5.lines)
c5.set_label('$log_{10}$|H|')
ax5.set_title('Magnetic Field H on xy plane for z=0')
ax5.set_xlabel('x(m)')
ax5.set_ylabel('y(m)')
# streamplot of current density on yz plane
fig6, ax6 = plt.subplots()
p6=ax6.streamplot(YY,ZZ,Ky_yz(YY,ZZ),Kz_yz(YY,ZZ),color=np.log10(np.sqrt(Ky_yz(YY,ZZ)**2+Kz_yz(YY,ZZ)**2)),cmap=cm.jet,density=1.2)
ax6.set_aspect('equal','box')
c6=fig6.colorbar(p6.lines)
c6.set_label('$log_{10}$|K|')
ax6.set_title('Current density K on yz plane for x=0')
ax6.set_xlabel('y(m)')
ax6.set_ylabel('z(m)')
# streamplot of current density on xz plane
fig7, ax7 = plt.subplots()
p7=ax7.streamplot(X,Z,Kx_xz(X,Z),Kz_xz(X,Z),color=np.log10(np.sqrt(Kx_xz(X,Z)**2+Kz_xz(X,Z)**2)),cmap=cm.jet,density=1.2)
ax7.set_aspect('equal','box')
c7=fig7.colorbar(p7.lines)
c7.set_label('$log_{10}$|K|')
ax7.set_title('Current density K on xz plane for y=0')
ax7.set_xlabel('x(m)')
ax7.set_ylabel('z(m)')
# Show all figures at once (blocks until the windows are closed).
plt.show()
| 27.368715 | 131 | 0.611962 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,592 | 0.324567 |
38ca028dcd1cdce17bd930dfa88dbdd8830ed96c | 1,151 | py | Python | examples/video.py | ankur09011/pycozmo | cd8492a141d61f6fd0119066d4e38528cef61fab | [
"MIT"
] | 1 | 2021-01-11T20:34:38.000Z | 2021-01-11T20:34:38.000Z | examples/video.py | ankur09011/pycozmo | cd8492a141d61f6fd0119066d4e38528cef61fab | [
"MIT"
] | null | null | null | examples/video.py | ankur09011/pycozmo | cd8492a141d61f6fd0119066d4e38528cef61fab | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import time
import pycozmo
# Most recent camera frame pushed by the robot; None until the first frame.
last_im = None


def on_camera_image(cli, new_im):
    """Event handler: stash the newest camera frame for the main loop."""
    global last_im
    del cli  # the client handle is not needed by this handler
    last_im = new_im
def pycozmo_program(cli: pycozmo.client.Client):
    """Main control loop: raise the head, enable the camera and keep
    mirroring the latest camera frame on Cozmo's face display.

    Loops forever at roughly 25 FPS; terminated externally (Ctrl-C or
    pycozmo.run_program teardown).
    """
    global last_im

    # Raise head to the midpoint of its range so the camera looks ahead.
    angle = (pycozmo.robot.MAX_HEAD_ANGLE.radians - pycozmo.robot.MIN_HEAD_ANGLE.radians) / 2.0
    cli.set_head_angle(angle)

    # Register to receive new camera images (handler stores them in last_im).
    cli.add_handler(pycozmo.event.EvtNewRawCameraImage, on_camera_image)

    # Enable camera.
    pkt = pycozmo.protocol_encoder.EnableCamera()
    cli.conn.send(pkt)

    while True:
        if last_im:
            # Get last image.
            im = last_im
            # Resize from 320x240 to 128x32 -- the face display resolution.
            im = im.resize((128, 32))
            # Convert to binary (1-bit) image for the monochrome display.
            im = im.convert('1')
            # Display the result image.
            cli.display_image(im)
        # Run with 25 FPS.
        time.sleep(1 / 25)
# Entry point: connect to the robot and hand control to pycozmo_program.
pycozmo.run_program(pycozmo_program, protocol_log_level="INFO", robot_log_level="DEBUG")
| 20.927273 | 95 | 0.636838 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 313 | 0.271937 |
38cabe0c4bced6b634c68a59cf4f8f5117b14f51 | 394 | py | Python | 2020/aoc6.py | lachtanek/advent-of-code | dc83d82d46392adc073191161c2767e684d776bd | [
"MIT"
] | null | null | null | 2020/aoc6.py | lachtanek/advent-of-code | dc83d82d46392adc073191161c2767e684d776bd | [
"MIT"
] | null | null | null | 2020/aoc6.py | lachtanek/advent-of-code | dc83d82d46392adc073191161c2767e684d776bd | [
"MIT"
] | null | null | null | from functools import reduce
# Advent of Code 2020 day 6: per passenger group (groups separated by blank
# lines), count questions anyone answered yes to (part 1) and questions
# everyone answered yes to (part 2).
data = []
with open("aoc6.inp") as rf:
    sets = []
    for line in rf:
        if line == "\n":
            data.append(sets)
            sets = []
        else:
            # One set of answered-yes letters per person.
            sets.append(set(line.strip()))
    # BUGFIX: flush the final group. The original code only appended a group
    # on a blank line, silently dropping the last group when the file does
    # not end with a trailing blank line.
    if sets:
        data.append(sets)

a1 = a2 = 0
for sets in data:
    a1 += len(reduce(lambda s1, s2: s1 | s2, sets))  # union: "anyone yes"
    a2 += len(reduce(lambda s1, s2: s1 & s2, sets))  # intersection: "everyone yes"
print(a1, a2)
| 17.909091 | 52 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.035533 |
38cae31a92a87d74ef017b14b1ce771550e7fc51 | 513 | py | Python | hello_tradera.py | ErikAndren/hello_tradera | d318bf1c187a91f81d8452d0466cc837befafe34 | [
"MIT"
] | null | null | null | hello_tradera.py | ErikAndren/hello_tradera | d318bf1c187a91f81d8452d0466cc837befafe34 | [
"MIT"
] | null | null | null | hello_tradera.py | ErikAndren/hello_tradera | d318bf1c187a91f81d8452d0466cc837befafe34 | [
"MIT"
] | null | null | null | import zeep
import logging

# Silence zeep's verbose logging; only errors are of interest here.
logging.getLogger('zeep').setLevel(logging.ERROR)

# Tradera public API endpoint and application credentials (placeholders).
# NOTE(review): publicServiceUrl is never used -- the client below is built
# from the WSDL URL instead.
publicServiceUrl = 'https://api.tradera.com/v3/PublicService.asmx'
appId = 'REPLACE ME WITH TRADERA ID'
appKey = 'REPLACE ME WITH TRADERA KEY'

wsdl = 'https://api.tradera.com/v3/PublicService.asmx?WSDL'
client = zeep.Client(wsdl=wsdl)

# Every call must carry the application credentials in a SOAP header.
authHeader = {
    'AuthenticationHeader' : {
        'AppId' : appId,
        'AppKey' : appKey
    }
}

# "GetOfficalTime" (sic) -- presumably the operation name exactly as spelled
# in the service WSDL; do not "fix" the spelling without checking the API.
result = client.service.GetOfficalTime(_soapheaders = authHeader)
print(result)
| 20.52 | 66 | 0.699805 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 199 | 0.387914 |
38caeb5a78603a7d077226028381762fc6c61e0d | 1,611 | py | Python | messungen/decrypt_trace.py | tihmstar/gido_public | dcc523603b9a27b37752211715a10e30b51ce812 | [
"Unlicense"
] | 16 | 2021-04-10T16:28:00.000Z | 2021-12-12T10:15:23.000Z | messungen/decrypt_trace.py | tihmstar/gido_public | dcc523603b9a27b37752211715a10e30b51ce812 | [
"Unlicense"
] | null | null | null | messungen/decrypt_trace.py | tihmstar/gido_public | dcc523603b9a27b37752211715a10e30b51ce812 | [
"Unlicense"
] | 2 | 2021-04-10T16:32:36.000Z | 2021-04-11T14:13:45.000Z | import struct
import sys
import binascii
import tarfile
import usb
import recovery

# Number of 16-byte AES blocks contained in each trace record.
BLOCKS_CNT = 8

if len(sys.argv) < 2:
    print("Usage: %s <path>"%sys.argv[0])
    exit(0)

infile = sys.argv[1]
if infile[-len(".tar.gz"):] == ".tar.gz":
    print("cant open compressed file!")
    exit(1)
else:
    f = open(infile,"rb+")

# File header: two little-endian uint32 values.
r = f.read(8)
traces_per_file = struct.unpack("<I",r[0:4])[0]
print("traces_per_file=%s"%traces_per_file)
point_per_trace_tell = struct.unpack("<I",r[4:8])[0]
print("point_per_trace_tell=%s"%point_per_trace_tell)

# NOTE(review): rewinds to offset 4 although 8 header bytes were read above,
# so the second header word is re-read as part of the first record -- confirm
# this matches the trace file layout.
didRead = 4
f.seek(didRead)

dev = recovery.acquire_device()

while True:
    # Each record: 4 filler bytes followed by BLOCKS_CNT AES blocks.
    nop = f.read(4)
    assert(len(nop) == 4)
    aesInput = f.read(BLOCKS_CNT*16)
    assert(len(aesInput) == BLOCKS_CNT*16)
    print(binascii.hexlify(aesInput))
    print("\n\n")
    lastblock = bytes([0x00]*16)  # IV: all zeroes
    aesOutput = bytes()
    curblockindex = 0
    while curblockindex < BLOCKS_CNT:
        # NOTE(review): the slice spans three blocks starting at the current
        # index -- presumably the device command wants look-ahead data; verify.
        # (Also: "input" shadows the builtin of the same name.)
        input = aesInput[16*curblockindex:16*(3+curblockindex)]
        cmd = "d " +binascii.hexlify(input).decode()
        print(cmd) #DEBUG
        recovery.send_command(dev,cmd)
        rsp = dev.ctrl_transfer(0xC0, 0, 0, 0, 0x600, 30000)[0:-1]
        # XOR with the previous ciphertext block -- CBC-style unchaining.
        for i in range(16):
            rsp[i] ^= lastblock[i]
        print(binascii.hexlify(rsp))
        aesOutput += bytes(rsp)
        lastblock = input[-16:]
        # rsp appears to hold hex text (2 chars per byte), hence /(16*2)
        # to advance by the number of whole blocks returned -- confirm.
        curblockindex += int(len(rsp)/(16*2))
    # exit(1)
    print("lol--")
    print(aesOutput)
    # print(bytes(aesOutput).decode())

# Never reached: the loop above has no break (debug leftovers).
#DEBUG
aesInput = f.read(BLOCKS_CNT*16)
print(binascii.hexlify(aesInput))
exit(1)
f.close()
| 19.409639 | 66 | 0.612042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.126629 |
38caf61d9a9e8681b2748236a9dabcabd645543a | 1,075 | py | Python | ghnotifier/menu.py | iamtalhaasghar/ghnotifier | 7bbcbc32abc8ad923bff64055cb19ac042a03764 | [
"MIT"
] | 1 | 2022-02-03T05:30:22.000Z | 2022-02-03T05:30:22.000Z | ghnotifier/menu.py | iamtalhaasghar/ghnotifier | 7bbcbc32abc8ad923bff64055cb19ac042a03764 | [
"MIT"
] | 5 | 2018-10-30T13:03:24.000Z | 2022-02-03T06:06:08.000Z | ghnotifier/menu.py | iamtalhaasghar/ghnotifier | 7bbcbc32abc8ad923bff64055cb19ac042a03764 | [
"MIT"
] | 1 | 2022-02-03T06:02:02.000Z | 2022-02-03T06:02:02.000Z | #!/usr/bin/env python3
import webbrowser
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from ghnotifier.notifier import Notifier
from ghnotifier.settings import Settings
class Menu:
    """Tray context menu: open GitHub notifications, settings, and quit."""

    # Page opened by the "Open Notifications" entry.
    GITHUB_NOTIFICATIONS = 'https://github.com/notifications'

    def __init__(self):
        self.menu = Gtk.Menu()
        self.create_menu()
        self.menu.show_all()

    def create_menu(self):
        """Populate the menu: action items on top, Quit below a separator."""
        for label, handler in (('Open Notifications', self.notifications),
                               ('Settings', self.settings)):
            self.append(label, handler)
        self.menu.append(Gtk.SeparatorMenuItem())
        self.append('Quit', self.quit)

    def append(self, name, callback):
        """Add one labelled item wired to `callback` on activation."""
        entry = Gtk.MenuItem(name)
        entry.connect('activate', callback)
        self.menu.append(entry)

    @staticmethod
    def notifications(source):
        """Open the GitHub notifications page in the default browser."""
        webbrowser.open(Menu.GITHUB_NOTIFICATIONS)

    @staticmethod
    def settings(source):
        """Show the settings dialog."""
        Settings().open()

    @staticmethod
    def quit(source):
        """Stop the notifier and leave the GTK main loop."""
        Notifier.stop()
        Gtk.main_quit()

    def get_inner(self):
        """Return the underlying Gtk.Menu widget."""
        return self.menu
38cb1f29849026e4492f8972be47ed86aeea596c | 118 | py | Python | apps/enterprice/apps.py | jimforit/lagou | 165593a15597012092b5e0ba34158fbc1d1c213d | [
"MIT"
] | 2 | 2019-03-11T03:58:19.000Z | 2020-03-06T06:45:28.000Z | apps/enterprice/apps.py | jimforit/lagou | 165593a15597012092b5e0ba34158fbc1d1c213d | [
"MIT"
] | 5 | 2020-06-05T20:04:20.000Z | 2021-09-08T00:53:52.000Z | apps/enterprice/apps.py | jimforit/lagou | 165593a15597012092b5e0ba34158fbc1d1c213d | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class EnterpriceConfig(AppConfig):
    """Django application configuration for the `enterprice` app."""

    # App label Django uses to register the application.
    name = 'enterprice'
    # Human-readable name shown in the Django admin (Chinese: "enterprise").
    verbose_name = '企业'
38cc7e10e59cc8a253d9de4a014ba672e471157d | 1,297 | py | Python | python/utils/LaplacianLoss.py | aytewari/GVV-Differentiable-CUDA-Renderer | 5da6bdab3fd44074ae752bd8192fc2aad9fb77e6 | [
"CC-BY-4.0"
] | 40 | 2020-10-09T06:13:32.000Z | 2021-04-09T21:48:29.000Z | python/utils/LaplacianLoss.py | ayushtewari/GVV-Differentiable-CUDA-Renderer | 5da6bdab3fd44074ae752bd8192fc2aad9fb77e6 | [
"CC-BY-4.0"
] | 2 | 2020-10-10T07:16:33.000Z | 2021-03-27T09:07:57.000Z | python/utils/LaplacianLoss.py | ayushtewari/GVV-Differentiable-CUDA-Renderer | 5da6bdab3fd44074ae752bd8192fc2aad9fb77e6 | [
"CC-BY-4.0"
] | 2 | 2021-06-29T15:40:03.000Z | 2022-01-31T16:09:44.000Z |
import tensorflow as tf
########################################################################################################################
# Isometry Loss
########################################################################################################################

def getLoss(inputMeshTensor, restTensor, laplacian, numberOfEdges, rowWeight):
    """Laplacian (isometry) regularizer on the deformation from the rest pose.

    Applies `laplacian` to each coordinate channel of the (mm -> m scaled)
    displacement, squares the result, sums the three axes, weights per vertex
    by `rowWeight`, and normalizes by batch size times `numberOfEdges`.
    """
    batchSize = tf.shape(inputMeshTensor)[0]
    numberOfVertices = tf.shape(inputMeshTensor)[1]

    # Displacement from the rest mesh, converted from millimetres to metres.
    displacement = (inputMeshTensor / 1000.0) - (restTensor / 1000.0)

    # Filter each coordinate channel (x, y, z) with the Laplacian and square it.
    perAxis = []
    for axis in range(3):
        channel = tf.reshape(displacement[:, :, axis], [batchSize, numberOfVertices, 1])
        filtered = tf.matmul(laplacian, channel)
        perAxis.append(filtered * filtered)

    squaredNorm = perAxis[0] + perAxis[1] + perAxis[2]
    squaredNorm = tf.reshape(squaredNorm, [batchSize, numberOfVertices])

    loss = tf.reduce_sum(squaredNorm * rowWeight)
    return loss / tf.cast(batchSize * numberOfEdges, tf.float32)
########################################################################################################################
| 37.057143 | 120 | 0.505783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 375 | 0.289129 |
38ccad75a3a0fee55c8afb6cdcb6e3fd50a68c98 | 1,571 | py | Python | scripts/logfetch/search.py | madhuri7112/Singularity | 11a533ecf2baaa1a4a74404b3de435e8d5b7d1a3 | [
"Apache-2.0"
] | 692 | 2015-01-02T02:30:23.000Z | 2022-03-18T08:16:05.000Z | scripts/logfetch/search.py | madhuri7112/Singularity | 11a533ecf2baaa1a4a74404b3de435e8d5b7d1a3 | [
"Apache-2.0"
] | 1,399 | 2015-01-01T10:52:44.000Z | 2022-03-17T18:27:23.000Z | scripts/logfetch/search.py | mikebell90/Singularity | 290d647ee3cd5ddfbf381d09d22fdce1896e3388 | [
"Apache-2.0"
] | 280 | 2015-01-02T02:30:33.000Z | 2022-03-03T21:08:33.000Z | import os
import re
import fnmatch
from logfetch_base import log, is_in_date_range
from termcolor import colored
def find_cached_logs(args):
    """Return paths of already-downloaded logs in args.dest that match the
    current request/task filter and date range."""
    pattern = get_matcher(args)
    found = []
    for name in os.listdir(args.dest):
        if fnmatch.fnmatch(name, pattern) and in_date_range(args, name):
            log(colored('Including log {0}\n'.format(name), 'blue'), args, True)
            found.append('{0}/{1}'.format(args.dest, name))
        else:
            log(colored('Excluding log {0}, not in date range\n'.format(name), 'magenta'), args, True)
    return found
def in_date_range(args, filename):
    """True when the filename's embedded epoch timestamp is inside the
    requested window; files without a timestamp are always kept."""
    stamps = re.findall(r"-\d{13}-", filename)
    if not stamps:
        # No 13-digit (millisecond) timestamp in the name: keep the file.
        return True
    # Strip the dashes and drop the last three digits (millis -> seconds).
    seconds = int(stamps[-1].replace("-", "")[0:-3])
    return is_in_date_range(args, seconds)
def get_matcher(args):
    """Build the glob pattern used to match cached log filenames."""
    # Base of the glob, from the most to the least specific identifier.
    if args.taskId:
        base = args.taskId
    elif args.deployId and args.requestId:
        base = '{0}-{1}'.format(args.requestId, args.deployId)
    else:
        base = args.requestId
    # When matching raw filenames, narrow further by the requested log type.
    if 'filename' in args.file_pattern and args.logtype:
        return '{0}*{1}*'.format(base, args.logtype)
    return '{0}*'.format(base)
38cd88d6a9d8806c77424757ca3b6620a2526f67 | 307 | py | Python | algorithms/in_order.py | AutuanLiu/LeetCode2019 | 8efc7c5475fd888f7d86c3b08a3c1c9e55c1ac30 | [
"MIT"
] | 1 | 2019-06-20T07:43:59.000Z | 2019-06-20T07:43:59.000Z | algorithms/in_order.py | AutuanLiu/Code-Storm2019 | 8efc7c5475fd888f7d86c3b08a3c1c9e55c1ac30 | [
"MIT"
] | null | null | null | algorithms/in_order.py | AutuanLiu/Code-Storm2019 | 8efc7c5475fd888f7d86c3b08a3c1c9e55c1ac30 | [
"MIT"
] | null | null | null | # In-order traversal of a binary tree, written as a generator
# Definition for a binary tree node.
class TreeNode:
    """Plain binary-tree node holding a value and two child links."""

    def __init__(self, x):
        self.val = x       # payload stored at this node
        self.left = None   # left subtree (TreeNode or None)
        self.right = None  # right subtree (TreeNode or None)
def mid_order(root):
    """Yield node values of the tree rooted at `root` in in-order
    (left subtree, node, right subtree); yields nothing for an empty tree."""
    stack, node = [], root
    while stack or node:
        # Walk as far left as possible, remembering the path.
        while node:
            stack.append(node)
            node = node.left
        node = stack.pop()
        yield node.val
        node = node.right
| 21.928571 | 36 | 0.648208 | 114 | 0.342342 | 136 | 0.408408 | 0 | 0 | 0 | 0 | 78 | 0.234234 |
38cee6d2267200542e95138691f1ada5ab2fedde | 1,485 | py | Python | predict_bw_lstm1.py | kyeongsoo/dash-simulation | ceccfee61d7102146e83b0a2d60d87693c871198 | [
"MIT"
] | null | null | null | predict_bw_lstm1.py | kyeongsoo/dash-simulation | ceccfee61d7102146e83b0a2d60d87693c871198 | [
"MIT"
] | null | null | null | predict_bw_lstm1.py | kyeongsoo/dash-simulation | ceccfee61d7102146e83b0a2d60d87693c871198 | [
"MIT"
] | 1 | 2020-06-06T14:02:35.000Z | 2020-06-06T14:02:35.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
##
# @file predict_bw_lstm1.py
# @author Kyeong Soo (Joseph) Kim <Kyeongsoo.Kim@xjtlu.edu.cn>
# @date 2019-04-22
# 2022-03-23 - updated for TensorFlow version 2.6
#
# @brief Predict channel bandwidth.
#
# @remarks This code is based on the nice sample code from:
# https://machinelearningmastery.com/how-to-develop-lstm-models-for-time-series-forecasting/
# import modules
import numpy as np
import tensorflow as tf
import tensorflow.keras # required for TF ver. 2.6
from skimage.util import view_as_windows
# define dataset: bandwidths.npy holds a 1-D series of bandwidth samples
bws = np.load('bandwidths.npy')
# Sliding 3-sample windows over the series; drop the last window so that
# every X row has a following sample to use as its target y.
X = view_as_windows(bws, 3, step=1)[:-1] # 3-sample sliding window over bws (except the last one, i.e., '[:-1]')
y = bws[3:]  # target: the sample immediately after each window
# reshape from [samples, timesteps] into [samples, timesteps, features]
X = X.reshape((X.shape[0], X.shape[1], 1))
# define model: single LSTM layer feeding one linear output unit
model = tf.keras.Sequential()
# model.add(tf.keras.layers.LSTM(units=50, activation='relu', input_shape=(3, 1)))
model.add(tf.keras.layers.LSTM(units=50, activation='relu'))
model.add(tf.keras.layers.Dense(1))
model.compile(optimizer='adam', loss='mse')
# fit model (silent; 1000 epochs)
model.fit(X, y, epochs=1000, verbose=0)
# demonstrate prediction on the first 10 windows
for i in range(10):
    x_input = X[i]
    x_input = x_input.reshape((1, 3, 1))
    yhat = model.predict(x_input, verbose=0)
    # NOTE(review): the comprehension variable `i` inside the f-string shadows
    # the loop index; it iterates the window samples, not the outer range.
    print(f"{','.join([str(int(i)) for i in x_input.flatten()])} -> {yhat.flatten()[0]:.2e} (true value: {int(y[i]):d})")
| 33.75 | 121 | 0.675421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 899 | 0.605387 |
38cf20797afd513c24dc827a849f4617f931df33 | 586 | py | Python | tests/integration/test_todo.py | nomilkinmyhome/todo-list | c596fa2003630de95e55ebe7d3420d9999270c97 | [
"WTFPL"
] | 3 | 2020-10-05T16:50:02.000Z | 2021-01-01T17:36:22.000Z | tests/integration/test_todo.py | nomilkinmyhome/todo-list | c596fa2003630de95e55ebe7d3420d9999270c97 | [
"WTFPL"
] | 8 | 2020-10-05T20:59:01.000Z | 2021-01-30T12:19:15.000Z | tests/integration/test_todo.py | nomilkinmyhome/todo-list | c596fa2003630de95e55ebe7d3420d9999270c97 | [
"WTFPL"
] | null | null | null | from src.use_cases.todo import create_todo, update_todo
def test_create_todo(client, user):
    """Creating a todo returns it with the submitted title and id 1.

    `client` and `user` are presumably pytest fixtures (test app client and
    a seeded user) -- confirm against conftest.
    """
    payload = {
        'title': 'test todo',
        'description': 'very long and useful description',
    }
    todo = create_todo(user.id, payload)
    # First todo in a fresh database -- assumes the id sequence starts at 1.
    assert todo.id == 1
    assert todo.title == payload['title']
def test_update_todo(client):
    """Updating only the title must leave the description untouched.

    NOTE(review): relies on the todo with id 1 (and this exact description)
    created by test_create_todo -- test-order dependence; confirm intended.
    """
    todo_id = 1
    old_description = 'very long and useful description'
    payload = {'title': 'new title'}
    todo = update_todo(todo_id, payload)
    assert todo.title == payload['title']
    assert todo.description == old_description
| 25.478261 | 58 | 0.668942 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.223549 |
38d140e56cdd3b97964478d31b7516e144cef25a | 4,818 | py | Python | src/loopHandler.py | jerryduan07/gametime | 43fbd6ae7f83c9ebf55dbedb4f98ce064c04514c | [
"BSD-3-Clause"
] | null | null | null | src/loopHandler.py | jerryduan07/gametime | 43fbd6ae7f83c9ebf55dbedb4f98ce064c04514c | [
"BSD-3-Clause"
] | null | null | null | src/loopHandler.py | jerryduan07/gametime | 43fbd6ae7f83c9ebf55dbedb4f98ce064c04514c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""Exposes functions to perform a source-to-source transformation
that detects and unrolls loops in the code being analyzed.
"""
"""See the LICENSE file, located in the root directory of
the source distribution and
at http://verifun.eecs.berkeley.edu/gametime/about/LICENSE,
for details on the GameTime license and authors.
"""
import os
import subprocess
from defaults import config, sourceDir
class HandlerMode(object):
    """Represents the mode that the loop handler works in.

    NOTE: callers compare these values with ``is``; that only works because
    CPython caches small integers.
    """
    #: Detect loops.
    DETECTOR = 0
    #: Unroll loops.
    UNROLLER = 1
def _generateHandlerCommand(projectConfig, handlerMode):
    """Generates the system call that runs the loop handler
    with appropriate inputs.

    Arguments:
        projectConfig:
            :class:`~gametime.projectConfiguration.ProjectConfiguration`
            object that represents the configuration of a GameTime project.
        handlerMode:
            Mode that the loop handler should run in
            (:class:`HandlerMode` DETECTOR or UNROLLER).

    Returns:
        Appropriate system call as a list that contains the program
        to be run and the proper arguments.
    """
    # Set the environment variable that allows the Cilly driver to find
    # the path to the configuration file for the Findlib OCaml module.
    os.environ["OCAMLFIND_CONF"] = os.path.join(sourceDir,
                                                "ocaml/conf/findlib.conf")
    # Set the environment variable that allows the Cilly driver to find
    # the path to the folder that contains the compiled OCaml files.
    os.environ["OCAMLPATH"] = os.path.join(sourceDir, "ocaml/lib")
    # Set the environment variable that configures the Cilly driver to load
    # the features that will be needed for the loop handler.
    os.environ["CIL_FEATURES"] = "cil.default-features,loopHandler.loopHandler"

    command = []
    command.append(os.path.join(config.TOOL_CIL, "bin/cilly.bat"))
    command.append("--doloopHandler")
    # Select detection vs. unrolling behavior of the CIL feature.
    command.append("--loopHandler-detect"
                   if handlerMode is HandlerMode.DETECTOR
                   else "--loopHandler-unroll")
    command.append("--loopHandler-analyze=%s" % projectConfig.func)
    loopConfigFile = os.path.join(projectConfig.locationTempDir,
                                  config.TEMP_LOOP_CONFIG)
    command.append("--loopHandler-config='%s'" % loopConfigFile)
    for inlineName in projectConfig.inlined:
        command.append("--inline='%s'" % inlineName)
    # The detector analyzes the line-number-annotated copy; the unroller
    # works directly on the temporary analysis file.
    analysisFile = ("%s%s.c" % (projectConfig.locationTempNoExtension,
                                config.TEMP_SUFFIX_LINE_NUMS)
                    if handlerMode is HandlerMode.DETECTOR
                    else projectConfig.locationTempFile)
    command.append(analysisFile)
    command.append("-I'%s'" % projectConfig.locationOrigDir)
    command.append("--save-temps='%s'" % projectConfig.locationTempDir)
    command.append("-c")
    command.append("-o")
    command.append("'%s.out'" % projectConfig.locationTempNoExtension)
    return command
def runDetector(projectConfig):
    """Conducts the sequence of system calls that will detect loops
    for the function currently being analyzed. The output of the
    detector will be placed in a loop configuration file that the
    user has to modify: this file contains the line numbers of each
    loop header, and the user has to specify bounds for each loops
    by changing the number beside the line numbers, which is set to
    1 by default.

    Arguments:
        projectConfig:
            :class:`~gametime.projectConfiguration.ProjectConfiguration`
            object that represents the configuration of a GameTime project.

    Returns:
        Zero if the loop detection was successful; a non-zero value otherwise.
    """
    command = _generateHandlerCommand(projectConfig, HandlerMode.DETECTOR)
    # NOTE(review): `command` is a list but shell=True is passed; on POSIX
    # only the first element would be executed. Presumably fine on Windows
    # (the command is a .bat file) -- confirm before running elsewhere.
    proc = subprocess.call(command, shell=True)
    return proc
def runUnroller(projectConfig):
    """Conducts the sequence of system calls that will unroll loops
    in the function currently being analyzed. The output of the
    detector will be a temporary file for GameTime analysis where
    all of the loops have been unrolled using user-specified bounds.

    Precondition: The loop detector has already been run, and the user
    has already specified bounds for each loop in the loop configuration
    file generated by the loop detector.

    Arguments:
        projectConfig:
            :class:`~gametime.projectConfiguration.ProjectConfiguration`
            object that represents the configuration of a GameTime project.

    Returns:
        Zero if the loop unrolling was successful; a non-zero value otherwise.
    """
    command = _generateHandlerCommand(projectConfig, HandlerMode.UNROLLER)
    # NOTE(review): list + shell=True, same caveat as in runDetector.
    proc = subprocess.call(command, shell=True)
    return proc
| 37.061538 | 79 | 0.695724 | 165 | 0.034247 | 0 | 0 | 0 | 0 | 0 | 0 | 3,057 | 0.634496 |
38d24f0b77db0580d1f6a1183215410fe0692d65 | 859 | py | Python | Python/Maths/factorielle.py | GeneralNZR/maths-and-javascript | 8a0e638e59808b1d987269dddac0b99c96c78c4a | [
"MIT"
] | 3 | 2021-10-01T06:11:28.000Z | 2021-10-04T20:50:07.000Z | Python/Maths/factorielle.py | GeneralNZR/maths-and-javascript | 8a0e638e59808b1d987269dddac0b99c96c78c4a | [
"MIT"
] | null | null | null | Python/Maths/factorielle.py | GeneralNZR/maths-and-javascript | 8a0e638e59808b1d987269dddac0b99c96c78c4a | [
"MIT"
def factorielle_rec(n: int) -> int:
    """Return n! (factorial of n), computed recursively.

    Args:
        n: Non-negative integer.

    Returns:
        The exact integer factorial of n (Python ints are unbounded, so the
        result is exact even for large n -- it is never a float).

    Raises:
        ValueError: If n is negative (the recursion would never terminate).

    Example:
        >>> factorielle_rec(5)
        120
    """
    # Guard added: the original recursed forever for negative inputs, and its
    # docstring wrongly showed a float result (9.332622e+157) for n=100.
    if n < 0:
        raise ValueError("n must be non-negative")
    return 1 if n == 0 else n * factorielle_rec(n - 1)
def factorielle_it(n: int) -> int:
    """Return n! (factorial of n), computed iteratively.

    Args:
        n: Non-negative integer.

    Returns:
        The exact integer factorial of n.

    Raises:
        ValueError: If n is negative (consistent with factorielle_rec).

    Example:
        >>> factorielle_it(5)
        120
    """
    # Guard added for consistency with the recursive variant; the original
    # silently returned 1 for any negative n, and its docstring wrongly
    # showed a float result (9.332622e+157) for n=100.
    if n < 0:
        raise ValueError("n must be non-negative")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result
38d2a0a5641034d2ed2a1afa289cfad9836977ff | 8,928 | py | Python | Gui.py | LLCoolDave/ALttPEntranceRandomizer | 963ce00657321fb7eeee185fa4e8bb063bff30c5 | [
"MIT"
] | 17 | 2017-05-22T10:55:58.000Z | 2020-12-23T21:44:47.000Z | Gui.py | LLCoolDave/ALttPEntranceRandomizer | 963ce00657321fb7eeee185fa4e8bb063bff30c5 | [
"MIT"
] | 15 | 2017-05-22T12:14:55.000Z | 2019-07-19T21:00:28.000Z | Gui.py | LLCoolDave/ALttPEntranceRandomizer | 963ce00657321fb7eeee185fa4e8bb063bff30c5 | [
"MIT"
] | 15 | 2017-05-23T16:09:44.000Z | 2022-01-22T09:09:27.000Z | from Main import main, __version__ as ESVersion
from argparse import Namespace
import random
from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox
def guiMain(args=None):
    """Build and show the randomizer settings window.

    Creates all option widgets, wires the "Generate Patched Rom" button to
    the randomizer entry point (``Main.main``), optionally pre-populates the
    widgets from previously parsed command-line ``args``, and then blocks in
    the Tk main loop until the window is closed.

    Parameters
    ----------
    args : argparse.Namespace, optional
        Parsed command-line options used to seed the widget values.
    """
    mainWindow = Tk()
    mainWindow.wm_title("Entrance Shuffle %s" % ESVersion)

    topFrame = Frame(mainWindow)
    rightHalfFrame = Frame(topFrame)
    checkBoxFrame = Frame(rightHalfFrame)

    # Boolean game options (checkbox column).
    createSpoilerVar = IntVar()
    createSpoilerCheckbutton = Checkbutton(checkBoxFrame, text="Create Spoiler Log", variable=createSpoilerVar)
    suppressRomVar = IntVar()
    suppressRomCheckbutton = Checkbutton(checkBoxFrame, text="Do not create patched Rom", variable=suppressRomVar)
    quickSwapVar = IntVar()
    quickSwapCheckbutton = Checkbutton(checkBoxFrame, text="Enabled L/R Item quickswapping", variable=quickSwapVar)
    dungeonItemsVar = IntVar()
    # The CLI flag is inverted (nodungeonitems), so a *checked* box stores 0.
    dungeonItemsCheckbutton = Checkbutton(checkBoxFrame, text="Place Dungeon Items (Compasses/Maps)", onvalue=0, offvalue=1, variable=dungeonItemsVar)
    beatableOnlyVar = IntVar()
    beatableOnlyCheckbutton = Checkbutton(checkBoxFrame, text="Only ensure seed is beatable, not all items must be reachable", variable=beatableOnlyVar)
    shuffleGanonVar = IntVar()
    shuffleGanonCheckbutton = Checkbutton(checkBoxFrame, text="Include Ganon's Tower and Pyramid Hole in shuffle pool", variable=shuffleGanonVar)

    createSpoilerCheckbutton.pack(expand=True, anchor=W)
    suppressRomCheckbutton.pack(expand=True, anchor=W)
    quickSwapCheckbutton.pack(expand=True, anchor=W)
    dungeonItemsCheckbutton.pack(expand=True, anchor=W)
    beatableOnlyCheckbutton.pack(expand=True, anchor=W)
    shuffleGanonCheckbutton.pack(expand=True, anchor=W)

    # File pickers for the base ROM and an optional Link sprite.
    fileDialogFrame = Frame(rightHalfFrame)

    romDialogFrame = Frame(fileDialogFrame)
    baseRomLabel = Label(romDialogFrame, text='Base Rom')
    romVar = StringVar()
    romEntry = Entry(romDialogFrame, textvariable=romVar)

    def RomSelect():
        rom = filedialog.askopenfilename()
        romVar.set(rom)
    romSelectButton = Button(romDialogFrame, text='Select Rom', command=RomSelect)

    baseRomLabel.pack(side=LEFT)
    romEntry.pack(side=LEFT)
    romSelectButton.pack(side=LEFT)

    spriteDialogFrame = Frame(fileDialogFrame)
    baseSpriteLabel = Label(spriteDialogFrame, text='Link Sprite')
    spriteVar = StringVar()
    spriteEntry = Entry(spriteDialogFrame, textvariable=spriteVar)

    def SpriteSelect():
        sprite = filedialog.askopenfilename()
        spriteVar.set(sprite)
    spriteSelectButton = Button(spriteDialogFrame, text='Select Sprite', command=SpriteSelect)

    baseSpriteLabel.pack(side=LEFT)
    spriteEntry.pack(side=LEFT)
    spriteSelectButton.pack(side=LEFT)

    romDialogFrame.pack()
    spriteDialogFrame.pack()
    checkBoxFrame.pack()
    fileDialogFrame.pack()

    # Drop-down game options.
    dropDownFrame = Frame(topFrame)

    modeFrame = Frame(dropDownFrame)
    modeVar = StringVar()
    modeVar.set('open')
    modeOptionMenu = OptionMenu(modeFrame, modeVar, 'standard', 'open', 'swordless')
    modeOptionMenu.pack(side=RIGHT)
    modeLabel = Label(modeFrame, text='Game Mode')
    modeLabel.pack(side=LEFT)

    logicFrame = Frame(dropDownFrame)
    logicVar = StringVar()
    logicVar.set('noglitches')
    logicOptionMenu = OptionMenu(logicFrame, logicVar, 'noglitches', 'minorglitches')
    logicOptionMenu.pack(side=RIGHT)
    logicLabel = Label(logicFrame, text='Game logic')
    logicLabel.pack(side=LEFT)

    goalFrame = Frame(dropDownFrame)
    goalVar = StringVar()
    goalVar.set('ganon')
    goalOptionMenu = OptionMenu(goalFrame, goalVar, 'ganon', 'pedestal', 'dungeons', 'triforcehunt', 'crystals')
    goalOptionMenu.pack(side=RIGHT)
    goalLabel = Label(goalFrame, text='Game goal')
    goalLabel.pack(side=LEFT)

    difficultyFrame = Frame(dropDownFrame)
    difficultyVar = StringVar()
    difficultyVar.set('normal')
    difficultyOptionMenu = OptionMenu(difficultyFrame, difficultyVar, 'normal', 'timed', 'timed-ohko', 'timed-countdown')
    difficultyOptionMenu.pack(side=RIGHT)
    difficultyLabel = Label(difficultyFrame, text='Game difficulty')
    difficultyLabel.pack(side=LEFT)

    algorithmFrame = Frame(dropDownFrame)
    algorithmVar = StringVar()
    algorithmVar.set('vt25')
    algorithmOptionMenu = OptionMenu(algorithmFrame, algorithmVar, 'freshness', 'flood', 'vt21', 'vt22', 'vt25')
    algorithmOptionMenu.pack(side=RIGHT)
    algorithmLabel = Label(algorithmFrame, text='Item distribution algorithm')
    algorithmLabel.pack(side=LEFT)

    shuffleFrame = Frame(dropDownFrame)
    shuffleVar = StringVar()
    shuffleVar.set('full')
    shuffleOptionMenu = OptionMenu(shuffleFrame, shuffleVar, 'vanilla', 'simple', 'restricted', 'full', 'madness', 'insanity', 'dungeonsfull', 'dungeonssimple')
    shuffleOptionMenu.pack(side=RIGHT)
    shuffleLabel = Label(shuffleFrame, text='Entrance shuffle algorithm')
    shuffleLabel.pack(side=LEFT)

    heartbeepFrame = Frame(dropDownFrame)
    heartbeepVar = StringVar()
    heartbeepVar.set('normal')
    heartbeepOptionMenu = OptionMenu(heartbeepFrame, heartbeepVar, 'normal', 'half', 'quarter', 'off')
    heartbeepOptionMenu.pack(side=RIGHT)
    heartbeepLabel = Label(heartbeepFrame, text='Heartbeep sound rate')
    heartbeepLabel.pack(side=LEFT)

    modeFrame.pack(expand=True, anchor=E)
    logicFrame.pack(expand=True, anchor=E)
    goalFrame.pack(expand=True, anchor=E)
    difficultyFrame.pack(expand=True, anchor=E)
    algorithmFrame.pack(expand=True, anchor=E)
    shuffleFrame.pack(expand=True, anchor=E)
    heartbeepFrame.pack(expand=True, anchor=E)

    # Seed / batch count controls and the generate button.
    bottomFrame = Frame(mainWindow)

    seedLabel = Label(bottomFrame, text='Seed #')
    seedVar = StringVar()
    seedEntry = Entry(bottomFrame, textvariable=seedVar)
    countLabel = Label(bottomFrame, text='Count')
    countVar = StringVar()
    countSpinbox = Spinbox(bottomFrame, from_=1, to=100, textvariable=countVar)

    def generateRom():
        # BUGFIX: the original did `guiargs = Namespace`, which assigned
        # options onto the Namespace *class* object itself; every call
        # mutated shared class state. Use a fresh instance per invocation.
        guiargs = Namespace()
        guiargs.seed = int(seedVar.get()) if seedVar.get() else None
        guiargs.count = int(countVar.get()) if countVar.get() != '1' else None
        guiargs.mode = modeVar.get()
        guiargs.logic = logicVar.get()
        guiargs.goal = goalVar.get()
        guiargs.difficulty = difficultyVar.get()
        guiargs.algorithm = algorithmVar.get()
        guiargs.shuffle = shuffleVar.get()
        guiargs.heartbeep = heartbeepVar.get()
        guiargs.create_spoiler = bool(createSpoilerVar.get())
        guiargs.suppress_rom = bool(suppressRomVar.get())
        guiargs.nodungeonitems = bool(dungeonItemsVar.get())
        guiargs.beatableonly = bool(beatableOnlyVar.get())
        guiargs.quickswap = bool(quickSwapVar.get())
        guiargs.shuffleganon = bool(shuffleGanonVar.get())
        guiargs.rom = romVar.get()
        guiargs.jsonout = None
        guiargs.sprite = spriteVar.get() if spriteVar.get() else None
        try:
            if guiargs.count is not None:
                # Batch mode: first seed comes from the entry (may be None),
                # every subsequent run gets a fresh random seed.
                seed = guiargs.seed
                for _ in range(guiargs.count):
                    main(seed=seed, args=guiargs)
                    seed = random.randint(0, 999999999)
            else:
                main(seed=guiargs.seed, args=guiargs)
        except Exception as e:
            messagebox.showerror(title="Error while creating seed", message=str(e))
        else:
            messagebox.showinfo(title="Success", message="Rom patched successfully")

    generateButton = Button(bottomFrame, text='Generate Patched Rom', command=generateRom)

    seedLabel.pack(side=LEFT)
    seedEntry.pack(side=LEFT)
    countLabel.pack(side=LEFT)
    countSpinbox.pack(side=LEFT)
    generateButton.pack(side=LEFT)

    dropDownFrame.pack(side=LEFT)
    rightHalfFrame.pack(side=RIGHT)
    topFrame.pack(side=TOP)
    bottomFrame.pack(side=BOTTOM)

    if args is not None:
        # load values from commandline args
        createSpoilerVar.set(int(args.create_spoiler))
        suppressRomVar.set(int(args.suppress_rom))
        if args.nodungeonitems:
            dungeonItemsVar.set(int(not args.nodungeonitems))
        beatableOnlyVar.set(int(args.beatableonly))
        quickSwapVar.set(int(args.quickswap))
        if args.count:
            countVar.set(str(args.count))
        if args.seed:
            seedVar.set(str(args.seed))
        modeVar.set(args.mode)
        difficultyVar.set(args.difficulty)
        goalVar.set(args.goal)
        algorithmVar.set(args.algorithm)
        shuffleVar.set(args.shuffle)
        heartbeepVar.set(args.heartbeep)
        logicVar.set(args.logic)
        romVar.set(args.rom)
        shuffleGanonVar.set(args.shuffleganon)
        if args.sprite is not None:
            spriteVar.set(args.sprite)

    mainWindow.mainloop()
if __name__ == '__main__':
    # Launch the settings GUI when this module is executed as a script.
    guiMain()
| 40.216216 | 160 | 0.701053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 934 | 0.104615 |
38d2f4460dbc9cd2bf0ff51a444458ddda0cea71 | 11,293 | py | Python | src/dmri/coregister.py | erramuzpe/ruber | cf510a4cf9b0b15d870b6506a1593c3b2b00a3b7 | [
"MIT"
] | 2 | 2018-11-07T07:54:34.000Z | 2022-01-13T13:06:06.000Z | src/dmri/coregister.py | erramuzpe/ruber | cf510a4cf9b0b15d870b6506a1593c3b2b00a3b7 | [
"MIT"
] | null | null | null | src/dmri/coregister.py | erramuzpe/ruber | cf510a4cf9b0b15d870b6506a1593c3b2b00a3b7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Nipype workflows to co-register anatomical MRI to diffusion MRI.
"""
from src.env import DATA
import nipype.pipeline.engine as pe
from nipype.interfaces.fsl import MultiImageMaths
from nipype.interfaces.utility import IdentityInterface, Select, Split
from nipype.algorithms.misc import Gunzip
from nipype.interfaces.io import SelectFiles, DataSink
from .._utils import flatten_list
from ..preproc import spm_coregister
from os.path import join as opj
import os.path as op
from .artifacts import run_dti_artifact_correction
from .._utils import format_pair_list
# from ..config import check_atlas_file
from ..utils import (get_datasink,
get_input_node,
get_interface_node,
remove_ext,
extend_trait_list,
get_input_file_name,
extension_duplicates,
)
def spm_anat_to_diff_coregistration(wf_name="spm_anat_to_diff_coregistration"):
    """ Co-register the anatomical image and the images in anatomical space
    to the average B0 image.

    This estimates an affine transform from anat to diff space (SPM
    coregister, mutual-information cost) and applies it to the brain mask
    and to the two atlas parcellations.

    Nipype Inputs
    -------------
    dti_co_input.avg_b0: traits.File
        path to the average B0 image from the diffusion MRI.
        This image should come from a motion and Eddy currents
        corrected diffusion image.

    dti_co_input.anat: traits.File
        path to the high-contrast anatomical image.

    dti_co_input.brain_mask: traits.File
        path to the brain mask in anatomical space.

    dti_co_input.atlas_2514: traits.File
    dti_co_input.atlas_2754: traits.File
        paths to the two atlas parcellations in subject anatomical space.

    Nipype Outputs
    --------------
    dti_co_output.anat_diff: traits.File
        Anatomical image in diffusion space.

    dti_co_output.brain_mask_diff: traits.File
        Brain mask in diffusion space.

    dti_co_output.atlas_2514_diff: traits.File
    dti_co_output.atlas_2754_diff: traits.File
        Atlas parcellations warped to diffusion space.

    Returns
    -------
    wf: nipype Workflow
    """
    # specify input and output fields
    in_fields = ["avg_b0", "brain_mask", "anat", "atlas_2514", "atlas_2754"]

    out_fields = ["anat_diff",
                  "brain_mask_diff",
                  "atlas_2514_diff",
                  "atlas_2754_diff",
                  ]

    # SPM's coregister interface needs uncompressed NIfTI, so gunzip each
    # .nii.gz input first.
    gunzip_atlas_2514 = pe.Node(Gunzip(), name="gunzip_atlas_2514")
    gunzip_atlas_2754 = pe.Node(Gunzip(), name="gunzip_atlas_2754")
    gunzip_anat = pe.Node(Gunzip(), name="gunzip_anat")
    # NOTE(review): node name "brain_mask" breaks the gunzip_* naming
    # pattern of its siblings — presumably unintentional, but renaming would
    # change the working-directory layout, so it is left as-is.
    gunzip_brain_mask = pe.Node(Gunzip(), name="brain_mask")

    coreg_atlas_2514 = pe.Node(spm_coregister(cost_function="mi"), name="coreg_atlas_2514")
    # set the registration interpolation to nearest neighbour
    # (label images must not be interpolated/averaged).
    coreg_atlas_2514.inputs.write_interp = 0
    coreg_atlas_2754 = pe.Node(spm_coregister(cost_function="mi"), name="coreg_atlas_2754")
    # set the registration interpolation to nearest neighbour.
    coreg_atlas_2754.inputs.write_interp = 0

    # input interface
    dti_input = pe.Node(IdentityInterface(fields=in_fields, mandatory_inputs=True),
                        name="dti_co_input")

    gunzip_b0 = pe.Node(Gunzip(), name="gunzip_b0")
    coreg_b0 = pe.Node(spm_coregister(cost_function="mi"), name="coreg_b0")

    # co-registration of the brain mask; nearest neighbour keeps it binary.
    coreg_brain = pe.Node(spm_coregister(cost_function="mi"), name="coreg_brain")
    # set the registration interpolation to nearest neighbour.
    coreg_brain.inputs.write_interp = 0

    # output interface
    dti_output = pe.Node(IdentityInterface(fields=out_fields),
                         name="dti_co_output")

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    # Connect the nodes.  Each coreg_* node estimates anat -> b0 and applies
    # the transform to its apply_to_files input.
    wf.connect([(dti_input, gunzip_atlas_2514, [("atlas_2514", "in_file")]),
                (dti_input, gunzip_atlas_2754, [("atlas_2754", "in_file")]),
                (dti_input, gunzip_anat , [("anat", "in_file")]),
                (dti_input, gunzip_b0, [("avg_b0", "in_file")]),
                (dti_input, gunzip_brain_mask, [("brain_mask", "in_file")]),

                # co-registration
                # some of this code is not needed
                (gunzip_b0, coreg_b0, [("out_file", "target")]),
                (gunzip_brain_mask, coreg_b0, [("out_file", "apply_to_files")]),
                (gunzip_anat, coreg_b0, [("out_file", "source")]),

                (gunzip_b0, coreg_atlas_2514, [("out_file", "target")]),
                (gunzip_atlas_2514, coreg_atlas_2514, [("out_file", "apply_to_files")]),
                (gunzip_anat, coreg_atlas_2514, [("out_file", "source"), ]),

                (gunzip_b0, coreg_atlas_2754, [("out_file", "target")]),
                (gunzip_atlas_2754, coreg_atlas_2754, [("out_file", "apply_to_files")]),
                (gunzip_anat, coreg_atlas_2754, [("out_file", "source"), ]),

                (gunzip_b0, coreg_brain, [("out_file", "target")]),
                (gunzip_brain_mask, coreg_brain, [("out_file", "apply_to_files")]),
                (gunzip_anat, coreg_brain, [("out_file", "source"), ]),

                # output
                (coreg_atlas_2514, dti_output, [("coregistered_files", "atlas_2514_diff")]),
                (coreg_atlas_2754, dti_output, [("coregistered_files", "atlas_2754_diff")]),
                (coreg_b0, dti_output, [("coregistered_source", "anat_diff")]),
                (coreg_brain, dti_output, [("coregistered_files", "brain_mask_diff")]),
                ])
    return wf
def run_spm_fsl_dti_preprocessing(subject_list, session_list):
    """ Build and run a workflow that co-registers anatomy, brain mask and
    atlases into diffusion space for every subject/session combination.

    The workflow iterates over all (subject_id, session_id) pairs, selects
    the precomputed inputs under ``DATA``, runs
    ``spm_anat_to_diff_coregistration`` and sinks the results into the
    ``processed/diff`` output tree.

    Parameters
    ----------
    subject_list: list of str
        Subject identifiers to iterate over (used in the SelectFiles
        templates).

    session_list: list of str
        Session identifiers to iterate over.

    Returns
    -------
    None
        The workflow is executed with ``wf.run()``; nothing is returned.
    """

    # name of output folder
    output_dir = opj(DATA, 'processed')
    working_dir = opj(DATA, 'interim')

    # Infosource - a function free node to iterate over the list of subject names
    infosource = pe.Node(IdentityInterface(fields=['subject_id',
                                                   'session_id']),
                         name="infosource")
    infosource.iterables = [('subject_id', subject_list),
                            ('session_id', session_list)]

    # SelectFiles: templates are relative to DATA and filled in from the
    # infosource iterables.
    templates = {'avg_b0': 'processed/diff/_session_id_{session_id}_subject_id_{subject_id}/eddy_corrected_avg_b0.nii.gz',
                 'brain_mask': 'processed/fmriprep/{subject_id}/{session_id}/anat/{subject_id}_{session_id}_T1w_brainmask.nii.gz',
                 'anat_biascorr': 'processed/fmriprep/{subject_id}/{session_id}/anat/{subject_id}_{session_id}_T1w_brain.nii.gz',
                 'atlas_2514': 'processed/fmriprep/{subject_id}/{session_id}/anat/{subject_id}_{session_id}_atlas_2514.nii.gz',
                 'atlas_2754': 'processed/fmriprep/{subject_id}/{session_id}/anat/{subject_id}_{session_id}_atlas_2754.nii.gz',
                 }
    selectfiles = pe.Node(SelectFiles(templates,
                                      base_directory=DATA),
                          name="selectfiles")

    # Datasink
    datasink = pe.Node(DataSink(base_directory=DATA,
                                container=output_dir),
                       name="datasink")

    # The workflow boxes
    coreg_dti_wf = spm_anat_to_diff_coregistration()

    # dataSink output substitutions
    # NOTE(review): the substitution block below is kept (disabled) from an
    # earlier version that renamed datasink outputs; re-enable and adapt if
    # the default file names become a problem.
    ## The base name of the 'diff' file for the substitutions
    # diff_fbasename = remove_ext(op.basename(get_input_file_name(selectfiles, 'avg_b0')))
    # anat_fbasename = remove_ext(op.basename(get_input_file_name(selectfiles, 'anat_biascorr')))
    #
    # regexp_subst = [
    #     (r"/brain_mask_{diff}_space\.nii$", "/brain_mask.nii"),
    #     (r"/eddy_corrected\.nii$",          "/{diff}_eddycor.nii"),
    #     (r"/rc1anat_hc_corrected\.nii$",    "/gm_diff.nii"),
    #     (r"/rc2anat_hc_corrected\.nii$",    "/wm_diff.nii"),
    #     (r"/rc3anat_hc_corrected\.nii$",    "/csf_diff.nii"),
    #     (r"/rmanat_hc_corrected\.nii$",     "/{anat}_diff.nii"),
    # ]
    # regexp_subst = format_pair_list(regexp_subst, diff=diff_fbasename,
    #                                 anat=anat_fbasename)
    #
    # # prepare substitution for atlas_file
    #
    # atlas_basename = remove_ext(op.basename(get_input_file_name(selectfiles, 'atlas_anat')))
    # regexp_subst.extend([
    #     (r"/[\w]*{atlas}.*\.nii$", "/{atlas}_{diff}_space.nii"),
    # ])
    # regexp_subst = format_pair_list(regexp_subst, atlas=atlas_basename,
    #                                 diff=diff_fbasename)
    #
    #
    # regexp_subst += extension_duplicates(regexp_subst)
    # datasink.inputs.regexp_substitutions = extend_trait_list(datasink.inputs.regexp_substitutions,
    #                                                          regexp_subst)

    wf = pe.Workflow(name='artifact')
    wf.base_dir = working_dir

    # input and output diffusion MRI workflow to main workflow connections
    wf.connect([(infosource, selectfiles, [('subject_id', 'subject_id'),
                                           ('session_id', 'session_id')]),
                (selectfiles, coreg_dti_wf, [("avg_b0", "dti_co_input.avg_b0"),]),
                (selectfiles, coreg_dti_wf, [("brain_mask", "dti_co_input.brain_mask"),
                                             ("anat_biascorr", "dti_co_input.anat")
                                            ]),
                (selectfiles, coreg_dti_wf, [("atlas_2514", "dti_co_input.atlas_2514")]),
                (selectfiles, coreg_dti_wf, [("atlas_2754", "dti_co_input.atlas_2754")]),
                (coreg_dti_wf, datasink, [("dti_co_output.atlas_2514_diff", "diff.@atlas_2514")]),
                (coreg_dti_wf, datasink, [("dti_co_output.atlas_2754_diff", "diff.@atlas_2754")]),
                (coreg_dti_wf, datasink, [("dti_co_output.anat_diff", "diff.@anat_diff"),
                                          ("dti_co_output.brain_mask_diff", "diff.@brain_mask"),
                                          ]),
                ])

    wf.run()
    return
| 41.981413 | 130 | 0.595679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,262 | 0.554503 |
38d36df65b3ed1c6bdcd5d1855bfdf3aac9db033 | 14,536 | py | Python | huaweicloud-sdk-bssintl/huaweicloudsdkbssintl/v2/model/apply_individual_realname_auths_req.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-bssintl/huaweicloudsdkbssintl/v2/model/apply_individual_realname_auths_req.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-bssintl/huaweicloudsdkbssintl/v2/model/apply_individual_realname_auths_req.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ApplyIndividualRealnameAuthsReq:
    """Request model for submitting an individual (personal) real-name
    verification application (Huawei Cloud BSS SDK, auto-generated model).

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Attribute names whose values must be masked in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'customer_id': 'str',
        'identify_type': 'int',
        'verified_type': 'int',
        'verified_file_url': 'list[str]',
        'name': 'str',
        'verified_number': 'str',
        'change_type': 'int',
        'xaccount_type': 'str',
        'bank_card_info': 'BankCardInfoV2'
    }

    attribute_map = {
        'customer_id': 'customer_id',
        'identify_type': 'identify_type',
        'verified_type': 'verified_type',
        'verified_file_url': 'verified_file_url',
        'name': 'name',
        'verified_number': 'verified_number',
        'change_type': 'change_type',
        'xaccount_type': 'xaccount_type',
        'bank_card_info': 'bank_card_info'
    }

    def __init__(self, customer_id=None, identify_type=None, verified_type=None, verified_file_url=None, name=None, verified_number=None, change_type=None, xaccount_type=None, bank_card_info=None):
        """ApplyIndividualRealnameAuthsReq - a model defined in huaweicloud sdk"""

        self._customer_id = None
        self._identify_type = None
        self._verified_type = None
        self._verified_file_url = None
        self._name = None
        self._verified_number = None
        self._change_type = None
        self._xaccount_type = None
        self._bank_card_info = None
        self.discriminator = None

        self.customer_id = customer_id
        self.identify_type = identify_type
        if verified_type is not None:
            self.verified_type = verified_type
        self.verified_file_url = verified_file_url
        self.name = name
        self.verified_number = verified_number
        if change_type is not None:
            self.change_type = change_type
        self.xaccount_type = xaccount_type
        if bank_card_info is not None:
            self.bank_card_info = bank_card_info

    @property
    def customer_id(self):
        """Gets the customer_id of this ApplyIndividualRealnameAuthsReq.

        Customer ID.

        :return: The customer_id of this ApplyIndividualRealnameAuthsReq.
        :rtype: str
        """
        return self._customer_id

    @customer_id.setter
    def customer_id(self, customer_id):
        """Sets the customer_id of this ApplyIndividualRealnameAuthsReq.

        Customer ID.

        :param customer_id: The customer_id of this ApplyIndividualRealnameAuthsReq.
        :type: str
        """
        self._customer_id = customer_id

    @property
    def identify_type(self):
        """Gets the identify_type of this ApplyIndividualRealnameAuthsReq.

        Verification method: 0: verification by personal identity document;
        4: verification by personal bank card (only a single face-scan photo
        attachment is required).

        :return: The identify_type of this ApplyIndividualRealnameAuthsReq.
        :rtype: int
        """
        return self._identify_type

    @identify_type.setter
    def identify_type(self, identify_type):
        """Sets the identify_type of this ApplyIndividualRealnameAuthsReq.

        Verification method: 0: verification by personal identity document;
        4: verification by personal bank card (only a single face-scan photo
        attachment is required).

        :param identify_type: The identify_type of this ApplyIndividualRealnameAuthsReq.
        :type: int
        """
        self._identify_type = identify_type

    @property
    def verified_type(self):
        """Gets the verified_type of this ApplyIndividualRealnameAuthsReq.

        Identity document type, required when identify_type is 0 and ignored
        otherwise: 0: ID card (3 attachments: portrait side, national-emblem
        side, hand-held portrait side); 3: passport (3 attachments:
        personal-data page, entry-stamp page, hand-held personal-data page;
        alternatively 2 attachments: personal-data page and hand-held
        personal-data page); 5: Hong Kong/Macau mainland travel permit
        (3 attachments: portrait side, reverse side, hand-held portrait
        side); 6: Taiwan mainland travel permit (3 attachments: portrait
        side, reverse side, hand-held portrait side); 7: non-Chinese driving
        licence (2 attachments: front/portrait side, hand-held portrait
        side); 9: Hong Kong/Macau residence permit (3 attachments: portrait
        side, national-emblem side, hand-held portrait side); 10: Taiwan
        residence permit (3 attachments: portrait side, national-emblem
        side, hand-held portrait side).

        :return: The verified_type of this ApplyIndividualRealnameAuthsReq.
        :rtype: int
        """
        return self._verified_type

    @verified_type.setter
    def verified_type(self, verified_type):
        """Sets the verified_type of this ApplyIndividualRealnameAuthsReq.

        Identity document type, required when identify_type is 0 and ignored
        otherwise. See the getter for the list of accepted values and the
        attachments each value requires.

        :param verified_type: The verified_type of this ApplyIndividualRealnameAuthsReq.
        :type: int
        """
        self._verified_type = verified_type

    @property
    def verified_file_url(self):
        """Gets the verified_file_url of this ApplyIndividualRealnameAuthsReq.

        File URLs of the document attachments, listed in the exact order the
        attachments must appear for the chosen verified_type (file names are
        case-sensitive). Supported formats: jpg, jpeg, bmp, png, gif, pdf;
        each file at most 10 MB. Each entry is a URL *relative* to the OBS
        bucket's download directory: for
        https://bucketname.obs.Endpoint.myhuaweicloud.com/download/abc023.jpg
        write abc023.jpg, and for .../download/test/abc023.jpg write
        test/abc023.jpg.

        :return: The verified_file_url of this ApplyIndividualRealnameAuthsReq.
        :rtype: list[str]
        """
        return self._verified_file_url

    @verified_file_url.setter
    def verified_file_url(self, verified_file_url):
        """Sets the verified_file_url of this ApplyIndividualRealnameAuthsReq.

        File URLs of the document attachments, in order, relative to the OBS
        bucket's download directory. See the getter for format and size
        constraints.

        :param verified_file_url: The verified_file_url of this ApplyIndividualRealnameAuthsReq.
        :type: list[str]
        """
        self._verified_file_url = verified_file_url

    @property
    def name(self):
        """Gets the name of this ApplyIndividualRealnameAuthsReq.

        Name of the individual.

        :return: The name of this ApplyIndividualRealnameAuthsReq.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this ApplyIndividualRealnameAuthsReq.

        Name of the individual.

        :param name: The name of this ApplyIndividualRealnameAuthsReq.
        :type: str
        """
        self._name = name

    @property
    def verified_number(self):
        """Gets the verified_number of this ApplyIndividualRealnameAuthsReq.

        Identity document number.

        :return: The verified_number of this ApplyIndividualRealnameAuthsReq.
        :rtype: str
        """
        return self._verified_number

    @verified_number.setter
    def verified_number(self, verified_number):
        """Sets the verified_number of this ApplyIndividualRealnameAuthsReq.

        Identity document number.

        :param verified_number: The verified_number of this ApplyIndividualRealnameAuthsReq.
        :type: str
        """
        self._verified_number = verified_number

    @property
    def change_type(self):
        """Gets the change_type of this ApplyIndividualRealnameAuthsReq.

        Change type: -1: first-time real-name verification.

        :return: The change_type of this ApplyIndividualRealnameAuthsReq.
        :rtype: int
        """
        return self._change_type

    @change_type.setter
    def change_type(self, change_type):
        """Sets the change_type of this ApplyIndividualRealnameAuthsReq.

        Change type: -1: first-time real-name verification.

        :param change_type: The change_type of this ApplyIndividualRealnameAuthsReq.
        :type: int
        """
        self._change_type = change_type

    @property
    def xaccount_type(self):
        """Gets the xaccount_type of this ApplyIndividualRealnameAuthsReq.

        Platform identifier that Huawei assigns to the partner; the concrete
        value is allocated by Huawei (see the partner documentation on how
        to obtain the xaccountType value).

        :return: The xaccount_type of this ApplyIndividualRealnameAuthsReq.
        :rtype: str
        """
        return self._xaccount_type

    @xaccount_type.setter
    def xaccount_type(self, xaccount_type):
        """Sets the xaccount_type of this ApplyIndividualRealnameAuthsReq.

        Platform identifier that Huawei assigns to the partner; the concrete
        value is allocated by Huawei.

        :param xaccount_type: The xaccount_type of this ApplyIndividualRealnameAuthsReq.
        :type: str
        """
        self._xaccount_type = xaccount_type

    @property
    def bank_card_info(self):
        """Gets the bank_card_info of this ApplyIndividualRealnameAuthsReq.

        Bank card details, used when identify_type is 4 (bank card
        verification).

        :return: The bank_card_info of this ApplyIndividualRealnameAuthsReq.
        :rtype: BankCardInfoV2
        """
        return self._bank_card_info

    @bank_card_info.setter
    def bank_card_info(self, bank_card_info):
        """Sets the bank_card_info of this ApplyIndividualRealnameAuthsReq.

        Bank card details, used when identify_type is 4 (bank card
        verification).

        :param bank_card_info: The bank_card_info of this ApplyIndividualRealnameAuthsReq.
        :type: BankCardInfoV2
        """
        self._bank_card_info = bank_card_info

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Values of attributes listed in sensitive_list are masked.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")

        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ApplyIndividualRealnameAuthsReq):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 45.003096 | 1,131 | 0.717735 | 21,841 | 0.994581 | 0 | 0 | 17,858 | 0.813206 | 0 | 0 | 17,148 | 0.780874 |
38d42eb30da6ce6e341aef340a29b43528065a7b | 626 | py | Python | tests/test_pack_data.py | derekmerck/endpoint | 5b74f0b3303bbf419a6c9f71e9a4a156583bf51d | [
"MIT"
] | null | null | null | tests/test_pack_data.py | derekmerck/endpoint | 5b74f0b3303bbf419a6c9f71e9a4a156583bf51d | [
"MIT"
] | null | null | null | tests/test_pack_data.py | derekmerck/endpoint | 5b74f0b3303bbf419a6c9f71e9a4a156583bf51d | [
"MIT"
] | null | null | null | from datetime import datetime
from pprint import pprint
from cryptography.fernet import Fernet
from libsvc.utils import pack_data, unpack_data
def pack_data_test():
fkey = Fernet.generate_key()
data = {"today": datetime.today(),
"dog": "cat",
"red": "blue"}
p = pack_data(data, fkey, fields=["today", "dog"])
print(p.decode("utf8"))
u = unpack_data(p, fkey)
pprint(u)
assert u["dog"] == "cat"
today = datetime.fromisoformat(u["today"]).date()
assert today == datetime.today().date()
assert "red" not in u
if __name__ == "__main__":
pack_data_test()
| 21.586207 | 54 | 0.627796 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.124601 |
38d4b485fb5c15efab7f549f01ad1328142294e2 | 361 | py | Python | threading/part6.py | kryvokhyzha/examples-and-courses | 477e82ee24e6abba8a6b6d92555f2ed549ca682c | [
"MIT"
] | 1 | 2021-12-13T15:41:48.000Z | 2021-12-13T15:41:48.000Z | threading/part6.py | kryvokhyzha/examples-and-courses | 477e82ee24e6abba8a6b6d92555f2ed549ca682c | [
"MIT"
] | 15 | 2021-09-12T15:06:13.000Z | 2022-03-31T19:02:08.000Z | threading/part6.py | kryvokhyzha/examples-and-courses | 477e82ee24e6abba8a6b6d92555f2ed549ca682c | [
"MIT"
] | 1 | 2022-01-29T00:37:52.000Z | 2022-01-29T00:37:52.000Z | import threading
import queue
import time
def putting_thread(q):
    """Producer loop: every 10 seconds push the value 5 onto queue *q*.

    Runs forever; intended to be started as a daemon thread so it is
    killed automatically when the main program exits.
    """
    while True:
        print('start thread')
        time.sleep(10)
        q.put(5)
        print('sup something')
# Demo: consume from a shared queue while a daemon producer thread feeds it.
q = queue.Queue()
t = threading.Thread(target=putting_thread, args=(q,), daemon=True)
t.start()
q.put(0)
print(q.get(), 'first item')  # the 0 enqueued just above, returned immediately
print('----')
print(q.get(), 'finish')  # blocks until the producer puts 5 (~10 s later)
| 15.695652 | 67 | 0.617729 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.152355 |
38d56c4e68352399b46ecce8e483eb237d03b4c0 | 1,604 | py | Python | iris_sdk/models/portin.py | NumberAI/python-bandwidth-iris | 0e05f79d68b244812afb97e00fd65b3f46d00aa3 | [
"MIT"
] | 2 | 2020-04-13T13:47:59.000Z | 2022-02-23T20:32:41.000Z | iris_sdk/models/portin.py | bandwidthcom/python-bandwidth-iris | dbcb30569631395041b92917252d913166f7d3c9 | [
"MIT"
] | 5 | 2020-09-18T20:59:24.000Z | 2021-08-25T16:51:42.000Z | iris_sdk/models/portin.py | bandwidthcom/python-bandwidth-iris | dbcb30569631395041b92917252d913166f7d3c9 | [
"MIT"
] | 5 | 2018-12-12T14:39:50.000Z | 2020-11-17T21:42:29.000Z | #!/usr/bin/env python
from __future__ import division, absolute_import, print_function
from future.builtins import super
from iris_sdk.models.activation_status import ActivationStatus
from iris_sdk.models.base_resource import BaseResource
from iris_sdk.models.data.portin import PortInData
from iris_sdk.models.history import History
from iris_sdk.models.loas import Loas
from iris_sdk.models.notes import Notes
from iris_sdk.models.totals import Totals
# XML element names used when parsing (response) and serializing (save)
# LNP order payloads.
XML_NAME_PORTIN = "LnpOrderResponse"
XML_NAME_PORTIN_SAVE = "LnpOrder"
# XPath template; formatted with the node name at lookup time.
XPATH_PORTIN = "/{}"
class PortIn(BaseResource, PortInData):
    """Local number portability order"""

    _node_name = XML_NAME_PORTIN
    _node_name_save = XML_NAME_PORTIN_SAVE
    _xpath = XPATH_PORTIN

    def __init__(self, parent=None, client=None):
        super().__init__(parent, client)
        PortInData.__init__(self)
        # Child sub-resources hang off this order instance.
        self._activation_status = ActivationStatus(self)
        self._history = History(self)
        self._loas = Loas(self, client)
        self._notes = Notes(self, client)
        self._totals = Totals(self, client)

    @property
    def activation_status(self):
        """Activation-status sub-resource of this order."""
        return self._activation_status

    @property
    def history(self):
        """History sub-resource of this order."""
        return self._history

    @property
    def id(self):
        """Alias for ``order_id`` so generic resource code can use ``id``."""
        return self.order_id

    @id.setter
    def id(self, value):
        self.order_id = value

    @property
    def loas(self):
        """Letters-of-authorization sub-resource of this order."""
        return self._loas

    @property
    def notes(self):
        """Notes sub-resource of this order."""
        return self._notes

    @property
    def totals(self):
        """Totals sub-resource of this order."""
        return self._totals

    def save(self):
        """Persist the order by POSTing its data; returns the helper's result."""
        return self._post_data()
38d6262e5df4e8207a9017b092a7fbda44ff1889 | 9,109 | py | Python | lib/common_lib.py | JakubWS/imap-mailbox-backup-tool | 8576b9aa2a9f3392a6c657cd40247cd71a83af49 | [
"MIT"
] | null | null | null | lib/common_lib.py | JakubWS/imap-mailbox-backup-tool | 8576b9aa2a9f3392a6c657cd40247cd71a83af49 | [
"MIT"
] | null | null | null | lib/common_lib.py | JakubWS/imap-mailbox-backup-tool | 8576b9aa2a9f3392a6c657cd40247cd71a83af49 | [
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from encodings import utf_8
import imaplib, datetime, time, re, requests, yaml, os, email, glob, shutil, mailbox, smtplib, ssl, hashlib
from zipfile import ZipFile
from email.header import decode_header
from os.path import exists as file_exists
from os.path import basename
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
from email import encoders
from pathlib import Path
def log(message):
    """Print *message* to stdout prefixed with a timestamp and an [info] tag."""
    stamp = datetime.datetime.now().strftime("%Y-%m-%d | %H:%M:%S:%f")
    line = stamp + " :: [info] " + message
    print(line)
def log_error(message):
    """Print *message* to stdout prefixed with a timestamp and an [error] tag."""
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d | %H:%M:%S:%f")
    # Fixed: space after the tag keeps the format consistent with log()'s
    # "[info] " (the original printed "[error]message" with no separator).
    print(timestamp + " :: [error] " + message)
def log_fatal(message):
    """Print *message* with a timestamp and [fatal] tag, then terminate.

    Raises:
        SystemExit: always, with exit code 1 so shells/callers see a failure.
            The original used ``quit()``, which is an interactive-session
            helper (absent under ``python -S``) and exits with status 0.
    """
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d | %H:%M:%S:%f")
    # Fixed: space after the tag, consistent with log()'s "[info] " format.
    print(timestamp + " :: [fatal] " + message)
    raise SystemExit(1)
def connection_test(url, timeout):
    """Return True when an HTTP GET of *url* succeeds within *timeout* seconds.

    Only connection failures and timeouts count as "down"; any HTTP status
    code (including 4xx/5xx) still returns True, matching the original intent
    of testing reachability rather than correctness.

    NOTE(review): certificate verification is disabled (``verify=False``),
    which permits man-in-the-middle interception — confirm this is intentional
    for the servers being probed.
    """
    try:
        # Response object and exception instance were unused; don't bind them.
        requests.get(url, timeout=timeout, verify=False)
        return True
    except (requests.ConnectionError, requests.Timeout):
        return False
def test_path(path):
    """Log and return whether *path* exists on disk.

    Returns:
        bool: True when the path exists, False otherwise.
    """
    # Idiom fix: no "== True" comparison; branch on the bool directly and
    # return it once instead of from two branches.
    exists = file_exists(path)
    if exists:
        log("path " + path + " exist")
    else:
        log("path " + path + " not found")
    return exists
def create_folder(path):
    """Create directory *path* if it does not already exist.

    Non-recursive: the parent directory must exist (os.mkdir semantics).
    """
    # Idiom fix: "not test_path(path)" instead of "== False".
    if not test_path(path):
        log("creating directory " + path)
        os.mkdir(path)
def load_configuration(path):
    """Parse the YAML configuration file at *path* and return its contents."""
    # test_path only logs existence; a missing file still raises in open().
    test_path(path)
    with open(path, "r") as config_file:
        return yaml.safe_load(config_file)
def open_mailbox_list(path):
    """Load and return the YAML mailbox list stored at *path*."""
    log("opening mailbox list from " + path)
    # Existence check is log-only; open() below raises if the file is absent.
    test_path(path)
    with open(path, 'r') as mailbox_list:
        return yaml.safe_load(mailbox_list)
def save_new_emails_to_eml(host, port, username, password, imap_folder, local_folder):
    """Mirror one IMAP folder to disk: save every message not yet present
    in *local_folder* as an individual .eml file.

    De-duplication key: MD5 of the stripped Message-ID header, or of the
    Date header when Message-ID is missing.  Files are named
    "<md5>__<subject[:20]>__<date>.eml", so any existing "<md5>*.eml" file
    means the message was already downloaded and it is skipped.
    """
    log("opening connection to the imap server "+host+" on port "+str(port))
    mailbox = imaplib.IMAP4_SSL(host, port)
    mailbox.login(username, password)
    # readonly=True: fetching must not change message flags on the server.
    mailbox.select(imap_folder, readonly=True)
    rv, data = mailbox.search(None, "(ALL)")
    how_many = len(data[0].split())
    log("-- Processing mailbox: " + imap_folder +", found "+str(how_many)+" messages")
    new_message_counter = 0
    if rv == 'OK':
        message_counter = 0
        for item in data[0].split():
            message_counter = message_counter + 1
            empty = False
            saved_counter = 0
            # First fetch only the headers needed to build the dedup key.
            rv, data = mailbox.fetch(item,'(BODY[HEADER.FIELDS (MESSAGE-ID DATE)])')
            for response_part in data:
                if isinstance(response_part, tuple):
                    msg = email.message_from_bytes(response_part[1])
                    if (msg['message-id']) == None:
                        # No Message-ID: fall back to hashing the Date header.
                        message_id = msg['DATE']
                        message_id = hashlib.md5(str(message_id).encode())
                        file_id = str(message_id.hexdigest())
                        empty = False
                    else:
                        # Strip the leading '<' and the '@domain>' suffix
                        # before hashing so the key is stable.
                        message_id = (((str(msg['message-id'])).split("@")[0])[1:])
                        message_id = hashlib.md5(str(message_id).encode())
                        file_id = str(message_id.hexdigest())
                        empty = False
            if empty == False:
                # Any existing file starting with the hash means "already saved".
                pattern = str(os.path.join(local_folder,file_id)) + "*.eml"
                if glob.glob(pattern):
                    for file in glob.glob(pattern):
                        existing_file_path = os.path.basename(file)
                        log("----["+str(message_counter)+"/"+str(how_many)+"] found existing message "+ existing_file_path + ". Skipping...")
                else:
                    # New message: fetch the full RFC822 body and save it.
                    rv, data = mailbox.fetch(item, '(RFC822)')
                    for response_part in data:
                        saved_counter = saved_counter +1
                        if isinstance(response_part, tuple):
                            msg = email.message_from_bytes(response_part[1])
                            if not msg['subject']:
                                msg['subject'] = '[No Subject]'
                            subject, encoding = (decode_header(msg['subject'])[0])
                            if encoding == None or encoding == 'unknown-8bit':
                                subject = str(subject[:20]).replace('\\','')
                            else:
                                subject = str(subject).encode(encoding)
                                subject = (str(subject)[:20]).replace('\\','')
                            if not msg['date']:
                                msg['date'] = '[no date]'
                            # [5:-6] drops the weekday prefix and timezone
                            # suffix of the Date header; spaces become dashes.
                            time_of_email = (msg['date'][5:-6]).replace(" ","-")
                            file_id = str(message_id.hexdigest())
                            filename = (file_id+"__" + str(subject) + "__" + time_of_email + ".eml")
                            # Strip characters unsafe in filenames, then
                            # collapse header line breaks.
                            filename = re.sub(r"[\"/;:<>{}`+,=~?*|]", "", filename)
                            filename = re.sub(r"\r\n","__", filename)
                            filename = re.sub(r"\n","__", filename)
                            if rv != 'OK':
                                log_error("--- ERROR getting message: "+ str(item))
                                return
                            new_message_counter = new_message_counter + 1
                            log("----["+str(message_counter)+"/"+str(how_many)+"] --- saving message in file: " + filename)
                            file_full_path = os.path.join(local_folder,filename)
                            file = open(file_full_path, 'wb')
                            file.write(data[0][1])
                            file.close()
            else:
                log("found broken message -- skipping")
    else:
        log("ERROR: Unable to open mailbox "+ str(rv))
    log("closing "+ username +" mailbox.")
    log("saved "+str(new_message_counter)+" new messages")
def archive_backup(source_dir, output_filename):
    """Zip *source_dir* (including empty sub-directories) into *output_filename*.

    Archive entries are stored relative to source_dir's parent, so the
    archive unpacks into a single top-level folder named after source_dir.
    """
    relroot = os.path.abspath(os.path.join(source_dir, os.pardir))
    # 'archive' instead of 'zip' so the builtin zip() is not shadowed.
    with ZipFile(output_filename, "w") as archive:
        for root, _dirs, files in os.walk(source_dir):
            # Add the directory entry itself (needed for empty dirs).
            archive.write(root, os.path.relpath(root, relroot))
            for name in files:
                filename = os.path.join(root, name)
                if os.path.isfile(filename):  # regular files only
                    arcname = os.path.join(os.path.relpath(root, relroot), name)
                    archive.write(filename, arcname, compresslevel=9)
def clean_dir(path):
    """Delete every regular file, symlink and sub-directory directly inside
    *path*, leaving the directory itself in place."""
    for entry in os.listdir(path):
        entry_path = os.path.join(path, entry)
        try:
            if os.path.isfile(entry_path) or os.path.islink(entry_path):
                os.unlink(entry_path)
            elif os.path.isdir(entry_path):
                shutil.rmtree(entry_path)
        except Exception as e:
            # Best-effort cleanup: report and continue with remaining entries.
            print('Failed to delete %s. Reason: %s' % (entry_path, e))
def roll_backups_days(path, days):
    """Delete regular files in *path* whose mtime is older than *days* days."""
    cutoff = time.time() - days * 86400
    log("Cleaning directory "+path+" from backups older than "+ str(days)+" days.")
    for filename in os.listdir(path):
        full_path = os.path.join(path, filename)
        # Same order as before: age check first, then the regular-file check.
        if os.path.getmtime(full_path) < cutoff and os.path.isfile(full_path):
            log("removing old backup: " + filename)
            os.remove(full_path)
def roll_backups_items(path, items_to_keep):
    """Delete all but the last *items_to_keep* backups (by filename order)
    in *path*.

    Filenames are compared lexicographically, which keeps the newest archives
    when names embed sortable timestamps.
    """
    log("Cleaning directory "+path+" from backups. Last "+str(items_to_keep)+" backup will be kept")
    # BUG FIX: sort BEFORE slicing.  os.listdir() returns entries in arbitrary
    # order, so the original sorted(os.listdir(path)[:-n]) dropped n *random*
    # entries from deletion and could delete the newest backups while keeping
    # old ones.  Also guard items_to_keep <= 0 ([:-0] would delete nothing,
    # which we preserve as the safe behavior).
    candidates = sorted(os.listdir(path))
    doomed = candidates[:-items_to_keep] if items_to_keep > 0 else []
    for filename in doomed:
        filename_relPath = os.path.join(path, filename)
        log("removing old backup: " + filename_relPath)
        os.remove(filename_relPath)
def send_mail_notification(send_from, send_to, subject, text, files=None,
                           server="localhost", port=587, username='', password='',
                           use_tls=True):
    """Email *text* (plus optional file attachments) from *send_from* to *send_to*.

    Args:
        files: iterable of paths to attach; default None means no attachments.
            (Was a mutable default ``[]`` — fixed to avoid shared-state bugs;
            passing a list still works unchanged.)
        use_tls: upgrade the connection with STARTTLS before authenticating.
    """
    msg = MIMEMultipart()
    msg['From'] = send_from
    msg['To'] = send_to
    msg['Date'] = formatdate(localtime=True)
    msg['Subject'] = subject
    msg.attach(MIMEText(text))

    for path in files or []:
        part = MIMEBase('application', "octet-stream")
        with open(path, 'rb') as attachment:
            part.set_payload(attachment.read())
        encoders.encode_base64(part)
        part.add_header('Content-Disposition',
                        'attachment; filename={}'.format(Path(path).name))
        msg.attach(part)

    # 'with' guarantees the connection is QUIT/closed even if login or
    # sendmail raises (the original leaked the socket on any exception).
    with smtplib.SMTP(server, port) as smtp:
        if use_tls:
            # NOTE(review): certificate verification is disabled here, which
            # permits man-in-the-middle interception — confirm this is
            # intentional before pointing at production mail servers.
            context = ssl._create_unverified_context()
            smtp.starttls(context=context)
        smtp.login(username, password)
        smtp.sendmail(send_from, send_to, msg.as_string())