hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
421097c0f352c62da6301188c7377f912e0f1d54 | 1,465 | py | Python | modules/worker.py | strangest-quark/iConsent | 096a471a8f5c61dcb9cff5fb380ddb55848bf055 | [
"MIT"
] | 10 | 2020-08-08T13:59:10.000Z | 2020-11-13T23:13:57.000Z | modules/worker.py | strangest-quark/iConsent | 096a471a8f5c61dcb9cff5fb380ddb55848bf055 | [
"MIT"
] | 1 | 2021-09-08T02:26:48.000Z | 2021-09-08T02:26:48.000Z | modules/worker.py | strangest-quark/iConsent | 096a471a8f5c61dcb9cff5fb380ddb55848bf055 | [
"MIT"
] | 2 | 2021-07-29T07:40:59.000Z | 2022-01-28T03:20:22.000Z | import logging
from queue import Queue
from threading import Thread
from time import time
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
if __name__ == '__main__':
main()
| 28.173077 | 102 | 0.619795 |
4212519f45b1cf9dfda4da64b4b3fae6c56b03b5 | 2,420 | py | Python | src/saml2/extension/pefim.py | cnelson/pysaml2 | a30e51c271e27e4411a0243b65adbf5d7a3abb07 | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/core/ext-py/pysaml2-4.4.0/src/saml2/extension/pefim.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | desktop/core/ext-py/pysaml2-4.4.0/src/saml2/extension/pefim.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | #!/usr/bin/env python
import saml2
from saml2 import SamlBase
from saml2.xmldsig import KeyInfo
NAMESPACE = 'urn:net:eustix:names:tc:PEFIM:0.0:assertion'
ELEMENT_FROM_STRING = {
SPCertEnc.c_tag: spcertenc_from_string,
SPCertEncType_.c_tag: spcertenc_type__from_string,
}
ELEMENT_BY_TAG = {
'SPCertEnc': SPCertEnc,
'SPCertEncType': SPCertEncType_,
}
| 31.428571 | 80 | 0.654959 |
42144545d417abe762a3d9307033d86aace5b332 | 805 | py | Python | ontask/migrations/0004_remove_old_migration_refs.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | [
"MIT"
] | 33 | 2017-12-02T04:09:24.000Z | 2021-11-07T08:41:57.000Z | ontask/migrations/0004_remove_old_migration_refs.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | [
"MIT"
] | 189 | 2017-11-16T04:06:29.000Z | 2022-03-11T23:35:59.000Z | ontask/migrations/0004_remove_old_migration_refs.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | [
"MIT"
] | 30 | 2017-11-30T03:35:44.000Z | 2022-01-31T03:08:08.000Z | # Generated by Django 2.2.4 on 2019-08-24 06:02
from django.db import connection as con, migrations
from psycopg2 import sql
| 26.833333 | 78 | 0.650932 |
42149897d0b37e2db558007492da879e2a80968d | 639 | py | Python | scripts/tfloc_summary.py | lldelisle/bx-python | 19ab41e0905221e3fcaaed4b74faf2d7cda0d15a | [
"MIT"
] | 122 | 2015-07-01T12:00:22.000Z | 2022-03-02T09:27:35.000Z | scripts/tfloc_summary.py | lldelisle/bx-python | 19ab41e0905221e3fcaaed4b74faf2d7cda0d15a | [
"MIT"
] | 64 | 2015-11-06T21:03:18.000Z | 2022-03-24T00:55:27.000Z | scripts/tfloc_summary.py | lldelisle/bx-python | 19ab41e0905221e3fcaaed4b74faf2d7cda0d15a | [
"MIT"
] | 60 | 2015-10-05T19:19:36.000Z | 2021-11-19T20:53:54.000Z | #!/usr/bin/env python
"""
Read TFLOC output from stdin and write out a summary in which the nth line
contains the number of sites found in the nth alignment of the input.
TODO: This is very special case, should it be here?
"""
import sys
from collections import defaultdict
counts = defaultdict(int)
max_index = -1
for line in sys.stdin:
if line[0].isdigit():
current_index = int(line)
max_index = max(current_index, max_index)
elif line[0] == "'":
counts[current_index] += 1
else:
raise ValueError("Invalid input line " + line)
for i in range(max_index + 1):
print(counts.get(i, 0))
| 22.821429 | 74 | 0.674491 |
4214b1ee9bcb816a48babcc6e1d8cfe461c7c2c0 | 3,649 | py | Python | plugins/data/bAbI/digitsDataPluginBAbI/data.py | Linda-liugongzi/DIGITS-digits-py3 | 6df5eb6972574a628b9544934518ec8dfa9c7439 | [
"BSD-3-Clause"
] | null | null | null | plugins/data/bAbI/digitsDataPluginBAbI/data.py | Linda-liugongzi/DIGITS-digits-py3 | 6df5eb6972574a628b9544934518ec8dfa9c7439 | [
"BSD-3-Clause"
] | null | null | null | plugins/data/bAbI/digitsDataPluginBAbI/data.py | Linda-liugongzi/DIGITS-digits-py3 | 6df5eb6972574a628b9544934518ec8dfa9c7439 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
import os
from digits.utils import subclass, override, constants
from digits.extensions.data.interface import DataIngestionInterface
from .forms import DatasetForm, InferenceForm
from . import utils
from flask_babel import lazy_gettext as _
DATASET_TEMPLATE = "templates/dataset_template.html"
INFERENCE_TEMPLATE = "templates/inference_template.html"
| 31.730435 | 105 | 0.636339 |
4216517a1b9daa01aa443bee25e4880a6b96ed43 | 3,767 | py | Python | 13_TransparentOrigami/fold2.py | dandrianneDEL/PyAdventOfCode2021 | ea91186383c0855c81c7243d527de0c4dd4c0afb | [
"MIT"
] | null | null | null | 13_TransparentOrigami/fold2.py | dandrianneDEL/PyAdventOfCode2021 | ea91186383c0855c81c7243d527de0c4dd4c0afb | [
"MIT"
] | null | null | null | 13_TransparentOrigami/fold2.py | dandrianneDEL/PyAdventOfCode2021 | ea91186383c0855c81c7243d527de0c4dd4c0afb | [
"MIT"
] | null | null | null | import filehelper
fileResult = filehelper.readfile()
# ******************************************
# PART 2 - Fold plastic transparent sheet
# Finish folding the transparent paper according to the instructions. The manual says the code is always eight capital letters.
# What code do you use to activate the infrared thermal imaging camera system?
# ******************************************
matrix = Matrix(fileResult.maxX, fileResult.maxY)
matrix.fill_coords(fileResult.coords)
# Perform folds
for fold in fileResult.folds:
print(f"performing fold {fold}")
matrix = matrix.fold(fold) | 34.87963 | 127 | 0.535439 |
421750365075d0ccd2892de6546549e569376c1b | 208 | py | Python | complete/01 - 10/Problem1/main.py | this-jacob/project-euler | 8f9e700e2875e84d081eade44fd2107db0a0ae12 | [
"MIT"
] | null | null | null | complete/01 - 10/Problem1/main.py | this-jacob/project-euler | 8f9e700e2875e84d081eade44fd2107db0a0ae12 | [
"MIT"
] | null | null | null | complete/01 - 10/Problem1/main.py | this-jacob/project-euler | 8f9e700e2875e84d081eade44fd2107db0a0ae12 | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
main()
| 13.866667 | 28 | 0.408654 |
4219aa019cf5a624b152bb0ddf85c0a457ed2c73 | 2,416 | py | Python | webapp/scan_comments.py | ctrl-meta-f/ngk | 6d9122ee84cc7420f9b135556c7b03e9b20428e4 | [
"BSD-2-Clause"
] | null | null | null | webapp/scan_comments.py | ctrl-meta-f/ngk | 6d9122ee84cc7420f9b135556c7b03e9b20428e4 | [
"BSD-2-Clause"
] | null | null | null | webapp/scan_comments.py | ctrl-meta-f/ngk | 6d9122ee84cc7420f9b135556c7b03e9b20428e4 | [
"BSD-2-Clause"
] | null | null | null | import logging
import time
import requests
import lxml.etree
import re
import os
from schema import ScopedSession, SyncState
logging.basicConfig(
filename=os.getenv("LOG_FILE", "../logs/scan_comments.log"),
format="%(asctime)s %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.DEBUG)
COMMENTS_URL = "http://govnokod.ru/comments"
FAST_DELAY = 15
SLOW_DELAY = 60
FAST_TO_SLOW_STEPS = 20
logging.info("=== started ===")
fast_requests = 0
while True:
try:
comments = fetch_latest_comments()
has_updates = update_sync_states(comments)
if has_updates:
fast_requests = FAST_TO_SLOW_STEPS
except Exception as e:
logging.exception(e)
fast_requests = 0
if fast_requests > 0:
delay = FAST_DELAY
fast_requests -= 1
else:
delay = SLOW_DELAY
logging.debug("Sleeping for %d seconds (%d fast requests left)...", delay, fast_requests)
time.sleep(delay)
| 29.463415 | 134 | 0.631623 |
421a32da4769d80ffba1268d31b7a676642e60fc | 1,009 | py | Python | s3prl/upstream/example/hubconf.py | hhhaaahhhaa/s3prl | a469787f05c42196c4d989555082f5fd9dcbe8a6 | [
"Apache-2.0"
] | 856 | 2021-01-15T15:40:32.000Z | 2022-03-31T07:08:17.000Z | s3prl/upstream/example/hubconf.py | hhhaaahhhaa/s3prl | a469787f05c42196c4d989555082f5fd9dcbe8a6 | [
"Apache-2.0"
] | 210 | 2021-01-15T13:28:50.000Z | 2022-03-30T06:13:51.000Z | s3prl/upstream/example/hubconf.py | hhhaaahhhaa/s3prl | a469787f05c42196c4d989555082f5fd9dcbe8a6 | [
"Apache-2.0"
] | 208 | 2021-01-15T03:03:12.000Z | 2022-03-31T08:33:27.000Z | from .expert import UpstreamExpert as _UpstreamExpert
def customized_upstream(*args, **kwargs):
"""
To enable your customized pretrained model, you only need to implement
upstream/example/expert.py and leave this file as is. This file is
used to register the UpstreamExpert in upstream/example/expert.py
The following is a brief introduction of the registration mechanism.
The s3prl/hub.py will collect all the entries registered in this file
(callable variables without the underscore prefix) as a centralized
upstream factory. One can pick up this upstream from the factory via
1.
from s3prl.hub import customized_upstream
model = customized_upstream(ckpt, model_config)
2.
model = torch.hub.load(
'your_s3prl_path',
'customized_upstream',
ckpt,
model_config,
source='local',
)
Our run_downstream.py and downstream/runner.py follows the first usage
"""
return _UpstreamExpert(*args, **kwargs)
| 32.548387 | 74 | 0.716551 |
421a86ab2fcc5ca9b6f576b1a9c163c17517de0f | 463 | py | Python | g-code-testing/g_code_parsing/g_code_functionality_defs/thermocycler/set_ramp_rate_g_code_functionality_def.py | Opentrons/protocol_framework | ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f | [
"Apache-2.0"
] | null | null | null | g-code-testing/g_code_parsing/g_code_functionality_defs/thermocycler/set_ramp_rate_g_code_functionality_def.py | Opentrons/protocol_framework | ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f | [
"Apache-2.0"
] | null | null | null | g-code-testing/g_code_parsing/g_code_functionality_defs/thermocycler/set_ramp_rate_g_code_functionality_def.py | Opentrons/protocol_framework | ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f | [
"Apache-2.0"
] | null | null | null | from typing import Dict
from g_code_parsing.g_code_functionality_defs.g_code_functionality_def_base import (
GCodeFunctionalityDefBase,
)
| 33.071429 | 84 | 0.740821 |
421c7e1609af23f9ed8e7709fd3cc2ca7ae61d73 | 19,452 | py | Python | src/mrio.py | ElcoK/MRIA_Argentina | 45194eb738c725276c3667078ac8d229554b550e | [
"MIT"
] | null | null | null | src/mrio.py | ElcoK/MRIA_Argentina | 45194eb738c725276c3667078ac8d229554b550e | [
"MIT"
] | null | null | null | src/mrio.py | ElcoK/MRIA_Argentina | 45194eb738c725276c3667078ac8d229554b550e | [
"MIT"
] | 2 | 2021-06-28T11:51:17.000Z | 2022-01-10T06:49:01.000Z | import os,sys
import pandas as pd
import numpy as np
import subprocess
from tqdm import tqdm
from ras_method import ras_method
import warnings
warnings.filterwarnings('ignore')
def est_trade_value(x,output_new,sector):
"""
Function to estimate the trade value between two sectors
"""
if (sector is not 'other1') & (sector is not 'other2'):
sec_output = output_new.sum(axis=1).loc[output_new.sum(axis=1).index.get_level_values(1) == sector].reset_index()
else:
sec_output = output_new.sum(axis=1).loc[output_new.sum(axis=1).index.get_level_values(1) == 'IMP'].reset_index()
x['gdp'] = x.gdp*min(sec_output.loc[sec_output.region==x.reg1].values[0][2],sec_output.loc[sec_output.region==x.reg2].values[0][2])
return x
def estimate(table='INDEC',year=2015,print_output=False,print_progress=True):
"""
Function to create a province-level MRIO table, based on a national IO table. The default is the INDEC table.
"""
data_path = os.path.join('..','data')
# load sector data
sectors = list(pd.read_excel(os.path.join(data_path,'other_sources',
'industry_high_level_classification.xlsx'))['SEC_CODE'].values)
# load provincial mappers
reg_mapper = pd.read_excel(os.path.join(data_path,'INDEC','sh_cou_06_16.xls'),sheet_name='reg_mapper',header=None).iloc[:,:2]
reg_mapper = dict(zip(reg_mapper[0],reg_mapper[1]))
# load provincial data
prov_data = pd.read_excel(os.path.join(data_path,'INDEC','PIB_provincial_06_17.xls'),sheet_name='VBP',
skiprows=3,index_col=[0],header=[0],nrows=71)
prov_data = prov_data.loc[[x.isupper() for x in prov_data.index],:]
prov_data.columns = [x.replace(' ','_') for x in ['Ciudad de Buenos Aires', 'Buenos Aires', 'Catamarca', 'Cordoba',
'Corrientes', 'Chaco', 'Chubut', 'Entre Rios', 'Formosa', 'Jujuy',
'La Pampa', 'La Rioja', 'Mendoza', 'Misiones', 'Neuquen', 'Rio Negro',
'Salta', 'San Juan', 'San Luis', 'Santa Cruz', 'Santa Fe',
'Santiago del Estero', 'Tucuman', 'Tierra del Fuego',
'No distribuido', 'Total']]
region_names = list(prov_data.columns)[:-2]
prov_data.index = sectors+['TOTAL']
prov_data = prov_data.replace(0, 1)
### Create proxy data for first iteration
sectors+['other1','other2']
# proxy level 2
proxy_reg_arg = pd.DataFrame(prov_data.iloc[-1,:24]/prov_data.iloc[-1,:24].sum()).reset_index()
proxy_reg_arg['year'] = 2016
proxy_reg_arg = proxy_reg_arg[['year','index','TOTAL']]
proxy_reg_arg.columns = ['year','id','gdp']
proxy_reg_arg.to_csv(os.path.join('..','mrio_downscaling','proxy_reg_arg.csv'),index=False)
# proxy level 4
for iter_,sector in enumerate(sectors+['other1','other2']):
if (sector is not 'other1') & (sector is not 'other2'):
proxy_sector = pd.DataFrame(prov_data.iloc[iter_,:24]/prov_data.iloc[iter_,:24].sum()).reset_index()
proxy_sector['year'] = 2016
proxy_sector['sector'] = 'sec{}'.format(sector)
proxy_sector = proxy_sector[['year','sector','index',sector]]
proxy_sector.columns = ['year','sector','region','gdp']
proxy_sector.to_csv(os.path.join('..','mrio_downscaling','proxy_sec{}.csv'.format(sector)),index=False)
else:
proxy_sector = pd.DataFrame(prov_data.iloc[-1,:24]/prov_data.iloc[-1,:24].sum()).reset_index()
proxy_sector['year'] = 2016
proxy_sector['sector'] = sector+'1'
proxy_sector = proxy_sector[['year','sector','index','TOTAL']]
proxy_sector.columns = ['year','sector','region','gdp']
proxy_sector.to_csv(os.path.join('..','mrio_downscaling','proxy_{}.csv'.format(sector)),index=False)
# proxy level 18
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, sectors+['other1','other2'], region_names],
names=['sec1', 'reg1','sec2','reg2'])
for iter_,sector in enumerate(sectors+['other1','other2']):
if (sector is not 'other1') & (sector is not 'other2'):
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_{}.csv'.format(sector)),index=False)
"""
Create first version of MRIO for Argentina, without trade
"""
### save basetable for disaggregation usin the specific source:
basetable = pd.read_csv(os.path.join(data_path,'national_tables','{}_{}.csv'.format(year,table)),index_col=[0])
basetable.to_csv(os.path.join('..','mrio_downscaling','basetable.csv'),header=False,index=False)
### run libmrio
p = subprocess.Popen([r'..\mrio_downscaling\mrio_disaggregate', 'settings_notrade.yml'],
cwd=os.path.join('..','mrio_downscaling'))
p.wait()
### load data and reorder
region_names_list = [item for sublist in [[x]*(len(sectors)+2) for x in region_names]
for item in sublist]
rows = ([x for x in sectors+['VA','IMP']])*len(region_names)
cols = ([x for x in sectors+['FD','EXP']])*len(region_names)
index_mi = pd.MultiIndex.from_arrays([region_names_list, rows], names=('region', 'row'))
column_mi = pd.MultiIndex.from_arrays([region_names_list, cols], names=('region', 'col'))
MRIO = pd.read_csv(os.path.join('..','mrio_downscaling','output1.csv'),header=None,index_col=None)
MRIO.index = index_mi
MRIO.columns = column_mi
# create predefined index and col, which is easier to read
sector_only = [x for x in sectors]*len(region_names)
col_only = ['FD']*len(region_names)
region_col = [item for sublist in [[x]*len(sectors) for x in region_names] for item in sublist] + \
[item for sublist in [[x]*1 for x in region_names] for item in sublist]
column_mi_reorder = pd.MultiIndex.from_arrays(
[region_col, sector_only+col_only], names=('region', 'col'))
# sum va and imports
valueA = MRIO.xs('VA', level=1, axis=0).sum(axis=0)
valueA.drop('FD', level=1,axis=0,inplace=True)
valueA.drop('EXP', level=1,axis=0,inplace=True)
imports = MRIO.xs('IMP', level=1, axis=0).sum(axis=0)
imports.drop('FD', level=1,axis=0,inplace=True)
imports.drop('EXP', level=1,axis=0,inplace=True)
FinalD = MRIO.xs('FD', level=1, axis=1).sum(axis=1)
FinalD.drop('VA', level=1,axis=0,inplace=True)
FinalD.drop('IMP', level=1,axis=0,inplace=True)
Export = MRIO.xs('EXP', level=1, axis=1).sum(axis=1)
Export.drop('VA', level=1,axis=0,inplace=True)
Export.drop('IMP', level=1,axis=0,inplace=True)
output_new = MRIO.copy()
"""
Balance first MRIO version
"""
# convert to numpy matrix
X0 = MRIO.as_matrix()
# get sum of rows and columns
u = X0.sum(axis=1)
v = X0.sum(axis=0)
# and only keep T
v[:(len(u)-2)] = u[:-2]
# apply RAS method to rebalance the table
X1 = ras_method(X0, u, v, eps=1e-5,print_out=print_output)
#translate to pandas dataframe
output_new = pd.DataFrame(X1)
output_new.index = index_mi
output_new.columns = column_mi
if print_progress:
print('NOTE : Balanced MRIO table without trade finished using {} data'.format(table))
"""
Create second version of MRIO for Argentina, with trade
"""
### Load OD matrix
od_matrix_total = pd.DataFrame(pd.read_excel(os.path.join(data_path,'OD_data','province_ods.xlsx'),
sheet_name='total',index_col=[0,1],usecols =[0,1,2,3,4,5,6,7])).unstack(1).fillna(0)
od_matrix_total.columns.set_levels(['A','G','C','D','B','I'],level=0,inplace=True)
od_matrix_total.index = od_matrix_total.index.map(reg_mapper)
od_matrix_total = od_matrix_total.stack(0)
od_matrix_total.columns = od_matrix_total.columns.map(reg_mapper)
od_matrix_total = od_matrix_total.swaplevel(i=-2, j=-1, axis=0)
od_matrix_total = od_matrix_total.loc[:, od_matrix_total.columns.notnull()]
### Create proxy data
# proxy level 14
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, region_names],
names=['sec1', 'reg1','reg2'])
for iter_,sector in enumerate((sectors+['other1','other2'])):
if sector in ['A','G','C','D','B','I']:
proxy_trade = (od_matrix_total.sum(level=1).divide(od_matrix_total.sum(level=1).sum(axis=1),axis='rows')).stack(0).reset_index()
proxy_trade.columns = ['reg1','reg2','gdp']
proxy_trade['year'] = 2016
proxy_trade = proxy_trade.apply(lambda x: est_trade_value(x,output_new,sector),axis=1)
proxy_trade['sec1'] = 'sec{}'.format(sector)
proxy_trade = proxy_trade[['year','sec1','reg1','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade14_sec{}.csv'.format(sector)),index=False)
elif (sector is not 'other1') & (sector is not 'other2') & (sector not in ['A','G','C','D','B','I']): # & (sector not in ['L','M','N','O','P']):
proxy_trade = (od_matrix_total.sum(level=1).divide(od_matrix_total.sum(level=1).sum(axis=1),axis='rows')).stack(0).reset_index()
#proxy_trade[0].loc[(proxy_trade.origin_province == proxy_trade.destination_province)] = 0.9
#proxy_trade[0].loc[~(proxy_trade.origin_province == proxy_trade.destination_province)] = 0.1
proxy_trade.columns = ['reg1','reg2','gdp']
proxy_trade['year'] = 2016
proxy_trade = proxy_trade.apply(lambda x: est_trade_value(x,output_new,sector),axis=1)
proxy_trade['sec1'] = 'sec{}'.format(sector)
proxy_trade = proxy_trade[['year','sec1','reg1','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade14_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade = (od_matrix_total.sum(level=1).divide(od_matrix_total.sum(level=1).sum(axis=1),axis='rows')).stack(0).reset_index()
proxy_trade.columns = ['reg1','reg2','gdp']
proxy_trade['year'] = 2016
proxy_trade = proxy_trade.apply(lambda x: est_trade_value(x,output_new,sector),axis=1)
proxy_trade['sec1'] = sector+'1'
proxy_trade = proxy_trade[['year','sec1','reg1','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade14_{}.csv'.format(sector)),index=False)
# proxy level 18
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, sectors+['other1','other2'], region_names],
names=['sec1', 'reg1','sec2','reg2'])
for iter_,sector in enumerate((sectors+['other1','other2'])):
if (sector is not 'other1') & (sector is not 'other2'):
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade = proxy_trade.loc[proxy_trade.sec2.isin(['L','M','N','O','P'])]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade.query("reg1 == reg2")
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade = proxy_trade.loc[proxy_trade.sec2.isin(['L','M','N','O','P'])]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade.query("reg1 == reg2")
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_{}.csv'.format(sector)),index=False)
### run libmrio
p = subprocess.Popen([r'..\mrio_downscaling\mrio_disaggregate', 'settings_trade.yml'],
cwd=os.path.join('..','mrio_downscaling'))
p.wait()
# load data and reorder
region_names_list = [item for sublist in [[x]*(len(sectors)+2) for x in region_names]
for item in sublist]
rows = ([x for x in sectors+['VA','IMP']])*len(region_names)
cols = ([x for x in sectors+['FD','EXP']])*len(region_names)
index_mi = pd.MultiIndex.from_arrays([region_names_list, rows], names=('region', 'row'))
column_mi = pd.MultiIndex.from_arrays([region_names_list, cols], names=('region', 'col'))
MRIO = pd.read_csv(os.path.join('..','mrio_downscaling','output2.csv'),header=None,index_col=None)
MRIO.index = index_mi
MRIO.columns = column_mi
# create predefined index and col, which is easier to read
sector_only = [x for x in sectors]*len(region_names)
col_only = ['FD','EXP']*len(region_names)
region_col = [item for sublist in [[x]*len(sectors) for x in region_names] for item in sublist] + \
[item for sublist in [[x]*2 for x in region_names] for item in sublist]
column_mi_reorder = pd.MultiIndex.from_arrays(
[region_col, sector_only+col_only], names=('region', 'col'))
# sum va and imports
valueA = pd.DataFrame(MRIO.loc[MRIO.index.get_level_values(1) == 'VA'].sum(axis='index'))
valueA.columns = pd.MultiIndex.from_product([['Total'],['ValueA']],names=['region','row'])
IMP = pd.DataFrame(MRIO.loc[MRIO.index.get_level_values(1) == 'IMP'].sum(axis='index'))
IMP.columns = pd.MultiIndex.from_product([['Total'],['IMP']],names=['region','row'])
output = pd.concat([MRIO.loc[~MRIO.index.get_level_values(1).isin(['FD','EXP'])]])
output = output.drop(['VA','IMP'], level=1)
output = pd.concat([output,valueA.T,IMP.T])
output = output.reindex(column_mi_reorder, axis='columns')
mrio_arg = ras_method(np.array(output).T,np.array(list(output.sum(axis=1))[:384]+list(output.sum(axis=0)[-48:])),
np.array(list(output.sum(axis=1))[:384]+[output.loc[('Total','ValueA'),:].sum(),output.loc[('Total','IMP'),:].sum()]),
eps=1e-3,print_out=print_output)
mrio_argentina = pd.DataFrame(mrio_arg.T,index=output.index,columns=output.columns)
mrio_argentina.to_csv(os.path.join(data_path,'MRIO','MRIO_Argentina_{}_{}.csv'.format(table,year)))
if print_progress:
print('NOTE : Balanced MRIO table with trade finished using {} data'.format(table))
def prepare_table_mria(table='INDEC',year='2015',print_output=True):
"""
Convert MRIO table to an excel file in which all elements of the table are disaggregated.
"""
data_path = os.path.join('..','data')
# load table
MRIO = pd.read_csv(os.path.join(data_path,'MRIO','MRIO_Argentina_{}_{}.csv'.format(table,year)),index_col=[0,1],header=[0,1])
Xnew = MRIO.copy()
Xnew = Xnew+1e-6
# write to excel
writer = pd.ExcelWriter(os.path.join(data_path,'MRIO', 'mrio_argentina_disaggregated_{}_{}.xlsx'.format(table,year)))
# write T
df_T = Xnew.iloc[:384, :384]
df_T.columns = df_T.columns.droplevel()
df_labels_T = pd.DataFrame(df_T.reset_index()[['region', 'row']])
df_T.reset_index(inplace=True, drop=True)
df_T.to_excel(writer, 'T', index=False, header=False)
df_labels_T.to_excel(writer, 'labels_T', index=False, header=False)
# write FD
df_FD = Xnew.iloc[:384, 384:].iloc[:, Xnew.iloc[:384, 384:].columns.get_level_values(1)=='FD']
df_labels_FD = pd.DataFrame(list(df_FD.columns))
df_FD.columns = df_FD.columns.droplevel()
df_FD.reset_index(inplace=True, drop=True)
df_FD.to_excel(writer, 'FD', index=False, header=False)
df_labels_FD.to_excel(writer, 'labels_FD', index=False, header=False)
# write ExpROW
df_ExpROW = pd.DataFrame(Xnew.iloc[:384, 384:].iloc[:, Xnew.iloc[:384, 384:].columns.get_level_values(1)=='EXP'].sum(axis=1))
df_labels_ExpROW = pd.DataFrame(['Export'])
df_ExpROW.reset_index(inplace=True, drop=True)
df_ExpROW.to_excel(writer, 'ExpROW', index=False, header=False)
df_labels_ExpROW.reset_index(inplace=True, drop=True)
df_labels_ExpROW.columns = ['Export']
df_labels_ExpROW.to_excel(writer, 'labels_ExpROW', index=False, header=False)
# write VA
df_VA = pd.DataFrame(Xnew.iloc[384:, :409].T[('Total', 'ValueA')])
df_VA.columns = ['VA']
df_VA['imports'] = pd.DataFrame(Xnew.iloc[384:, :].T[('Total', 'IMP')])
df_VA.reset_index(inplace=True, drop=True)
df_VA.to_excel(writer, 'VA', index=False, header=False)
df_labels_VA = pd.DataFrame(['Import', 'VA']).T
df_labels_VA.to_excel(writer, 'labels_VA', index=False, header=False)
# save excel
writer.save()
if print_output:
print('NOTE : MRIO table ready to use for MRIA model using {} data'.format(table))
if __name__ == "__main__":
estimate(table='GTAP',year='2014',print_output=True)
prepare_table_mria(table='GTAP',year='2014',print_output=True) | 49.24557 | 154 | 0.635359 |
421c88021499b88620b09442779453fef21cf565 | 1,212 | py | Python | task_manager/users/forms.py | Ritesh-Aggarwal/Task-Manager-Django | b8f8df10b0b0a9cc9cd27346a0b5d4d5892d2f24 | [
"MIT"
] | null | null | null | task_manager/users/forms.py | Ritesh-Aggarwal/Task-Manager-Django | b8f8df10b0b0a9cc9cd27346a0b5d4d5892d2f24 | [
"MIT"
] | null | null | null | task_manager/users/forms.py | Ritesh-Aggarwal/Task-Manager-Django | b8f8df10b0b0a9cc9cd27346a0b5d4d5892d2f24 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import (
AuthenticationForm,
UserCreationForm,
UsernameField,
)
User = get_user_model()
| 26.347826 | 77 | 0.605611 |
421cd1f840cd074e3eb92df46eaaf5c4a3768113 | 1,891 | py | Python | boa3/model/builtin/interop/oracle/oracletype.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 25 | 2020-07-22T19:37:43.000Z | 2022-03-08T03:23:55.000Z | boa3/model/builtin/interop/oracle/oracletype.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 419 | 2020-04-23T17:48:14.000Z | 2022-03-31T13:17:45.000Z | boa3/model/builtin/interop/oracle/oracletype.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 15 | 2020-05-21T21:54:24.000Z | 2021-11-18T06:17:24.000Z | from __future__ import annotations
from typing import Any, Dict, Optional
from boa3.model.method import Method
from boa3.model.property import Property
from boa3.model.type.classes.classarraytype import ClassArrayType
from boa3.model.variable import Variable
_Oracle = OracleType()
| 27.014286 | 95 | 0.657324 |
42228f1e28d8899ed8da922c4eb2bd3b92ca4e69 | 191 | py | Python | photo-hub/api/pagination.py | RodionChachura/photo-hub | 20ec008076a34cb09b289fda0557e2efc7e06232 | [
"MIT"
] | null | null | null | photo-hub/api/pagination.py | RodionChachura/photo-hub | 20ec008076a34cb09b289fda0557e2efc7e06232 | [
"MIT"
] | null | null | null | photo-hub/api/pagination.py | RodionChachura/photo-hub | 20ec008076a34cb09b289fda0557e2efc7e06232 | [
"MIT"
] | null | null | null | from rest_framework.pagination import PageNumberPagination | 31.833333 | 58 | 0.811518 |
4222c98b7de332bf9b4c1cc8bba790b9eea99314 | 1,021 | py | Python | wiiu.py | RN-JK/UBIART-Texture-Decoder | 71e190c12b1b8813dcda1f26cd115d9f89cc7619 | [
"MIT"
] | null | null | null | wiiu.py | RN-JK/UBIART-Texture-Decoder | 71e190c12b1b8813dcda1f26cd115d9f89cc7619 | [
"MIT"
] | null | null | null | wiiu.py | RN-JK/UBIART-Texture-Decoder | 71e190c12b1b8813dcda1f26cd115d9f89cc7619 | [
"MIT"
] | 1 | 2021-11-29T05:57:55.000Z | 2021-11-29T05:57:55.000Z | import os, glob
# Batch-convert Wii U UBIART textures:
#   input/wiiu/*.ckd -> input/temp/*.gtx -> output/*.dds (via texconv2)
# Create the output directory if it is not already there.
os.makedirs("output", exist_ok=True)
wiiudir = "input/wiiu"
if not os.path.isdir(wiiudir):
    # First run: build the expected input layout and tell the user where to
    # drop the textures before running again.
    os.makedirs(wiiudir)
    print('The directories have been made.')
    input('Insert your textures in input/wiiu and then run the tool again to convert it.')
# Working directory for the intermediate .gtx files (removed again at the end).
dir = 'input/temp'
os.makedirs(dir, exist_ok=True)
# Strip the 44-byte container header from each .ckd file; the remainder is a
# raw GTX image.  Unreadable entries are skipped instead of aborting the batch.
for ckdtextures in os.listdir(wiiudir):
    try:
        with open(wiiudir + '/' + ckdtextures, 'rb') as f:
            f.read(44)  # skip the header bytes
            data = f.read()
        gtx_name = ckdtextures.replace('.tga.ckd', '.gtx').replace('.png.ckd', '.gtx')
        with open(dir + '/' + gtx_name, 'wb') as dds:
            dds.write(data)
    except OSError:
        pass
# Convert every intermediate .gtx to .dds with the external texconv2 tool.
# NOTE(review): os.system with interpolated file names breaks on spaces and
# shell metacharacters; subprocess.run([...]) would be safer.
for gtx in os.listdir(dir):
    print('making ' + gtx.replace(".gtx", "") + '...')
    os.system("texconv2 -i input/temp/" + gtx + " -o output/" + gtx.replace(".gtx", ".dds"))
# Delete the intermediate .gtx files (the temp directory itself is removed next).
filelist = glob.glob(os.path.join(dir, "*"))
for f in filelist:
    os.remove(f)
os.rmdir(dir) | 18.563636 | 103 | 0.5524 |
4223f6babdeae509fede80d613a39bd2530fc8ee | 470 | py | Python | jp.atcoder/abc046/arc062_a/8984820.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc046/arc062_a/8984820.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc046/arc062_a/8984820.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | import sys
n = int(sys.stdin.readline().rstrip())
ab = map(int, sys.stdin.read().split())
ab = list(zip(ab, ab))
if __name__ == "__main__":
ans = main()
print(ans)
| 18.076923 | 40 | 0.431915 |
422402f1cd18573550063c08ebfde34d14018e34 | 5,187 | py | Python | pycsw/pycsw/plugins/profiles/profile.py | Geosoft2/Geosoftware-II-AALLH | bdb61d9a1111b9082ec2b9f309998c5f2166975e | [
"MIT"
] | 118 | 2015-01-07T00:24:09.000Z | 2022-03-19T15:35:43.000Z | pycsw/pycsw/plugins/profiles/profile.py | Geosoft2/Geosoftware-II-AALLH | bdb61d9a1111b9082ec2b9f309998c5f2166975e | [
"MIT"
] | 319 | 2015-01-06T23:51:46.000Z | 2022-03-20T11:22:57.000Z | pycsw/pycsw/plugins/profiles/profile.py | Geosoft2/Geosoftware-II-AALLH | bdb61d9a1111b9082ec2b9f309998c5f2166975e | [
"MIT"
] | 113 | 2015-01-07T00:42:23.000Z | 2022-02-19T18:05:08.000Z | # -*- coding: utf-8 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
# Angelos Tzotsos <tzotsos@gmail.com>
#
# Copyright (c) 2015 Tom Kralidis
# Copyright (c) 2015 Angelos Tzotsos
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import os
import warnings
def load_profiles(path, cls, profiles):
    ''' load CSW profiles, return dict by class name '''

    aps = {'plugins': {}, 'loaded': {}}

    # fgdc, atom, dif and gm03 ship with core and must no longer be loaded
    # as plugins; requesting one of them only yields a deprecation warning
    # (https://github.com/geopython/pycsw/issues/118).
    core_profiles = ['fgdc', 'atom', 'dif', 'gm03']
    dotted_path = path.replace(os.sep, '.')

    for prof in profiles.split(','):
        if prof in core_profiles:
            warnings.warn('%s is now a core module, and does not need to be'
                          ' specified explicitly. So you can remove %s from '
                          'server.profiles' % (prof, prof))
        else:
            # e.g. path 'pycsw/plugins/profiles' + profile 'ebrim'
            # -> module 'pycsw.plugins.profiles.ebrim.ebrim'
            look_for_subclass('%s.%s.%s' % (dotted_path, prof, prof))

    return aps
| 36.528169 | 78 | 0.630037 |
4224f59023f612daa74db320160910b42cc05439 | 3,897 | py | Python | push-package.py | OpenTrustGroup/scripts | 31ca2ca5bae055113c6f92a2eb75b0c7528902b3 | [
"BSD-3-Clause"
] | null | null | null | push-package.py | OpenTrustGroup/scripts | 31ca2ca5bae055113c6f92a2eb75b0c7528902b3 | [
"BSD-3-Clause"
] | null | null | null | push-package.py | OpenTrustGroup/scripts | 31ca2ca5bae055113c6f92a2eb75b0c7528902b3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright 2017 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import errno
import json
import os
import subprocess
import sys
import tempfile
DEFAULT_DST_ROOT = '/system'
DEFAULT_OUT_DIR = 'out/debug-x64'
if __name__ == '__main__':
sys.exit(main())
| 26.691781 | 79 | 0.647164 |
42260da2bac2d4e5c90292ee2d38da85618b72ad | 2,355 | py | Python | tests/e2e/registry/test_registry_image_push_pull.py | OdedViner/ocs-ci | e8a3de82650e02cf8fa67284a67c36ced34a480b | [
"MIT"
] | null | null | null | tests/e2e/registry/test_registry_image_push_pull.py | OdedViner/ocs-ci | e8a3de82650e02cf8fa67284a67c36ced34a480b | [
"MIT"
] | null | null | null | tests/e2e/registry/test_registry_image_push_pull.py | OdedViner/ocs-ci | e8a3de82650e02cf8fa67284a67c36ced34a480b | [
"MIT"
] | null | null | null | import logging
import pytest
from ocs_ci.framework.testlib import workloads, E2ETest, ignore_leftovers
from ocs_ci.ocs import ocp, registry, constants
from ocs_ci.framework import config
from ocs_ci.ocs.exceptions import UnexpectedBehaviour
logger = logging.getLogger(__name__)
| 36.796875 | 100 | 0.6862 |
42274dc240f54ea288091543468dd2eda53a4feb | 55 | py | Python | tOYOpy/settings.py | fkab/tOYO | b0a7be760a45edd795b8734ce2e5f1ccec35091b | [
"MIT"
] | null | null | null | tOYOpy/settings.py | fkab/tOYO | b0a7be760a45edd795b8734ce2e5f1ccec35091b | [
"MIT"
] | null | null | null | tOYOpy/settings.py | fkab/tOYO | b0a7be760a45edd795b8734ce2e5f1ccec35091b | [
"MIT"
] | null | null | null | elements = {
'em': '',
'blockquote': '<br/>'
}
| 11 | 25 | 0.4 |
4227bfd2b04f47e94ab893e1b523dca4551e38fc | 312 | py | Python | 1.6.py | kevrodg/pynet | 5142b1b75cda658a99348e3550da1c198e7d049e | [
"Apache-2.0"
] | null | null | null | 1.6.py | kevrodg/pynet | 5142b1b75cda658a99348e3550da1c198e7d049e | [
"Apache-2.0"
] | null | null | null | 1.6.py | kevrodg/pynet | 5142b1b75cda658a99348e3550da1c198e7d049e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import json
import yaml
my_list = [0, 1, 2, 3, 'whatever', 'hello', {'attribs': [0, 1, 2, 3, 4], 'ip_addr': '10.10.10.239'}]

# Dump the very same structure once as JSON and once as block-style YAML.
with open("my_file.json", "w") as json_out:
    json.dump(my_list, json_out)
with open("my_file.yaml", "w") as yaml_out:
    yaml_out.write(yaml.dump(my_list, default_flow_style=False))
| 20.8 | 101 | 0.61859 |
42287378bd11599427298e72d96640a19c6fbb44 | 322 | py | Python | jp.atcoder/abc069/arc080_a/11903517.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc069/arc080_a/11903517.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc069/arc080_a/11903517.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | import sys
n, *a = map(int, sys.stdin.read().split())
if __name__ == "__main__":
main()
| 16.947368 | 62 | 0.406832 |
422874e1c950eddb051f58c230d75405855070fc | 2,277 | py | Python | tests/test_url_enc_dec.py | FWidm/poe-profile | 08190dfab88758081ce1ddcd30a43081e2d7863f | [
"MIT"
] | 1 | 2018-12-02T19:48:09.000Z | 2018-12-02T19:48:09.000Z | tests/test_url_enc_dec.py | FWidm/poe-profile | 08190dfab88758081ce1ddcd30a43081e2d7863f | [
"MIT"
] | null | null | null | tests/test_url_enc_dec.py | FWidm/poe-profile | 08190dfab88758081ce1ddcd30a43081e2d7863f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
import sys
import unittest
from src.util.tree_codec import encode_hashes, decode_url
url = 'AAAABAMDAQQHBLMGSQj0Dc0OPA5cES0UIBRxFScWbxhWGF0YkRo4HM4c3CSqJy8o-itQLJwy0TWSNuk6UjpYOuE8LUGHRARFR0V-RZ1Ms025TeNQR' \
'1NSVcZZ81qRXz9mnmebaGVodGpDaqxq-mvbcg9yqXasfIN99YIHgseDX4PMg9uFYIhAjLGOvo8akDOQVZLBmK2a4JuKogCmV6asqH2qxKyYrKqtja' \
'3xrj6vp7c-uJO8n7zqvk_AZsT2xq7MvM9-0B_Tj9P72L3ZXtl82mLfsONq5FHqGOvu7IPsiu8O7-vwH_JF8933MvfX-Ov56PrS_Ev-Cv5U_oH-jw=='
decoded = (4, 3, 3, 1, [1031, 1203, 1609, 2292, 3533, 3644, 3676, 4397, 5152, 5233, 5415, 5743, 6230, 6237, 6289,
6712,
7374, 7388, 9386, 10031, 10490, 11088, 11420, 13009, 13714, 14057, 14930, 14936, 15073,
15405,
16775, 17412, 17735, 17790, 17821, 19635, 19897, 19939, 20551, 21330, 21958, 23027, 23185,
24383,
26270, 26523, 26725, 26740, 27203, 27308, 27386, 27611, 29199, 29353, 30380, 31875, 32245,
33287,
33479, 33631, 33740, 33755, 34144, 34880, 36017, 36542, 36634, 36915, 36949, 37569, 39085,
39648,
39818, 41472, 42583, 42668, 43133, 43716, 44184, 44202, 44429, 44529, 44606, 44967, 46910,
47251,
48287, 48362, 48719, 49254, 50422, 50862, 52412, 53118, 53279, 54159, 54267, 55485, 55646,
55676,
55906, 57264, 58218, 58449, 59928, 60398, 60547, 60554, 61198, 61419, 61471, 62021, 62429,
63282,
63447, 63723, 63976, 64210, 64587, 65034, 65108, 65153, 65167])
if __name__ == '__main__':
    # Mirror root-logger output to stdout at DEBUG level so that log messages
    # emitted by the code under test are visible in the test run.
    logger = logging.getLogger()
    # Use the documented setLevel() API; assigning .level directly bypasses
    # it (setLevel also clears the logging module's internal level cache).
    logger.setLevel(logging.DEBUG)
    stream_handler = logging.StreamHandler(sys.stdout)
    logger.addHandler(stream_handler)
    unittest.main()
| 42.962264 | 123 | 0.62231 |
422975ef7721aeaa44f60c6499ab2952315acfbe | 262 | py | Python | _test/registry/reg04.py | javacommons/commonthread | dff8b39d7c86729e4711b669bcec8eab6f146659 | [
"Unlicense"
] | null | null | null | _test/registry/reg04.py | javacommons/commonthread | dff8b39d7c86729e4711b669bcec8eab6f146659 | [
"Unlicense"
] | null | null | null | _test/registry/reg04.py | javacommons/commonthread | dff8b39d7c86729e4711b669bcec8eab6f146659 | [
"Unlicense"
] | null | null | null | # source http://itasuke.hatenablog.com/entry/2018/01/08/133510
import winreg
newkey = winreg.CreateKeyEx(winreg.HKEY_CURRENT_USER, r'Software\__javacommons__\abc')
newkey.Close()
winreg.DeleteKeyEx(winreg.HKEY_CURRENT_USER, r'Software\__javacommons__\abc')
| 43.666667 | 87 | 0.80916 |
422a7283e956bcdda7358ae083a9c572a8121dd9 | 8,289 | py | Python | setuptools-37.0.0/pkg_resources/tests/test_working_set.py | coderlongren/PreliminaryPython | b5c7a87e41842c57aabb660de1514cba19c8bd78 | [
"MIT"
] | 1 | 2017-09-19T15:21:50.000Z | 2017-09-19T15:21:50.000Z | setuptools-37.0.0/pkg_resources/tests/test_working_set.py | coderlongren/PreliminaryPython | b5c7a87e41842c57aabb660de1514cba19c8bd78 | [
"MIT"
] | null | null | null | setuptools-37.0.0/pkg_resources/tests/test_working_set.py | coderlongren/PreliminaryPython | b5c7a87e41842c57aabb660de1514cba19c8bd78 | [
"MIT"
] | 4 | 2017-05-12T09:18:16.000Z | 2020-08-27T03:26:16.000Z | import inspect
import re
import textwrap
import pytest
import pkg_resources
from .test_resources import Metadata
def parametrize_test_working_set_resolve(*test_list):
    """Build a ``pytest.mark.parametrize`` decorator for resolve() tests.

    Each entry of *test_list* is one indented text blob with six sections
    separated by blank lines: test name, installed dists, installable dists,
    requirements, expected outcome without ``replace_conflicting``, and the
    expected outcome with it.  Every case is emitted twice (with and without
    ``replace_conflicting``).  An expected section consisting of a bare
    identifier names an exception class on ``pkg_resources``; anything else
    is parsed as a list of distributions.
    """
    idlist = []
    argvalues = []
    for test in test_list:
        (
            name,
            installed_dists,
            installable_dists,
            requirements,
            expected1, expected2
        ) = [
            strip_comments(s.lstrip()) for s in
            textwrap.dedent(test).lstrip().split('\n\n', 5)
        ]
        installed_dists = list(parse_distributions(installed_dists))
        installable_dists = list(parse_distributions(installable_dists))
        requirements = list(pkg_resources.parse_requirements(requirements))
        for id_, replace_conflicting, expected in (
            (name, False, expected1),
            (name + '_replace_conflicting', True, expected2),
        ):
            idlist.append(id_)
            expected = strip_comments(expected.strip())
            # Raw string: '\w' in a plain literal is an invalid escape
            # sequence (DeprecationWarning, later a SyntaxWarning/error).
            if re.match(r'\w+$', expected):
                # A bare identifier names an exception class.
                expected = getattr(pkg_resources, expected)
                assert issubclass(expected, Exception)
            else:
                expected = list(parse_distributions(expected))
            argvalues.append(pytest.param(installed_dists, installable_dists,
                                          requirements, replace_conflicting,
                                          expected))
    return pytest.mark.parametrize('installed_dists,installable_dists,'
                                   'requirements,replace_conflicting,'
                                   'resolved_dists_or_exception',
                                   argvalues, ids=idlist)
| 17.304802 | 87 | 0.55447 |
422abcc408966dc47c31fc1259795d32236b4832 | 629 | py | Python | setup.py | Sigel1/yolo-tf2 | a11c856e601c23220fc2afce7c93e9f8eb4fd339 | [
"MIT"
] | null | null | null | setup.py | Sigel1/yolo-tf2 | a11c856e601c23220fc2afce7c93e9f8eb4fd339 | [
"MIT"
] | null | null | null | setup.py | Sigel1/yolo-tf2 | a11c856e601c23220fc2afce7c93e9f8eb4fd339 | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
install_requires = [dep.strip() for dep in open('requirements.txt')]
setup(
name='yolo_tf2',
version='1.5',
packages=find_packages(),
url='https://github.com/schissmantics/yolo-tf2',
license='MIT',
author='schismantics',
author_email='schissmantics@outlook.com',
description='yolo(v3/v4) implementation in keras and tensorflow 2.5',
setup_requires=['numpy==1.19.5'],
install_requires=install_requires,
python_requires='>=3.7',
entry_points={
'console_scripts': [
'yolotf2=yolo_tf2.cli:execute',
],
},
)
| 27.347826 | 73 | 0.659777 |
422b4572706867cc810fb195c7e12772e8a93c86 | 324 | py | Python | nngeometry/object/__init__.py | amyami187/nngeometry | cb516da3f7a019e148f48ff3ef3bed0cdae0d184 | [
"MIT"
] | 103 | 2020-03-19T08:47:29.000Z | 2022-03-29T00:54:38.000Z | nngeometry/object/__init__.py | amyami187/nngeometry | cb516da3f7a019e148f48ff3ef3bed0cdae0d184 | [
"MIT"
] | 29 | 2021-01-07T13:39:20.000Z | 2022-03-29T14:52:21.000Z | nngeometry/object/__init__.py | amyami187/nngeometry | cb516da3f7a019e148f48ff3ef3bed0cdae0d184 | [
"MIT"
] | 11 | 2020-11-09T01:07:12.000Z | 2022-03-29T00:54:41.000Z | from .pspace import (PMatDense, PMatBlockDiag, PMatDiag,
PMatLowRank, PMatImplicit,
PMatKFAC, PMatEKFAC, PMatQuasiDiag)
from .vector import (PVector, FVector)
from .fspace import (FMatDense,)
from .map import (PushForwardDense, PushForwardImplicit,
PullBackDense)
| 40.5 | 56 | 0.66358 |
422e18702f6c683f268a4b49395a514801fec437 | 834 | py | Python | vkwave/bots/core/dispatching/dp/middleware/middleware.py | YorkDW/vkwave | 86b0278f15f398217a8211007c44651b6145831b | [
"MIT"
] | null | null | null | vkwave/bots/core/dispatching/dp/middleware/middleware.py | YorkDW/vkwave | 86b0278f15f398217a8211007c44651b6145831b | [
"MIT"
] | null | null | null | vkwave/bots/core/dispatching/dp/middleware/middleware.py | YorkDW/vkwave | 86b0278f15f398217a8211007c44651b6145831b | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from typing import List, NewType
from vkwave.bots.core.dispatching.events.base import BaseEvent
MiddlewareResult = NewType("MiddlewareResult", bool)
| 29.785714 | 84 | 0.715827 |
422e499271a923bf090aefdbe25c5651121859de | 3,517 | py | Python | plot_scripts/try_networkx.py | gabrielasuchopar/arch2vec | 1fc47d2cc7d63832e0d6337b8482669366b4aef2 | [
"Apache-2.0"
] | 35 | 2020-10-22T03:58:23.000Z | 2022-03-21T12:55:35.000Z | plot_scripts/try_networkx.py | gabrielasuchopar/arch2vec | 1fc47d2cc7d63832e0d6337b8482669366b4aef2 | [
"Apache-2.0"
] | 1 | 2021-06-03T13:49:47.000Z | 2021-06-06T02:02:11.000Z | plot_scripts/try_networkx.py | gabrielasuchopar/arch2vec | 1fc47d2cc7d63832e0d6337b8482669366b4aef2 | [
"Apache-2.0"
] | 9 | 2020-10-22T14:13:53.000Z | 2022-03-21T08:06:12.000Z | import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
adj1 = np.array([[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]])
op1 = ['in', 'conv1x1', 'conv3x3', 'mp3x3', 'out']
adj2 = np.array([[0, 1, 1, 1, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]])
op2 = ['in', 'conv1x1', 'mp3x3', 'conv3x3', 'out']
adj3 = np.array([[0, 1, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0]])
op3 = ['in', 'conv1x1', 'conv3x3', 'mp3x3', 'out','out2']
adj4 = np.array([[0, 1, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
op4 = np.array([[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]])
adj4, op4 = preprocess_adj_op(adj4, op4)
G1 = gen_graph(adj1, op1)
G2 = gen_graph(adj2, op2)
G3 = gen_graph(adj3, op3)
G4 = gen_graph(adj4, op4)
plt.subplot(141)
nx.draw(G1, with_labels=True, font_weight='bold')
plt.subplot(142)
nx.draw(G2, with_labels=True, font_weight='bold')
plt.subplot(143)
nx.draw(G3, with_labels=True, font_weight='bold')
plt.subplot(144)
nx.draw(G4, with_labels=True, font_weight='bold')
nx.graph_edit_distance(G1,G2, node_match=node_match, edge_match=edge_match)
nx.graph_edit_distance(G2,G3, node_match=node_match, edge_match=edge_match) | 30.582609 | 142 | 0.477396 |
422eaaa92344214317cacbe394deaa82d7096b9d | 6,552 | py | Python | endpoints/v2/errors.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 2,027 | 2019-11-12T18:05:48.000Z | 2022-03-31T22:25:04.000Z | endpoints/v2/errors.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 496 | 2019-11-12T18:13:37.000Z | 2022-03-31T10:43:45.000Z | endpoints/v2/errors.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 249 | 2019-11-12T18:02:27.000Z | 2022-03-22T12:19:19.000Z | import bitmath
| 32.435644 | 100 | 0.654609 |
422f10e008ebbf5692ddbc20cb4464f21ab48808 | 3,956 | py | Python | scoreboard.py | TheLurkingCat/scoreboard | 9c292fc8573e7bf8539cb20a813c2147ddd0c923 | [
"MIT"
] | null | null | null | scoreboard.py | TheLurkingCat/scoreboard | 9c292fc8573e7bf8539cb20a813c2147ddd0c923 | [
"MIT"
] | null | null | null | scoreboard.py | TheLurkingCat/scoreboard | 9c292fc8573e7bf8539cb20a813c2147ddd0c923 | [
"MIT"
] | null | null | null | '''
LICENSE: MIT license
This module can help us know about who can ask when
we have troubles in some buggy codes while solving problems.
'''
from asyncio import gather, get_event_loop
from pandas import DataFrame, set_option
from online_judge import Online_Judge
loop = get_event_loop()
set_option('display.max_colwidth', -1)
| 35.63964 | 167 | 0.548787 |
422f98ebeb65b657f8b008da4345d8f0e09f42c7 | 10,406 | py | Python | custom_transforms.py | zyxu1996/Efficient-Transformer | 106347186d13e106e9129d25b72e2fd491c54452 | [
"Apache-2.0"
] | 22 | 2021-10-13T05:10:15.000Z | 2022-03-17T12:01:40.000Z | custom_transforms.py | zyXu1996/Efficient-Transformer | efd87d734d5835eccb5b624c5e7ca3a5a08f318b | [
"Apache-2.0"
] | null | null | null | custom_transforms.py | zyXu1996/Efficient-Transformer | efd87d734d5835eccb5b624c5e7ca3a5a08f318b | [
"Apache-2.0"
] | 4 | 2021-11-08T10:30:23.000Z | 2022-02-16T05:07:25.000Z | import torch
import random
import numpy as np
import cv2
import os
import torch.nn as nn
from torchvision import transforms
def edge_contour(label, edge_width=3):
    """Extract class-boundary (edge) masks from a batch of label maps.

    A pixel is flagged when its class differs from a neighbouring pixel
    (the four comparisons below cover both axes and both diagonals) and
    neither pixel carries the ignore value 255.  The thin contour is then
    thickened by dilating with an edge_width x edge_width rectangular kernel.

    :param label: (B, H, W) integer label tensor; 255 marks ignored pixels
    :param edge_width: side length of the square dilation kernel
    :return: (B, H, W) float tensor with 1.0 on edges, 0.0 elsewhere;
        placed on CUDA iff the input tensor was
    """
    import cv2
    cuda_type = label.is_cuda
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int yields the same platform-default integer dtype.
    label = label.cpu().numpy().astype(int)
    b, h, w = label.shape
    edge = np.zeros(label.shape)
    # right
    edge_right = edge[:, 1:h, :]
    edge_right[(label[:, 1:h, :] != label[:, :h - 1, :]) & (label[:, 1:h, :] != 255)
               & (label[:, :h - 1, :] != 255)] = 1
    # up
    edge_up = edge[:, :, :w - 1]
    edge_up[(label[:, :, :w - 1] != label[:, :, 1:w])
            & (label[:, :, :w - 1] != 255)
            & (label[:, :, 1:w] != 255)] = 1
    # upright
    edge_upright = edge[:, :h - 1, :w - 1]
    edge_upright[(label[:, :h - 1, :w - 1] != label[:, 1:h, 1:w])
                 & (label[:, :h - 1, :w - 1] != 255)
                 & (label[:, 1:h, 1:w] != 255)] = 1
    # bottomright
    edge_bottomright = edge[:, :h - 1, 1:w]
    edge_bottomright[(label[:, :h - 1, 1:w] != label[:, 1:h, :w - 1])
                     & (label[:, :h - 1, 1:w] != 255)
                     & (label[:, 1:h, :w - 1] != 255)] = 1
    # Thicken the 1-pixel contour so thin boundaries survive.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (edge_width, edge_width))
    for i in range(edge.shape[0]):
        edge[i] = cv2.dilate(edge[i], kernel)
    if cuda_type:
        edge = torch.from_numpy(edge).cuda()
    else:
        edge = torch.from_numpy(edge)
    return edge
if __name__ == '__main__':
    # Smoke test: run edge extraction over every label image in the
    # Vaihingen annotation folder.
    path = './data/vaihingen/annotations/labels'
    filelist = os.listdir(path)
    for file in filelist:
        print(file)
        img = cv2.imread(os.path.join(path, file), cv2.IMREAD_UNCHANGED)
        # Fake a batch of two identical label maps: (H, W) -> (2, H, W).
        img = torch.from_numpy(img).unsqueeze(dim=0).repeat(2, 1, 1)
        img = edge_contour(img)
# cv2.imwrite(os.path.join(save_path, os.path.splitext(file)[0] + '.png'), gray)
| 36.384615 | 106 | 0.540746 |
423075718e222b99f83bdb4ab73a14063da9d0ee | 37,354 | py | Python | ui/staff.py | AryaStarkSakura/Stylized-Neural-Painting | 0502c9f12eb582fe2ebd0ffdc7008dc81cefa74c | [
"CC0-1.0"
] | null | null | null | ui/staff.py | AryaStarkSakura/Stylized-Neural-Painting | 0502c9f12eb582fe2ebd0ffdc7008dc81cefa74c | [
"CC0-1.0"
] | null | null | null | ui/staff.py | AryaStarkSakura/Stylized-Neural-Painting | 0502c9f12eb582fe2ebd0ffdc7008dc81cefa74c | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'staff.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| 46.750939 | 101 | 0.591235 |
42308174a4346509fdf47445522e3c2f26a6c431 | 2,171 | py | Python | dataset.py | ceyzaguirre4/mac-network-pytorch | ad2deefc8a987ab92f4911d3d98631f22d0ae44a | [
"MIT"
] | 4 | 2020-04-08T22:19:19.000Z | 2020-10-28T23:22:12.000Z | dataset.py | ceyzaguirre4/mac-network-pytorch | ad2deefc8a987ab92f4911d3d98631f22d0ae44a | [
"MIT"
] | null | null | null | dataset.py | ceyzaguirre4/mac-network-pytorch | ad2deefc8a987ab92f4911d3d98631f22d0ae44a | [
"MIT"
] | 3 | 2020-06-27T02:47:02.000Z | 2021-10-08T13:19:05.000Z | import os
import pickle
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
from torchvision import transforms
import h5py
from transforms import Scale
transform = transforms.Compose([
Scale([224, 224]),
transforms.Pad(4),
transforms.RandomCrop([224, 224]),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
])
| 29.739726 | 83 | 0.609857 |
4230af0cdb6333a2256b37fbde92023b5213c5d6 | 1,445 | py | Python | tests/distributions/test_log_normal.py | thomasaarholt/xgboost-distribution | 8ee00f7f0dcaadcb345ebcb15534287081aa987b | [
"MIT"
] | 17 | 2021-08-14T10:23:54.000Z | 2022-01-08T11:54:48.000Z | tests/distributions/test_log_normal.py | thomasaarholt/xgboost-distribution | 8ee00f7f0dcaadcb345ebcb15534287081aa987b | [
"MIT"
] | 17 | 2021-06-22T02:23:53.000Z | 2022-03-02T16:03:21.000Z | tests/distributions/test_log_normal.py | thomasaarholt/xgboost-distribution | 8ee00f7f0dcaadcb345ebcb15534287081aa987b | [
"MIT"
] | 6 | 2021-08-18T18:52:13.000Z | 2021-11-19T08:36:50.000Z | import pytest
import numpy as np
import pandas as pd
from xgboost_distribution.distributions import LogNormal
def test_loss(lognormal):
    """The loss at y == 0 is infinite and is reported as "LogNormalError"."""
    y_observed = np.array([0, ])
    fitted_params = np.array([[1, 0], ])
    loss_name, loss_value = lognormal.loss(y=y_observed, params=fitted_params)
    assert loss_name == "LogNormalError"
    assert loss_value == np.inf
| 24.083333 | 85 | 0.600692 |
4230f1879c1a68f9bf6052b16b5fb1dd036ba09b | 14,169 | py | Python | script/forecasting/forecaster.py | bialesdaniel/noisepage | 44ca689bd818b1bd39b84a7fe5148ddaa65a61eb | [
"MIT"
] | null | null | null | script/forecasting/forecaster.py | bialesdaniel/noisepage | 44ca689bd818b1bd39b84a7fe5148ddaa65a61eb | [
"MIT"
] | null | null | null | script/forecasting/forecaster.py | bialesdaniel/noisepage | 44ca689bd818b1bd39b84a7fe5148ddaa65a61eb | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Main script for workload forecasting.
Example usage:
- Generate data (runs OLTP benchmark on the built database) and perform training, and save the trained model
./forecaster --gen_data --models=LSTM --model_save_path=model.pickle
- Use the trained models (LSTM) to generate predictions.
./forecaster --model_load_path=model.pickle --test_file=test_query.csv --test_model=LSTM
TODO:
- Better metrics for training and prediction (currently not focusing on models' accuracy yet)
- Multiple models (currently only simple-one-layer-untuned LSTM used)
- API and interaction with Pilot
"""
import argparse
import json
import pickle
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ..testing.self_driving.constants import (DEFAULT_ITER_NUM,
DEFAULT_QUERY_TRACE_FILE,
DEFAULT_TPCC_WEIGHTS,
DEFAULT_WORKLOAD_PATTERN)
from ..testing.self_driving.forecast import gen_oltp_trace
from ..testing.util.constants import LOG
from .cluster import QueryCluster
from .data_loader import DataLoader
from .models import ForecastModel, get_models
# Interval duration for aggregation in microseconds
INTERVAL_MICRO_SEC = 500000
# Number of Microseconds per second
MICRO_SEC_PER_SEC = 1000000
# Number of data points in a sequence
SEQ_LEN = 10 * MICRO_SEC_PER_SEC // INTERVAL_MICRO_SEC
# Number of data points for the horizon
HORIZON_LEN = 30 * MICRO_SEC_PER_SEC // INTERVAL_MICRO_SEC
# Number of data points for testing set
EVAL_DATA_SIZE = 2 * SEQ_LEN + HORIZON_LEN
argp = argparse.ArgumentParser(description="Query Load Forecaster")
# Generation stage related options
argp.add_argument(
"--gen_data",
default=False,
action="store_true",
help="If specified, OLTP benchmark would be downloaded and built to generate the query trace data")
argp.add_argument(
"--tpcc_weight",
type=str,
default=DEFAULT_TPCC_WEIGHTS,
help="Workload weights for the TPCC")
argp.add_argument(
"--tpcc_rates",
nargs="+",
default=DEFAULT_WORKLOAD_PATTERN,
help="Rate array for the TPCC workload")
argp.add_argument(
"--pattern_iter",
type=int,
default=DEFAULT_ITER_NUM,
help="Number of iterations the DEFAULT_WORKLOAD_PATTERN should be run")
argp.add_argument("--trace_file", default=DEFAULT_QUERY_TRACE_FILE,
help="Path to the query trace file", metavar="FILE")
# Model specific
argp.add_argument("--models", nargs='+', type=str, help="Models to use")
argp.add_argument("--models_config", type=str, metavar="FILE",
help="Models and init arguments JSON config file")
argp.add_argument("--seq_len", type=int, default=SEQ_LEN,
help="Length of one sequence in number of data points")
argp.add_argument(
"--horizon_len",
type=int,
default=HORIZON_LEN,
help="Length of the horizon in number of data points, "
"aka, how many further in the a sequence is used for prediction"
)
# Training stage related options
argp.add_argument("--model_save_path", metavar="FILE",
help="Where the model trained will be stored")
argp.add_argument(
"--eval_size",
type=int,
default=EVAL_DATA_SIZE,
help="Length of the evaluation data set length in number of data points")
argp.add_argument("--lr", type=float, default=0.001, help="Learning rate")
argp.add_argument("--epochs", type=int, default=10,
help="Number of epochs for training")
# Testing stage related options
argp.add_argument(
"--model_load_path",
default="model.pickle",
metavar="FILE",
help="Where the model should be loaded from")
argp.add_argument(
"--test_file",
help="Path to the test query trace file",
metavar="FILE")
argp.add_argument(
"--test_model",
type=str,
help="Model to be used for forecasting"
)
def parse_model_config(model_names: Optional[List[str]],
                       models_config: Optional[str]) -> Dict:
    """
    Build the model-name -> init-kwargs mapping used to instantiate models.

    :param model_names: List of model names; each starts with empty kwargs.
        None is treated as an empty list (the annotation allows it).
    :param models_config: Optional path to a JSON config file whose top-level
        keys override/extend the entries derived from model_names
    :return: Merged model config Dict
    :raises ValueError: if the merged config selects no models at all
    """
    # Dict comprehension instead of dict([...]); also tolerate None, which
    # previously raised TypeError despite the Optional annotation.
    model_kwargs = {model_name: {} for model_name in (model_names or [])}
    if models_config is not None:
        with open(models_config, 'r') as f:
            custom_config = json.load(f)
        # Simple and non-recursive merging of options
        model_kwargs.update(custom_config)
    if len(model_kwargs) < 1:
        raise ValueError("At least 1 model needs to be used.")
    return model_kwargs
if __name__ == "__main__":
    args = argp.parse_args()
    # Two modes: without --test_file, train (optionally generating trace
    # data first); with --test_file, load a pickled model and run inference.
    if args.test_file is None:
        # Parse models arguments
        models_kwargs = parse_model_config(args.models, args.models_config)
        # Generate OLTP trace file
        if args.gen_data:
            gen_oltp_trace(
                tpcc_weight=args.tpcc_weight,
                tpcc_rates=args.tpcc_rates,
                pattern_iter=args.pattern_iter)
            trace_file = DEFAULT_QUERY_TRACE_FILE
        else:
            trace_file = args.trace_file
        # NOTE(review): Forecaster is defined elsewhere in this package;
        # its constructor arguments mirror the CLI options above.
        forecaster = Forecaster(
            trace_file=trace_file,
            interval_us=INTERVAL_MICRO_SEC,
            seq_len=args.seq_len,
            eval_size=args.eval_size,
            horizon_len=args.horizon_len)
        models = forecaster.train(models_kwargs)
        # Save the model
        if args.model_save_path:
            with open(args.model_save_path, "wb") as f:
                pickle.dump(models, f)
    else:
        # Do inference on a trained model
        # SECURITY: pickle.load executes arbitrary code embedded in the
        # file; only load model files from a trusted source.
        with open(args.model_load_path, "rb") as f:
            models = pickle.load(f)
        forecaster = Forecaster(
            trace_file=args.test_file,
            test_mode=True,
            interval_us=INTERVAL_MICRO_SEC,
            seq_len=args.seq_len,
            eval_size=args.eval_size,
            horizon_len=args.horizon_len)
        # FIXME:
        # Assuming all the queries in the current trace file are from
        # the same cluster for now
        query_pred = forecaster.predict(0, models[0][args.test_model])
        # TODO:
        # How are we consuming predictions?
for qid, ts in query_pred.items():
LOG.info(f"[Query: {qid}] pred={ts[:10]}")
| 36.145408 | 118 | 0.619239 |
4231a5537ad061f7ccafef21420ba06d2605d9cf | 66,059 | py | Python | tests/test_master/test_jobtypes_api.py | guidow/pyfarm-master | d41c8f1eb5bfefb8400d400bcecadf197bcfb80a | [
"Apache-2.0"
] | null | null | null | tests/test_master/test_jobtypes_api.py | guidow/pyfarm-master | d41c8f1eb5bfefb8400d400bcecadf197bcfb80a | [
"Apache-2.0"
] | null | null | null | tests/test_master/test_jobtypes_api.py | guidow/pyfarm-master | d41c8f1eb5bfefb8400d400bcecadf197bcfb80a | [
"Apache-2.0"
] | null | null | null | # No shebang line, this module is meant to be imported
#
# Copyright 2013 Oliver Palmer
# Copyright 2014 Ambient Entertainment GmbH & Co. KG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from json import dumps
# test class must be loaded first
from pyfarm.master.testutil import BaseTestCase
BaseTestCase.build_environment()
from pyfarm.master.application import get_api_blueprint
from pyfarm.master.entrypoints import load_api
from pyfarm.models.jobtype import JobType, JobTypeVersion
code = """from pyfarm.jobtypes.core.jobtype import JobType
class TestJobType(JobType):
def get_command(self):
return "/usr/bin/touch"
def get_arguments(self):
return [os.path.join(
self.assignment_data["job"]["data"]["path"],
"%04d" % self.assignment_data[\"tasks\"][0][\"frame\"])]
"""
| 37.6834 | 81 | 0.473592 |
4231fa59a3b40941c8f8953e4a8dd3df4f032a6f | 742 | py | Python | imagekit/hashers.py | radicalgraphics/django-imagekit | e36290b4eef1faaf6ad864d3493df1458ef96fbb | [
"BSD-3-Clause"
] | null | null | null | imagekit/hashers.py | radicalgraphics/django-imagekit | e36290b4eef1faaf6ad864d3493df1458ef96fbb | [
"BSD-3-Clause"
] | null | null | null | imagekit/hashers.py | radicalgraphics/django-imagekit | e36290b4eef1faaf6ad864d3493df1458ef96fbb | [
"BSD-3-Clause"
] | null | null | null | from copy import copy
from hashlib import md5
from pickle import Pickler, MARK, DICT
from types import DictionaryType
from .lib import StringIO
| 23.1875 | 53 | 0.661725 |
423268278bdfbc38d38322d8349807e008e76abd | 1,262 | py | Python | sun.py | funxiun/AstroAlgorithms4Python | 98098956daba2706c993fa6370d8cdfa4013cb8d | [
"Unlicense"
] | 7 | 2018-09-29T11:35:40.000Z | 2022-01-11T14:06:44.000Z | sun.py | funxiun/AstroAlgorithms4Python | 98098956daba2706c993fa6370d8cdfa4013cb8d | [
"Unlicense"
] | null | null | null | sun.py | funxiun/AstroAlgorithms4Python | 98098956daba2706c993fa6370d8cdfa4013cb8d | [
"Unlicense"
] | 8 | 2018-09-29T11:36:01.000Z | 2021-10-17T15:25:55.000Z | '''Meeus: Astronomical Algorithms (2nd ed.), chapter 25'''
import math
from nutation_ecliptic import ecliptic
from constants import AU
def coordinates(jd):
'''equatorial coordinates of Sun'''
lon=math.radians(longitude(jd))
eps=math.radians(ecliptic(jd))
ra=math.degrees(math.atan2(math.cos(eps)*math.sin(lon),math.cos(lon)))
dec=math.degrees(math.asin(math.sin(eps)*math.sin(lon)))
return ra,dec
def longitude(jd):
'''longitude of Sun'''
T=(jd-2451545)/36525.
L=math.radians(280.46646+36000.76983*T+0.0003032*T**2)
M=math.radians(357.52911+35999.05029*T-0.0001537*T**2)
C=math.radians((1.914602-0.004817*T-0.000014*T**2)*math.sin(M)+(0.019993-0.000101*T)*math.sin(2*M)+0.000289*math.sin(3*M))
lon=L+C
return math.degrees(lon)
def distance(jd,km=True):
'''Earth-Sun distance in km'''
T=(jd-2451545)/36525.
e=0.016708634-0.000042037*T-0.0000001267*T**2
M=math.radians(357.52911+35999.05029*T-0.0001537*T**2)
C=math.radians((1.914602-0.004817*T-0.000014*T**2)*math.sin(M)+(0.019993-0.000101*T)*math.sin(2*M)+0.000289*math.sin(3*M))
nu=M+C
R=1.000001018*(1-e**2)/(1+e*math.cos(nu))
if km: R*=AU
return R
| 26.291667 | 126 | 0.62916 |
4233e43b1aa8c3735bfa71a29e6ebbf01825729f | 5,681 | py | Python | test/paths.py | cychitivav/kobuki_navigation | 9da1ad425b8804b49005720594e9837295eb9976 | [
"MIT"
] | null | null | null | test/paths.py | cychitivav/kobuki_navigation | 9da1ad425b8804b49005720594e9837295eb9976 | [
"MIT"
] | null | null | null | test/paths.py | cychitivav/kobuki_navigation | 9da1ad425b8804b49005720594e9837295eb9976 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import numpy as np
import cv2
from matplotlib import pyplot as plt
import networkx as nx
if __name__ == "__main__":
image = cv2.imread('map/map.pgm', 0)
rotated = rotate_image(image, -7.66)
#cv2.imwrite('map/rotated.pgm', rotated)
_, th = cv2.threshold(rotated, 245, 255, cv2.THRESH_BINARY)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
op = cv2.morphologyEx(th, cv2.MORPH_OPEN, kernel)
skel = cv2.ximgproc.thinning(op)
plt.figure()
plt.subplot(1,3,1)
plt.imshow(image, cmap='gray')
plt.axis('off')
plt.title('Original')
plt.subplot(1,3,2)
plt.imshow(rotated, cmap='gray')
plt.axis('off')
plt.title('Rotada')
plt.subplot(1,3,3)
plt.imshow(skel, cmap='gray')
plt.axis('off')
plt.title('Adelgazada')
base = cv2.dilate(skel, None, iterations=12)
path = cv2.cvtColor(base, cv2.COLOR_GRAY2RGB)
corners = cv2.cornerHarris(skel,7,7,0.04)
corners = cv2.dilate(corners, None)
_, corners = cv2.threshold(corners,0.001,255,cv2.THRESH_BINARY)
corners = np.uint8(corners)
contours, _ = cv2.findContours(corners,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
path[corners>0.0]=[0,255,0]
cv2.drawContours(path,contours,-1,(255,0,0),1)
G = nx.Graph()
points = []
for i, c in enumerate(contours):
# calculate moments for each contour
M = cv2.moments(c)
# calculate x,y coordinate of center
cX = int(round(M["m10"] / M["m00"]))
cY = int(round(M["m01"] / M["m00"]))
path[cY,cX]=[0,0,255]
G.add_node(i, pos=(cX,cY))
points.append((cX,cY))
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 0.4
fontColor = (0,0,255)
thickness = 1
path = cv2.putText(path, str(i), (cX,cY), font, fontScale, fontColor, thickness)
plt.figure()
plt.subplot(1,2,1)
plt.imshow(base,cmap='gray')
plt.axis('off')
plt.title('Imagen base')
plt.subplot(1,2,2)
plt.imshow(path)
plt.axis('off')
plt.title('Esquinas')
noBlack = cv2.countNonZero(cv2.cvtColor(path,cv2.COLOR_BGR2GRAY))
for i, p1 in enumerate(points):
for j, p2 in enumerate(points):
if p1 == p2: continue
test_img = cv2.line(path.copy(), p1, p2, (234,0,234), 1)
# Recount to see if the images are the same
if cv2.countNonZero(cv2.cvtColor(test_img,cv2.COLOR_BGR2GRAY)) == noBlack:
# path = cv2.line(path, p1, p2, (234,0,234), 1)
G.add_edge(i,j,weight=np.hypot(p1[0]-p2[0], p1[1]-p2[1]))
plt.figure()
nx.draw(G,with_labels=True)
x_0, y_0 = [492,500]
x_f = np.random.randint(487) + 277
y_f = np.random.randint(448) + 368
path[y_0+1,x_0+1] = (255,0,0)
path[y_f+1,x_f+1] = (255,0,0)
_, th = cv2.threshold(rotated, 245, 255, cv2.THRESH_BINARY)
ero = cv2.erode(th,None,iterations=10)
th = ero.copy()
noBlack = cv2.countNonZero(th)
for i, p in enumerate(points):
test_img = cv2.line(th.copy(), (x_0,y_0), p, 234, 1)
# Recount to see if the images are the same
if cv2.countNonZero(test_img) == noBlack:
# path = cv2.line(path, p1, p2, (234,0,234), 1)
G.add_edge('p_0',i,weight=np.hypot(p[0]-x_0, y_0-p[1]))
for i, p in enumerate(points):
test_img = cv2.line(th.copy(), (x_f,y_f), p, 234, 1)
# Recount to see if the images are the same
if cv2.countNonZero(test_img) == noBlack:
# path = cv2.line(path, p1, p2, (234,0,234), 1)
G.add_edge('p_f',i,weight=np.hypot(p[0]-x_f, y_f-p[1]))
plan = nx.shortest_path(G,'p_0','p_f')
print plan
for i in range(len(plan)-1):
if i == 0:
path = cv2.line(path, (x_0,y_0), points[plan[i+1]], (251,229,78), 1)
elif i == len(plan)-2:
path = cv2.line(path, points[plan[i]], (x_f,y_f), (251,229,78), 1)
else:
path = cv2.line(path, points[plan[i]], points[plan[i+1]], (251,229,78), 1)
plt.figure()
plt.imshow(ero,cmap='gray')
plt.axis('off')
plt.title('Imagen erosionada')
plt.show()
| 31.38674 | 88 | 0.520155 |
4233e6b88d45b6951dc540a0e3110566d67aa657 | 458 | py | Python | intro-to-programming/python-for-everyone/3-variables-expressions-statements/exercise-4.py | udpsunil/computer-science | 94e3dfc7d39ad139671ab1a3457a61a1fd48fe39 | [
"MIT"
] | null | null | null | intro-to-programming/python-for-everyone/3-variables-expressions-statements/exercise-4.py | udpsunil/computer-science | 94e3dfc7d39ad139671ab1a3457a61a1fd48fe39 | [
"MIT"
] | null | null | null | intro-to-programming/python-for-everyone/3-variables-expressions-statements/exercise-4.py | udpsunil/computer-science | 94e3dfc7d39ad139671ab1a3457a61a1fd48fe39 | [
"MIT"
] | null | null | null | # Assume that we execute the following assignment statements
# width = 17
# height = 12.0
width = 17
height = 12.0
value_1 = width // 2
value_2 = width / 2.0
value_3 = height / 3
value_4 = 1 + 2 * 5
print(f"value_1 is {value_1} and it's type is {type(value_1)}")
print(f"value_2 is {value_2} and it's type is {type(value_2)}")
print(f"value_3 is {value_3} and it's type is {type(value_3)}")
print(f"value_4 is {value_4} and it's type is {type(value_4)}")
| 26.941176 | 63 | 0.68559 |
42370720ae2a40bece1dbd04a95205d5f5073cbf | 131 | py | Python | apps/weapons/admin.py | tufbel/wFocus | ee0f02053b8a5bc9c40dd862306fc5df1a063b9d | [
"Apache-2.0"
] | null | null | null | apps/weapons/admin.py | tufbel/wFocus | ee0f02053b8a5bc9c40dd862306fc5df1a063b9d | [
"Apache-2.0"
] | 11 | 2020-06-06T01:51:51.000Z | 2022-02-10T14:31:21.000Z | apps/weapons/admin.py | tufbel/wFocus | ee0f02053b8a5bc9c40dd862306fc5df1a063b9d | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
# Register your models here.
from apps.weapons.models import Weapon
admin.site.register(Weapon)
| 18.714286 | 38 | 0.80916 |
4237a4d8945ebfffd7fd8c863df2a43bde57f4e3 | 975 | py | Python | modules/kubrick/apps/awards/models.py | Lab-Quatro/aposcar | 97631f2e3939566cc4e5b81e50c58ce03a5350a4 | [
"MIT"
] | 3 | 2021-07-05T14:18:27.000Z | 2021-09-02T10:15:55.000Z | modules/kubrick/apps/awards/models.py | Lab-Quatro/aposcar | 97631f2e3939566cc4e5b81e50c58ce03a5350a4 | [
"MIT"
] | 1 | 2021-10-31T21:40:39.000Z | 2021-10-31T21:40:39.000Z | modules/kubrick/apps/awards/models.py | Lab-Quatro/aposcar | 97631f2e3939566cc4e5b81e50c58ce03a5350a4 | [
"MIT"
] | null | null | null | from django.db import models
| 25.657895 | 70 | 0.695385 |
42383a1d8efb06b1b9b9ac90bcfd5e6b24b3d414 | 6,113 | py | Python | scholarly_citation_finder/apps/citation/search/PublicationDocumentExtractor.py | citationfinder/scholarly_citation_finder | 3e6c340cfebc934a013759e27d8c145171110156 | [
"MIT"
] | 1 | 2017-01-23T18:02:42.000Z | 2017-01-23T18:02:42.000Z | scholarly_citation_finder/apps/citation/search/PublicationDocumentExtractor.py | citationfinder/scholarly_citation_finder | 3e6c340cfebc934a013759e27d8c145171110156 | [
"MIT"
] | null | null | null | scholarly_citation_finder/apps/citation/search/PublicationDocumentExtractor.py | citationfinder/scholarly_citation_finder | 3e6c340cfebc934a013759e27d8c145171110156 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
from datetime import datetime
from scholarly_citation_finder import config
from scholarly_citation_finder.apps.parser.Parser import Parser
from scholarly_citation_finder.apps.core.models import PublicationUrl
from scholarly_citation_finder.tools.extractor.grobid.GrobidExtractor import GrobidExtractor
from scholarly_citation_finder.lib.file import download_file_pdf, DownloadFailedException, UnexpectedContentTypeException
from scholarly_citation_finder.lib.process import ProcessException
from scholarly_citation_finder.apps.parser.Exceptions import ParserRollbackError
from scholarly_citation_finder.lib.string import normalize_string
from scholarly_citation_finder.tools.extractor.grobid.TeiParser import TeiParserNoDocumentTitle,\
TeiParserNoReferences
from scholarly_citation_finder.tools.nameparser.StringMatching import nearly_match
logger = logging.getLogger(__name__)
| 45.962406 | 135 | 0.657615 |
423985c9471e18c947bb00b13f5fb82114424fab | 2,884 | py | Python | webapp/web.py | thunderz99/azure_image_caption | f7d3649051c948c9651b7d3f9df006d84449cc14 | [
"MIT"
] | 1 | 2019-04-19T13:22:15.000Z | 2019-04-19T13:22:15.000Z | webapp/web.py | thunderz99/azure_image_caption | f7d3649051c948c9651b7d3f9df006d84449cc14 | [
"MIT"
] | null | null | null | webapp/web.py | thunderz99/azure_image_caption | f7d3649051c948c9651b7d3f9df006d84449cc14 | [
"MIT"
] | null | null | null | import sys
import os
import json
import urllib
from PIL import Image
from flask import Flask, request, redirect, url_for
from flask import send_from_directory, render_template
from werkzeug.utils import secure_filename
from datetime import datetime
from caption_service import CaptionService
from translation_service import TranslationService
sys.path.append(os.curdir) #
UPLOAD_FOLDER = '/tmp/uploads'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__, static_url_path='/static', static_folder='assets/static')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
cs = CaptionService()
ts = TranslationService()
def get_caption(filepath):
print('getting caption', filepath)
caption_en = cs.get_caption(filepath)
caption_ja = ts.get_translation(caption_en)
return caption_en, caption_ja
if __name__ == '__main__':
port = os.environ.get('PORT', 5000)
app.run(host='0.0.0.0', port=port)
| 28 | 79 | 0.691054 |
423cfa9d306c6cce1a1273c94c45fb8dde9787d8 | 16,706 | py | Python | map2loop/m2l_map_checker.py | Leguark/map2loop | 365dde4490f50ad73612120a7d4bee61e54a9a18 | [
"MIT"
] | null | null | null | map2loop/m2l_map_checker.py | Leguark/map2loop | 365dde4490f50ad73612120a7d4bee61e54a9a18 | [
"MIT"
] | null | null | null | map2loop/m2l_map_checker.py | Leguark/map2loop | 365dde4490f50ad73612120a7d4bee61e54a9a18 | [
"MIT"
] | null | null | null | import geopandas as gpd
from shapely.geometry import LineString, Polygon,MultiLineString
import os.path
from map2loop import m2l_utils
import warnings
import numpy as np
import pandas as pd
#explodes polylines and modifies objectid for exploded parts
| 44.079156 | 160 | 0.534359 |
423dba72ede1b75a23e84d734d1a416227c1565d | 2,116 | py | Python | DeepBrainSeg/readers/nib.py | JasperHG90/DeepBrainSeg | 92cf5f758f115e7ac51202966a1287fb58c09d78 | [
"MIT"
] | 130 | 2019-04-09T02:35:44.000Z | 2022-02-26T15:53:19.000Z | DeepBrainSeg/readers/nib.py | koriavinash1/DeepMedX | 02fcee6d7b21b16e7f1e28089f24be56ef6b9383 | [
"MIT"
] | 11 | 2019-09-18T03:55:29.000Z | 2021-01-03T13:11:20.000Z | DeepBrainSeg/readers/nib.py | koriavinash1/DeepMedX | 02fcee6d7b21b16e7f1e28089f24be56ef6b9383 | [
"MIT"
] | 38 | 2018-11-28T01:34:41.000Z | 2022-01-17T03:53:47.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# author: Avinash Kori
# contact: koriavinash1@gmail.com
# MIT License
# Copyright (c) 2020 Avinash Kori
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import tempfile
from time import time
import datetime
import numpy as np
import nibabel as nib
| 30.666667 | 80 | 0.676749 |
423ee3e6a6459504377643bd233fea0f011a4f80 | 259 | py | Python | tensorflow/intro/main.py | donutloop/machine_learning_examples | 46192a57e2dd194925ae76d6bfb169cd2af142dd | [
"MIT"
] | 1 | 2018-10-08T18:24:40.000Z | 2018-10-08T18:24:40.000Z | tensorflow/intro/main.py | donutloop/machine_learning_examples | 46192a57e2dd194925ae76d6bfb169cd2af142dd | [
"MIT"
] | null | null | null | tensorflow/intro/main.py | donutloop/machine_learning_examples | 46192a57e2dd194925ae76d6bfb169cd2af142dd | [
"MIT"
] | 1 | 2018-10-09T06:50:48.000Z | 2018-10-09T06:50:48.000Z | import os
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
x1 = tf.constant(5)
x2 = tf.constant(6)
result = tf.multiply(x1, x2)
print(result)
sess = tf.Session()
with tf.Session() as sess:
output = sess.run(result)
print(output)
| 15.235294 | 40 | 0.683398 |
423f75233120c5c9e5189a28dbf159544fa15eba | 845 | py | Python | twitter-bots/auto_liker.py | debasish-dutta/Python-projects | e06710ba47b37d42d83bd1859c46023513ea1c80 | [
"MIT"
] | null | null | null | twitter-bots/auto_liker.py | debasish-dutta/Python-projects | e06710ba47b37d42d83bd1859c46023513ea1c80 | [
"MIT"
] | null | null | null | twitter-bots/auto_liker.py | debasish-dutta/Python-projects | e06710ba47b37d42d83bd1859c46023513ea1c80 | [
"MIT"
] | null | null | null | import auth_key
import tweepy
import time
auth = tweepy.OAuthHandler(auth_key.API_key, auth_key.API_secret_key)
auth.set_access_token(auth_key.Access_token, auth_key.Access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
user = api.me()
indId = 2282863
india_trend = api.trends_place(indId)
tweetNo = 5
a =[]
trndInd = api.trends_place(indId)
for trend in trndInd[0]['trends']:
a.append(trend['name'])
for item in a:
print(item)
for tweet in tweepy.Cursor(api.search, item).items(tweetNo):
try:
print("tweet liked & retweeted")
tweet.favorite()
tweet.retweet()
time.sleep(10)
except tweepy.TweepError as e:
print(e.reason)
except StopIteration:
break
| 24.852941 | 80 | 0.647337 |
423f9534e4fce6ed19f5f3059bb0ba6698e76415 | 745 | py | Python | ds_discovery/engines/distributed_mesh/domain_products/controller/src/controller.py | project-hadron/discovery-transition-ds | 08229ca3b7617b42ce2dd8e47ff93876c0843810 | [
"BSD-3-Clause"
] | 2 | 2020-09-21T17:24:16.000Z | 2021-05-28T18:02:54.000Z | ds_discovery/engines/distributed_mesh/domain_products/controller/src/controller.py | project-hadron/discovery-transition-ds | 08229ca3b7617b42ce2dd8e47ff93876c0843810 | [
"BSD-3-Clause"
] | null | null | null | ds_discovery/engines/distributed_mesh/domain_products/controller/src/controller.py | project-hadron/discovery-transition-ds | 08229ca3b7617b42ce2dd8e47ff93876c0843810 | [
"BSD-3-Clause"
] | 1 | 2021-07-23T13:52:04.000Z | 2021-07-23T13:52:04.000Z | from ds_discovery import Controller
import os
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=DeprecationWarning)
__author__ = 'Darryl Oatridge'
if __name__ == '__main__':
domain_controller()
| 32.391304 | 100 | 0.777181 |
423fee1037a4130b27a1927c09025e289e851a6f | 1,491 | py | Python | utils_test.py | lostsquirrel/words | aaa4bb2b3a9c8c7c7300e29ec73f39cff4409b8d | [
"MIT"
] | null | null | null | utils_test.py | lostsquirrel/words | aaa4bb2b3a9c8c7c7300e29ec73f39cff4409b8d | [
"MIT"
] | null | null | null | utils_test.py | lostsquirrel/words | aaa4bb2b3a9c8c7c7300e29ec73f39cff4409b8d | [
"MIT"
] | null | null | null | import json
import unittest
from utils import CustomEncoder, Paging, ValidationError, generate_uuid, Validator
| 26.625 | 82 | 0.564051 |
424044b56baa6c4ca720ef729a7deb71c15b2301 | 1,342 | py | Python | src/pyclean/cli.py | uranusjr/pyclean-py | ba3f4674d02fde396391e0f16906bd2b9cf7cd2d | [
"ISC"
] | null | null | null | src/pyclean/cli.py | uranusjr/pyclean-py | ba3f4674d02fde396391e0f16906bd2b9cf7cd2d | [
"ISC"
] | null | null | null | src/pyclean/cli.py | uranusjr/pyclean-py | ba3f4674d02fde396391e0f16906bd2b9cf7cd2d | [
"ISC"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import logging
import os
import sys
from . import entries, meta
logger = logging.getLogger(__name__)
if __name__ == '__main__':
main()
| 21.301587 | 68 | 0.632638 |
4240a3a135f3d439bdb928b669c203c2c5a8b79b | 6,890 | py | Python | app.py | ZhongxuanWang/simple_web_remainder-python | e61f9cf05d464fa55ae628fe415ea164f7574cde | [
"MIT"
] | null | null | null | app.py | ZhongxuanWang/simple_web_remainder-python | e61f9cf05d464fa55ae628fe415ea164f7574cde | [
"MIT"
] | null | null | null | app.py | ZhongxuanWang/simple_web_remainder-python | e61f9cf05d464fa55ae628fe415ea164f7574cde | [
"MIT"
] | null | null | null | from flask import Flask, render_template, url_for, redirect, request
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from dateutil.relativedelta import relativedelta
from demail import demail
__author__ = 'Zhongxuan Wang'
__doc__ = 'Never Forget online remainder'
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///list.db'
# Remember, every time you make changes to the column (such as adding one col or removing one col, change the value),
# you have to do the following: open terminal from pycharm, python3.7, from app import db, db.create_all() and exit.
db = SQLAlchemy(app)
db.create_all()
datetime_format = '%b-%d-%Y %H:%M'
'''
This part requires your email information in order to receive email notifications. (This is left blank intentionally)
'''
email_account = ''
email_password = ''
# TODO send email warning if the due time is so soon and still incomplete,
'''
This will return a new date & time that after adding the values in time dictionaries
'''
def read_file(filename):
try:
with open(filename) as f:
return f.readline()
except IOError:
print("IO ERROR Raised. Reading file failed,")
f = open(filename, "w")
f.write('email@example.com')
f.close()
return 'content'
def write_file(filename, file_content):
try:
with open(filename, 'w') as f:
f.write(file_content)
except IOError:
print("IO ERROR Raised. Writing file failed,")
return ''
if __name__ == '__main__':
app.run(debug=False)
| 31.318182 | 118 | 0.633962 |
424371e9002a0d30915e7782779c23b77cf1168c | 522 | py | Python | homeassistant/components/solaredge/__init__.py | DavidDeSloovere/core | 909a20b36d4df6724c955c2ae28cb82fe6d50c2e | [
"Apache-2.0"
] | 4 | 2020-08-10T20:02:24.000Z | 2022-01-31T02:14:22.000Z | homeassistant/components/solaredge/__init__.py | DavidDeSloovere/core | 909a20b36d4df6724c955c2ae28cb82fe6d50c2e | [
"Apache-2.0"
] | 78 | 2020-07-23T07:13:08.000Z | 2022-03-31T06:02:04.000Z | homeassistant/components/solaredge/__init__.py | DavidDeSloovere/core | 909a20b36d4df6724c955c2ae28cb82fe6d50c2e | [
"Apache-2.0"
] | 3 | 2022-01-17T20:10:54.000Z | 2022-01-17T20:17:22.000Z | """The solaredge integration."""
from __future__ import annotations
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN
CONFIG_SCHEMA = cv.deprecated(DOMAIN)
| 27.473684 | 77 | 0.781609 |
4243ae92dc1a6dc43f40406353ff665ec5905d97 | 3,241 | py | Python | main.py | eteq/door_beeper | 56c3ddcd9b24c66870aefa4dda0f3df3960049b1 | [
"Apache-2.0"
] | null | null | null | main.py | eteq/door_beeper | 56c3ddcd9b24c66870aefa4dda0f3df3960049b1 | [
"Apache-2.0"
] | null | null | null | main.py | eteq/door_beeper | 56c3ddcd9b24c66870aefa4dda0f3df3960049b1 | [
"Apache-2.0"
] | null | null | null | import uos
import utime
import machine
from machine import Pin, PWM
import utils
default_config = dict(
sleep_time_ms = 250,
freezer_delay_ms = 1000,
fridge_delay_ms = 1000,
write_battery_voltage = True,
piezo_plus_pin_num = 12,
piezo_min_pin_num = 33,
freezer_switch_pin_num = 23,
fridge_switch_pin_num = 21
)
try:
config_dct = {}
execfile('config.py', config_dct)
except Exception as e:
print("Could not run config file, using defaults:", default_config, '. File error:')
print(e)
globals().update(default_config)
else:
for varnm in default_config.keys():
if varnm in config_dct:
globals()[varnm] = config_dct[varnm]
print('Loaded config value for', varnm, ':', config_dct[varnm])
else:
globals()[varnm] = default_config[varnm]
print('Using default config value for', varnm, ':', default_config[varnm])
# setup pins
led_pin = Pin(13, Pin.OUT)
piezo_min_pin = Pin(piezo_min_pin_num, Pin.OUT)
freezer_switch_pin = Pin(freezer_switch_pin_num, Pin.IN, Pin.PULL_UP)
fridge_switch_pin = Pin(fridge_switch_pin_num, Pin.IN, Pin.PULL_UP)
#set initial state of pins
piezo_min_pin.value(0)
led_pin.value(0)
# set up PWM
piezo_plus_pwm = PWM(Pin(piezo_plus_pin_num), duty=512)
piezo_plus_pwm.deinit()
# how often to write out the battery status. None means don't do it at all
battery_time_spacing_secs = 600
# use an infinite loop to watch for door opening
last_battery_time = None
open_times = {'Freezer': None, 'Fridge': None}
while True:
check_open(freezer_switch_pin, 'Freezer', open_times, ([1300,1000], 10, 500), freezer_delay_ms)
check_open(fridge_switch_pin, 'Fridge', open_times, ([1200,900], 10, 500), fridge_delay_ms)
utime.sleep_ms(sleep_time_ms)
# write out battery status if desired
if battery_time_spacing_secs is not None:
if last_battery_time is None:
last_battery_time = utime.time()
else:
if (utime.time() - last_battery_time) > battery_time_spacing_secs:
voltage = utils.read_battery_voltage()
print('Battery level:', voltage, 'V')
if write_battery_voltage:
with open('battery_voltage', 'a') as f:
f.write(str(utime.time()))
f.write(' ')
f.write(str(voltage))
f.write('\n')
last_battery_time = utime.time()
| 34.849462 | 100 | 0.622339 |
42440ed0ff98d8396cf65df66d98259bed94142f | 6,034 | py | Python | modules/backend.py | Uncle-Yuanl/model_zoo | 455a2fd4ac5562a922f29e68de2f4e1fb2d3d2d8 | [
"Apache-2.0"
] | null | null | null | modules/backend.py | Uncle-Yuanl/model_zoo | 455a2fd4ac5562a922f29e68de2f4e1fb2d3d2d8 | [
"Apache-2.0"
] | null | null | null | modules/backend.py | Uncle-Yuanl/model_zoo | 455a2fd4ac5562a922f29e68de2f4e1fb2d3d2d8 | [
"Apache-2.0"
] | null | null | null | import os, sys
from distutils.util import strtobool
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.python.util import nest, tf_inspect
from tensorflow.python.eager import tape
# from tensorflow.python.ops.custom_gradient import graph_mode_decorator
#
do_recompute = strtobool(os.environ.get('RECOMPUTE', '0'))
# https://zhuanlan.zhihu.com/p/349492378
# https://arxiv.53yu.com/pdf/1606.08415.pdf
def gelu_erf(x):
"""erfgelu
"""
# np64tf32
return 0.5 * x * (1.0 + tf.math.erf(x / np.sqrt(2.0)))
def set_gelu(version):
"""gelu
"""
version = version.lower()
assert version in ['erf', 'tanh'], 'gelu version must in erf or tanh'
if version == 'erf':
tf.keras.utils.get_custom_objects()['gelu'] = gelu_erf
elif version == 'tanh':
tf.keras.utils.get_custom_objects()['gelu'] = gelu_tanh
def align(tensor, axes, ndim=None):
"""tensorexpand_dimstranspose
axes: itensoraxes[i]
ndim: tensor
Example:
>>> tensor = tf.constant(np.arange(12).reshape(3,4), dtype=tf.float32)
>>> print(tensor)
tf.Tensor(
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]
[ 8. 9. 10. 11.]], shape=(3, 4), dtype=float32)
>>> same_dim = align(tensor, [0, -1], 2)
>>> print(same_dim)
tf.Tensor(
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]
[ 8. 9. 10. 11.]], shape=(3, 4), dtype=float32)
>>> more_dim = align(tensor, [0, -1], 3)
>>> print(more_dim)
tf.Tensor(
[[[ 0. 1. 2. 3.]]
<BLANKLINE>
[[ 4. 5. 6. 7.]]
<BLANKLINE>
[[ 8. 9. 10. 11.]]], shape=(3, 1, 4), dtype=float32)
"""
assert len(axes) == K.ndim(tensor)
indices = [None] * (ndim or max(axes))
for i in axes:
indices[i] = slice(None)
return tensor[indices]
def sequence_masking(x, mask, value=0, axis=None):
"""mask
parameters:
-----------
x: tensor
mask: tensor
(batch_size, seq_len)0-1
value: float or str
mask'inf''-inf'
axis: int
1
"""
if mask is None:
return x
# x*
x_type = K.dtype(x)
if x_type == 'bool':
x = K.cast(x, 'int32')
# mask = x
if K.dtype(mask) != K.dtype(x):
mask = K.cast(mask, K.dtype(x))
if value == '-inf':
# --------------------------
value = -K.infinity
if value == 'inf':
value = K.infinity
value = K.cast(value, K.dtype(x))
# axis
if axis is None:
axis = 1
if axis < 0:
axis = K.ndim(x) + axis
assert axis > 0, 'axis must be greater than 0'
# shape
for _ in range(axis - 1): # > 1
mask = K.expand_dims(mask, 1) # 0batch_size
for _ in range(K.ndim(x) - K.ndim(mask)):
mask = K.expand_dims(mask, K.ndim(mask))
x = x * mask + value * (1 - mask)
# x
if x_type == 'bool':
x = K.cast(x, x_type)
return x
def recompute_grad(call):
# ----------------------------------------------
"""kerascall
https://arxiv.org/abs/1604.06174
"""
if not do_recompute:
return call
return inner
def infinity():
"""
"""
return tf.keras.utils.get_custom_objects().get('infinity', 1e12)
def set_infinity(value):
"""
"""
tf.keras.utils.get_custom_objects()['infinity'] = value
# keras.backend K.epsilon()
K.infinity = infinity
K.set_infinity = set_infinity
sys.modules['tensorflow.keras.backend'] = K
custom_objects = {
'gelu_erf': gelu_erf,
'gelu_tanh': gelu_tanh,
'gelu': gelu_erf,
}
tf.keras.utils.get_custom_objects().update(custom_objects)
if __name__ == '__main__':
import doctest
doctest.testmod()
| 27.678899 | 75 | 0.542592 |
42441c80231ccaad24f01bdd333bcd71d34fa2e7 | 2,957 | py | Python | apod_daily.py | gultugaydemir/apod_daily | 994ccebdf2646c1a700110d891ea73261773bea2 | [
"CC0-1.0"
] | null | null | null | apod_daily.py | gultugaydemir/apod_daily | 994ccebdf2646c1a700110d891ea73261773bea2 | [
"CC0-1.0"
] | null | null | null | apod_daily.py | gultugaydemir/apod_daily | 994ccebdf2646c1a700110d891ea73261773bea2 | [
"CC0-1.0"
] | null | null | null | import datetime
import os
import requests
import tweepy
from PIL import Image
# Get your own keys from developer.twitter.com
# You can find a detailed tutorial about authenticating accounts from github.com/gultugaydemir/Twitter_OAuth1.0a
consumer_key = ''
consumer_secret = ''
access_token = ''
access_token_secret = ''
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# You can get your own API key from api.nasa.gov. However simply writing "DEMO_KEY" works too, as it can be seen on the website.
response = requests.get("https://api.nasa.gov/planetary/apod?api_key=DEMO_KEY") #This link contains the data we needed about the photo of the day.
data = response.json() # Converts the data to JSON format so that we can retrieve data from it.
description = data["title"] # Getting the title of the photo.
date = datetime.datetime.now().strftime("%y%m%d") # We need the {yymmdd} format for the source link.
source = "https://apod.nasa.gov/apod/ap{date}.html".format(date=date) # Creating the source link for the posted photo.
message = '"' + description + '" \n' + source # The status format for the image tweets.
message_video = '"' + description + '" \n' # The status format for the YouTube tweets.
try:
image = data["hdurl"] # The image URL from API.
except KeyError: # Code throws KeyError if a video is posted that day, since API doesn't include a "hdurl" element.
image = data["url"]
image = image.replace("embed/", "watch?v=")
api.update_status(status = message_video+ source + ' \n'+ image) # Bot only tweets the YouTube link and not a picture.
print("Video tweeted successfully.")
quit()
# Tweepy's "update_with_media" function only allows us to tweet an image from the local directory.
# Since posting the picture from a URL would be more practical, I'm using a function that will complete this step for me automatically.
tweet_image(image, message) # Tweeting the picture with the status. Image URL and the status message are used as parameters.
| 40.506849 | 147 | 0.683801 |
424460c099ec096eec540d08794ad2f9da57997e | 6,414 | py | Python | datasets/dad.py | LivingSkyTechnologies/Document_Layout_Segmentation | 0db00a18fb39afa1efa8ae183bbd57309a6ebfcf | [
"MIT"
] | 4 | 2021-01-28T23:06:43.000Z | 2022-01-15T19:17:07.000Z | datasets/dad.py | LivingSkyTechnologies/Document_Layout_Segmentation | 0db00a18fb39afa1efa8ae183bbd57309a6ebfcf | [
"MIT"
] | 2 | 2021-01-25T21:54:05.000Z | 2021-08-23T21:19:21.000Z | datasets/dad.py | LivingSkyTechnologies/Document_Layout_Segmentation | 0db00a18fb39afa1efa8ae183bbd57309a6ebfcf | [
"MIT"
] | 2 | 2021-01-28T13:39:33.000Z | 2022-01-15T19:17:13.000Z | import pickle
import os
import tensorflow as tf
from glob import glob
import utils.DataLoaderUtils as dlu
from utils.AnnotationUtils import write_dad_masks
# Static Dataset Config Options
TAG_NAMES = {'highlights',
'urls_to_supplementary',
'abbreviation',
'abstract',
'additional_file',
'affiliation',
'appendice',
'author_bio',
'author_contribution',
'author_name',
'availability_of_data',
'caption',
'conflict_int',
'contact_info',
'copyright',
'core_text',
'date',
'doi',
'figure',
'funding_info',
'index',
'keywords',
'list',
'math_formula',
'note',
'publisher_note',
'reference',
'section_heading',
'subheading',
'table',
'title',
'nomenclature',
'code',
'publisher',
'journal',
'corresponding_author',
'editor',
'ethics',
'consent_publication',
'MSC',
'article_history',
'acknowledgment',
'background'}
TAG_MAPPING = {'abbreviation': 'background',
'acknowledgment': 'background',
'additional_file': 'background',
'affiliation': 'background',
'article_history': 'background',
'author_contribution': 'background',
'availability_of_data': 'background',
'code': 'background',
'conflict_int': 'background',
'consent_publication': 'background',
'corresponding_author': 'background',
'date': 'background',
'ethics': 'background',
'index': 'background',
'journal': 'background',
'nomenclature': 'background',
'publisher_note': 'background',
'urls_to_supplementary': 'background',
'msc': 'background',
'MSC': 'background',
'highlights': 'background',
'subheading': 'section_heading'}
SAVED_PKL_FILE = 'saved_dad_paths.pkl'
BUFFER_SIZE = 500
MASKS_DIR = "masks"
DOCUMENTS_DIR = "documents"
ANNOTATIONS_DIR = "annotations"
| 42.76 | 140 | 0.588868 |
4248c96a6cf8583046ad1cd239d37aa7ac5e5d96 | 740 | py | Python | terrascript/resource/ddelnano/mikrotik.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 507 | 2017-07-26T02:58:38.000Z | 2022-01-21T12:35:13.000Z | terrascript/resource/ddelnano/mikrotik.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 135 | 2017-07-20T12:01:59.000Z | 2021-10-04T22:25:40.000Z | terrascript/resource/ddelnano/mikrotik.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 81 | 2018-02-20T17:55:28.000Z | 2022-01-31T07:08:40.000Z | # terrascript/resource/ddelnano/mikrotik.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:21:43 UTC)
import terrascript
__all__ = [
"mikrotik_bgp_instance",
"mikrotik_bgp_peer",
"mikrotik_dhcp_lease",
"mikrotik_dns_record",
"mikrotik_pool",
"mikrotik_scheduler",
"mikrotik_script",
]
| 17.209302 | 73 | 0.754054 |
424a2a5c3d067c0a48cf8560895baac37e4bf0ea | 812 | py | Python | test/threaddd.py | liaohongdong/IPProxy | 90152f02708717c661b7c1532e4a131a55103950 | [
"MIT"
] | null | null | null | test/threaddd.py | liaohongdong/IPProxy | 90152f02708717c661b7c1532e4a131a55103950 | [
"MIT"
] | 1 | 2021-03-31T19:17:41.000Z | 2021-03-31T19:17:41.000Z | test/threaddd.py | liaohongdong/IPProxy | 90152f02708717c661b7c1532e4a131a55103950 | [
"MIT"
] | null | null | null | import time
import queue
import threading
if __name__ == '__main__':
num_of_threads = 5
source = [i for i in range(1, 21)]
q = queue.Queue()
threads = []
for i in range(1, num_of_threads + 1):
t = threading.Thread(target=aaa, args=(i,))
threads.append(t)
t.start()
for item in source:
time.sleep(0.01)
q.put(item)
q.join()
# print("----------")
# #
for i in range(num_of_threads):
q.put(None)
# for t in threads:
# t.join()
# print(threads)
| 20.820513 | 58 | 0.507389 |
424a464b22116de9e6ed995f96ff3b93bc5bdfe1 | 665 | py | Python | Codes/Liam/203_remove_linked_list_elements.py | liuxiaohui1221/algorithm | d80e64185ceb4798ac5389bfbd226dc1d406f6b5 | [
"Apache-2.0"
] | 256 | 2017-10-25T13:02:15.000Z | 2022-02-25T13:47:59.000Z | Codes/Liam/203_remove_linked_list_elements.py | liuxiaohui1221/algorithm | d80e64185ceb4798ac5389bfbd226dc1d406f6b5 | [
"Apache-2.0"
] | 56 | 2017-10-27T01:34:20.000Z | 2022-03-01T00:20:55.000Z | Codes/Liam/203_remove_linked_list_elements.py | liuxiaohui1221/algorithm | d80e64185ceb4798ac5389bfbd226dc1d406f6b5 | [
"Apache-2.0"
] | 83 | 2017-10-25T12:51:53.000Z | 2022-02-15T08:27:03.000Z | # : 68 ms
# : 16.6 MB
# sentinelhead
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
| 22.166667 | 67 | 0.538346 |
424d5b248c6b3fcd0ec5e3855e8a59d969b36415 | 1,296 | py | Python | bailleurs/migrations/0001_initial.py | MTES-MCT/appel | 3b840ccea600ef31cfea57721fe5e6edbdbc2c79 | [
"MIT"
] | null | null | null | bailleurs/migrations/0001_initial.py | MTES-MCT/appel | 3b840ccea600ef31cfea57721fe5e6edbdbc2c79 | [
"MIT"
] | 2 | 2021-12-15T05:10:43.000Z | 2021-12-15T05:11:00.000Z | bailleurs/migrations/0001_initial.py | MTES-MCT/appel | 3b840ccea600ef31cfea57721fe5e6edbdbc2c79 | [
"MIT"
] | 1 | 2021-12-28T13:06:06.000Z | 2021-12-28T13:06:06.000Z | # Generated by Django 3.2.5 on 2021-07-06 14:18
import uuid
from django.db import migrations, models
| 35.027027 | 80 | 0.548611 |
424d80dc7999edc27c21ab202ecf629475f40e26 | 2,026 | py | Python | tests/primitives/flow/probe_tcpip_extended_unibiflow_test.py | kjerabek/netexp | 362c200230ba7b2549adcedd4a9890492dad51c7 | [
"MIT"
] | null | null | null | tests/primitives/flow/probe_tcpip_extended_unibiflow_test.py | kjerabek/netexp | 362c200230ba7b2549adcedd4a9890492dad51c7 | [
"MIT"
] | null | null | null | tests/primitives/flow/probe_tcpip_extended_unibiflow_test.py | kjerabek/netexp | 362c200230ba7b2549adcedd4a9890492dad51c7 | [
"MIT"
] | null | null | null | from tests.primitives.flow import probe_tcpip_extended_biflow_test
from netexp.primitives.flow import TCPIPFlowExtendedUniBiFlowInfo
from netexp.common import naming
| 56.277778 | 117 | 0.651037 |
424f02955cdf26ece00480c3e560a36d37aea6f6 | 19,816 | py | Python | optionstrader/database.py | Zaitsev11/Optionstrader | ed2dbef802ad08f14a0e5280e91746f1bf1fa3f3 | [
"MIT"
] | 6 | 2018-04-26T03:02:04.000Z | 2022-02-26T04:58:53.000Z | optionstrader/database.py | webclinic017/Optionstrader | ed2dbef802ad08f14a0e5280e91746f1bf1fa3f3 | [
"MIT"
] | null | null | null | optionstrader/database.py | webclinic017/Optionstrader | ed2dbef802ad08f14a0e5280e91746f1bf1fa3f3 | [
"MIT"
] | 5 | 2019-12-01T08:09:08.000Z | 2021-11-28T03:43:24.000Z | import time
import mysql.connector
from optionstrader.customlogging import CustomLog
from optionstrader.parser import Parser
MYSQL_IP_ADDR = '192.168.1.10'
# Used to debug via logs
DEBUG = False
| 41.717895 | 155 | 0.605874 |
424fc9a502a8c9fe3c5da2a1e3dec902d92abba5 | 10,254 | py | Python | backend/api/migrations/0001_initial.py | leowotzak/ljwe-db | ab49f90feaac5fad26efa900db5567c9c09f3435 | [
"MIT"
] | null | null | null | backend/api/migrations/0001_initial.py | leowotzak/ljwe-db | ab49f90feaac5fad26efa900db5567c9c09f3435 | [
"MIT"
] | 9 | 2021-11-17T18:31:29.000Z | 2021-11-21T00:47:39.000Z | backend/api/migrations/0001_initial.py | leowotzak/ljwe-db | ab49f90feaac5fad26efa900db5567c9c09f3435 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2021-11-24 02:52
from django.db import migrations, models
import django.db.models.deletion
| 49.062201 | 111 | 0.536083 |
42500bb71a15c0815810b37eafb946db4fb96b64 | 3,713 | py | Python | Ch2_Linked_Lists/test/test_CTCI_Ch2_Ex6.py | mtrdazzo/CTCI | 30a82aed96b05fe21b7d337a138e4ec19950eb9d | [
"MIT"
] | null | null | null | Ch2_Linked_Lists/test/test_CTCI_Ch2_Ex6.py | mtrdazzo/CTCI | 30a82aed96b05fe21b7d337a138e4ec19950eb9d | [
"MIT"
] | null | null | null | Ch2_Linked_Lists/test/test_CTCI_Ch2_Ex6.py | mtrdazzo/CTCI | 30a82aed96b05fe21b7d337a138e4ec19950eb9d | [
"MIT"
] | null | null | null | from unittest import TestCase
from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node
from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import PalindromeSinglyLinkedList, is_palindrome_brute_force
from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import is_palindrome_reverse
| 24.919463 | 110 | 0.649879 |
4250d5da81ea72feff3b65a105d5b2c76567a7d7 | 49,917 | py | Python | alphafold2_pytorch/utils.py | nilbot/alphafold2 | 455124ca9135e534739b9670c010512487965547 | [
"MIT"
] | 1 | 2022-01-21T04:58:18.000Z | 2022-01-21T04:58:18.000Z | alphafold2_pytorch/utils.py | nilbot/alphafold2 | 455124ca9135e534739b9670c010512487965547 | [
"MIT"
] | null | null | null | alphafold2_pytorch/utils.py | nilbot/alphafold2 | 455124ca9135e534739b9670c010512487965547 | [
"MIT"
] | null | null | null | # utils for working with 3d-protein structures
import os
import numpy as np
import torch
from functools import wraps
from einops import rearrange, repeat
# import torch_sparse # only needed for sparse nth_deg adj calculation
# bio
from Bio import SeqIO
import itertools
import string
# sidechainnet
from sidechainnet.utils.sequence import ProteinVocabulary, ONE_TO_THREE_LETTER_MAP
from sidechainnet.utils.measure import GLOBAL_PAD_CHAR
from sidechainnet.structure.build_info import NUM_COORDS_PER_RES, BB_BUILD_INFO, SC_BUILD_INFO
from sidechainnet.structure.StructureBuilder import _get_residue_build_iter
# build vocabulary
VOCAB = ProteinVocabulary()
# constants
import alphafold2_pytorch.constants as constants
# helpers
# constants: same as in alphafold2.py
DISTANCE_THRESHOLDS = torch.linspace(2, 20, steps = constants.DISTOGRAM_BUCKETS)
# distance binning function
# decorators
def expand_arg_dims(dim_len = 3):
""" pack here for reuse.
turns input into (B x D x N)
"""
return outer
# preprocess data
def get_atom_ids_dict():
""" Get's a dict mapping each atom to a token. """
ids = set(["", "N", "CA", "C", "O"])
for k,v in SC_BUILD_INFO.items():
for name in v["atom-names"]:
ids.add(name)
return {k: i for i,k in enumerate(sorted(ids))}
def make_cloud_mask(aa):
""" relevent points will be 1. paddings will be 0. """
mask = np.zeros(14)
# early stop if padding token
if aa == "_":
return mask
# get num of atoms in aa
n_atoms = 4+len( SC_BUILD_INFO[ ONE_TO_THREE_LETTER_MAP[aa] ]["atom-names"] )
mask[:n_atoms] = 1
return mask
def make_atom_id_embedds(aa, atom_ids):
""" Return the tokens for each atom in the aa. """
mask = np.zeros(14)
# early stop if padding token
if aa == "_":
return mask
# get atom id
atom_list = ["N", "CA", "C", "O"] + SC_BUILD_INFO[ ONE_TO_THREE_LETTER_MAP[aa] ]["atom-names"]
for i,atom in enumerate(atom_list):
mask[i] = ATOM_IDS[atom]
return mask
ATOM_IDS = get_atom_ids_dict()
CUSTOM_INFO = {k: {"cloud_mask": make_cloud_mask(k),
"atom_id_embedd": make_atom_id_embedds(k, atom_ids=ATOM_IDS),
} for k in "ARNDCQEGHILKMFPSTWYV_"}
#common utils
# parsing to pdb for easier visualization - other example from sidechainnet is:
# https://github.com/jonathanking/sidechainnet/tree/master/sidechainnet/structure
def download_pdb(name, route):
""" Downloads a PDB entry from the RCSB PDB.
Inputs:
* name: str. the PDB entry id. 4 characters, capitalized.
* route: str. route of the destin file. usually ".pdb" extension
Output: route of destin file
"""
os.system(f"curl https://files.rcsb.org/download/{name}.pdb > {route}")
return route
def clean_pdb(name, route=None, chain_num=None):
""" Cleans the structure to only leave the important part.
Inputs:
* name: str. route of the input .pdb file
* route: str. route of the output. will overwrite input if not provided
* chain_num: int. index of chain to select (1-indexed as pdb files)
Output: route of destin file.
"""
import mdtraj
destin = route if route is not None else name
# read input
raw_prot = mdtraj.load_pdb(name)
# iterate over prot and select the specified chains
idxs = []
for chain in raw_prot.topology.chains:
# if arg passed, only select that chain
if chain_num is not None:
if chain_num != chain.index:
continue
# select indexes of chain
chain_idxs = raw_prot.topology.select(f"chainid == {str(chain.index)}")
idxs.extend( chain_idxs.tolist() )
# sort: topology and xyz selection are ordered
idxs = sorted(idxs)
# get new trajectory from the sleected subset of indexes and save
prot = mdtraj.Trajectory(xyz=raw_prot.xyz[:, idxs],
topology=raw_prot.topology.subset(idxs))
prot.save(destin)
return destin
def custom2pdb(coords, proteinnet_id, route):
""" Takes a custom representation and turns into a .pdb file.
Inputs:
* coords: array/tensor of shape (3 x N) or (N x 3). in Angstroms.
same order as in the proteinnnet is assumed (same as raw pdb file)
* proteinnet_id: str. proteinnet id format (<class>#<pdb_id>_<chain_number>_<chain_id>)
see: https://github.com/aqlaboratory/proteinnet/
* route: str. destin route.
Output: tuple of routes: (original, generated) for the structures.
"""
import mdtraj
# convert to numpy
if isinstance(coords, torch.Tensor):
coords = coords.detach().cpu().numpy()
# ensure (1, N, 3)
if coords.shape[1] == 3:
coords = coords.T
coords = np.newaxis(coords, axis=0)
# get pdb id and chain num
pdb_name, chain_num = proteinnet_id.split("#")[-1].split("_")[:-1]
pdb_destin = "/".join(route.split("/")[:-1])+"/"+pdb_name+".pdb"
# download pdb file and select appropiate
download_pdb(pdb_name, pdb_destin)
clean_pdb(pdb_destin, chain_num=chain_num)
# load trajectory scaffold and replace coordinates - assumes same order
scaffold = mdtraj.load_pdb(pdb_destin)
scaffold.xyz = coords
scaffold.save(route)
return pdb_destin, route
def coords2pdb(seq, coords, cloud_mask, prefix="", name="af2_struct.pdb"):
""" Turns coordinates into PDB files ready to be visualized.
Inputs:
* seq: (L,) tensor of ints (sidechainnet aa-key pairs)
* coords: (3, N) coords of atoms
* cloud_mask: (L, C) boolean mask of occupied spaces in scn format
* prefix: str. directory to save files.
* name: str. name of destin file (ex: pred1.pdb)
"""
scaffold = torch.zeros( cloud_mask.shape, 3 )
scaffold[cloud_mask] = coords.cpu().float()
# build structures and save
pred = scn.StructureBuilder( seq, crd=scaffold )
pred.to_pdb(prefix+name)
#adapted from https://github.com/facebookresearch/esm
def remove_insertions(sequence: str) -> str:
""" Removes any insertions into the sequence. Needed to load aligned sequences in an MSA. """
deletekeys = dict.fromkeys(string.ascii_lowercase)
deletekeys["."] = None
deletekeys["*"] = None
translation = str.maketrans(deletekeys)
return sequence.translate(translation)
def read_msa(filename: str, nseq: int):
""" Reads the first nseq sequences from an MSA file, automatically removes insertions."""
return [(record.description, remove_insertions(str(record.seq)))
for record in itertools.islice(SeqIO.parse(filename, "fasta"), nseq)]
# sidechainnet / MSA / other data utils
def ids_to_embed_input(x):
""" Returns the amino acid string input for calculating the ESM and MSA transformer embeddings
Inputs:
* x: any deeply nested list of integers that correspond with amino acid id
"""
assert isinstance(x, list), 'input must be a list'
id2aa = VOCAB._int2char
out = []
for el in x:
if isinstance(el, list):
out.append(ids_to_embed_input(el))
elif isinstance(el, int):
out.append(id2aa[el])
else:
raise TypeError('type must be either list or character')
if all(map(lambda c: isinstance(c, str), out)):
return (None, ''.join(out))
return out
def get_msa_embedd(msa, embedd_model, batch_converter, device = None):
""" Returns the MSA_tr embeddings for a protein.
Inputs:
* seq: ( (b,) L,) tensor of ints (in sidechainnet int-char convention)
* embedd_model: MSA_tr model (see train_end2end.py for an example)
* batch_converter: MSA_tr batch converter (see train_end2end.py for an example)
Outputs: tensor of (batch, n_seqs, L, embedd_dim)
* n_seqs: number of sequences in the MSA
* embedd_dim: number of embedding dimensions. 768 for MSA_Transformer
"""
#use MSA transformer
REPR_LAYER_NUM = 12
device = embedd_model.device
max_seq_len = msa.shape[-1]
embedd_inputs = ids_to_embed_input(msa.cpu().tolist())
msa_batch_labels, msa_batch_strs, msa_batch_tokens = batch_converter(embedd_inputs)
with torch.no_grad():
results = embedd_model(msa_batch_tokens.to(device), repr_layers=[REPR_LAYER_NUM], return_contacts=False)
# index 0 is for start token. so take from 1 one
token_reps = results["representations"][REPR_LAYER_NUM][..., 1:, :]
return token_reps
def get_esm_embedd(seq, embedd_model, batch_converter, msa_data=None):
""" Returns the ESM embeddings for a protein.
Inputs:
* seq: ( (b,) L,) tensor of ints (in sidechainnet int-char convention)
* embedd_model: ESM model (see train_end2end.py for an example)
* batch_converter: ESM batch converter (see train_end2end.py for an example)
Outputs: tensor of (batch, n_seqs, L, embedd_dim)
* n_seqs: number of sequences in the MSA. 1 for ESM-1b
* embedd_dim: number of embedding dimensions. 1280 for ESM-1b
"""
#use ESM transformer
device = embedd_model.device
REPR_LAYER_NUM = 33
max_seq_len = seq.shape[-1]
embedd_inputs = ids_to_embed_input(seq.cpu().tolist())
batch_labels, batch_strs, batch_tokens = batch_converter(embedd_inputs)
with torch.no_grad():
results = embedd_model(batch_tokens.to(device), repr_layers=[REPR_LAYER_NUM], return_contacts=False)
# index 0 is for start token. so take from 1 one
token_reps = results["representations"][REPR_LAYER_NUM][..., 1:, :].unsqueeze(dim=1)
return token_reps
def get_all_protein_ids(dataloader, verbose=False):
""" Given a sidechainnet dataloader for a CASP version,
Returns all the ids belonging to proteins.
Inputs:
* dataloader: a sidechainnet dataloader for a CASP version
Outputs: a set containing the ids for all protein entries.
"""
# store ids here
ids = set([])
#iterate for all batches
for i,batch in tqdm(enumerate(dataloaders['train'])):
# for breaking from 2 loops at once
try:
for i in range(batch.int_seqs.shape[0]):
# check if all fragments are : 4_LETTER_PDB + NUM + CHAIN
max_len_10 = len(batch.pids[i]) < 10
fragments = [len(x) <= 4 for x in batch.pids[i].split("_")]
fragments_under_4 = sum(fragments) == len(fragments) # AND CONDITION
# record id
if max_len_10 and fragments_under_4:
ids.add(batch.pids[i])
else:
if verbose:
print("skip:", batch.pids[i], "under 4", fragments)
except StopIteration:
break
#returns set of ids
return ids
def scn_cloud_mask(scn_seq, boolean=True, coords=None):
""" Gets the boolean mask atom positions (not all aas have same atoms).
Inputs:
* scn_seq: (batch, length) sequence as provided by Sidechainnet package
* boolean: whether to return as array of idxs or boolean values
* coords: optional .(batch, lc, 3). sidechainnet coords.
returns the true mask (solves potential atoms that might not be provided)
Outputs: (batch, length, NUM_COORDS_PER_RES) boolean mask
"""
scn_seq = expand_dims_to(scn_seq, 2 - len(scn_seq.shape))
# early check for coords mask
if coords is not None:
batch_mask = ( rearrange(coords, '... (l c) d -> ... l c d', c=14) == 0 ).sum(dim=-1) < coords.shape[-1]
if boolean:
return batch_mask.bool()
else:
return batch_mask.nonzero()
# do loop in cpu
device = scn_seq.device
batch_mask = []
scn_seq = scn_seq.cpu().tolist()
for i, seq in enumerate(scn_seq):
# get masks for each prot (points for each aa)
batch_mask.append( torch.tensor([CUSTOM_INFO[VOCAB.int2char(aa)]['cloud_mask'] \
for aa in seq]).bool().to(device).unsqueeze(0) )
# concat in last dim
batch_mask = torch.cat(batch_mask, dim=0)
# return mask (boolean or indexes)
if boolean:
return batch_mask.bool()
else:
return batch_mask.nonzero()
def scn_backbone_mask(scn_seq, boolean=True, n_aa=3):
""" Gets the boolean mask for N and CA positions.
Inputs:
* scn_seq: sequence(s) as provided by Sidechainnet package (int tensor/s)
* n_aa: number of atoms in a backbone. (may include cbeta as 4th pos)
* bool: whether to return as array of idxs or boolean values
Outputs: (N_mask, CA_mask, C_mask)
"""
wrapper = torch.zeros(*scn_seq.shape, n_aa).to(scn_seq.device)
# N is the first atom in every AA. CA is the 2nd.
wrapper[..., 0] = 1
wrapper[..., 1] = 2
wrapper[..., 2] = 3
wrapper = rearrange(wrapper, '... l c -> ... (l c)')
# find idxs
N_mask = wrapper == 1
CA_mask = wrapper == 2
C_mask = wrapper == 3
if boolean:
return N_mask, CA_mask, C_mask
return torch.nonzero(N_mask), torch.nonzero(CA_mask), torch.nonzero(C_mask)
def scn_atom_embedd(scn_seq):
""" Returns the token for each atom in the aa.
Inputs:
* scn_seq: sequence(s) as provided by Sidechainnet package (int tensor/s)
"""
device = scn_seq.device
batch_tokens = []
# do loop in cpu
scn_seq = scn_seq.cpu()
for i,seq in enumerate(scn_seq):
batch_tokens.append( torch.tensor([CUSTOM_INFO[VOCAB.int2char(aa.item())]["atom_id_embedd"] \
for aa in seq]).long().to(device).unsqueeze(0) )
batch_tokens = torch.cat(batch_tokens, dim=0)
return batch_tokens
def nth_deg_adjacency(adj_mat, n=1, sparse=False):
""" Calculates the n-th degree adjacency matrix.
Performs mm of adj_mat and adds the newly added.
Default is dense. Mods for sparse version are done when needed.
Inputs:
* adj_mat: (N, N) adjacency tensor
* n: int. degree of the output adjacency
* sparse: bool. whether to use torch-sparse module
Outputs:
* edge_idxs: ij positions of the adjacency matrix
* edge_attrs: degree of connectivity (1 for neighs, 2 for neighs^2, ... )
"""
adj_mat = adj_mat.float()
attr_mat = torch.zeros_like(adj_mat)
new_adj_mat = adj_mat.clone()
for i in range(n):
if i == 0:
attr_mat += adj_mat
continue
if i == 1 and sparse:
idxs = adj_mat.nonzero().t()
vals = adj_mat[idxs[0], idxs[1]]
new_idxs = idxs.clone()
new_vals = vals.clone()
m, k, n = 3 * [adj_mat.shape[0]] #(m, n) * (n, k) , but adj_mats are squared: m=n=k
if sparse:
new_idxs, new_vals = torch_sparse.spspmm(new_idxs, new_vals, idxs, vals, m=m, k=k, n=n)
new_vals = new_vals.bool().float()
new_adj_mat = torch.zeros_like(attr_mat)
new_adj_mat[new_idxs[0], new_idxs[1]] = new_vals
# sparse to dense is slower
# torch.sparse.FloatTensor(idxs, vals).to_dense()
else:
new_adj_mat = (new_adj_mat @ adj_mat).bool().float()
attr_mat.masked_fill( (new_adj_mat - attr_mat.bool().float()).bool(), i+1 )
return new_adj_mat, attr_mat
def prot_covalent_bond(seqs, adj_degree=1, cloud_mask=None, mat=True):
""" Returns the idxs of covalent bonds for a protein.
Inputs
* seq: (b, n) torch long.
* adj_degree: int. adjacency degree
* cloud_mask: mask selecting the present atoms.
* mat: whether to return as indexes or matrices.
for indexes, only 1 seq is supported
Outputs: edge_idxs, edge_attrs
"""
device = seqs.device
# get starting poses for every aa
adj_mat = torch.zeros(seqs.shape[0], seqs.shape[1]*14, seqs.shape[1]*14)
# not needed to device since it's only for indices.
scaff = torch.zeros(seqs.shape[1], 14)
scaff[:, 0] = 1
idxs = torch.nonzero(scaff).reshape(-1)
for s,seq in enumerate(seqs):
for i,idx in enumerate(idxs):
if i >= seq.shape[0]:
break
# offset by pos in chain ( intra-aa bonds + with next aa )
bonds = idx + torch.tensor( constants.AA_DATA[VOCAB.int2char(seq[i].item())]['bonds'] + [[2, 14]] ).t()
# delete link with next if final AA in seq
if i == idxs.shape[0]-1:
bonds = bonds[:, :-1]
# modify adj mat
adj_mat[s, bonds[0], bonds[1]] = 1
#convert to undirected
adj_mat[s] = adj_mat[s] + adj_mat[s].t()
# do N_th degree adjacency
adj_mat, attr_mat = nth_deg_adjacency(adj_mat, n=adj_degree, sparse=False) # True
if mat:
return attr_mat.bool().to(seqs.device), attr_mat.to(device)
else:
edge_idxs = attr_mat[0].nonzero().t().long()
edge_attrs = attr_mat[0, edge_idxs[0], edge_idxs[1]]
return edge_idxs.to(seqs.device), edge_attrs.to(seqs.device)
def nerf_torch(a, b, c, l, theta, chi):
""" Custom Natural extension of Reference Frame.
Inputs:
* a: (batch, 3) or (3,). point(s) of the plane, not connected to d
* b: (batch, 3) or (3,). point(s) of the plane, not connected to d
* c: (batch, 3) or (3,). point(s) of the plane, connected to d
* theta: (batch,) or (float). angle(s) between b-c-d
* chi: (batch,) or float. dihedral angle(s) between the a-b-c and b-c-d planes
Outputs: d (batch, 3) or (3,). the next point in the sequence, linked to c
"""
#safety check
if not ( (-np.pi <= theta) * (theta <= np.pi) ).all().item():
raise ValueError(f"theta(s) must be in radians and in [-pi, pi]. theta(s) = {theta}")
# calc vecs
ba = b-a
cb = c-b
# calc rotation matrix. based on plane normals and normalized
n_plane = torch.cross(ba, cb, dim=-1)
n_plane_ = torch.cross(n_plane, cb, dim=-1)
rotate = torch.stack([cb, n_plane_, n_plane], dim=-1)
rotate /= torch.norm(rotate, dim=-2, keepdim=True)
# calc proto point, rotate
d = torch.stack([-torch.cos(theta),
torch.sin(theta) * torch.cos(chi),
torch.sin(theta) * torch.sin(chi)], dim=-1).unsqueeze(-1)
# extend base point, set length
return c + l.unsqueeze(-1) * torch.matmul(rotate, d).squeeze()
def sidechain_container(backbones, n_aa, cloud_mask=None, place_oxygen=False,
n_atoms=NUM_COORDS_PER_RES, padding=GLOBAL_PAD_CHAR):
""" Gets a backbone of the protein, returns the whole coordinates
with sidechains (same format as sidechainnet). Keeps differentiability.
Inputs:
* backbones: (batch, L*3, 3): assume batch=1 (could be extended later).
Coords for (N-term, C-alpha, C-term) of every aa.
* n_aa: int. number of points for each aa in the backbones.
* cloud_mask: (batch, l, c). optional. cloud mask from scn_cloud_mask`.
returns point outside to 0. if passed, else c_alpha
* place_oxygen: whether to claculate the oxygen of the
carbonyl group via NeRF
* n_atoms: int. n of atom positions / atom. same as in sidechainnet: 14
* padding: int. padding token. same as in sidechainnet: 0
Outputs: whole coordinates of shape (batch, L, n_atoms, 3)
"""
device = backbones.device
batch, length = backbones.shape[0], backbones.shape[1] // n_aa
# build scaffold from (N, CA, C, CB)
new_coords = torch.zeros(batch, length, NUM_COORDS_PER_RES, 3).to(device)
predicted = rearrange(backbones, 'b (l back) d -> b l back d', l=length)
# set backbone positions
new_coords[:, :, :3] = predicted[:, :, :3]
# set rest of positions to c_beta if present, else c_alpha
if n_aa == 4:
new_coords[:, :, 4:] = repeat(predicted[:, :, -1], 'b l d -> b l scn d', scn=10)
else:
new_coords[:, :, 4:] = repeat(new_coords[:, :, 1], 'b l d -> b l scn d', scn=10)
if cloud_mask is not None:
new_coords[torch.logical_not(cloud_mask)] = 0.
# hard-calculate oxygen position of carbonyl group with parallel version of NERF
if place_oxygen:
# build (=O) position of revery aa in each chain
for s in range(batch):
# dihedrals phi=f(c-1, n, ca, c) & psi=f(n, ca, c, n+1)
# phi = get_dihedral_torch(*backbone[s, i*3 - 1 : i*3 + 3]) if i>0 else None
psis = torch.tensor([ get_dihedral_torch(*backbones[s, i*3 + 0 : i*3 + 4] )if i < length-1 else np.pi*5/4 \
for i in range(length) ])
# the angle for placing oxygen is opposite to psi of current res.
# psi not available for last one so pi/4 taken for now
bond_lens = repeat(torch.tensor(BB_BUILD_INFO["BONDLENS"]["c-o"]), ' -> b', b=length).to(psis.device)
bond_angs = repeat(torch.tensor(BB_BUILD_INFO["BONDANGS"]["ca-c-o"]), ' -> b', b=length).to(psis.device)
correction = repeat(torch.tensor(-np.pi), ' -> b', b=length).to(psis.device)
new_coords[:, :, 3] = nerf_torch(new_coords[:, :, 0],
new_coords[:, :, 1],
new_coords[:, :, 2],
bond_lens, bond_angs, psis + correction)
else:
# init oxygen to carbonyl
new_coords[:, :, 3] = predicted[:, :, 2]
return new_coords
# distance utils (distogram to dist mat + masking)
def center_distogram_torch(distogram, bins=DISTANCE_THRESHOLDS, min_t=1., center="mean", wide="std"):
""" Returns the central estimate of a distogram. Median for now.
Inputs:
* distogram: (batch, N, N, B) where B is the number of buckets.
* bins: (B,) containing the cutoffs for the different buckets
* min_t: float. lower bound for distances.
Outputs:
* central: (batch, N, N)
* dispersion: (batch, N, N)
* weights: (batch, N, N)
"""
shape, device = distogram.shape, distogram.device
# threshold to weights and find mean value of each bin
n_bins = ( bins - 0.5 * (bins[2] - bins[1]) ).to(device)
n_bins[0] = 1.5
n_bins[-1] = 1.33*bins[-1] # above last threshold is ignored
max_bin_allowed = torch.tensor(n_bins.shape[0]-1).to(device).long()
# calculate measures of centrality and dispersion -
magnitudes = distogram.sum(dim=-1)
if center == "median":
cum_dist = torch.cumsum(distogram, dim=-1)
medium = 0.5 * cum_dist[..., -1:]
central = torch.searchsorted(cum_dist, medium).squeeze()
central = n_bins[ torch.min(central, max_bin_allowed) ]
elif center == "mean":
central = (distogram * n_bins).sum(dim=-1) / magnitudes
# create mask for last class - (IGNORE_INDEX)
mask = (central <= bins[-2].item()).float()
# mask diagonal to 0 dist - don't do masked filling to avoid inplace errors
diag_idxs = np.arange(shape[-2])
central = expand_dims_to(central, 3 - len(central.shape))
central[:, diag_idxs, diag_idxs] *= 0.
# provide weights
if wide == "var":
dispersion = (distogram * (n_bins - central.unsqueeze(-1))**2).sum(dim=-1) / magnitudes
elif wide == "std":
dispersion = ((distogram * (n_bins - central.unsqueeze(-1))**2).sum(dim=-1) / magnitudes).sqrt()
else:
dispersion = torch.zeros_like(central, device=device)
# rescale to 0-1. lower std / var --> weight=1. set potential nan's to 0
weights = mask / (1+dispersion)
weights[weights != weights] *= 0.
weights[:, diag_idxs, diag_idxs] *= 0.
return central, weights
# distance matrix to 3d coords: https://github.com/scikit-learn/scikit-learn/blob/42aff4e2e/sklearn/manifold/_mds.py#L279
def mds_torch(pre_dist_mat, weights=None, iters=10, tol=1e-5, eigen=False, verbose=2):
""" Gets distance matrix. Outputs 3d. See below for wrapper.
Assumes (for now) distogram is (N x N) and symmetric
Outs:
* best_3d_coords: (batch x 3 x N)
* historic_stresses: (batch x steps)
"""
device, dtype = pre_dist_mat.device, pre_dist_mat.type()
# ensure batched MDS
pre_dist_mat = expand_dims_to(pre_dist_mat, length = ( 3 - len(pre_dist_mat.shape) ))
# start
batch, N, _ = pre_dist_mat.shape
diag_idxs = np.arange(N)
his = [torch.tensor([np.inf]*batch, device=device)]
# initialize by eigendecomposition: https://www.lptmc.jussieu.fr/user/lesne/bioinformatics.pdf
#follow : https://www.biorxiv.org/content/10.1101/2020.11.27.401232v1.full.pdf
D = pre_dist_mat**2
M = 0.5 * (D[:, :1, :] + D[:, :, :1] - D)
# do loop svd bc it's faster: (2-3x in CPU and 1-2x in GPU)
#https://discuss.pytorch.org/t/batched-svd-lowrank-being-much-slower-than-loop-implementation-both-cpu-and-gpu/119336
svds = [torch.svd_lowrank(mi) for mi in M]
u = torch.stack([svd[0] for svd in svds], dim=0)
s = torch.stack([svd[1] for svd in svds], dim=0)
v = torch.stack([svd[2] for svd in svds], dim=0)
best_3d_coords = torch.bmm(u, torch.diag_embed(s).sqrt())[..., :3]
# only eigen - way faster but not weights
if weights is None and eigen==True:
return torch.transpose( best_3d_coords, -1, -2), torch.zeros_like(torch.stack(his, dim=0))
elif eigen==True:
if verbose:
print("Can't use eigen flag if weights are active. Fallback to iterative")
#continue the iterative way
if weights is None:
weights = torch.ones_like(pre_dist_mat)
# iterative updates:
for i in range(iters):
# compute distance matrix of coords and stress
best_3d_coords = best_3d_coords.contiguous()
dist_mat = torch.cdist(best_3d_coords, best_3d_coords, p=2).clone()
stress = ( weights * (dist_mat - pre_dist_mat)**2 ).sum(dim=(-1,-2)) * 0.5
# perturb - update X using the Guttman transform - sklearn-like
dist_mat[ dist_mat <= 0 ] += 1e-7
ratio = weights * (pre_dist_mat / dist_mat)
B = -ratio
B[:, diag_idxs, diag_idxs] += ratio.sum(dim=-1)
# update
coords = (1. / N * torch.matmul(B, best_3d_coords))
dis = torch.norm(coords, dim=(-1, -2))
if verbose >= 2:
print('it: %d, stress %s' % (i, stress))
# update metrics if relative improvement above tolerance
if (his[-1] - stress / dis).mean() <= tol:
if verbose:
print('breaking at iteration %d with stress %s' % (i,
stress / dis))
break
best_3d_coords = coords
his.append( stress / dis )
return torch.transpose(best_3d_coords, -1,-2), torch.stack(his, dim=0)
def mds_numpy(pre_dist_mat, weights=None, iters=10, tol=1e-5, eigen=False, verbose=2):
""" Gets distance matrix. Outputs 3d. See below for wrapper.
Assumes (for now) distrogram is (N x N) and symmetric
Out:
* best_3d_coords: (3 x N)
* historic_stress
"""
if weights is None:
weights = np.ones_like(pre_dist_mat)
# ensure batched MDS
pre_dist_mat = expand_dims_to(pre_dist_mat, length = ( 3 - len(pre_dist_mat.shape) ))
# start
batch, N, _ = pre_dist_mat.shape
his = [np.inf]
# init random coords
best_stress = np.inf * np.ones(batch)
best_3d_coords = 2*np.random.rand(batch, 3, N) - 1
# iterative updates:
for i in range(iters):
# compute distance matrix of coords and stress
dist_mat = np.linalg.norm(best_3d_coords[:, :, :, None] - best_3d_coords[:, :, None, :], axis=-3)
stress = (( weights * (dist_mat - pre_dist_mat) )**2).sum(axis=(-1, -2)) * 0.5
# perturb - update X using the Guttman transform - sklearn-like
dist_mat[dist_mat == 0] = 1e-7
ratio = weights * (pre_dist_mat / dist_mat)
B = -ratio
B[:, np.arange(N), np.arange(N)] += ratio.sum(axis=-1)
# update - double transpose. TODO: consider fix
coords = (1. / N * np.matmul(best_3d_coords, B))
dis = np.linalg.norm(coords, axis=(-1, -2))
if verbose >= 2:
print('it: %d, stress %s' % (i, stress))
# update metrics if relative improvement above tolerance
if (best_stress - stress / dis).mean() <= tol:
if verbose:
print('breaking at iteration %d with stress %s' % (i,
stress / dis))
break
best_3d_coords = coords
best_stress = stress / dis
his.append(best_stress)
return best_3d_coords, np.array(his)
def get_dihedral_torch(c1, c2, c3, c4):
""" Returns the dihedral angle in radians.
Will use atan2 formula from:
https://en.wikipedia.org/wiki/Dihedral_angle#In_polymer_physics
Can't use torch.dot bc it does not broadcast
Inputs:
* c1: (batch, 3) or (3,)
* c1: (batch, 3) or (3,)
* c1: (batch, 3) or (3,)
* c1: (batch, 3) or (3,)
"""
u1 = c2 - c1
u2 = c3 - c2
u3 = c4 - c3
return torch.atan2( ( (torch.norm(u2, dim=-1, keepdim=True) * u1) * torch.cross(u2,u3, dim=-1) ).sum(dim=-1) ,
( torch.cross(u1,u2, dim=-1) * torch.cross(u2, u3, dim=-1) ).sum(dim=-1) )
def get_dihedral_numpy(c1, c2, c3, c4):
""" Returns the dihedral angle in radians.
Will use atan2 formula from:
https://en.wikipedia.org/wiki/Dihedral_angle#In_polymer_physics
Inputs:
* c1: (batch, 3) or (3,)
* c1: (batch, 3) or (3,)
* c1: (batch, 3) or (3,)
* c1: (batch, 3) or (3,)
"""
u1 = c2 - c1
u2 = c3 - c2
u3 = c4 - c3
return np.arctan2( ( (np.linalg.norm(u2, axis=-1, keepdims=True) * u1) * np.cross(u2,u3, axis=-1)).sum(axis=-1),
( np.cross(u1,u2, axis=-1) * np.cross(u2, u3, axis=-1) ).sum(axis=-1) )
def calc_phis_torch(pred_coords, N_mask, CA_mask, C_mask=None,
                    prop=True, verbose=0):
    """ Filters mirrors selecting the 1 with most N of negative phis.
        Used as part of the MDScaling wrapper if arg is passed. See below.
        Angle Phi between planes: (Cterm{-1}, N, Ca{0}) and (N{0}, Ca{+1}, Cterm{+1})
        Inputs:
        * pred_coords: (batch, 3, N) predicted coordinates
        * N_mask: (batch, N) boolean mask for N-term positions
        * CA_mask: (batch, N) boolean mask for C-alpha positions
        * C_mask: (batch, N) or None. boolean mask for C-term (carbonyl C)
                  positions, or automatically calculated from N_mask and
                  CA_mask if None (everything that is neither N nor CA).
        * prop: bool. whether to return as a proportion of negative phis.
        * verbose: bool. verbosity level
        Output: (batch, N) containing the phi angles or (batch,) containing
                the proportions.
        Note: use [0] since all prots in batch have same backbone
    """
    # detach gradients for angle calculation - mirror selection only, no backprop
    # (also moved to CPU; presumably cheaper for the small dihedral math - TODO confirm)
    pred_coords_ = torch.transpose(pred_coords.detach(), -1 , -2).cpu()
    # ensure dims: promote masks to 2-d (batch, N) if a 1-d (N,) mask was passed
    # (expand_dims_to is a project helper defined elsewhere in this file)
    N_mask = expand_dims_to( N_mask, 2-len(N_mask.shape) )
    CA_mask = expand_dims_to( CA_mask, 2-len(CA_mask.shape) )
    if C_mask is not None:
        C_mask = expand_dims_to( C_mask, 2-len(C_mask.shape) )
    else:
        # infer C-term positions as "neither N nor CA"
        C_mask = torch.logical_not(torch.logical_or(N_mask,CA_mask))
    # select points; index with mask [0] since all prots in batch share a backbone
    n_terms = pred_coords_[:, N_mask[0].squeeze()]
    c_alphas = pred_coords_[:, CA_mask[0].squeeze()]
    c_terms = pred_coords_[:, C_mask[0].squeeze()]
    # compute phis for every protein in the batch
    # chain per residue i: Cterm{i-1}, N{i}, Ca{i}, Cterm{i} (hence the :-1 / 1: offsets)
    phis = [get_dihedral_torch(c_terms[i, :-1],
                               n_terms[i, 1:],
                               c_alphas[i, 1:],
                               c_terms[i, 1:]) for i in range(pred_coords.shape[0])]
    # return percentage of lower than 0
    if prop:
        return torch.tensor( [(x<0).float().mean().item() for x in phis] )
    return phis
def calc_phis_numpy(pred_coords, N_mask, CA_mask, C_mask=None,
                    prop=True, verbose=0):
    """ Filters mirrors selecting the 1 with most N of negative phis.
        Used as part of the MDScaling wrapper if arg is passed. See below.
        Angle Phi between planes: (Cterm{-1}, N, Ca{0}) and (N{0}, Ca{+1}, Cterm{+1})
        Inputs:
        * pred_coords: (batch, 3, N) predicted coordinates
        * N_mask: (N, ) boolean mask for N-term positions
        * CA_mask: (N, ) boolean mask for C-alpha positions
        * C_mask: (N, ) or None. boolean mask for C-term positions, or
                  inferred from N_mask and CA_mask if None.
        * prop: bool. whether to return as a proportion of negative phis.
        * verbose: bool. verbosity level
        Output: (batch, N) containing the phi angles or (batch,) containing
                the proportions.
    """
    # (batch, 3, N) -> (batch, N, 3) so atoms are indexed along axis 1
    coords = np.transpose(pred_coords, (0, 2, 1))
    nitrogens = coords[:, N_mask.squeeze()]
    alphas = coords[:, CA_mask.squeeze()]
    if C_mask is None:
        # infer carbonyl C positions as "everything that is neither N nor CA"
        inferred = (np.ones_like(N_mask) - N_mask - CA_mask).squeeze().astype(bool)
        carbonyls = coords[:, inferred]
    else:
        carbonyls = coords[:, C_mask]
    # one dihedral series per protein in the batch
    phis = []
    for b in range(pred_coords.shape[0]):
        phis.append(get_dihedral_numpy(carbonyls[b, :-1],
                                       nitrogens[b, 1:],
                                       alphas[b, 1:],
                                       carbonyls[b, 1:]))
    # optionally collapse each series to its fraction of negative angles
    if prop:
        return np.array([np.mean(angles < 0) for angles in phis])
    return phis
#alignment by centering + rotation to compute optimal RMSD
#adapted from : https://github.com/charnley/rmsd/
def kabsch_torch(X, Y, cpu=True):
    """ Kabsch alignment of X into Y.
        Assumes X,Y are both (Dims x N_points). See below for wrapper.
        Inputs:
        * X: (D, N) tensor. structure to rotate.
        * Y: (D, N) tensor. reference structure.
        * cpu: bool. whether to run the SVD on CPU.
        Outputs: (X aligned to Y, Y), both centered at the origin.
    """
    device = X.device
    # center X and Y to the origin
    X_ = X - X.mean(dim=-1, keepdim=True)
    Y_ = Y - Y.mean(dim=-1, keepdim=True)
    # calculate covariance matrix (for each prot in the batch)
    C = torch.matmul(X_, Y_.t()).detach()
    if cpu:
        C = C.cpu()
    # Optimal rotation matrix via SVD.
    # bugfix: compare (major, minor) instead of only the minor version -
    # the old `int(torch.__version__.split(".")[1]) < 8` check sent
    # torch 2.x (e.g. "2.1" -> minor 1 < 8) down the deprecated branch.
    version = tuple(int(v) for v in torch.__version__.split("+")[0].split(".")[:2])
    if version < (1, 8):
        # warning! torch < 1.8: torch.svd returns V (not V^T), so W must be transposed
        V, S, W = torch.svd(C)
        W = W.t()
    else:
        V, S, W = torch.linalg.svd(C)
    # determinant sign for direction correction (avoid improper rotations / reflections)
    d = (torch.det(V) * torch.det(W)) < 0.0
    if d:
        S[-1] = S[-1] * (-1)
        V[:, -1] = V[:, -1] * (-1)
    # Create Rotation matrix U
    U = torch.matmul(V, W).to(device)
    # calculate rotations
    X_ = torch.matmul(X_.t(), U).t()
    # return centered and aligned
    return X_, Y_
def kabsch_numpy(X, Y):
    """ Kabsch alignment of X into Y.
        Assumes X,Y are both (Dims x N_points). See below for wrapper.
        Returns (X aligned to Y, Y), both centered at the origin.
    """
    # remove centroids so only a rotation is left to solve for
    Xc = X - X.mean(axis=-1, keepdims=True)
    Yc = Y - Y.mean(axis=-1, keepdims=True)
    # covariance between the two centered point clouds
    cov = np.dot(Xc, Yc.transpose())
    # optimal rotation from the SVD of the covariance
    V, S, W = np.linalg.svd(cov)
    # flip the last singular vector if the product would be a reflection
    if np.linalg.det(V) * np.linalg.det(W) < 0.0:
        S[-1] = S[-1] * (-1)
        V[:, -1] = V[:, -1] * (-1)
    # rotation matrix and its application to the centered cloud
    rotation = np.dot(V, W)
    Xc = np.dot(Xc.T, rotation).T
    return Xc, Yc
# metrics - more formulas here: http://predictioncenter.org/casp12/doc/help.html
def distmat_loss_torch(X=None, Y=None, X_mat=None, Y_mat=None, p=2, q=2, custom=None, distmat_mask=None):
    """ Calculates a loss on the distance matrix - no need to align structs.
        Inputs:
        * X: (N, d) tensor. the predicted structure. One of (X, X_mat) is needed.
        * X_mat: (N, N) tensor. the predicted distance matrix. Optional ()
        * Y: (N, d) tensor. the true structure. One of (Y, Y_mat) is needed.
        * Y_mat: (N, N) tensor. the true distance matrix. Optional ()
        * p: int. power for the distance calculation (2 for euclidean)
        * q: float. power for the scaling of the loss (2 for MSE, 1 for MAE, etc)
        * custom: func or None. custom elementwise loss over distance matrices.
                  ex: lambda x,y: 1 - 1/ (1 + ((x-y))**2) (1 is very bad. 0 is good)
        * distmat_mask: (N, N) mask (boolean or weights for each ij pos). optional.
        Output: scalar tensor. (masked) mean of the elementwise loss.
    """
    assert (X is not None or X_mat is not None) and \
           (Y is not None or Y_mat is not None), "The true and predicted coords or dist mats must be provided"
    # calculate distance matrices
    if X_mat is None:
        X_mat = torch.cdist(X, X, p=p)
    if Y_mat is None:
        Y_mat = torch.cdist(Y, Y, p=p)
    if distmat_mask is None:
        distmat_mask = torch.ones_like(Y_mat).bool()
    # do custom expression if passed
    if custom is not None:
        # bugfix: this branch used to compute a value and fall through
        # without returning (the function returned None). Keep the custom
        # loss elementwise so the mask applies uniformly below.
        loss = custom(X_mat, Y_mat)
    # **2 ensures always positive. Later scale back to desired power
    else:
        loss = ( X_mat - Y_mat )**2
        if q != 2:
            loss = loss**(q/2)
    return loss[distmat_mask].mean()
def rmsd_torch(X, Y):
    """ Root-mean-square deviation. Assumes x,y are both (B x D x N). See below for wrapper. """
    squared_err = (X - Y) ** 2
    return torch.mean(squared_err, dim=(-1, -2)).sqrt()
def rmsd_numpy(X, Y):
    """ Root-mean-square deviation. Assumes x,y are both (B x D x N). See below for wrapper. """
    squared_err = np.square(X - Y)
    return np.sqrt(squared_err.mean(axis=(-1, -2)))
def gdt_torch(X, Y, cutoffs, weights=None):
    """ Global Distance Test. Assumes x,y are both (B x D x N). see below for wrapper.
        * cutoffs is a list of `K` thresholds
        * weights is a list of `K` weights (1 x each threshold)
        Output: (B,) weighted mean over thresholds of the fraction of
        points whose deviation is within each cutoff.
    """
    device = X.device
    if weights is None:
        # bugfix: allocate default weights on X's device (they were
        # CPU-only, which crashed `GDT * weights` for CUDA inputs)
        weights = torch.ones(1, len(cutoffs), device=device)
    else:
        weights = torch.tensor([weights]).to(device)
    # set zeros and fill with values
    GDT = torch.zeros(X.shape[0], len(cutoffs), device=device)
    # per-point euclidean deviation between matched points
    dist = ((X - Y)**2).sum(dim=1).sqrt()
    # iterate over thresholds: fraction of points within each cutoff
    for i, cutoff in enumerate(cutoffs):
        GDT[:, i] = (dist <= cutoff).float().mean(dim=-1)
    # weighted mean over thresholds
    return (GDT * weights).mean(-1)
def gdt_numpy(X, Y, cutoffs, weights=None):
    """ Global Distance Test. Assumes x,y are both (B x D x N). see below for wrapper.
        * cutoffs is a list of `K` thresholds
        * weights is a list of `K` weights (1 x each threshold)
    """
    if weights is None:
        weights = np.ones((1, len(cutoffs)))
    else:
        weights = np.array([weights])
    # per-point euclidean deviation between matched points
    per_point = np.sqrt(np.square(X - Y).sum(axis=1))
    # fraction of points within each cutoff, one column per threshold
    scores = np.zeros((X.shape[0], len(cutoffs)))
    for col, thresh in enumerate(cutoffs):
        scores[:, col] = (per_point <= thresh).mean(axis=-1)
    # weighted mean over thresholds
    return (scores * weights).mean(-1)
def tmscore_torch(X, Y):
    """ Template Modeling score. Assumes x,y are both (B x D x N). see below for wrapper. """
    n_points = X.shape[-1]
    # length-dependent normalization scale (standard TM-score d0)
    d0 = 1.24 * np.cbrt(n_points - 15) - 1.8
    # per-point euclidean deviation
    deviation = torch.sqrt(torch.sum((X - Y)**2, dim=1))
    # formula (see wrapper for source): mean of 1 / (1 + (d/d0)^2)
    return (1 / (1 + (deviation / d0)**2)).mean(dim=-1)
def tmscore_numpy(X, Y):
    """ Template Modeling score. Assumes x,y are both (B x D x N). see below for wrapper. """
    n_points = X.shape[-1]
    # length-dependent normalization scale (standard TM-score d0)
    d0 = 1.24 * np.cbrt(n_points - 15) - 1.8
    # per-point euclidean deviation
    deviation = np.sqrt(np.square(X - Y).sum(axis=1))
    # formula (see wrapper for source): mean of 1 / (1 + (d/d0)^2)
    return (1.0 / (1.0 + np.square(deviation / d0))).mean(axis=-1)
def mdscaling_torch(pre_dist_mat, weights=None, iters=10, tol=1e-5,
                    fix_mirror=True, N_mask=None, CA_mask=None, C_mask=None,
                    eigen=False, verbose=2):
    """ Handles the specifics of MDS for proteins (mirrors, ...) """
    # run the batched MDS solver for full parallelism
    preds, stresses = mds_torch(pre_dist_mat, weights=weights, iters=iters,
                                tol=tol, eigen=eigen, verbose=verbose)
    if not fix_mirror:
        return preds, stresses
    # mirrored structures show mostly positive phis; find and flip their Z axis
    # (no need to generate multiple mirrors explicitly)
    phi_ratios = calc_phis_torch(preds, N_mask, CA_mask, C_mask, prop=True)
    mirrored = torch.nonzero(phi_ratios < 0.5).view(-1)
    preds[mirrored, -1] = -preds[mirrored, -1]
    if verbose == 2:
        print("Corrected mirror idxs:", mirrored)
    return preds, stresses
def mdscaling_numpy(pre_dist_mat, weights=None, iters=10, tol=1e-5,
                    fix_mirror=True, N_mask=None, CA_mask=None, C_mask=None, verbose=2):
    """ Handles the specifics of MDS for proteins (mirrors, ...) """
    # batched mds for full parallel
    preds, stresses = mds_numpy(pre_dist_mat, weights=weights,iters=iters,
                                tol=tol, verbose=verbose)
    if not fix_mirror:
        return preds, stresses
    # no need to caculate multiple mirrors - just correct Z axis
    phi_ratios = calc_phis_numpy(preds, N_mask, CA_mask, C_mask, prop=True)
    for i, pred in enumerate(preds):
        # fix mirrors by (-1)*Z if more (+) than (-) phi angles
        # bugfix: index this structure's ratio - `if phi_ratios < 0.5`
        # compared the whole (batch,) array, which raises an ambiguity
        # error for batch > 1 (the torch twin indexes per-structure too)
        if phi_ratios[i] < 0.5:
            preds[i, -1] = (-1)*preds[i, -1]
            if verbose == 2:
                print("Corrected mirror in struct no.", i)
    return preds, stresses
def lddt_ca_torch(true_coords, pred_coords, cloud_mask, r_0=15.):
    """ Computes the lddt score for each C_alpha.
        https://academic.oup.com/bioinformatics/article/29/21/2722/195896
        Inputs:
        * true_coords: (b, l, c, d) in sidechainnet format.
        * pred_coords: (b, l, c, d) in sidechainnet format.
        * cloud_mask : (b, l, c) adapted for scn format.
        * r_0: float. maximum inclusion radius in reference struct.
        Outputs:
        * (b, l) lddt for c_alpha scores (ranging between 0 and 1)
        See wrapper below.
    """
    device, dtype = true_coords.device, true_coords.type()
    # the 4 lDDT tolerance thresholds (Angstroms, per the paper)
    thresholds = torch.tensor([0.5, 1, 2, 4], device=device).type(dtype)
    # adapt masks
    cloud_mask = cloud_mask.bool().cpu()
    # atom index 1 within each residue is selected as C-alpha
    # (sidechainnet atom ordering - TODO confirm against scn docs)
    c_alpha_mask = torch.zeros(cloud_mask.shape[1:], device=device).bool() # doesn't have batch dim
    c_alpha_mask[..., 1] = True
    # container for c_alpha scores (between 0,1)
    wrapper = torch.zeros(true_coords.shape[:2], device=device).type(dtype)
    for bi, seq in enumerate(true_coords):
        # select atoms for study: present atoms that are also C-alphas
        c_alphas = cloud_mask[bi]*c_alpha_mask #only pick c_alpha positions
        selected_pred = pred_coords[bi, c_alphas, :]
        selected_target = true_coords[bi, c_alphas, :]
        # get number under distance
        dist_mat_pred = torch.cdist(selected_pred, selected_pred, p=2)
        dist_mat_target = torch.cdist(selected_target, selected_target, p=2)
        # only pairs within r_0 in the *reference* structure are scored
        under_r0_target = dist_mat_target < r_0
        compare_dists = torch.abs(dist_mat_pred - dist_mat_target)[under_r0_target]
        # measure diff below threshold
        score = torch.zeros_like(under_r0_target).float()
        max_score = torch.zeros_like(under_r0_target).float()
        # each scored pair can satisfy at most 4 thresholds
        max_score[under_r0_target] = 4.
        #measure under how many thresholds each pair's deviation falls
        # (bucketize returns the count of thresholds the deviation exceeds)
        score[under_r0_target] = thresholds.shape[0] - \
                                 torch.bucketize( compare_dists, boundaries=thresholds ).float()
        # dont include diagonal: each row's diagonal pair has deviation 0 and
        # contributes exactly thresholds.shape[0] to both sums, so subtract it
        l_mask = c_alphas.float().sum(dim=-1).bool()
        wrapper[bi, l_mask] = ( score.sum(dim=-1) - thresholds.shape[0] ) / \
                              ( max_score.sum(dim=-1) - thresholds.shape[0] )
    return wrapper
################
###WRAPPERS ###
################
| 41.356255 | 122 | 0.610033 |
4252097259c5f8f2219e8a65c81337c134ef50fa | 1,151 | py | Python | src/clean_property_file.py | wmaciel/van-crime | e70d0310f41de3a1b54572f6c6bf01083e56e0ab | [
"MIT"
] | 2 | 2016-03-03T00:14:59.000Z | 2016-08-21T14:28:02.000Z | src/clean_property_file.py | wmaciel/van-crime | e70d0310f41de3a1b54572f6c6bf01083e56e0ab | [
"MIT"
] | null | null | null | src/clean_property_file.py | wmaciel/van-crime | e70d0310f41de3a1b54572f6c6bf01083e56e0ab | [
"MIT"
] | null | null | null | __author__ = 'walthermaciel'
import pandas as pd
import numpy as np
if __name__ == '__main__':
main()
| 28.775 | 99 | 0.650738 |
4252c9d8b3317ae5bd56696743e5b2124dce1942 | 4,040 | py | Python | homeassistant/components/sensor/verisure.py | beschouten/home-assistant | f50c30bbbad4d92e342c8547630c63c0c7882803 | [
"MIT"
] | 1 | 2016-07-14T05:20:54.000Z | 2016-07-14T05:20:54.000Z | homeassistant/components/sensor/verisure.py | beschouten/home-assistant | f50c30bbbad4d92e342c8547630c63c0c7882803 | [
"MIT"
] | null | null | null | homeassistant/components/sensor/verisure.py | beschouten/home-assistant | f50c30bbbad4d92e342c8547630c63c0c7882803 | [
"MIT"
] | 1 | 2018-11-22T13:55:23.000Z | 2018-11-22T13:55:23.000Z | """
Interfaces with Verisure sensors.
For more details about this platform, please refer to the documentation at
documentation at https://home-assistant.io/components/verisure/
"""
import logging
from homeassistant.components.verisure import HUB as hub
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Verisure platform."""
sensors = []
if int(hub.config.get('thermometers', '1')):
hub.update_climate()
sensors.extend([
VerisureThermometer(value.id)
for value in hub.climate_status.values()
if hasattr(value, 'temperature') and value.temperature
])
if int(hub.config.get('hygrometers', '1')):
hub.update_climate()
sensors.extend([
VerisureHygrometer(value.id)
for value in hub.climate_status.values()
if hasattr(value, 'humidity') and value.humidity
])
if int(hub.config.get('mouse', '1')):
hub.update_mousedetection()
sensors.extend([
VerisureMouseDetection(value.deviceLabel)
for value in hub.mouse_status.values()
# is this if needed?
if hasattr(value, 'amountText') and value.amountText
])
add_devices(sensors)
class VerisureHygrometer(Entity):
"""Representation of a Verisure hygrometer."""
def __init__(self, device_id):
"""Initialize the sensor."""
self._id = device_id
def update(self):
"""Update the sensor."""
hub.update_climate()
class VerisureMouseDetection(Entity):
"""Representation of a Verisure mouse detector."""
def __init__(self, device_id):
"""Initialize the sensor."""
self._id = device_id
def update(self):
"""Update the sensor."""
hub.update_mousedetection()
| 26.933333 | 74 | 0.611881 |
4253d0f64f25024f864712c154a198a0bd7c1158 | 1,135 | py | Python | articles/blogs/tests/factories.py | MahmoudFarid/articles | f0238908b1430c949dace50401fb3ddf268a581b | [
"MIT"
] | null | null | null | articles/blogs/tests/factories.py | MahmoudFarid/articles | f0238908b1430c949dace50401fb3ddf268a581b | [
"MIT"
] | null | null | null | articles/blogs/tests/factories.py | MahmoudFarid/articles | f0238908b1430c949dace50401fb3ddf268a581b | [
"MIT"
] | null | null | null | import factory
from factory.django import DjangoModelFactory as Factory
from django.contrib.auth.models import Permission
from ..models import Blog
from articles.users.tests.factories import UserFactory
| 33.382353 | 102 | 0.767401 |
425489e4c1a682c5eeaad70ce3b5e922f8f9536b | 8,847 | py | Python | api_formatter/serializers.py | RockefellerArchiveCenter/argo | c02fec68dbb50382f3f0bdf11c51240ca22a181c | [
"MIT"
] | null | null | null | api_formatter/serializers.py | RockefellerArchiveCenter/argo | c02fec68dbb50382f3f0bdf11c51240ca22a181c | [
"MIT"
] | 115 | 2019-08-19T20:19:06.000Z | 2022-03-04T17:40:50.000Z | api_formatter/serializers.py | RockefellerArchiveCenter/argo | c02fec68dbb50382f3f0bdf11c51240ca22a181c | [
"MIT"
] | null | null | null | from datetime import datetime
from django.urls import reverse
from rest_framework import serializers
from .view_helpers import description_from_notes
| 35.247012 | 98 | 0.706228 |
42549d1737ce596628e42957af0838f8a820986b | 828 | py | Python | cmz/cms_news/migrations/0004_auto_20160923_1958.py | inmagik/cmz | e183f0c7203bda5efb1cbeb96f4f06a76aa91231 | [
"MIT"
] | 1 | 2016-10-01T18:35:24.000Z | 2016-10-01T18:35:24.000Z | cmz/cms_news/migrations/0004_auto_20160923_1958.py | inmagik/cmz | e183f0c7203bda5efb1cbeb96f4f06a76aa91231 | [
"MIT"
] | 8 | 2016-09-14T21:39:09.000Z | 2016-10-25T20:08:31.000Z | cmz/cms_news/migrations/0004_auto_20160923_1958.py | inmagik/cmz | e183f0c7203bda5efb1cbeb96f4f06a76aa91231 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-09-23 19:58
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
| 27.6 | 126 | 0.621981 |
42553eda4ebfb5ccb85d9727626440163f717d34 | 3,252 | py | Python | mopidy/audio/utils.py | grdorin/mopidy | 76db44088c102d7ad92a3fc6a15a938e66b99b0d | [
"Apache-2.0"
] | 6,700 | 2015-01-01T03:57:59.000Z | 2022-03-30T09:31:31.000Z | mopidy/audio/utils.py | pnijhara/mopidy | 7168787ea6c82b66e138fc2b388d78fa1c7661ba | [
"Apache-2.0"
] | 1,141 | 2015-01-02T09:48:59.000Z | 2022-03-28T22:25:30.000Z | mopidy/audio/utils.py | pnijhara/mopidy | 7168787ea6c82b66e138fc2b388d78fa1c7661ba | [
"Apache-2.0"
] | 735 | 2015-01-01T21:15:50.000Z | 2022-03-20T16:13:44.000Z | from mopidy import httpclient
from mopidy.internal.gi import Gst
def calculate_duration(num_samples, sample_rate):
"""Determine duration of samples using GStreamer helper for precise
math."""
return Gst.util_uint64_scale(num_samples, Gst.SECOND, sample_rate)
def create_buffer(data, timestamp=None, duration=None):
"""Create a new GStreamer buffer based on provided data.
Mainly intended to keep gst imports out of non-audio modules.
.. versionchanged:: 2.0
``capabilites`` argument was removed.
"""
if not data:
raise ValueError("Cannot create buffer without data")
buffer_ = Gst.Buffer.new_wrapped(data)
if timestamp is not None:
buffer_.pts = timestamp
if duration is not None:
buffer_.duration = duration
return buffer_
def millisecond_to_clocktime(value):
"""Convert a millisecond time to internal GStreamer time."""
return value * Gst.MSECOND
def clocktime_to_millisecond(value):
"""Convert an internal GStreamer time to millisecond time."""
return value // Gst.MSECOND
def supported_uri_schemes(uri_schemes):
"""Determine which URIs we can actually support from provided whitelist.
:param uri_schemes: list/set of URIs to check support for.
:type uri_schemes: list or set or URI schemes as strings.
:rtype: set of URI schemes we can support via this GStreamer install.
"""
supported_schemes = set()
registry = Gst.Registry.get()
for factory in registry.get_feature_list(Gst.ElementFactory):
for uri in factory.get_uri_protocols():
if uri in uri_schemes:
supported_schemes.add(uri)
return supported_schemes
def setup_proxy(element, config):
"""Configure a GStreamer element with proxy settings.
:param element: element to setup proxy in.
:type element: :class:`Gst.GstElement`
:param config: proxy settings to use.
:type config: :class:`dict`
"""
if not hasattr(element.props, "proxy") or not config.get("hostname"):
return
element.set_property("proxy", httpclient.format_proxy(config, auth=False))
element.set_property("proxy-id", config.get("username"))
element.set_property("proxy-pw", config.get("password"))
| 31.882353 | 78 | 0.681119 |
425582d3b0bd9aebc3e98f0f395cf656db9c8b38 | 467 | py | Python | day09/part1.py | mtn/advent16 | 0df34237485ee1246532e9eda0ef643e6950d13e | [
"MIT"
] | null | null | null | day09/part1.py | mtn/advent16 | 0df34237485ee1246532e9eda0ef643e6950d13e | [
"MIT"
] | null | null | null | day09/part1.py | mtn/advent16 | 0df34237485ee1246532e9eda0ef643e6950d13e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import re
with open("input.txt") as f:
content = f.read().strip()
ans = ""
i = 0
while i < len(content):
if content[i] == "(":
end = content[i:].find(")") + i
instr = content[i+1:end]
chars, times = map(int, content[i+1:end].split("x"))
to_copy = content[end+1:end+1+chars]
ans += times * to_copy
i = end + 1 + chars
else:
ans += content[i]
i += 1
print(len(ans))
| 20.304348 | 60 | 0.509636 |
4255be118dbe243d9d0c4b4eac0548f7377725a0 | 2,825 | py | Python | sa/profiles/Alcatel/AOS/get_inventory.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | sa/profiles/Alcatel/AOS/get_inventory.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | sa/profiles/Alcatel/AOS/get_inventory.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ----------------------------------------------------------------------
# Alcatel.AOS.get_inventory
# ----------------------------------------------------------------------
# Copyright (C) 2007-2014 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinventory import IGetInventory
| 36.217949 | 72 | 0.43469 |
42569d1c317bd48e4f4e7021e87396555e651ced | 1,276 | py | Python | db_conn.py | achhetr/Library-book-store-app | a85e9a26dba48119ce52abb5ee8219528e06ac30 | [
"MIT"
] | null | null | null | db_conn.py | achhetr/Library-book-store-app | a85e9a26dba48119ce52abb5ee8219528e06ac30 | [
"MIT"
] | null | null | null | db_conn.py | achhetr/Library-book-store-app | a85e9a26dba48119ce52abb5ee8219528e06ac30 | [
"MIT"
] | null | null | null | import sqlite3 | 33.578947 | 98 | 0.579937 |
4258b13ddf592d8967b4cf56eb4a465b00010bc4 | 5,286 | py | Python | edge-tool/cbor_converter.py | hckim-kornic/mbed-edge-kornic | b83ea92066fae7c274777aa27494d5524c577c12 | [
"Apache-2.0"
] | null | null | null | edge-tool/cbor_converter.py | hckim-kornic/mbed-edge-kornic | b83ea92066fae7c274777aa27494d5524c577c12 | [
"Apache-2.0"
] | null | null | null | edge-tool/cbor_converter.py | hckim-kornic/mbed-edge-kornic | b83ea92066fae7c274777aa27494d5524c577c12 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2018 ARM Ltd.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import os
import cbor2
import struct
from pyclibrary import CParser
from collections import namedtuple
CERTIFICATE_KEYS = ('MBED_CLOUD_DEV_BOOTSTRAP_DEVICE_CERTIFICATE',
'MBED_CLOUD_DEV_BOOTSTRAP_SERVER_ROOT_CA_CERTIFICATE',
'arm_uc_default_certificate')
KEY_KEYS = ('MBED_CLOUD_DEV_BOOTSTRAP_DEVICE_PRIVATE_KEY')
UPDATE_KEYS = ('arm_uc_default_certificate',
'arm_uc_class_id',
'arm_uc_vendor_id')
KEY_MAP = {
'MBED_CLOUD_DEV_BOOTSTRAP_DEVICE_CERTIFICATE': 'mbed.BootstrapDeviceCert',
'MBED_CLOUD_DEV_BOOTSTRAP_SERVER_ROOT_CA_CERTIFICATE': 'mbed.BootstrapServerCACert',
'MBED_CLOUD_DEV_BOOTSTRAP_DEVICE_PRIVATE_KEY': 'mbed.BootstrapDevicePrivateKey',
'MBED_CLOUD_DEV_BOOTSTRAP_ENDPOINT_NAME': 'mbed.EndpointName',
'MBED_CLOUD_DEV_BOOTSTRAP_SERVER_URI': 'mbed.BootstrapServerURI',
'MBED_CLOUD_DEV_ACCOUNT_ID': 'mbed.AccountID',
'MBED_CLOUD_DEV_MANUFACTURER': 'mbed.Manufacturer',
'MBED_CLOUD_DEV_MODEL_NUMBER': 'mbed.ModelNumber',
'MBED_CLOUD_DEV_SERIAL_NUMBER': 'mbed.SerialNumber',
'MBED_CLOUD_DEV_DEVICE_TYPE': 'mbed.DeviceType',
'MBED_CLOUD_DEV_HARDWARE_VERSION': 'mbed.HardwareVersion',
'MBED_CLOUD_DEV_MEMORY_TOTAL_KB': 'mbed.MemoryTotalKB',
'arm_uc_default_certificate': 'mbed.UpdateAuthCert',
'arm_uc_class_id': 'mbed.ClassId',
'arm_uc_vendor_id': 'mbed.VendorId'
}
ConfigParam = namedtuple('ConfigParam', ['Data', 'Name'])
Certificate = namedtuple('Certificate', ['Data', 'Format', 'Name'])
Key = namedtuple('Key', ['Data', 'Format', 'Name', 'Type'])
| 40.661538 | 99 | 0.620885 |
4258ec1ee3116d288de649b3f19210bd3aa35e35 | 3,012 | py | Python | turbinia/processors/archive_test.py | sa3eed3ed/turbinia | 1eb4db37813f2bd44dcc2c3764e9411f6a2f9d97 | [
"Apache-2.0"
] | 559 | 2015-09-16T21:55:12.000Z | 2022-03-28T11:08:11.000Z | turbinia/processors/archive_test.py | sa3eed3ed/turbinia | 1eb4db37813f2bd44dcc2c3764e9411f6a2f9d97 | [
"Apache-2.0"
] | 630 | 2015-09-16T21:53:41.000Z | 2022-03-25T07:03:32.000Z | turbinia/processors/archive_test.py | sa3eed3ed/turbinia | 1eb4db37813f2bd44dcc2c3764e9411f6a2f9d97 | [
"Apache-2.0"
] | 158 | 2015-12-06T20:39:32.000Z | 2022-03-13T22:15:01.000Z | # -*- coding: utf-8 -*-
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Archive processor to compress and decompress folders."""
from __future__ import unicode_literals
import os
import tarfile
import unittest
import tempfile
from random import randint
from shutil import rmtree
from turbinia.processors import archive
from turbinia import TurbiniaException
if __name__ == '__main__':
unittest.main()
| 33.842697 | 74 | 0.729416 |
42595d917949c306ffaf79514babf64460ba3c69 | 1,869 | py | Python | blog.py | BenTimor/SerializationConceptSystem | 0f85dc32063d270a5564cda3199d84d474e5d83e | [
"MIT"
] | 1 | 2020-11-13T22:21:47.000Z | 2020-11-13T22:21:47.000Z | blog.py | BenTimor/SerializationConceptSystem | 0f85dc32063d270a5564cda3199d84d474e5d83e | [
"MIT"
] | null | null | null | blog.py | BenTimor/SerializationConceptSystem | 0f85dc32063d270a5564cda3199d84d474e5d83e | [
"MIT"
] | null | null | null | from utils import database | 29.666667 | 160 | 0.602996 |
4259a696e067dbb5b562342c586a116816461462 | 29 | py | Python | src/svr/tests/__init__.py | yottaawesome/fsnd-project-2 | 7ed478fa945a561a28af06dc8e4492a9fbea510a | [
"MIT"
] | 3 | 2019-05-04T12:30:00.000Z | 2020-05-14T06:28:51.000Z | src/svr/tests/__init__.py | yottaawesome/fsnd-project-2 | 7ed478fa945a561a28af06dc8e4492a9fbea510a | [
"MIT"
] | 1 | 2019-05-05T01:30:37.000Z | 2019-05-16T02:50:04.000Z | src/svr/tests/__init__.py | yottaawesome/fsnd-project-2 | 7ed478fa945a561a28af06dc8e4492a9fbea510a | [
"MIT"
] | 1 | 2020-03-27T07:12:40.000Z | 2020-03-27T07:12:40.000Z | from .test_db import TestDal
| 14.5 | 28 | 0.827586 |
425afadcb24a0ea23083f2d7fe78d83b6b1403c9 | 971 | py | Python | Owner/models.py | 2000090063/Vehicle_Rental_System-SDP-2- | 483d811aa239a226607b4bfb262c99da3be017b4 | [
"MIT"
] | 3 | 2022-03-12T08:27:42.000Z | 2022-03-17T12:16:16.000Z | Owner/models.py | 2000090063/Vehicle_Rental_System-SDP-2- | 483d811aa239a226607b4bfb262c99da3be017b4 | [
"MIT"
] | null | null | null | Owner/models.py | 2000090063/Vehicle_Rental_System-SDP-2- | 483d811aa239a226607b4bfb262c99da3be017b4 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here. | 42.217391 | 70 | 0.748713 |
425c5f6cf6cd74314b97f4bcb6721e3f260e8ac7 | 6,548 | py | Python | tectosaur/fmm/builder.py | jlmaurer/tectosaur | 7cc5606d814f061395b19754e7a4b6c5e4c236e5 | [
"MIT"
] | 17 | 2017-06-29T16:48:56.000Z | 2021-10-03T18:31:41.000Z | tectosaur/fmm/builder.py | jlmaurer/tectosaur | 7cc5606d814f061395b19754e7a4b6c5e4c236e5 | [
"MIT"
] | 4 | 2018-05-29T08:21:13.000Z | 2021-04-01T01:28:50.000Z | tectosaur/fmm/builder.py | jlmaurer/tectosaur | 7cc5606d814f061395b19754e7a4b6c5e4c236e5 | [
"MIT"
] | 8 | 2019-06-10T22:19:40.000Z | 2022-01-12T20:55:37.000Z | import numpy as np
import tectosaur.util.gpu as gpu
from tectosaur.fmm.c2e import build_c2e
import logging
logger = logging.getLogger(__name__)
| 36.786517 | 95 | 0.609652 |
425d43c4429c4fecedfff11a5de11c9d121390a6 | 2,553 | py | Python | fabio/test/codecs/test_mpaimage.py | picca/fabio | bc3aae330bef6e1c983007562157edfe6d7daf91 | [
"Apache-2.0"
] | null | null | null | fabio/test/codecs/test_mpaimage.py | picca/fabio | bc3aae330bef6e1c983007562157edfe6d7daf91 | [
"Apache-2.0"
] | 2 | 2019-04-24T13:43:41.000Z | 2019-06-13T08:54:02.000Z | fabio/test/codecs/test_mpaimage.py | boesecke/fabio | 11350e445a6def4d02c6860aea3ae7f36652af6a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fable Input Output
# https://github.com/silx-kit/fabio
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jrme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Multiwire Unit tests"""
from __future__ import print_function, with_statement, division, absolute_import
import unittest
import logging
logger = logging.getLogger(__name__)
import fabio
from ..utilstest import UtilsTest
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
| 34.04 | 111 | 0.653741 |
425dd97c671323bb5d6b53095ab3886bfc7da465 | 1,064 | py | Python | currencySpider.py | cloud322/helloScrap | 6313c5b99bce04c6a78a5dfb2ec910c73a33add3 | [
"Apache-2.0"
] | null | null | null | currencySpider.py | cloud322/helloScrap | 6313c5b99bce04c6a78a5dfb2ec910c73a33add3 | [
"Apache-2.0"
] | null | null | null | currencySpider.py | cloud322/helloScrap | 6313c5b99bce04c6a78a5dfb2ec910c73a33add3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
import codecs
import sys
# utf-8 utf-8
reload(sys)
sys.setdefaultencoding('utf8')
# scrapy spider crawling/scrapping
#crawling/scrapping | 28 | 86 | 0.56015 |
425f6d304bf8b5a8fd1c2a47d2f7c554468160b1 | 1,812 | py | Python | tests/test_sanity_check/test_similar_columns.py | thibaultbl/feature_engine | 08374227e7a88b67ee64b64f22e4f30390df9253 | [
"BSD-3-Clause"
] | 1 | 2021-09-08T08:54:56.000Z | 2021-09-08T08:54:56.000Z | tests/test_sanity_check/test_similar_columns.py | thibaultbl/feature_engine | 08374227e7a88b67ee64b64f22e4f30390df9253 | [
"BSD-3-Clause"
] | 1 | 2021-09-10T08:54:51.000Z | 2021-09-10T08:54:51.000Z | tests/test_sanity_check/test_similar_columns.py | thibaultbl/feature_engine | 08374227e7a88b67ee64b64f22e4f30390df9253 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pandas as pd
from feature_engine.sanity_check import SimilarColumns
| 27.044776 | 85 | 0.557395 |
425fb9945bfce39ef08339e9cffda8aa831a4e3d | 6,780 | py | Python | examples/sem_seg_dense/train.py | megaelius/deep_gcns_torch | 5d565a02020ff9faff3a34d55f278e7328c73ec2 | [
"MIT"
] | null | null | null | examples/sem_seg_dense/train.py | megaelius/deep_gcns_torch | 5d565a02020ff9faff3a34d55f278e7328c73ec2 | [
"MIT"
] | null | null | null | examples/sem_seg_dense/train.py | megaelius/deep_gcns_torch | 5d565a02020ff9faff3a34d55f278e7328c73ec2 | [
"MIT"
] | null | null | null |
import __init__
import os
#os.environ['LD_LIBRARY_PATH'] += ':/usr/local/cuda-11.1/bin64:/usr/local/cuda-11.2/bin64'
import numpy as np
import torch
import torch.multiprocessing as mp
import torch_geometric.datasets as GeoData
from torch_geometric.loader import DenseDataLoader
import torch_geometric.transforms as T
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
from config import OptInit
from architecture import DenseDeepGCN, CustomDenseDeepGCN
from utils.ckpt_util import load_pretrained_models, load_pretrained_optimizer, save_checkpoint
from utils.metrics import AverageMeter
import logging
from tqdm import tqdm
from parallel_wrapper import launch
import comm
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter(log_dir='log/mlp4')
if __name__ == '__main__':
main() | 39.649123 | 156 | 0.633333 |
426012a76defd0d35b2275dd689a17428018f29c | 707 | py | Python | sources/tkinter/prog03.py | kantel/pythoncuriosa | 4dfb92b443cbe0acf8d8efa5c54efbf13e834620 | [
"MIT"
] | null | null | null | sources/tkinter/prog03.py | kantel/pythoncuriosa | 4dfb92b443cbe0acf8d8efa5c54efbf13e834620 | [
"MIT"
] | null | null | null | sources/tkinter/prog03.py | kantel/pythoncuriosa | 4dfb92b443cbe0acf8d8efa5c54efbf13e834620 | [
"MIT"
] | null | null | null | import tkinter as tk
from tkinter import ttk
win = tk.Tk()
win.title("Python GUI")
win.resizable(False, False)
win.configure(background = "grey94")
a_label = ttk.Label(win, text = "Gib Deinen Namen ein:")
a_label.grid(column = 0, row = 0)
a_label.grid_configure(padx = 8, pady = 8)
name = tk.StringVar()
name_entered = ttk.Entry(win, width = 12, textvariable = name)
name_entered.grid(column = 0, row = 1)
name_entered.grid_configure(padx = 8, pady = 8)
name_entered.focus()
action = ttk.Button(win, text = "Drck mich!", command = clickMe)
action.grid(column = 1, row = 1)
action.grid_configure(padx = 8, pady = 8)
win.mainloop() | 26.185185 | 65 | 0.701556 |
4260837af4a64a8bea9204399d75709291c91101 | 528 | py | Python | openarticlegauge/slavedriver.py | CottageLabs/OpenArticleGauge | 58d29b4209a7b59041d61326ffe1cf03f98f3cff | [
"BSD-3-Clause"
] | 1 | 2016-04-07T18:29:27.000Z | 2016-04-07T18:29:27.000Z | openarticlegauge/slavedriver.py | CottageLabs/OpenArticleGauge | 58d29b4209a7b59041d61326ffe1cf03f98f3cff | [
"BSD-3-Clause"
] | 11 | 2015-01-06T15:53:09.000Z | 2022-03-01T01:46:14.000Z | openarticlegauge/slavedriver.py | CottageLabs/OpenArticleGauge | 58d29b4209a7b59041d61326ffe1cf03f98f3cff | [
"BSD-3-Clause"
] | null | null | null | """
Initialise the Celery instance to be used by the application
This is largely just boiler plate, and we could probably look at coming back to it and cleaning it
up a bit in the future.
"""
from __future__ import absolute_import
from celery import Celery
celery = Celery()
from openarticlegauge import celeryconfig
celery.config_from_object(celeryconfig)
# Optional configuration, see the application user guide.
celery.conf.update(
CELERY_TASK_RESULT_EXPIRES=3600,
)
if __name__ == '__main__':
celery.start()
| 21.12 | 98 | 0.780303 |
42629d99092a4d568c978d01f8d8dafafec338c9 | 28,061 | py | Python | cbf_ros/scripts/cbf_controller_sy.py | k1majd/CBF_TB_RRT | 2632357d42155de6dec5802c337a5abfdc824aac | [
"MIT"
] | 2 | 2021-10-07T17:06:57.000Z | 2021-11-23T15:58:14.000Z | cbf_ros/scripts/cbf_controller_sy.py | k1majd/CBF_TB_RRT | 2632357d42155de6dec5802c337a5abfdc824aac | [
"MIT"
] | 1 | 2021-10-13T17:18:32.000Z | 2021-10-13T17:37:26.000Z | cbf_ros/scripts/cbf_controller_sy.py | k1majd/CBF_TB_RRT | 2632357d42155de6dec5802c337a5abfdc824aac | [
"MIT"
] | 1 | 2021-11-30T11:09:43.000Z | 2021-11-30T11:09:43.000Z | #! /usr/bin/env python
# call roscore
# $ roscore
#
# If start in manual
# $ rosrun cbf_ros cbf_controller.py
import rospy
import sys
import argparse
import re
import numpy as np
from scipy.integrate import odeint
from sympy import symbols, Matrix, sin, cos, lambdify, exp, sqrt, log
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import cvxopt as cvxopt
# ROS msg
from geometry_msgs.msg import Twist
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import Vector3
from nav_msgs.msg import Odometry
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import GetWorldProperties, GetModelState, GetModelStateRequest
# ROS others
import tf
DEBUG = False
if __name__ == '__main__':
## Parameters
findBestCommandAnyway = 1 #make this zero if you don't want to do anything if it's riskier than intended
#use 1 if you want to do the best even if there is risk
plotanimation = 0
# Goal info
GoalCenter = np.array([0, 0])
rGoal = np.power(0.5,2)
# Unsafe
UnsafeInclude = 9 # consider obstacle if in radius
UnsafeRadius = 0.5 #radius of unsafe sets/distance from obstacles
# Enviroment Bounds
env_bounds = type('', (), {})()
env_bounds.y_min = -1.2
env_bounds.y_max = 1
# env_bounds.x_max = 1.25
# env_bounds.x_min = -1.35
l = 0.01 #bicycle model approximation parameter
U = np.array([[-0.33,0.33],[-0.3,0.3]])
T = 1 #Lookahead horizon
risk = 0.1 # max risk desired
gamma = 5 # CBF coefficient
u1d = 0 # desired input to save energy!
# Plotting options
plotit = 1
plotlanes = 1
robot = robot(l)
GoalInfo = robot.GoalFuncs(GoalCenter,rGoal)
UnsafeInfo = robot.UnsafeFuncs(gamma,UnsafeRadius)
MapInfo = robot.MapFuncs(env_bounds)
# Process arguments
p = argparse.ArgumentParser(description='CBF controller')
args = p.parse_args(rospy.myargv()[1:])
try:
rospy.init_node('cbf_controller')
cbf_controller = CBF_CONTROLLER(robot,GoalInfo,UnsafeInfo,MapInfo)
control_priod = 0.05 #[sec] we can change controll priod with this parameter.
rospy.Timer(rospy.Duration(control_priod), cbf_controller.controller_loop_callback)
rospy.spin()
except rospy.ROSInterruptException:
pass
plottrajs(cbf_controller.trajs)
| 51.393773 | 222 | 0.490218 |
4262af6285d912525c9c840db4e454a16f646f01 | 5,250 | py | Python | src/gui/ui_paste_dialog.py | tonypdmtr/sxtool | 225468d70c5fe1bf7414f19ce13dcdd43e872433 | [
"BSD-2-Clause"
] | 3 | 2018-10-11T15:34:24.000Z | 2022-02-20T23:24:01.000Z | src/gui/ui_paste_dialog.py | tonypdmtr/sxtool | 225468d70c5fe1bf7414f19ce13dcdd43e872433 | [
"BSD-2-Clause"
] | 1 | 2018-10-16T06:58:22.000Z | 2018-10-22T20:19:55.000Z | src/gui/ui_paste_dialog.py | tonypdmtr/sxtool | 225468d70c5fe1bf7414f19ce13dcdd43e872433 | [
"BSD-2-Clause"
] | 1 | 2022-02-20T23:26:50.000Z | 2022-02-20T23:26:50.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'src/gui/ui_paste_dialog.ui'
#
# Created by: PyQt5 UI code generator 5.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| 58.333333 | 110 | 0.739619 |
4262ea9b91c2ce1c0da94f2913617caab9285e6f | 110 | py | Python | app/pathfinding/finder/__init__.py | TheronHa/Spaghetti | e181c2f7ea0c044fb7d0edb36bd203dac2eabaf9 | [
"MIT"
] | 208 | 2017-01-23T17:45:13.000Z | 2022-03-22T22:27:25.000Z | app/pathfinding/finder/__init__.py | TheronHa/Spaghetti | e181c2f7ea0c044fb7d0edb36bd203dac2eabaf9 | [
"MIT"
] | 31 | 2017-10-28T09:21:06.000Z | 2021-09-26T15:38:36.000Z | app/pathfinding/finder/__init__.py | TheronHa/Spaghetti | e181c2f7ea0c044fb7d0edb36bd203dac2eabaf9 | [
"MIT"
] | 60 | 2016-12-13T00:05:36.000Z | 2022-03-21T22:23:49.000Z | __all__ = ['a_star', 'best_first', 'bi_a_star', 'breadth_first', 'dijkstra',
'finder', 'ida_star']
| 36.666667 | 76 | 0.609091 |
4263245bfbde431be1ac8c88739a3f1f392bf22f | 34,891 | py | Python | dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/uo_20000929.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | null | null | null | dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/uo_20000929.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | null | null | null | dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/uo_20000929.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | 1 | 2020-07-26T03:57:45.000Z | 2020-07-26T03:57:45.000Z | #Uche's test from Sun's SVG slide publisher
import os
from Xml.Xslt import test_harness
#From Sun's toolkit
sheet_1_uri = "Xml/Xslt/Borrowed/svgslides.xsl"
sheet_2_uri = "Xml/Xslt/Borrowed/svgslides_custom.xsl"
sheet_3_uri = "Xml/Xslt/Borrowed/slidescript.xsl"
source_1_uri = "Xml/Xslt/Borrowed/slides4svg.xml"
saxon_output = """"""
expected_1 = """<?xml version='1.0' encoding='UTF-8'?>
<?xml-stylesheet href="slides.css" type="text/css"?>
<svg height='768' width='1024' style='pointer-events:visible' xml:space='preserve' onload='initSlides(evt)' xmlns:xlink='http://www.w3.org/2000/xlink/namespace/'>
<script><![CDATA[
var doc = null;
// Called upon presentation loading
function initSlides(evt){
var target = evt.getTarget();
doc = target.getOwnerDocument();
hideAndShow(evt, curSlide, curSlide);
}
function onPrevSlide(evt){
// Process new current slide
var oldCurSlide = curSlide;
curSlide = curSlide - 1;
if(curSlide < 0){
curSlide = slideList.length - 1;
}
hideAndShow(evt, oldCurSlide, curSlide);
}
function onNextSlide(evt){
// Process new current slide
var prevSlide = curSlide;
curSlide = curSlide + 1;
if(curSlide > (slideList.length - 1)){
curSlide = 0;
}
hideAndShow(evt, prevSlide, curSlide);
// alert("onNextSlide");
}
function hideAndShow(evt, hideSlide, showSlide){
// alert("Hiding : " + hideSlide + " and showing : " + showSlide);
// Hide previous current slide and show new
// one.
var hideSlideName = slideList[hideSlide];
var showSlideName = slideList[showSlide];
/*if(hideSlideName == null)
alert("hideSlideName is null");
else
alert("hideSlideName is NOT null:" + hideSlideName);*/
var slideGroup = doc.getElementById(hideSlideName);
slideGroup.setAttribute("style", "visibility:hidden");
slideGroup = doc.getElementById(showSlideName);
slideGroup.setAttribute("style", "visibility:show");
var slideMenuItemId = slideList[hideSlide] + "MenuItem";
var menuItem = doc.getElementById(slideMenuItemId);
if(menuItem != null)
menuItem.setAttribute("class", "slideMenuItem");
slideMenuItemId = slideList[showSlide] + "MenuItem";
menuItem = doc.getElementById(slideMenuItemId);
if(menuItem != null)
menuItem.setAttribute("class", "currentSlideMenuItem");
}
function onHighlightMenuItem(evt, highlight, itemId){
var target = evt.getTarget();
var doc = target.getOwnerDocument();
var menuItem = doc.getElementById(itemId);
if(highlight == "true")
menuItem.setAttribute("class", "highlightedSlideMenuItem");
else{
var curSlideMenuItemId = slideList[curSlide] + "MenuItem";
if(curSlideMenuItemId == itemId)
menuItem.setAttribute("class", "currentSlideMenuItem");
else
menuItem.setAttribute("class", "slideMenuItem");
}
}
function onMenuItemSelected(evt, index){
// alert("Should show slide # " + index);
var oldCurSlide = curSlide;
curSlide = index;
hideAndShow(evt, oldCurSlide, index);
}
function onSetFill(evt, elementId, fillValue){
var element = doc.getElementById(elementId);
element.setAttribute("style", "fill:" + fillValue);
}
function onExpand(evt, submenuGroupId){
var submenuGroup = doc.getElementById(submenuGroupId);
submenuGroup.setAttribute("style", "visibility:hidden");
var javaScriptCode = "window.expandNow('" + submenuGroupId + "')";
window.expandNow = expandNow;
setTimeout(javaScriptCode, 1000);
}
function expandNow(submenuGroupId){
var submenuGroup = doc.getElementById(submenuGroupId);
submenuGroup.setAttribute("style", "visibility:show");
}
function onCollapse(evt, submenuGroupId){
var submenuGroup = doc.getElementById(submenuGroupId);
submenuGroup.setAttribute("style", "visibility:hidden");
}
]]></script>
<script><![CDATA[
var slideList = new Array();
var slideIndex = new Object();
var curSlide = 0;
slideList[0]="slideShowCover";
slideIndex["slideShowCover"] = 0;
slideList[1]="slidesetCover1";
slideIndex["slidesetCover1"] = 1;
slideList[2] = "slide1-1";
slideIndex["slide1-1"] = 2;
slideList[3]="slidesetCover2";
slideIndex["slidesetCover2"] = 3;
slideList[4] = "slide2-1";
slideIndex["slide2-1"] = 4;
slideList[5] = "slide2-2";
slideIndex["slide2-2"] = 5;
slideList[6] = "slide2-3";
slideIndex["slide2-3"] = 6;
slideList[7]="slidesetCover3";
slideIndex["slidesetCover3"] = 7;
slideList[8] = "slide3-1";
slideIndex["slide3-1"] = 8;
slideList[9] = "slide3-2";
slideIndex["slide3-2"] = 9;
]]></script>
<defs>
<linearGradient spreadMethod='pad' id='slideBackgroundPaint' x1='0' y2='768' x2='1024' y1='0' gradientUnits='userSpaceOnUse'>
<stop offset='0%' style='stop-color:black; stop-opacity:1;'/>
<stop offset='100%' style='stop-color:rgb(103, 107, 157); stop-opacity:1;'/>
</linearGradient>
<linearGradient spreadMethod='pad' id='slideTitleSeparatorPaint' x1='0' y2='0' x2='1024' y1='0' gradientUnits='userSpaceOnUse'>
<stop offset='0%' style='stop-color:rgb(23, 27, 77); stop-opacity:1;'/>
<stop offset='.5' style='stop-color:rgb(103, 107, 157); stop-opacity:1;'/>
<stop offset='100%' style='stop-color:rgb(23, 27, 77); stop-opacity:1;'/>
</linearGradient>
<linearGradient spreadMethod='pad' id='menuBarPaint' x1='0' y2='0' x2='210' y1='0' gradientUnits='userSpaceOnUse'>
<stop offset='0%' style='stop-color:black; stop-opacity:1;'/>
<stop offset='50%' style='stop-color:rgb(103, 107, 157); stop-opacity:1;'/>
<stop offset='100%' style='stop-color:white; stop-opacity:1;'/>
</linearGradient>
<linearGradient spreadMethod='pad' id='slideBackgroundHeaderPaint' x1='0' y2='100' x2='0' y1='0' gradientUnits='userSpaceOnUse'>
<stop offset='0%' style='stop-color:black; stop-opacity:1;'/>
<stop offset='50%' style='stop-color:rgb(103, 107, 157); stop-opacity:1;'/>
<stop offset='100%' style='stop-color:white; stop-opacity:1;'/>
</linearGradient>
<g id='stripePattern'>
<g style='fill:black; fill-opacity:.25'>
<rect height='2' width='1' y='0'/>
<rect height='2' width='1' y='4'/>
<rect height='2' width='1' y='8'/>
<rect height='2' width='1' y='12'/>
<rect height='2' width='1' y='16'/>
<rect height='2' width='1' y='20'/>
<rect height='2' width='1' y='24'/>
<rect height='2' width='1' y='28'/>
<rect height='2' width='1' y='32'/>
<rect height='2' width='1' y='36'/>
<rect height='2' width='1' y='40'/>
<rect height='2' width='1' y='44'/>
<rect height='2' width='1' y='48'/>
<rect height='2' width='1' y='52'/>
<rect height='2' width='1' y='56'/>
<rect height='2' width='1' y='60'/>
<rect height='2' width='1' y='64'/>
<rect height='2' width='1' y='68'/>
<rect height='2' width='1' y='72'/>
<rect height='2' width='1' y='76'/>
<rect height='2' width='1' y='80'/>
<rect height='2' width='1' y='84'/>
<rect height='2' width='1' y='88'/>
<rect height='2' width='1' y='92'/>
<rect height='2' width='1' y='96'/>
<rect height='2' width='1' y='100'/>
<rect height='2' width='1' y='104'/>
<rect height='2' width='1' y='108'/>
<rect height='2' width='1' y='112'/>
<rect height='2' width='1' y='116'/>
<rect height='2' width='1' y='120'/>
<rect height='2' width='1' y='124'/>
<rect height='2' width='1' y='128'/>
<rect height='2' width='1' y='132'/>
<rect height='2' width='1' y='136'/>
<rect height='2' width='1' y='140'/>
<rect height='2' width='1' y='144'/>
<rect height='2' width='1' y='148'/>
<rect height='2' width='1' y='152'/>
<rect height='2' width='1' y='156'/>
<rect height='2' width='1' y='160'/>
<rect height='2' width='1' y='164'/>
<rect height='2' width='1' y='168'/>
<rect height='2' width='1' y='172'/>
<rect height='2' width='1' y='176'/>
<rect height='2' width='1' y='180'/>
<rect height='2' width='1' y='184'/>
<rect height='2' width='1' y='188'/>
<rect height='2' width='1' y='192'/>
<rect height='2' width='1' y='196'/>
<rect height='2' width='1' y='200'/>
<rect height='2' width='1' y='204'/>
<rect height='2' width='1' y='208'/>
<rect height='2' width='1' y='212'/>
<rect height='2' width='1' y='216'/>
<rect height='2' width='1' y='220'/>
<rect height='2' width='1' y='224'/>
<rect height='2' width='1' y='228'/>
<rect height='2' width='1' y='232'/>
<rect height='2' width='1' y='236'/>
<rect height='2' width='1' y='240'/>
<rect height='2' width='1' y='244'/>
<rect height='2' width='1' y='248'/>
<rect height='2' width='1' y='252'/>
<rect height='2' width='1' y='256'/>
<rect height='2' width='1' y='260'/>
<rect height='2' width='1' y='264'/>
<rect height='2' width='1' y='268'/>
<rect height='2' width='1' y='272'/>
<rect height='2' width='1' y='276'/>
<rect height='2' width='1' y='280'/>
<rect height='2' width='1' y='284'/>
<rect height='2' width='1' y='288'/>
<rect height='2' width='1' y='292'/>
<rect height='2' width='1' y='296'/>
<rect height='2' width='1' y='300'/>
<rect height='2' width='1' y='304'/>
<rect height='2' width='1' y='308'/>
<rect height='2' width='1' y='312'/>
<rect height='2' width='1' y='316'/>
<rect height='2' width='1' y='320'/>
<rect height='2' width='1' y='324'/>
<rect height='2' width='1' y='328'/>
<rect height='2' width='1' y='332'/>
<rect height='2' width='1' y='336'/>
<rect height='2' width='1' y='340'/>
<rect height='2' width='1' y='344'/>
<rect height='2' width='1' y='348'/>
<rect height='2' width='1' y='352'/>
<rect height='2' width='1' y='356'/>
<rect height='2' width='1' y='360'/>
<rect height='2' width='1' y='364'/>
<rect height='2' width='1' y='368'/>
<rect height='2' width='1' y='372'/>
<rect height='2' width='1' y='376'/>
<rect height='2' width='1' y='380'/>
<rect height='2' width='1' y='384'/>
<rect height='2' width='1' y='388'/>
<rect height='2' width='1' y='392'/>
<rect height='2' width='1' y='396'/>
<rect height='2' width='1' y='400'/>
<rect height='2' width='1' y='404'/>
<rect height='2' width='1' y='408'/>
<rect height='2' width='1' y='412'/>
<rect height='2' width='1' y='416'/>
<rect height='2' width='1' y='420'/>
<rect height='2' width='1' y='424'/>
<rect height='2' width='1' y='428'/>
<rect height='2' width='1' y='432'/>
<rect height='2' width='1' y='436'/>
<rect height='2' width='1' y='440'/>
<rect height='2' width='1' y='444'/>
<rect height='2' width='1' y='448'/>
<rect height='2' width='1' y='452'/>
<rect height='2' width='1' y='456'/>
<rect height='2' width='1' y='460'/>
<rect height='2' width='1' y='464'/>
<rect height='2' width='1' y='468'/>
<rect height='2' width='1' y='472'/>
<rect height='2' width='1' y='476'/>
<rect height='2' width='1' y='480'/>
<rect height='2' width='1' y='484'/>
<rect height='2' width='1' y='488'/>
<rect height='2' width='1' y='492'/>
<rect height='2' width='1' y='496'/>
<rect height='2' width='1' y='500'/>
<rect height='2' width='1' y='504'/>
<rect height='2' width='1' y='508'/>
<rect height='2' width='1' y='512'/>
<rect height='2' width='1' y='516'/>
<rect height='2' width='1' y='520'/>
<rect height='2' width='1' y='524'/>
<rect height='2' width='1' y='528'/>
<rect height='2' width='1' y='532'/>
<rect height='2' width='1' y='536'/>
<rect height='2' width='1' y='540'/>
<rect height='2' width='1' y='544'/>
<rect height='2' width='1' y='548'/>
<rect height='2' width='1' y='552'/>
<rect height='2' width='1' y='556'/>
<rect height='2' width='1' y='560'/>
<rect height='2' width='1' y='564'/>
<rect height='2' width='1' y='568'/>
<rect height='2' width='1' y='572'/>
<rect height='2' width='1' y='576'/>
<rect height='2' width='1' y='580'/>
<rect height='2' width='1' y='584'/>
<rect height='2' width='1' y='588'/>
<rect height='2' width='1' y='592'/>
<rect height='2' width='1' y='596'/>
<rect height='2' width='1' y='600'/>
<rect height='2' width='1' y='604'/>
<rect height='2' width='1' y='608'/>
<rect height='2' width='1' y='612'/>
<rect height='2' width='1' y='616'/>
<rect height='2' width='1' y='620'/>
<rect height='2' width='1' y='624'/>
<rect height='2' width='1' y='628'/>
<rect height='2' width='1' y='632'/>
<rect height='2' width='1' y='636'/>
<rect height='2' width='1' y='640'/>
<rect height='2' width='1' y='644'/>
<rect height='2' width='1' y='648'/>
<rect height='2' width='1' y='652'/>
<rect height='2' width='1' y='656'/>
<rect height='2' width='1' y='660'/>
<rect height='2' width='1' y='664'/>
<rect height='2' width='1' y='668'/>
<rect height='2' width='1' y='672'/>
<rect height='2' width='1' y='676'/>
<rect height='2' width='1' y='680'/>
<rect height='2' width='1' y='684'/>
<rect height='2' width='1' y='688'/>
<rect height='2' width='1' y='692'/>
<rect height='2' width='1' y='696'/>
<rect height='2' width='1' y='700'/>
<rect height='2' width='1' y='704'/>
<rect height='2' width='1' y='708'/>
<rect height='2' width='1' y='712'/>
<rect height='2' width='1' y='716'/>
<rect height='2' width='1' y='720'/>
<rect height='2' width='1' y='724'/>
<rect height='2' width='1' y='728'/>
<rect height='2' width='1' y='732'/>
<rect height='2' width='1' y='736'/>
<rect height='2' width='1' y='740'/>
<rect height='2' width='1' y='744'/>
<rect height='2' width='1' y='748'/>
<rect height='2' width='1' y='752'/>
<rect height='2' width='1' y='756'/>
<rect height='2' width='1' y='760'/>
<rect height='2' width='1' y='764'/>
<rect height='2' width='1' y='768'/>
<rect height='2' width='1' y='772'/>
<rect height='2' width='1' y='776'/>
<rect height='2' width='1' y='780'/>
<rect height='2' width='1' y='784'/>
<rect height='2' width='1' y='788'/>
<rect height='2' width='1' y='792'/>
<rect height='2' width='1' y='796'/>
</g>
</g>
<g id='bullet' transform='translate(0, -20)'>
<path style='stroke:white; stroke-width:2; fill:none' d='M0.436,1.418C7.853-1.088,16.396,1.706,19.52,7.658c2.498,4.762-0.287,10.248-6.22,12.252c-4.747,1.604-10.215-0.184-12.213-3.993c-1.599-3.048,0.183-6.559,3.981-7.842c3.038-1.026,6.538,0.118,7.816,2.556 c1.024,1.951-0.117,4.198-2.547,5.019c-1.945,0.657-4.185-0.076-5.003-1.636c-0.655-1.248,0.075-2.686,1.63-3.212c1.245-0.42,2.678,0.048,3.202,1.047'/>
</g>
</defs>
<g id='slideBackground' class='slideBackground'>
<rect height='768' style='fill:black' width='1024' x='0' y='0'/>
<rect height='668' style='fill:url(#menuBarPaint)' width='210' x='0' y='100'/>
<rect height='100' style='fill:url(#slideBackgroundHeaderPaint)' width='1024' x='0' y='0'/>
<use xlink:href='#stripePattern' transform='scale(1024, 1)'/>
<rect height='5' style='fill:url(#slideTitleSeparatorPaint)' width='1024' x='0' y='100'/>
</g>
<g id='navigationGroup' style='fill:white' transform='translate(984, 45) scale(2, 2)'>
<polygon id='prevSlideControl' onclick='onPrevSlide(evt)' onmouseover="onSetFill(evt, 'prevSlideControl', 'rgb(176, 22, 40)')" points='1 10 10 0 1 -10 1 10' onmouseout="onSetFill(evt, 'prevSlideControl', 'white')" transform='rotate(180)'/>
<polygon id='nextSlideControl' onclick='onNextSlide(evt)' onmouseover="onSetFill(evt, 'nextSlideControl', 'rgb(176, 22, 40)')" points='1 10 10 0 1 -10 1 10' onmouseout="onSetFill(evt, 'nextSlideControl', 'white')"/>
</g>
<g id='slideMenu' transform='translate(15, 130)'>
<text onclick='onMenuItemSelected(evt, 1)' class='slidesetMenuHeader' x='0' y='0'>Background and Motivation</text>
<g style='visibility:visible'>
<rect height='5' id='Expand1' x='-10' y='-5' onclick="onExpand(evt, 'slideSetSubmenu1')" style='fill:white' width='5'/>
<rect height='5' id='Collapse1' x='-10' y='-5' onclick="onCollapse(evt, 'slideSetSubmenu1')" style='fill:red; visibility:hidden' width='5'>
<set fill='freeze' attributeType='CSS' attributeName='visibility' dur='0s' to='hidden' begin='Collapse1.click'/>
<set fill='freeze' attributeType='CSS' attributeName='visibility' dur='0s' to='visible' begin='Expand1.click'/>
</rect>
<set fill='freeze' attributeType='CSS' attributeName='visibility' dur='0s' to='visible' begin='Collapse1.click'/>
<set fill='freeze' attributeType='CSS' attributeName='visibility' dur='0s' to='hidden' begin='Expand1.click'/>
</g>
<g style='visibility:hidden' id='slideSetSubmenu1'>
<text id='slide1-1MenuItem' x='10' y='20' onmouseout="onHighlightMenuItem(evt, 'false', 'slide1-1MenuItem')" onclick='onMenuItemSelected(evt, 2)' onmouseover="onHighlightMenuItem(evt, 'true', 'slide1-1MenuItem')" class='slideMenuItem'>Why Yet Another Grap...</text>
</g>
<g transform='translate(0, 20)'>
<g>
<text onclick='onMenuItemSelected(evt, 3)' class='slidesetMenuHeader' x='0' y='0'>The ABCs of SVG</text>
<g style='visibility:visible'>
<rect height='5' id='Expand2' x='-10' y='-5' onclick="onExpand(evt, 'slideSetSubmenu2')" style='fill:white' width='5'/>
<rect height='5' id='Collapse2' x='-10' y='-5' onclick="onCollapse(evt, 'slideSetSubmenu2')" style='fill:red; visibility:hidden' width='5'>
<set fill='freeze' attributeType='CSS' attributeName='visibility' dur='0s' to='hidden' begin='Collapse2.click'/>
<set fill='freeze' attributeType='CSS' attributeName='visibility' dur='0s' to='visible' begin='Expand2.click'/>
</rect>
<set fill='freeze' attributeType='CSS' attributeName='visibility' dur='0s' to='visible' begin='Collapse2.click'/>
<set fill='freeze' attributeType='CSS' attributeName='visibility' dur='0s' to='hidden' begin='Expand2.click'/>
</g>
<g style='visibility:hidden' id='slideSetSubmenu2'>
<text id='slide2-1MenuItem' x='10' y='20' onmouseout="onHighlightMenuItem(evt, 'false', 'slide2-1MenuItem')" onclick='onMenuItemSelected(evt, 4)' onmouseover="onHighlightMenuItem(evt, 'true', 'slide2-1MenuItem')" class='slideMenuItem'>SVG Features</text>
<text id='slide2-2MenuItem' x='10' y='40' onmouseout="onHighlightMenuItem(evt, 'false', 'slide2-2MenuItem')" onclick='onMenuItemSelected(evt, 5)' onmouseover="onHighlightMenuItem(evt, 'true', 'slide2-2MenuItem')" class='slideMenuItem'>SVG Sample Source</text>
<text id='slide2-3MenuItem' x='10' y='60' onmouseout="onHighlightMenuItem(evt, 'false', 'slide2-3MenuItem')" onclick='onMenuItemSelected(evt, 6)' onmouseover="onHighlightMenuItem(evt, 'true', 'slide2-3MenuItem')" class='slideMenuItem'>SVG Sample Output</text>
</g>
<g transform='translate(0, 20)'>
<g>
<text onclick='onMenuItemSelected(evt, 7)' class='slidesetMenuHeader' x='0' y='0'>The SVG Community</text>
<g style='visibility:visible'>
<rect height='5' id='Expand3' x='-10' y='-5' onclick="onExpand(evt, 'slideSetSubmenu3')" style='fill:white' width='5'/>
<rect height='5' id='Collapse3' x='-10' y='-5' onclick="onCollapse(evt, 'slideSetSubmenu3')" style='fill:red; visibility:hidden' width='5'>
<set fill='freeze' attributeType='CSS' attributeName='visibility' dur='0s' to='hidden' begin='Collapse3.click'/>
<set fill='freeze' attributeType='CSS' attributeName='visibility' dur='0s' to='visible' begin='Expand3.click'/>
</rect>
<set fill='freeze' attributeType='CSS' attributeName='visibility' dur='0s' to='visible' begin='Collapse3.click'/>
<set fill='freeze' attributeType='CSS' attributeName='visibility' dur='0s' to='hidden' begin='Expand3.click'/>
</g>
<g style='visibility:hidden' id='slideSetSubmenu3'>
<text id='slide3-1MenuItem' x='10' y='20' onmouseout="onHighlightMenuItem(evt, 'false', 'slide3-1MenuItem')" onclick='onMenuItemSelected(evt, 8)' onmouseover="onHighlightMenuItem(evt, 'true', 'slide3-1MenuItem')" class='slideMenuItem'>Some SVG Resources</text>
<text id='slide3-2MenuItem' x='10' y='40' onmouseout="onHighlightMenuItem(evt, 'false', 'slide3-2MenuItem')" onclick='onMenuItemSelected(evt, 9)' onmouseover="onHighlightMenuItem(evt, 'true', 'slide3-2MenuItem')" class='slideMenuItem'>Quote Them on it</text>
</g>
<animateTransform fill='freeze' id='translator' type='translate' from='0, 0' dur='1s' accumulate='none' attributeName='transform' attributeType='XML' additive='replace' begin='Expand2.click' to='0, 60'/>
<animateTransform fill='freeze' id='translator2' type='translate' from='0, 0' dur='1s' accumulate='sum' attributeName='transform' attributeType='XML' additive='sum' begin='Collapse2.click' to='0, -60'/>
</g>
</g>
<animateTransform fill='freeze' id='translator' type='translate' from='0, 0' dur='1s' accumulate='none' attributeName='transform' attributeType='XML' additive='replace' begin='Expand1.click' to='0, 20'/>
<animateTransform fill='freeze' id='translator2' type='translate' from='0, 0' dur='1s' accumulate='sum' attributeName='transform' attributeType='XML' additive='sum' begin='Collapse1.click' to='0, -20'/>
</g>
</g>
</g>
<g onclick='onNextSlide(evt)' style='visibility:hidden' id='slideShowCover'>
<defs>
<linearGradient spreadMethod='pad' id='backgroundPaint' x1='0' y2='768' x2='0' y1='0' gradientUnits='userSpaceOnUse'>
<stop offset='0%' style='stop-color:black; stop-opacity:1;'/>
<stop offset='25%' style='stop-color:rgb(103, 103, 157); stop-opacity:1;'/>
<stop offset='50%' style='stop-color:white; stop-opacity:1;'/>
<stop offset='75%' style='stop-color:rgb(103, 103, 157); stop-opacity:1;'/>
<stop offset='100%' style='stop-color:black; stop-opacity:1;'/>
</linearGradient>
<filter height='105%' id='dropShadow' filterUnits='objectBoundingBox' x='0%' width='105%' y='0%'>
<feGaussianBlur in='SourceAlpha' result='blur' stdDeviation='4'/>
<feOffset dy='4' dx='4' result='offsetBlur' in='blur'/>
<feFlood style='flood-color:black' result='solidBlack'/>
<feComposite in='solidBlack' in2='SourceAlpha' result='separation' operator='in'/>
<feOffset dy='-1' dx='-1' result='offsetSeparation' in='separation'/>
<feMerge>
<feMergeNode in='offsetBlur'/>
<feMergeNode in='offsetSeparation'/>
<feMergeNode in='SourceGraphic'/>
</feMerge>
</filter>
</defs>
<rect height='768' style='fill:url(#backgroundPaint)' width='1024'/>
<use xlink:href='#stripePattern' transform='scale(1024, 1)'/>
<g style='filter:url(#dropShadow)'>
<text class='slideCoverTitle' style='text-anchor:middle' x='512' y='300'>Introduction to SVG</text>
<g transform='translate(512, 490)' id='metadata' style='text-anchor:middle;'>
<text x='0' class='slideCoverSubTitle' y='0'>Uche Ogbuji</text>
<text x='0' class='slideCoverSubTitle' y='50'>Principal Consultant</text>
<text x='0' class='slideCoverSubTitle' y='100'>Fourthought Inc.</text>
<text x='0' class='slideCoverSubTitle' y='150'>Front Range XML Keiretsu</text>
</g>
</g>
</g>
<g onclick='onNextSlide(evt)' style='visibility:hidden' id='slidesetCover1'>
<rect height='768' style='fill:black' width='1024' x='0' y='0'/>
<rect height='768' style='fill:url(#menuBarPaint)' width='210' x='0' y='0'/>
<g transform='scale(210, 1)'>
<use xlink:href='#stripePattern'/>
</g>
<text x='240' class='slidesetCoverTitle' y='200'>Background and Motivation</text>
</g>
<g onclick='onNextSlide(evt)' style='visibility:hidden' id='slidesetCover2'>
<rect height='768' style='fill:black' width='1024' x='0' y='0'/>
<rect height='768' style='fill:url(#menuBarPaint)' width='210' x='0' y='0'/>
<g transform='scale(210, 1)'>
<use xlink:href='#stripePattern'/>
</g>
<text x='240' class='slidesetCoverTitle' y='200'>The ABCs of SVG</text>
</g>
<g onclick='onNextSlide(evt)' style='visibility:hidden' id='slidesetCover3'>
<rect height='768' style='fill:black' width='1024' x='0' y='0'/>
<rect height='768' style='fill:url(#menuBarPaint)' width='210' x='0' y='0'/>
<g transform='scale(210, 1)'>
<use xlink:href='#stripePattern'/>
</g>
<text x='240' class='slidesetCoverTitle' y='200'>The SVG Community</text>
</g>
<g id='slide1-1' style='visibility:hidden' class='slide'>
<text class='slideTitle' x='30' y='60'>Why Yet Another Graphics Format?</text>
<g><text x="240" y="150" class="itemClass">Leveraging the existing XML technology base</text></g>
<g><text x="240" y="185" class="itemClass">Integrating graphics into the semantic Web</text></g>
<g><text x="240" y="220" class="itemClass">Giving browsers access to image <tspan class='emphasis'>internals</tspan></text></g>
<g><text x="240" y="255" class="itemClass">Supporting the next generation of browsers</text></g>
</g>
<g id='slide2-1' style='visibility:hidden' class='slide'>
<text class='slideTitle' x='30' y='60'>SVG Features</text>
<text x='240' class='headingInline' y='150'>Basic Features</text>
<use class='listBullet' xlink:href='#bullet' x='240' y='185'/>
<g><text x="270" y="185" class="itemClass">Coordinate spaces and transforms</text></g>
<use class='listBullet' xlink:href='#bullet' x='240' y='220'/>
<g><text x="270" y="220" class="itemClass">Graphics primitives: ellipses, polygons, polylines, curves, etc.</text></g>
<use class='listBullet' xlink:href='#bullet' x='240' y='255'/>
<g><text x="270" y="255" class="itemClass">Stylesheets: CSS, XSL, etc.</text></g>
<text x='240' class='headingInline' y='290'>Advanced Features</text>
<use class='listBullet' xlink:href='#bullet' x='240' y='325'/>
<g><text x="270" y="325" class="itemClass">Raster filter effects</text></g>
<use class='listBullet' xlink:href='#bullet' x='240' y='360'/>
<g><text x="270" y="360" class="itemClass">Alpha masking</text></g>
<use class='listBullet' xlink:href='#bullet' x='240' y='395'/>
<g><text x="270" y="395" class="itemClass">Animation</text></g>
<use class='listBullet' xlink:href='#bullet' x='240' y='430'/>
<g><text x="270" y="430" class="itemClass">Zooming and Panning</text></g>
<use class='listBullet' xlink:href='#bullet' x='240' y='465'/>
<g><text x="270" y="465" class="itemClass">Scripting and extensibility</text></g>
</g>
<g id='slide2-2' style='visibility:hidden' class='slide'>
<text class='slideTitle' x='30' y='60'>SVG Sample Source</text>
<text x='240' class='preformattedInline' y='135'>
<?xml version="1.0"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20000802//EN"
"http://www.w3.org/TR/2000/CR-SVG-20000802/DTD/svg-20000802.dtd"
>
<svg width="800" height="800">
<desc>SVG Sample for SunWorld Article</desc>
<style type="text/css">
.Lagos { fill: white; stroke: green; stroke-width: 30 }
.ViaAppia { fill: none; stroke: black; stroke-width: 10 }
.OrthoLogos { font-size: 32; font-family: helvetica }
</style>
<ellipse transform="translate(500 200)" rx="250" ry="100"
style="fill: brown; stroke: yellow; stroke-width: 10"/>
<polygon transform="translate(100 200) rotate(45)"
class="Lagos"
points="350,75 379,161 469,161 397,215 423,301 350,250 277,
301 303,215 231,161 321,161"/>
<text class="OrthoLogos" x="400" y="400">TO KALON</text>
<path class="ViaAppia" d="M500,600 C500,500 650,500 650,600
S800,700 800,600"/>
</svg>
</text>
</g>
<g id='slide2-3' style='visibility:hidden' class='slide'>
<text class='slideTitle' x='30' y='60'>SVG Sample Output</text>
<g transform='translate(240, 135)'>
<svg height='10cm' width='10cm' viewBox='0 0 200 200'>
<desc>SVG Sample for SunWorld Article</desc>
<style type='text/css'>
.Lagos { fill: white; stroke: green; stroke-width: 30 }
.ViaAppia { fill: none; stroke: white; stroke-width: 10 }
.OrthoLogos { font-size: 32; font-family: helvetica; fill:white }
</style>
<ellipse transform='translate(500 200)' ry='100' rx='250' style='fill: brown; stroke: yellow; stroke-width: 10'/>
<polygon points='350,75 379,161 469,161 397,215 423,301 350,250 277, 301 303,215 231,161 321,161' transform='translate(100 200) rotate(45)' class='Lagos'/>
<text class='OrthoLogos' x='400' y='400'>TO KALON</text>
<path class='ViaAppia' d='M500,600 C500,500 650,500 650,600 S800,700 800,600'/>
</svg>
</g>
</g>
<g id='slide3-1' style='visibility:hidden' class='slide'>
<text class='slideTitle' x='30' y='60'>Some SVG Resources</text>
<g><text x="240" y="150" class="itemClass"><tspan class='linkStyle'>The W3C's SVG Page</tspan></text></g>
<g><text x="240" y="185" class="itemClass"><tspan class='linkStyle'>OpenDirectory SVG Links</tspan></text></g>
<g><text x="240" y="220" class="itemClass"><tspan class='linkStyle'>How to make slides like these</tspan></text></g>
</g>
<g id='slide3-2' style='visibility:hidden' class='slide'>
<text class='slideTitle' x='30' y='60'>Quote Them on it</text>
<text x='240' class='paraInline' y='150'>"Over twenty organizations, including Sun Microsystems, Adobe, Apple, IBM, and Kodak, have been involved in defining SVG."<tspan class='emphasis'> -- Vincent J. Hardy, Sun</tspan>
</text>
<text x='240' class='paraInline' y='185'>"I have been working with computer graphics for
over 25 years and split an immense amount of blood on the floor at
midnight. With SVG I can now do almost anything I want [except for 3D - in
which I also have a molecular interest]. And I suspect that I can stick
with it for the foreseeable future." <tspan class='emphasis'>-- Peter Murray-Rust, XML-DEV Founder</tspan>
</text>
<text x='240' class='paraInline' y='220'>"I envision a day where we have XHTML Web pages with SVG as the "chrome" of our interfaces--defining the buttons, the layers, the coloring, and the grid--where we can actually use a language that's XML-based rather than theses separate GIF files that can take so long to download. That's certainly one vision; that vision not just extending on the Web, on a monitor, but wireless onto my Palm Pilot or to print and other output as well." <tspan class='emphasis'>-- Steve Mulder, Razorfish</tspan>
</text>
</g>
</svg>"""
#"'
expected_1="""
<svg/>"""
| 51.010234 | 541 | 0.567367 |
42646da758d7d00689423c6bb8d4edd633b50938 | 232 | py | Python | src/2/2338.py | youngdaLee/Baekjoon | 7d858d557dbbde6603fe4e8af2891c2b0e1940c0 | [
"MIT"
] | 11 | 2020-09-20T15:17:11.000Z | 2022-03-17T12:43:33.000Z | src/2/2338.py | youngdaLee/Baekjoon | 7d858d557dbbde6603fe4e8af2891c2b0e1940c0 | [
"MIT"
] | 3 | 2021-10-30T07:51:36.000Z | 2022-03-09T05:19:23.000Z | src/2/2338.py | youngdaLee/Baekjoon | 7d858d557dbbde6603fe4e8af2891c2b0e1940c0 | [
"MIT"
] | 13 | 2021-01-21T03:19:08.000Z | 2022-03-28T10:44:58.000Z | """
2338.
: xCrypt0r
: Python 3
: 29,380 KB
: 72 ms
: 2020 9 13
"""
if __name__ == '__main__':
main()
| 12.888889 | 40 | 0.538793 |
4264be58cf46729f9ccb094d1db453583943d301 | 2,952 | py | Python | tests/ut/python/nn/test_activation.py | PowerOlive/mindspore | bda20724a94113cedd12c3ed9083141012da1f15 | [
"Apache-2.0"
] | 3,200 | 2020-02-17T12:45:41.000Z | 2022-03-31T20:21:16.000Z | tests/ut/python/nn/test_activation.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | 176 | 2020-02-12T02:52:11.000Z | 2022-03-28T22:15:55.000Z | tests/ut/python/nn/test_activation.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | 621 | 2020-03-09T01:31:41.000Z | 2022-03-30T03:43:19.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test Activations """
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import _cell_graph_executor
from ..ut_filter import non_graph_engine
class LogSoftmaxNet(nn.Cell):
def test_compile_relu():
net = Net1()
input_data = Tensor(np.array([[1.2, 2.1], [2.2, 3.2]], dtype=np.float32))
_cell_graph_executor.compile(net, input_data)
| 27.333333 | 80 | 0.661247 |
42667a983dfb48f00077636f4ff9f6c3c1fe62f9 | 743 | py | Python | sdk/python/tests/integration/feature_repos/universal/data_source_creator.py | marsishandsome/feast | 998e16945da240bfa73570cdb2c5e3639f892d34 | [
"Apache-2.0"
] | 1 | 2021-09-16T16:17:58.000Z | 2021-09-16T16:17:58.000Z | sdk/python/tests/integration/feature_repos/universal/data_source_creator.py | marsishandsome/feast | 998e16945da240bfa73570cdb2c5e3639f892d34 | [
"Apache-2.0"
] | null | null | null | sdk/python/tests/integration/feature_repos/universal/data_source_creator.py | marsishandsome/feast | 998e16945da240bfa73570cdb2c5e3639f892d34 | [
"Apache-2.0"
] | null | null | null | from abc import ABC, abstractmethod
from typing import Dict
import pandas as pd
from feast.data_source import DataSource
from feast.repo_config import FeastConfigBaseModel
| 22.515152 | 69 | 0.664872 |
42683ff20338aa58755c4a687ba9b5618ac5ee33 | 1,393 | py | Python | tests/interpreter/expression/var_assignment_interpreter_test.py | OtavioHenrique/yalul | ce99e32365ed5607527b9f2f39705ad5d9e20ba2 | [
"MIT"
] | 1 | 2021-04-01T20:22:36.000Z | 2021-04-01T20:22:36.000Z | tests/interpreter/expression/var_assignment_interpreter_test.py | OtavioHenrique/yalul | ce99e32365ed5607527b9f2f39705ad5d9e20ba2 | [
"MIT"
] | 1 | 2020-11-20T22:24:38.000Z | 2020-11-20T22:24:38.000Z | tests/interpreter/expression/var_assignment_interpreter_test.py | OtavioHenrique/yalul | ce99e32365ed5607527b9f2f39705ad5d9e20ba2 | [
"MIT"
] | null | null | null | from yalul.interpreters.environment import Environment
from yalul.interpreters.expressions.var_assignment_interpreter import VarAssignmentInterpreter
from yalul.interpreters.interpreter_errors import InterpreterErrors
| 34.825 | 116 | 0.676238 |
4268f94ca522ab0b564db536a3198008325ec23d | 2,547 | py | Python | backend/externals/events.py | crosspower/naruko | 4c524e2ef955610a711830bc86d730ffe4fc2bd8 | [
"MIT"
] | 17 | 2019-01-23T04:37:43.000Z | 2019-10-15T01:42:31.000Z | backend/externals/events.py | snickerjp/naruko | 4c524e2ef955610a711830bc86d730ffe4fc2bd8 | [
"MIT"
] | 1 | 2019-01-23T08:04:44.000Z | 2019-01-23T08:44:33.000Z | backend/externals/events.py | snickerjp/naruko | 4c524e2ef955610a711830bc86d730ffe4fc2bd8 | [
"MIT"
] | 6 | 2019-01-23T09:10:59.000Z | 2020-12-02T04:15:41.000Z | import boto3
from django.conf import settings
from backend.models import CloudWatchEvent
import json
| 28.617978 | 81 | 0.568512 |
426a3bed4febe19951912ab6a1ea3a6374609094 | 356 | py | Python | eg/deparse/example.py | KennethBlaney/rivescript-python | 87db472847ab526060afd9a5b8548e9689501a85 | [
"MIT"
] | null | null | null | eg/deparse/example.py | KennethBlaney/rivescript-python | 87db472847ab526060afd9a5b8548e9689501a85 | [
"MIT"
] | null | null | null | eg/deparse/example.py | KennethBlaney/rivescript-python | 87db472847ab526060afd9a5b8548e9689501a85 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Manipulate sys.path to be able to import converscript from this local git
# repository.
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))
from converscript import RiveScript
import json
bot = RiveScript()
bot.load_file("example.rive")
dep = bot.deparse()
print(json.dumps(dep, indent=2))
| 20.941176 | 75 | 0.735955 |
426a6f57e84f4626e97b52d506e5d77552f5cfca | 2,715 | py | Python | figuras/PycharmKayStatisticalReport/example_8_11.py | bor9/estudiando_el_kay | 6e07908b8b0b5a5166dadce30001e6100e8304c3 | [
"MIT"
] | null | null | null | figuras/PycharmKayStatisticalReport/example_8_11.py | bor9/estudiando_el_kay | 6e07908b8b0b5a5166dadce30001e6100e8304c3 | [
"MIT"
] | null | null | null | figuras/PycharmKayStatisticalReport/example_8_11.py | bor9/estudiando_el_kay | 6e07908b8b0b5a5166dadce30001e6100e8304c3 | [
"MIT"
] | 1 | 2021-11-02T05:27:27.000Z | 2021-11-02T05:27:27.000Z | import matplotlib.pyplot as plt
import numpy as np
from scipy import signal, linalg
from matplotlib import rc
from matplotlib import rcParams
__author__ = 'ernesto'
# if use latex or mathtext
rc('text', usetex=True)
rcParams['text.latex.preamble'] = [r"\usepackage{amsmath}"]
#respuesta al impulso deseada: sinc
N = 50 # numero par
fc = 0.1
nf = 1024
n = np.arange(-N/2, N/2+1)
N += 1
f = np.arange(nf)/(2 * nf)
# parmetros del filtro a disear
p = 10
q = 10
# respuesta al impulso
hd = 2 * fc * np.sinc(2 * fc * n) # * np.hanning(N)
# respuesta en frecuencia
_, Hd = signal.freqz(hd, a=1, worN=nf, whole=False, plot=None)
# estimacin de los coeficientes del denominador (a)
# hd = np.arange(N)
x = hd[q + 1:]
H = linalg.toeplitz(hd[q: N - 1], hd[q: q - p: -1])
# a_est = np.linalg.solve(H.T @ H, -H.T @ x)
epsilon = 1e-16
#epsilon = 0
a_est = linalg.solve(H.T @ H + epsilon * np.eye(p), -H.T @ x)
print("Nmero de Condicin 1: {}".format(np.linalg.cond(H.T @ H)))
h = hd[: q + 1]
H0 = linalg.toeplitz(np.concatenate(([0], hd[: q])), np.zeros((p, )))
b_est = h + H0 @ a_est
#print(h)
#print(H0)
# respuesta en frecuencia
a_est = np.concatenate(([1], a_est))
print(a_est)
print(b_est)
_, H_est = signal.freqz(b_est, a_est, worN=nf, whole=False, plot=None)
# respuesta al impulso
delta = np.zeros((N,))
delta[0] = 1
h_est = signal.lfilter(b_est, a_est, delta, axis=- 1, zi=None)
ms = 3
fs = 12
n = np.arange(N)
fig = plt.figure(0, figsize=(9, 5), frameon=False)
ax = plt.subplot2grid((8, 2), (0, 0), rowspan=6, colspan=1)
plt.xlim(0, N-1)
plt.ylim(np.amin(hd)-0.02, np.amax(hd)+0.02)
plt.plot(n, hd, linestyle='-', marker='s', color='k', markersize=ms, lw=1, label='${\\rm deseada}$')
plt.plot(n, h_est, linestyle='-', marker='s', color='r', markersize=ms, lw=1, label='${\\rm estimada}$')
leg = plt.legend(loc=1, frameon=False, fontsize=fs)
ax.set_xticklabels([])
ax.set_ylabel('${\\rm Respuesta\;al\;impulso}$', fontsize=fs)
ax = plt.subplot2grid((8, 2), (6, 0), rowspan=2, colspan=1)
e = hd-h_est
plt.xlim(0, N-1)
plt.ylim(np.amin(e)-0.001, np.amax(e)+0.001)
plt.plot(n, e, linestyle='-', marker='s', color='k', markersize=ms)
ax.set_xlabel(r'$n$', fontsize=fs)
ax.set_ylabel(r'$\epsilon[n]$', fontsize=fs)
ax = plt.subplot2grid((8, 2), (0, 1), rowspan=8, colspan=1)
plt.xlim(0, 0.5)
plt.ylim(-55, 8)
plt.plot(f, 10 * np.log10(np.abs(Hd)), 'k', label='${\\rm deseada}$')
plt.plot(f, 10 * np.log10(np.abs(H_est)), 'r', label='${\\rm estimada}$')
ax.set_xlabel('${\\rm Frecuencia\;normalizada}$', fontsize=fs)
ax.set_ylabel('${\\rm Respuesta\;en\;frecuencia\;(dB)}$', fontsize=fs)
leg = plt.legend(loc=1, frameon=False, fontsize=fs)
plt.savefig('example_8_11.pdf', bbox_inches='tight')
plt.show() | 29.835165 | 104 | 0.64825 |
426b013c87350379997d161bc0ecdefe4dd2b27e | 19,353 | py | Python | src/robotide/ui/treenodehandlers.py | crylearner/RIDE3X | 767f45b0c908f18ecc7473208def8dc7489f43b0 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2017-08-20T14:46:02.000Z | 2017-08-20T14:46:02.000Z | src/robotide/ui/treenodehandlers.py | crylearner/RIDE3X | 767f45b0c908f18ecc7473208def8dc7489f43b0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/robotide/ui/treenodehandlers.py | crylearner/RIDE3X | 767f45b0c908f18ecc7473208def8dc7489f43b0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
from robotide.controller.commands import (
RenameKeywordOccurrences, RemoveMacro, AddKeyword, AddTestCase, RenameTest,
CopyMacroAs, AddVariable, UpdateVariableName, RenameFile,
RenameResourceFile, DeleteFile, SortKeywords, Include, Exclude)
from robotide.controller.settingcontrollers import VariableController
from robotide.controller.macrocontrollers import (
TestCaseController, UserKeywordController)
from robotide.controller.filecontrollers import (
TestDataDirectoryController, ResourceFileController,
TestCaseFileController, ExcludedDirectoryController,
DirtyRobotDataException)
from robotide.editor.editordialogs import (
TestCaseNameDialog, UserKeywordNameDialog, ScalarVariableDialog,
ListVariableDialog, CopyUserKeywordDialog, DictionaryVariableDialog)
from robotide.publish import RideOpenVariableDialog
from robotide.ui.progress import LoadProgressObserver
from robotide.usages.UsageRunner import Usages, ResourceFileUsages
from .filedialogs import (
AddSuiteDialog, AddDirectoryDialog, ChangeFormatDialog, NewResourceDialog,
RobotFilePathDialog)
from robotide.utils import overrides
from robotide.widgets import PopupMenuItems
from .progress import RenameProgressObserver
from .resourcedialogs import ResourceRenameDialog, ResourceDeleteDialog
from robotide.ui.resourcedialogs import FolderDeleteDialog
class _CanBeRenamed(object):
class DirectoryHandler(_ActionHandler):
is_draggable = False
is_test_suite = False
can_be_rendered = False
_actions = [_ActionHandler._label_new_resource]
class TestDataHandler(_ActionHandler):
accepts_drag = lambda self, dragged: \
(isinstance(dragged, UserKeywordHandler) or
isinstance(dragged, VariableHandler))
is_draggable = False
is_test_suite = True
def has_been_modified_on_disk(self):
return self.item.has_been_modified_on_disk()
def do_drop(self, item):
self.controller.add_test_or_keyword(item)
def rename(self, new_name):
return False
def OnSortKeywords(self, event):
"""Sorts the keywords inside the treenode"""
self.controller.execute(SortKeywords())
def _rename_command(self, label):
raise NotImplementedError(self.__class__)
def _set_node_label(self, label):
self._tree.SetItemText(self._node, label)
class ResourceFileHandler(_FileHandlerThanCanBeRenamed, TestDataHandler):
is_test_suite = False
_actions = [_ActionHandler._label_new_user_keyword,
_ActionHandler._label_new_scalar,
_ActionHandler._label_new_list_variable,
_ActionHandler._label_new_dict_variable,
'---',
_ActionHandler._label_rename,
_ActionHandler._label_change_format,
_ActionHandler._label_sort_keywords,
_ActionHandler._label_find_usages,
_ActionHandler._label_delete]
class TestCaseFileHandler(_FileHandlerThanCanBeRenamed, TestDataHandler):
accepts_drag = lambda *args: True
_actions = [_ActionHandler._label_new_test_case,
_ActionHandler._label_new_user_keyword,
_ActionHandler._label_new_scalar,
_ActionHandler._label_new_list_variable,
_ActionHandler._label_new_dict_variable,
'---',
_ActionHandler._label_rename,
_ActionHandler._label_change_format,
_ActionHandler._label_sort_keywords,
_ActionHandler._label_delete,
'---',
_ActionHandler._label_select_all,
_ActionHandler._label_deselect_all,
_ActionHandler._label_select_failed_tests,
_ActionHandler._label_select_passed_tests
]
| 33.598958 | 80 | 0.661965 |