hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
834cfa268aa66defcd3a6263fa4f402f1287f7c1 | 2,732 | py | Python | azplugins/test-py/test_analyze_group_velocity.py | mphoward/azplugins | a4e3f92090dea78645b4e84cda96709cc9372ffa | [
"BSD-3-Clause"
] | 10 | 2019-02-27T16:13:33.000Z | 2020-02-21T01:07:08.000Z | azplugins/test-py/test_analyze_group_velocity.py | mphoward/azplugins | a4e3f92090dea78645b4e84cda96709cc9372ffa | [
"BSD-3-Clause"
] | 18 | 2019-02-26T17:22:15.000Z | 2020-04-22T20:20:43.000Z | azplugins/test-py/test_analyze_group_velocity.py | mphoward/azplugins | a4e3f92090dea78645b4e84cda96709cc9372ffa | [
"BSD-3-Clause"
] | 3 | 2019-06-18T18:15:42.000Z | 2020-02-21T01:07:16.000Z | # Copyright (c) 2018-2020, Michael P. Howard
# Copyright (c) 2021-2022, Auburn University
# This file is part of the azplugins project, released under the Modified BSD License.
import hoomd
from hoomd import md
hoomd.context.initialize()
try:
from hoomd import azplugins
except ImportError:
import azplugins
import unittest
import numpy as np
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
| 39.028571 | 112 | 0.63287 |
83516db70951f52393c19b6cbc942b802e4f1c1e | 1,310 | py | Python | tests/test_settings.py | jpadilla/apistar | 3e0faafd0d6a7c59e2b7a1e3017e15d005c5cc3a | [
"BSD-3-Clause"
] | 1 | 2021-07-07T13:14:20.000Z | 2021-07-07T13:14:20.000Z | tests/test_settings.py | jpadilla/apistar | 3e0faafd0d6a7c59e2b7a1e3017e15d005c5cc3a | [
"BSD-3-Clause"
] | null | null | null | tests/test_settings.py | jpadilla/apistar | 3e0faafd0d6a7c59e2b7a1e3017e15d005c5cc3a | [
"BSD-3-Clause"
] | null | null | null | from apistar import App, Route, TestClient
from apistar.settings import Setting, Settings
routes = [
Route('/settings/', 'GET', get_settings),
Route('/setting/', 'GET', get_setting),
]
settings = {
'ABC': 123,
'XYZ': 456
}
app = App(routes=routes, settings=settings)
client = TestClient(app)
| 21.129032 | 53 | 0.61145 |
8351e3e4666f0e2916bbcd985c19442107e57895 | 1,825 | py | Python | api/views/reminder_views.py | OlegKlimenko/Plamber | a3536b864d05abb6b6bba0f2971ab4b7b9c60db6 | [
"Apache-2.0"
] | 13 | 2017-03-30T12:19:35.000Z | 2019-12-09T03:15:22.000Z | api/views/reminder_views.py | OlegKlimenko/Plamber | a3536b864d05abb6b6bba0f2971ab4b7b9c60db6 | [
"Apache-2.0"
] | 213 | 2017-02-18T11:48:40.000Z | 2022-03-11T23:20:36.000Z | api/views/reminder_views.py | OlegKlimenko/Plamber | a3536b864d05abb6b6bba0f2971ab4b7b9c60db6 | [
"Apache-2.0"
] | 3 | 2018-06-17T11:54:49.000Z | 2019-10-22T16:19:28.000Z | # -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from ..serializers.request_serializers import GetReminderRequest, UpdateReminderRequest
from ..utils import invalid_data_response, validate_api_secret_key
from app.models import TheUser
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
| 35.096154 | 120 | 0.61863 |
8352e7131c0b9ae8dac198efa815d926f7e58c34 | 2,815 | py | Python | anaf/documents/migrations/0001_initial.py | tovmeod/anaf | 80e4a00532ce6f4ce76c5ffc858ff90c759a9879 | [
"BSD-3-Clause"
] | 2 | 2016-03-15T13:17:26.000Z | 2017-03-22T15:39:01.000Z | anaf/documents/migrations/0001_initial.py | tovmeod/anaf | 80e4a00532ce6f4ce76c5ffc858ff90c759a9879 | [
"BSD-3-Clause"
] | 4 | 2021-03-19T21:42:58.000Z | 2022-03-11T23:13:07.000Z | anaf/documents/migrations/0001_initial.py | tovmeod/anaf | 80e4a00532ce6f4ce76c5ffc858ff90c759a9879 | [
"BSD-3-Clause"
] | 4 | 2016-08-31T16:55:41.000Z | 2020-04-22T18:48:54.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import anaf.documents.models
import anaf.documents.files
| 36.558442 | 143 | 0.54032 |
83538fea40955920ad2d4b405a77ec50fa91b2b3 | 8,188 | py | Python | mbpo/models/utils.py | anyboby/ConstrainedMBPO | 036f4ffefc464e676a287c35c92cc5c0b8925fcf | [
"MIT"
] | 5 | 2020-02-12T17:09:09.000Z | 2021-09-29T16:06:40.000Z | mbpo/models/utils.py | anyboby/ConstrainedMBPO | 036f4ffefc464e676a287c35c92cc5c0b8925fcf | [
"MIT"
] | 10 | 2020-08-31T02:50:02.000Z | 2022-02-09T23:36:43.000Z | mbpo/models/utils.py | anyboby/ConstrainedMBPO | 036f4ffefc464e676a287c35c92cc5c0b8925fcf | [
"MIT"
] | 2 | 2022-03-15T01:45:26.000Z | 2022-03-15T06:46:47.000Z | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import tensorflow as tf
import numpy as np
EPS = 1e-10
def gaussian_kl_np(mu0, log_std0, mu1, log_std1):
    """Elementwise KL divergence D_KL(N(mu0, std0) || N(mu1, std1)).

    Each entry of the inputs is treated as an independent univariate
    Gaussian, so the output preserves the input shape.  The result is
    clipped to [0, 1/EPS] for numerical stability.
    """
    variance0 = np.exp(2 * log_std0)
    variance1 = np.exp(2 * log_std1)
    squared_shift = (mu1 - mu0) ** 2
    # Closed-form KL between two univariate Gaussians, with EPS guarding
    # against division by a vanishing variance.
    kl = 0.5 * ((squared_shift + variance0) / (variance1 + EPS) - 1) \
        + log_std1 - log_std0
    # Clip away tiny negatives introduced by the EPS term (and cap huge values).
    return np.clip(kl, 0, 1 / EPS)
def average_dkl(mu, std):
    """Average pairwise KL divergence over a set of univariate Gaussians.

    K(P1,...,Pk) = 1/(k(k-1)) * sum_{i,j} DKL(Pi || Pj)
    (Andrea Sgarro, "Informational divergence and the dissimilarity of
    probability distributions.")

    Distributions are expected along axis 0 and samples along axis 1;
    the output is reduced over axis 0.

    Args:
        mu: array-like means, shape (num_models, ...).
        std: array-like stds, same shape as mu.
    """
    # Work in log-space; clipping keeps log(0) from producing -inf.
    log_std = np.clip(np.log(std), -100, 1e8)
    assert mu.ndim >= 2 and log_std.ndim >= 2
    num_models = len(mu)
    # Accumulate KL over every ordered pair (the i == j diagonal
    # contributes ~0 after the clip inside gaussian_kl_np).
    total = None
    for i in range(num_models):
        for j in range(num_models):
            pair_kl = gaussian_kl_np(mu[i], log_std[i], mu[j], log_std[j])
            total = pair_kl if total is None else total + pair_kl
    # EPS keeps the division safe for the degenerate single-model case.
    return total / (num_models * (num_models - 1) + EPS)
def median_dkl(mu, std):
    """Median pairwise KL divergence over a set of univariate Gaussians.

    Computes DKL(Pi || Pj) for every ordered pair of distinct
    distributions and reduces the resulting stack with the elementwise
    median.

    Distributions are expected along axis 0 and samples along axis 1;
    the output is reduced over axis 0.

    Args:
        mu: array-like means, shape (num_models, ...).
        std: array-like stds, same shape as mu.
    """
    # Work in log-space; clipping keeps log(0) from producing -inf.
    log_std = np.clip(np.log(std), -100, 1e8)
    assert mu.ndim >= 2 and log_std.ndim >= 2
    num_models = len(mu)
    # One slot per ordered pair (i, j) with i != j.
    pair_kls = np.zeros(shape=(num_models * (num_models - 1),) + mu.shape[1:])
    slot = 0
    for i in range(num_models):
        for j in range(num_models):
            if i == j:
                continue
            pair_kls[slot] = gaussian_kl_np(mu[i], log_std[i], mu[j], log_std[j])
            slot += 1
    return np.median(pair_kls, axis=0)
| 34.116667 | 105 | 0.624695 |
8353ff3c9e015b9857f33992c111e498b4c778a1 | 6,391 | py | Python | ext/generate-models.py | gerhardtdatsomor/pytest-nunit | 8e27275337af3216a3bddc16e793ee9637902361 | [
"MIT"
] | null | null | null | ext/generate-models.py | gerhardtdatsomor/pytest-nunit | 8e27275337af3216a3bddc16e793ee9637902361 | [
"MIT"
] | null | null | null | ext/generate-models.py | gerhardtdatsomor/pytest-nunit | 8e27275337af3216a3bddc16e793ee9637902361 | [
"MIT"
] | null | null | null | """
A script for generating attrs-models from an XSD.
Built especially for this model. But feel-free to reuse elsewhere.
Licensed under MIT.
Written by Anthony Shaw.
"""
import logging
import xmlschema
import xmlschema.qnames
try:
import black
import click
except ImportError:
print("Install black and click before use.")
logging.basicConfig()
log = logging.getLogger("__name__")
log.setLevel(logging.DEBUG)
# Python reserved keywords. TODO : Sure this is in stdlib somewhere? maybe tokens
KEYWORDS = ["id", "type", "class", "if", "else", "and", "for", "not", "or", "filter"]
# Map XML atomic builtin types to Python std types
XS_ATOMIC_MAP = {
xmlschema.qnames.XSD_STRING: "str",
xmlschema.qnames.XSD_INTEGER: "int",
xmlschema.qnames.XSD_INT: "int",
xmlschema.qnames.XSD_BOOLEAN: "bool",
xmlschema.qnames.XSD_DECIMAL: "float",
}
# Make an Attrs attr.ib from an Element.
def make_attrib(attrib, type_, optional=False):
    """Render an ``attr.ib(...)`` declaration string for one XSD attribute.

    :param attrib: xmlschema attribute/element object; ``.name`` and ``.type``
        are read, and ``.use`` ("required"/"optional") when present.
    :param type_: type name recorded in the attribute's metadata dict.
    :param optional: force the attribute to be optional even when the XSD
        does not mark it ``use="optional"``.
    :return: `str`
    """
    # The metadata dict preserves the original XSD name/type for round-tripping.
    args = ["metadata={\"name\": '%s', \"type\": '%s', \"optional\": %s}" % (attrib.name, type_, optional)]
    # Put type hints on XSD atomic types
    if isinstance(attrib.type, xmlschema.validators.XsdAtomicBuiltin):
        # Map xs:string/xs:int/... to Python builtins; unknowns fall back to object.
        _atomic_type = XS_ATOMIC_MAP.get(attrib.type.name, "object")
        args.append("type={0}".format(_atomic_type))
        if hasattr(attrib, "use") and attrib.use == "required":
            args.append(
                "validator=attr.validators.instance_of({0})".format(_atomic_type)
            )
    elif isinstance(attrib.type, xmlschema.validators.XsdAtomicRestriction):
        # Restrictions only get validators when the schema requires the attribute.
        if hasattr(attrib, "use") and attrib.use == "required":
            # If type is an enumeration facet
            if (
                attrib.type.facets
                and xmlschema.qnames.XSD_ENUMERATION in attrib.type.facets
                and attrib.type.name
            ):
                # Validate membership in the named enumeration.
                args.append(
                    "validator=attr.validators.in_({0})".format(attrib.type.name)
                )
            # If simple restriction type, use the base type instead (this isn't java)
            elif attrib.type.base_type.name in (XS_ATOMIC_MAP.keys()):
                args.append(
                    "validator=attr.validators.instance_of({0})".format(
                        XS_ATOMIC_MAP.get(attrib.type.base_type.name, "object")
                    )
                )
            else:
                # Non-atomic base: validate against the restriction type itself.
                args.append(
                    "validator=attr.validators.instance_of({0})".format(
                        attrib.type.name
                    )
                )
    elif isinstance(attrib.type, xmlschema.validators.XsdComplexType):
        # Complex types are referenced by name only (forward reference as string).
        args.append("type='{0}'".format(attrib.type.name))
        # args.append('validator=attr.validators.instance_of({0})'.format(attrib.type.name))
    # Schema-level optionality overrides the caller's flag.
    if hasattr(attrib, "use") and attrib.use == "optional":
        optional = True
    if optional:
        # NOTE(review): attr.NOTHING means "no default", which still makes the
        # argument mandatory at construction time — presumably None or a factory
        # was intended for optional attributes; confirm against generated models.
        args.append("default=attr.NOTHING")
    # Sanitize the XSD name into a valid Python identifier.
    name = attrib.name.replace("-", "_")
    if name in KEYWORDS:
        # Avoid shadowing Python keywords/builtins by appending an underscore.
        name = name + "_"
    return "{0} = attr.ib({1})".format(name, ", ".join(args))
if __name__ == "__main__":
main()
| 34.923497 | 124 | 0.55985 |
8354f3e967b4c8a5432e55702c43dd8c0b61efde | 415 | py | Python | OrderService/Order/migrations/0003_order_payment_details.py | surajkendhey/Kart | 458bee955d1569372fc8b3facb2602063a6ec6f5 | [
"Apache-2.0"
] | null | null | null | OrderService/Order/migrations/0003_order_payment_details.py | surajkendhey/Kart | 458bee955d1569372fc8b3facb2602063a6ec6f5 | [
"Apache-2.0"
] | null | null | null | OrderService/Order/migrations/0003_order_payment_details.py | surajkendhey/Kart | 458bee955d1569372fc8b3facb2602063a6ec6f5 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.2 on 2020-10-18 09:41
from django.db import migrations
import jsonfield.fields
| 20.75 | 59 | 0.621687 |
8355344375b34bffec11d50ae6b74005d1c2e2fb | 1,166 | py | Python | src/deepproblog/examples/Forth/Sort/sort.py | vossenwout/gtadeepproblog | 65509b740518af422b96e84ef10716e0ac246e75 | [
"Apache-2.0"
] | 54 | 2021-06-23T08:03:23.000Z | 2022-03-10T01:02:43.000Z | src/deepproblog/examples/Forth/Sort/sort.py | Damzwan/deepproblog | 56bcf5208e79c17510b5d288068fabc6cd64f3cf | [
"Apache-2.0"
] | 2 | 2021-06-30T23:48:25.000Z | 2022-03-18T10:45:05.000Z | src/deepproblog/examples/Forth/Sort/sort.py | Damzwan/deepproblog | 56bcf5208e79c17510b5d288068fabc6cd64f3cf | [
"Apache-2.0"
] | 12 | 2021-06-30T10:47:52.000Z | 2022-03-09T23:51:48.000Z | import torch
from deepproblog.dataset import DataLoader, QueryDataset
from deepproblog.engines import ExactEngine
from deepproblog.evaluate import get_confusion_matrix
from deepproblog.examples.Forth import EncodeModule
from deepproblog.model import Model
from deepproblog.network import Network
from deepproblog.train import train_model
# Dataset split parameters baked into the data file names: models are
# trained on length-2 lists and evaluated on length-8 lists.
train = 2
test = 8
# Query datasets are plain text files of DeepProbLog queries.
train_queries = QueryDataset("data/train{}_test{}_train.txt".format(train, test))
dev_queries = QueryDataset("data/train{}_test{}_dev.txt".format(train, test))
test_queries = QueryDataset("data/train{}_test{}_test.txt".format(train, test))
# Small encoder network: 20 inputs, 20 hidden units, 2 outputs (swap / no swap).
fc1 = EncodeModule(20, 20, 2)
# Training model: the "swap_net" neural predicate is embedded in compare.pl,
# optimized with Adam at learning rate 1.0.
model = Model(
    "compare.pl",
    [Network(fc1, "swap_net", optimizer=torch.optim.Adam(fc1.parameters(), 1.0))],
)
model.set_engine(ExactEngine(model), cache=True)
# Evaluation model shares the trained weights (same fc1) but takes only the
# top-1 network output (k=1) and disables the engine cache.
test_model = Model("compare.pl", [Network(fc1, "swap_net", k=1)])
test_model.set_engine(ExactEngine(test_model), cache=False)
# Train for 40 epochs with batch size 16, logging every 50 iterations and
# reporting dev-set accuracy once per epoch.
train_obj = train_model(
    model,
    DataLoader(train_queries, 16),
    40,
    log_iter=50,
    test_iter=len(train_queries),
    test=lambda x: [
        ("Accuracy", get_confusion_matrix(test_model, dev_queries).accuracy())
    ],
)
| 29.897436 | 82 | 0.752144 |
835b2ca04f52a867e7d976e3a7d13af46d16320d | 624 | py | Python | adapters/base_adapter.py | juangallostra/moonboard | d4a35857d480ee4bed06faee44e0347e1070b6b8 | [
"MIT"
] | null | null | null | adapters/base_adapter.py | juangallostra/moonboard | d4a35857d480ee4bed06faee44e0347e1070b6b8 | [
"MIT"
] | null | null | null | adapters/base_adapter.py | juangallostra/moonboard | d4a35857d480ee4bed06faee44e0347e1070b6b8 | [
"MIT"
] | null | null | null | from models.problem import Problem
| 31.2 | 86 | 0.658654 |
835d3a5a9f1f473d9972b7066265ae37781c89a5 | 7,439 | py | Python | meteo_inversion_matrix.py | yandex-research/classification-measures | 210fbc107d5f41e64cc4e6990f0b970973d25995 | [
"Apache-2.0"
] | 6 | 2021-12-07T03:15:03.000Z | 2022-02-10T20:39:44.000Z | meteo_inversion_matrix.py | yandex-research/classification-measures | 210fbc107d5f41e64cc4e6990f0b970973d25995 | [
"Apache-2.0"
] | null | null | null | meteo_inversion_matrix.py | yandex-research/classification-measures | 210fbc107d5f41e64cc4e6990f0b970973d25995 | [
"Apache-2.0"
] | null | null | null | from glob import glob
from collections import defaultdict, Counter
import sys
import math
import numpy as np
import random
random.seed(42)
EPS = 1e-5
if len(sys.argv)<2 or sys.argv[1] not in ('10m', '2h'):
use_fcs = list(range(12))
elif sys.argv[1] == '10m':
use_fcs = (0,)
else:
use_fcs = (11,)
metrics_impl = dict([
('f1', lambda tp, fn, fp, tn: (2*tp)/(2*tp+fp+fn)),
('jaccard', lambda tp, fn, fp, tn: tp/(tp+fp+fn)),
('ba', lambda tp, fn, fp, tn: ((tp)/(tp+fn)+(tn)/(tn+fp))/2.),
('acc', lambda tp, fn, fp, tn: (tp+tn)/(tp+tn+fp+fn)),
('iba', lambda tp, fp, fn, tn: ((tp)/(tp+fn)+(tn)/(tn+fp))/2.),
('gm1', alt_gm1_bin),
('ce', lambda tp, fn, fp, tn:-alt_confent_bin4(tp, fn, fp, tn)),
('sba', alt_sba_bin),
('kappa', alt_cohen_bin4),
('cc', alt_mcc_bin),
('cd',alt_CD),
])
_cache = dict()
found_examples = defaultdict(list)
discr_examples = defaultdict(list)
sampled_metrics = defaultdict(dict)
for fn in glob('data/meteo/*.tsv'):
for idx, line in enumerate(open(fn, encoding='utf-8')):
if not idx: continue
exp_group, utc_date, tn, tp, fn, fp = line.strip().split('\t')
tn = list(map(int,tn.split(',')))
tp = list(map(int,tp.split(',')))
fn = list(map(int,fn.split(',')))
fp = list(map(int,fp.split(',')))
for fc in use_fcs:
sampled_metrics[(utc_date, fc)][exp_group] = get_bin_indices(tp[fc], fn[fc], fp[fc], tn[fc])
total = set()
for ds in sampled_metrics:
for i, (a1, m1) in enumerate(sampled_metrics[ds].items()):
for j, (a2, m2) in enumerate(sampled_metrics[ds].items()):
if i<j:
left_winners = []
right_winners = []
draw_cases = []
for m in metrics_impl:
if np.isnan(m1[m]) or np.isnan(m2[m]):
continue
if m1[m]>m2[m] and abs(m1[m]-m2[m])>EPS:
left_winners.append( (m,i,j) )
if m1[m]<m2[m] and abs(m1[m]-m2[m])>EPS:
right_winners.append( (m,i,j) )
if abs(m1[m]-m2[m])<=EPS:
draw_cases.append( (m,i,j) )
handle = frozenset((ds,a1,a2))
if left_winners and right_winners:
for r1 in left_winners:
for r2 in right_winners:
found_examples[handle].append( tuple(sorted([r1[0],r2[0]])) )
discr_examples[ tuple(sorted([r1[0],r2[0]])) ].append( handle )
elif left_winners and draw_cases:
for r1 in left_winners:
for r2 in draw_cases:
found_examples[handle].append( tuple(sorted([r1[0],r2[0]])) )
discr_examples[ tuple(sorted([r1[0],r2[0]])) ].append( handle )
elif right_winners and draw_cases:
for r1 in right_winners:
for r2 in draw_cases:
found_examples[handle].append( tuple(sorted([r1[0],r2[0]])) )
discr_examples[ tuple(sorted([r1[0],r2[0]])) ].append( handle )
else:
if handle not in found_examples:
found_examples[handle] = list()
print('total',len(found_examples))
print('\t'+'\t'.join(sorted(metrics_impl)))
for r1 in sorted(metrics_impl):
r = [r1]
for r2 in sorted(metrics_impl):
n = len(discr_examples[
tuple(sorted([r1,r2]))
]
)
if n:
r.append( str( n ) )
else:
r.append( '' )
print('\t'.join(r))
| 29.058594 | 104 | 0.491329 |
835de6ecaa9ce8488f1f8c676c899a539e8ca67c | 1,217 | py | Python | terrain_following/src/image_processor.py | ZhiangChen/ros_vision | 4c8e6580f6b3ab05d8d782a5a0abdbdf44b0c2de | [
"MIT"
] | null | null | null | terrain_following/src/image_processor.py | ZhiangChen/ros_vision | 4c8e6580f6b3ab05d8d782a5a0abdbdf44b0c2de | [
"MIT"
] | 1 | 2019-12-07T00:48:36.000Z | 2019-12-07T00:48:36.000Z | terrain_following/src/image_processor.py | ZhiangChen/ros_vision | 4c8e6580f6b3ab05d8d782a5a0abdbdf44b0c2de | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Zhiang Chen
Nov 2019
"""
import rospy
from sensor_msgs.msg import Image
from sensor_msgs.msg import Imu
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import cv2
if __name__ == '__main__':
rospy.init_node('image_processor', anonymous=True)
IMP = Image_Processor()
IMP.start_recording("/home/zhiang/Pictures/terrain_boulder/")
try:
rospy.spin()
except rospy.ROSInterruptException:
print("Node killed!") | 28.97619 | 78 | 0.659819 |
835e51b25ab23118471502fce1356174cfc1f9cc | 137 | py | Python | lab_4/start.py | AnastasiaZheleznyakova/2020-2-level-labs | 926c7abde05f545f27d09a4d96b8014d5a668789 | [
"MIT"
] | null | null | null | lab_4/start.py | AnastasiaZheleznyakova/2020-2-level-labs | 926c7abde05f545f27d09a4d96b8014d5a668789 | [
"MIT"
] | null | null | null | lab_4/start.py | AnastasiaZheleznyakova/2020-2-level-labs | 926c7abde05f545f27d09a4d96b8014d5a668789 | [
"MIT"
] | null | null | null | if __name__ == '__main__':
pass
RESULT = 1
# DO NOT REMOVE NEXT LINE - KEEP IT INTENTIONALLY LAST
assert RESULT == 1, ''
| 22.833333 | 58 | 0.613139 |
835f4e7f9614427e618dd0d65cdbcc8a97ccc269 | 157 | py | Python | testtarget.py | epopisces/template_api_wrapper | e581eb31f6123ca2d93803453f2a1ab25c3c1981 | [
"MIT"
] | null | null | null | testtarget.py | epopisces/template_api_wrapper | e581eb31f6123ca2d93803453f2a1ab25c3c1981 | [
"MIT"
] | null | null | null | testtarget.py | epopisces/template_api_wrapper | e581eb31f6123ca2d93803453f2a1ab25c3c1981 | [
"MIT"
] | null | null | null |
toolname_tool = 'example'
tln = ToolNameAPI()
the_repo = "reponame"
author = "authorname"
profile = "authorprofile" | 15.7 | 25 | 0.713376 |
835f677bc91f7df84f3075940f43de8d60abf297 | 84 | py | Python | learning/__init__.py | aleisalem/Maat | 702c88a6a86f0b56e504df8f4d7ba18e8a39c887 | [
"Apache-2.0"
] | 4 | 2019-10-11T12:19:29.000Z | 2020-08-06T21:45:10.000Z | learning/__init__.py | aleisalem/Maat | 702c88a6a86f0b56e504df8f4d7ba18e8a39c887 | [
"Apache-2.0"
] | null | null | null | learning/__init__.py | aleisalem/Maat | 702c88a6a86f0b56e504df8f4d7ba18e8a39c887 | [
"Apache-2.0"
] | 1 | 2021-01-05T11:50:22.000Z | 2021-01-05T11:50:22.000Z | __all__ = ["feature_extraction", "hmm_learner", "scikit_learners", "string_kernel"]
| 42 | 83 | 0.761905 |
835f9384035a1bd549616f5ba14cfd3f214b0f26 | 2,509 | py | Python | Metrics/reporter.py | augdomingues/SPEX | 412034eb662b6cac466d7c96ac04c399ff2617c5 | [
"CC0-1.0"
] | null | null | null | Metrics/reporter.py | augdomingues/SPEX | 412034eb662b6cac466d7c96ac04c399ff2617c5 | [
"CC0-1.0"
] | null | null | null | Metrics/reporter.py | augdomingues/SPEX | 412034eb662b6cac466d7c96ac04c399ff2617c5 | [
"CC0-1.0"
] | 1 | 2021-09-14T06:28:07.000Z | 2021-09-14T06:28:07.000Z | from os.path import join
from math import ceil
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.rcParams["font.size"] = 14
plt.rcParams["figure.figsize"] = [15, 8]
| 31.759494 | 95 | 0.539657 |
835fad4f88ef3c40e122cf474982c0fc18e561fb | 21,322 | py | Python | contrib/translate_test.py | csilvers/kake | 51465b12d267a629dd61778918d83a2a134ec3b2 | [
"MIT"
] | null | null | null | contrib/translate_test.py | csilvers/kake | 51465b12d267a629dd61778918d83a2a134ec3b2 | [
"MIT"
] | null | null | null | contrib/translate_test.py | csilvers/kake | 51465b12d267a629dd61778918d83a2a134ec3b2 | [
"MIT"
] | null | null | null | """Tests the translate_* files."""
from __future__ import absolute_import
import cPickle
import os
import shutil
from shared.testutil import testsize
from third_party import polib
from kake import compile_all_pot
from kake import compile_small_mo
from kake import translate_handlebars
from kake import translate_javascript
from kake import translate_util
import kake.lib.compile_rule
import kake.lib.testutil
import kake.make
class TestBuildForFakeLang(TestBase):
"""Test make.build() using the special codepath for fake languages."""
# Note we don't make any fake boxes.po file at all. kake
# automatically extracts the strings from the input file,
# fake-translates them, and inserts them into the translated file,
# all on the fly.
_BOX = u'\u25a1'.encode('utf-8')
_UTF8_GRAPHING_LINEAR_EQUATIONS = '%s %s %s' % (_BOX * len('GRAPHING'),
_BOX * len('LINEAR'),
_BOX * len('EQUATIONS'))
_S_GRAPHING_LINEAR_EQUATIONS = '%s %s %s' % (r'\u25a1' * len('GRAPHING'),
r'\u25a1' * len('LINEAR'),
r'\u25a1' * len('EQUATIONS'))
_S_HELLO_WORLD = '%s %%(where)s' % (r'\u25a1' * len('HELLO'))
_S_ADDITION_1 = '%s %s' % (r'\u25a1' * len('ADDITION'),
r'\u25a1' * len('1'))
| 45.657388 | 79 | 0.535409 |
83609972eefc4a7ddcf363f8e89f7408af9885f3 | 115 | py | Python | backend/backend/urls.py | lucasrafaldini/SpaceXLaunches | abcd3686677bc3e25903bc2ed1e084e00090ba33 | [
"MIT"
] | 1 | 2021-09-21T17:51:11.000Z | 2021-09-21T17:51:11.000Z | backend/backend/urls.py | lucasrafaldini/SpaceXLaunches | abcd3686677bc3e25903bc2ed1e084e00090ba33 | [
"MIT"
] | 9 | 2020-06-06T00:42:57.000Z | 2022-02-27T17:29:18.000Z | backend/backend/urls.py | lucasrafaldini/SpaceXLaunches | abcd3686677bc3e25903bc2ed1e084e00090ba33 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.urls import include
urlpatterns = [url("api/", include("api.urls"))]
| 23 | 48 | 0.73913 |
8360d7fa109831cb6f6bca8e81a94ffadbaafea4 | 223 | py | Python | primitives_ubc/regCCFS/__init__.py | tonyjo/ubc_primitives | bc94a403f176fe28db2a9ac9d1a48cb9db021f90 | [
"Apache-2.0"
] | null | null | null | primitives_ubc/regCCFS/__init__.py | tonyjo/ubc_primitives | bc94a403f176fe28db2a9ac9d1a48cb9db021f90 | [
"Apache-2.0"
] | 4 | 2020-07-19T00:45:29.000Z | 2020-12-10T18:25:41.000Z | primitives_ubc/regCCFS/__init__.py | tonyjo/ubc_primitives | bc94a403f176fe28db2a9ac9d1a48cb9db021f90 | [
"Apache-2.0"
] | 1 | 2021-04-30T18:13:49.000Z | 2021-04-30T18:13:49.000Z | from .ccfsReg import CanonicalCorrelationForestsRegressionPrimitive
__all__ = ['CanonicalCorrelationForestsRegressionPrimitive']
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__) # type: ignore
| 27.875 | 67 | 0.847534 |
836261e038e930e3ea31c7a6628689b091e5c9d1 | 8,108 | py | Python | src/compas/numerical/dr/dr_numpy.py | arpastrana/compas | ed677a162c14dbe562c82d72f370279259faf7da | [
"MIT"
] | null | null | null | src/compas/numerical/dr/dr_numpy.py | arpastrana/compas | ed677a162c14dbe562c82d72f370279259faf7da | [
"MIT"
] | 9 | 2019-09-11T08:53:19.000Z | 2019-09-16T08:35:39.000Z | src/compas/numerical/dr/dr_numpy.py | Licini/compas | 34f65adb3d0abc3f403312ffba62aa76f3376292 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from numpy import array
from numpy import isnan
from numpy import isinf
from numpy import ones
from numpy import zeros
from scipy.linalg import norm
from scipy.sparse import diags
from compas.numerical import connectivity_matrix
from compas.numerical import normrow
__all__ = ['dr_numpy']
K = [
[0.0],
[0.5, 0.5],
[0.5, 0.0, 0.5],
[1.0, 0.0, 0.0, 1.0],
]
def dr_numpy(vertices, edges, fixed, loads, qpre, fpre, lpre, linit, E, radius,
             callback=None, callback_args=None, **kwargs):
    """Implementation of the dynamic relaxation method for form finding and
    analysis of articulated networks of axial-force members.

    Parameters
    ----------
    vertices : list
        XYZ coordinates of the vertices.
    edges : list
        Connectivity of the vertices.
    fixed : list
        Indices of the fixed vertices.
    loads : list
        XYZ components of the loads on the vertices.
    qpre : list
        Prescribed force densities in the edges.
    fpre : list
        Prescribed forces in the edges.
    lpre : list
        Prescribed lengths of the edges.
    linit : list
        Initial length of the edges.
    E : list
        Stiffness of the edges.
    radius : list
        Radius of the edges.
    callback : callable, optional
        User-defined function that is called at every iteration.
    callback_args : tuple, optional
        Additional arguments passed to the callback.

    Other Parameters
    ----------------
    kmax : int, optional
        Maximum number of iterations (default ``10000``).
    dt : float, optional
        Pseudo time step (default ``1.0``).
    tol1 : float, optional
        Convergence tolerance on the residual forces (default ``1e-3``).
    tol2 : float, optional
        Convergence tolerance on the displacements (default ``1e-6``).
    c : float, optional
        Damping coefficient passed to ``Coeff`` (default ``0.1``).

    Returns
    -------
    xyz : array
        XYZ coordinates of the equilibrium geometry.
    q : array
        Force densities in the edges.
    f : array
        Forces in the edges.
    l : array
        Lengths of the edges
    r : array
        Residual forces.

    Notes
    -----
    For more info, see [1]_.

    NOTE(review): this function depends on ``Coeff`` and a Runge-Kutta
    integrator ``rk`` that are not visible in this view (presumably a
    module-level class and a nested helper) — confirm against the full
    module before modifying.

    References
    ----------
    .. [1] De Laet L., Veenendaal D., Van Mele T., Mollaert M. and Block P.,
        *Bending incorporated: designing tension structures by integrating bending-active elements*,
        Proceedings of Tensinet Symposium 2013,Istanbul, Turkey, 2013.

    Examples
    --------
    >>>
    """
    # --------------------------------------------------------------------------
    # callback
    # --------------------------------------------------------------------------
    if callback:
        assert callable(callback), 'The provided callback is not callable.'
    # --------------------------------------------------------------------------
    # configuration
    # --------------------------------------------------------------------------
    kmax = kwargs.get('kmax', 10000)
    dt = kwargs.get('dt', 1.0)
    tol1 = kwargs.get('tol1', 1e-3)
    tol2 = kwargs.get('tol2', 1e-6)
    coeff = Coeff(kwargs.get('c', 0.1))  # damping coefficients derived from c
    ca = coeff.a
    cb = coeff.b  # not used in the visible body; presumably consumed by `rk` — TODO confirm
    # --------------------------------------------------------------------------
    # attribute lists
    # --------------------------------------------------------------------------
    num_v = len(vertices)
    num_e = len(edges)
    # indices of the vertices that are free to move
    free = list(set(range(num_v)) - set(fixed))
    # --------------------------------------------------------------------------
    # attribute arrays
    # --------------------------------------------------------------------------
    x = array(vertices, dtype=float).reshape((-1, 3))  # m
    p = array(loads, dtype=float).reshape((-1, 3))  # kN
    qpre = array(qpre, dtype=float).reshape((-1, 1))
    fpre = array(fpre, dtype=float).reshape((-1, 1))  # kN
    lpre = array(lpre, dtype=float).reshape((-1, 1))  # m
    linit = array(linit, dtype=float).reshape((-1, 1))  # m
    E = array(E, dtype=float).reshape((-1, 1))  # kN/mm2 => GPa
    radius = array(radius, dtype=float).reshape((-1, 1))  # mm
    # --------------------------------------------------------------------------
    # sectional properties
    # --------------------------------------------------------------------------
    A = 3.14159 * radius ** 2  # mm2 (circular cross-section)
    EA = E * A  # kN (axial stiffness)
    # --------------------------------------------------------------------------
    # create the connectivity matrices
    # after spline edges have been aligned
    # --------------------------------------------------------------------------
    C = connectivity_matrix(edges, 'csr')
    Ct = C.transpose()
    Ci = C[:, free]
    Cit = Ci.transpose()
    # Ct2 holds the elementwise square of Ct, used for the mass estimate below.
    Ct2 = Ct.copy()
    Ct2.data **= 2
    # --------------------------------------------------------------------------
    # if none of the initial lengths are set,
    # set the initial lengths to the current lengths
    # --------------------------------------------------------------------------
    if all(linit == 0):
        linit = normrow(C.dot(x))
    # --------------------------------------------------------------------------
    # initial values
    # --------------------------------------------------------------------------
    q = ones((num_e, 1), dtype=float)  # force densities
    l = normrow(C.dot(x))  # noqa: E741  # current edge lengths
    f = q * l  # edge forces
    v = zeros((num_v, 3), dtype=float)  # vertex velocities
    r = zeros((num_v, 3), dtype=float)  # residual forces
    # --------------------------------------------------------------------------
    # helpers
    # --------------------------------------------------------------------------
    # NOTE(review): the `rk` integrator used below appears to have been defined
    # here in the original source — confirm against the full module.
    # --------------------------------------------------------------------------
    # start iterating
    # --------------------------------------------------------------------------
    for k in range(kmax):
        # print(k)
        # Combine the prescribed force, length and elastic contributions
        # into equivalent force densities.
        q_fpre = fpre / l
        q_lpre = f / lpre
        q_EA = EA * (l - linit) / (linit * l)
        # Zero out the contributions where the prescription is absent
        # (division by zero above yields inf/nan).
        q_lpre[isinf(q_lpre)] = 0
        q_lpre[isnan(q_lpre)] = 0
        q_EA[isinf(q_EA)] = 0
        q_EA[isnan(q_EA)] = 0
        q = qpre + q_fpre + q_lpre + q_EA
        Q = diags([q[:, 0]], [0])
        D = Cit.dot(Q).dot(C)  # stiffness-like operator; presumably used by `rk` — TODO confirm
        # Fictitious nodal mass for the explicit time integration.
        mass = 0.5 * dt ** 2 * Ct2.dot(qpre + q_fpre + q_lpre + EA / linit)
        # RK
        # Integrate velocities with a 4-stage Runge-Kutta step, then advance
        # positions of the free vertices only.
        x0 = x.copy()
        v0 = ca * v.copy()
        dv = rk(x0, v0, steps=4)
        v[free] = v0[free] + dv[free]
        dx = v * dt
        x[free] = x0[free] + dx[free]
        # update
        u = C.dot(x)
        l = normrow(u)  # noqa: E741
        f = q * l
        r = p - Ct.dot(Q).dot(u)  # residual = applied loads - internal forces
        # crits
        crit1 = norm(r[free])  # residual force criterion
        crit2 = norm(dx[free])  # displacement criterion
        # callback
        if callback:
            callback(k, x, [crit1, crit2], callback_args)
        # convergence
        if crit1 < tol1:
            break
        if crit2 < tol2:
            break
    return x, q, f, l, r
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
| 34.067227 | 103 | 0.398249 |
8362ff8cbd0cfe323812bd28b2652a04191c1026 | 462 | py | Python | getColorFromNumber.py | clean-code-craft-tcq-1/modular-python-preetikadyan | 0775e7e62edbbb0d7c3506b2bd072562a44d7f8b | [
"MIT"
] | null | null | null | getColorFromNumber.py | clean-code-craft-tcq-1/modular-python-preetikadyan | 0775e7e62edbbb0d7c3506b2bd072562a44d7f8b | [
"MIT"
] | null | null | null | getColorFromNumber.py | clean-code-craft-tcq-1/modular-python-preetikadyan | 0775e7e62edbbb0d7c3506b2bd072562a44d7f8b | [
"MIT"
] | null | null | null | from main import *
| 42 | 61 | 0.779221 |
83638a87db865ba288d6ca6639d585c34a522b6e | 98 | py | Python | raspy/io/pwm_channel.py | cyrusbuilt/RasPy | 1e34840cc90ea7f19317e881162209d3d819eb09 | [
"MIT"
] | null | null | null | raspy/io/pwm_channel.py | cyrusbuilt/RasPy | 1e34840cc90ea7f19317e881162209d3d819eb09 | [
"MIT"
] | null | null | null | raspy/io/pwm_channel.py | cyrusbuilt/RasPy | 1e34840cc90ea7f19317e881162209d3d819eb09 | [
"MIT"
] | null | null | null | """The PWM channel to use."""
CHANNEL0 = 0
"""Channel zero."""
CHANNEL1 = 1
"""Channel one."""
| 10.888889 | 29 | 0.581633 |
836427cbbb35895144687ddb6c7a92d78b59686e | 10,157 | py | Python | xls/dslx/interpreter/concrete_type_helpers.py | hafixo/xls | 21009ec2165d04d0037d9cf3583b207949ef7a6d | [
"Apache-2.0"
] | null | null | null | xls/dslx/interpreter/concrete_type_helpers.py | hafixo/xls | 21009ec2165d04d0037d9cf3583b207949ef7a6d | [
"Apache-2.0"
] | null | null | null | xls/dslx/interpreter/concrete_type_helpers.py | hafixo/xls | 21009ec2165d04d0037d9cf3583b207949ef7a6d | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
#
# Copyright 2020 The XLS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for dealing with concrete types and interpreter values."""
from typing import Tuple, Optional
from absl import logging
from xls.dslx import bit_helpers
from xls.dslx.interpreter.errors import FailureError
from xls.dslx.interpreter.value import Tag
from xls.dslx.interpreter.value import Value
from xls.dslx.python import cpp_ast as ast
from xls.dslx.python.cpp_concrete_type import ArrayType
from xls.dslx.python.cpp_concrete_type import BitsType
from xls.dslx.python.cpp_concrete_type import ConcreteType
from xls.dslx.python.cpp_concrete_type import EnumType
from xls.dslx.python.cpp_concrete_type import is_ubits
from xls.dslx.python.cpp_concrete_type import TupleType
from xls.dslx.python.cpp_pos import Span
from xls.dslx.python.cpp_scanner import Keyword
from xls.dslx.python.cpp_scanner import Token
from xls.dslx.python.cpp_scanner import TokenKind
from xls.dslx.python.cpp_scanner import TYPE_KEYWORDS_TO_SIGNEDNESS_AND_BITS
def _strength_reduce_enum(type_: ast.Enum, bit_count: int) -> ConcreteType:
"""Turns an enum to corresponding (bits) concrete type (w/signedness).
For example, used in conversion checks.
Args:
type_: AST node (enum definition) to convert.
bit_count: The bit count of the underlying bits type for the enum
definition, as determined by type inference or interpretation.
Returns:
The concrete type that represents the enum's underlying bits type.
"""
assert isinstance(type_, ast.Enum), type_
signed = type_.signed
assert isinstance(signed, bool), type_
return BitsType(signed, bit_count)
def concrete_type_from_value(value: Value) -> ConcreteType:
"""Returns the concrete type of 'value'.
Note that:
* Non-zero-length arrays are assumed (for zero length arrays we can't
currently deduce the type from the value because the concrete element type
is not reified in the array value.
* Enums are strength-reduced to their underlying bits (storage) type.
Args:
value: Value to determine the concrete type for.
"""
if value.tag in (Tag.UBITS, Tag.SBITS):
signed = value.tag == Tag.SBITS
return BitsType(signed, value.bits_payload.bit_count)
elif value.tag == Tag.ARRAY:
element_type = concrete_type_from_value(value.array_payload.index(0))
return ArrayType(element_type, len(value))
elif value.tag == Tag.TUPLE:
return TupleType(
tuple(concrete_type_from_value(m) for m in value.tuple_members))
else:
assert value.tag == Tag.ENUM, value
return _strength_reduce_enum(value.type_, value.bits_payload.bit_count)
def concrete_type_from_element_type_and_dims(
element_type: ConcreteType, dims: Tuple[int, ...]) -> ConcreteType:
"""Wraps element_type in arrays according to `dims`, dims[0] as most minor."""
t = element_type
for dim in dims:
t = ArrayType(t, dim)
return t
def concrete_type_from_dims(primitive: Token,
dims: Tuple[int, ...]) -> 'ConcreteType':
"""Creates a concrete type from the primitive type token and dims.
Args:
primitive: The token holding the primitive type as a keyword.
dims: Dimensions to apply to the primitive type; e.g. () is scalar, (5) is
1-D array of 5 elements having the primitive type.
Returns:
A concrete type object.
Raises:
ValueError: If the primitive keyword is unrecognized or dims are empty.
"""
if primitive.is_keyword(Keyword.BITS) or primitive.is_keyword(Keyword.UN):
base_type = BitsType(signed=False, size=dims[-1])
elif primitive.is_keyword(Keyword.SN):
base_type = BitsType(signed=True, size=dims[-1])
else:
assert primitive.kind == TokenKind.KEYWORD
signedness, bits = TYPE_KEYWORDS_TO_SIGNEDNESS_AND_BITS[primitive.value]
element_type = BitsType(signedness, bits)
while dims:
dims, minor = dims[:-1], dims[-1]
element_type = ArrayType(element_type, minor)
return element_type
result = concrete_type_from_element_type_and_dims(base_type, dims[:-1])
logging.vlog(4, '%r %r => %r', primitive, dims, result)
return result
def _value_compatible_with_type(module: ast.Module, type_: ConcreteType,
value: Value) -> bool:
"""Returns whether value is compatible with type_ (recursively)."""
assert isinstance(value, Value), value
if isinstance(type_, TupleType) and value.is_tuple():
return all(
_value_compatible_with_type(module, ct, m)
for ct, m in zip(type_.get_unnamed_members(), value.tuple_members))
if isinstance(type_, ArrayType) and value.is_array():
et = type_.get_element_type()
return all(
_value_compatible_with_type(module, et, m)
for m in value.array_payload.elements)
if isinstance(type_, EnumType) and value.tag == Tag.ENUM:
return type_.get_nominal_type(module) == value.type_
if isinstance(type_,
BitsType) and not type_.signed and value.tag == Tag.UBITS:
return value.bits_payload.bit_count == type_.get_total_bit_count()
if isinstance(type_, BitsType) and type_.signed and value.tag == Tag.SBITS:
return value.bits_payload.bit_count == type_.get_total_bit_count()
if value.tag == Tag.ENUM and isinstance(type_, BitsType):
return (value.type_.get_signedness() == type_.signed and
value.bits_payload.bit_count == type_.get_total_bit_count())
if value.tag == Tag.ARRAY and is_ubits(type_):
flat_bit_count = value.array_payload.flatten().bits_payload.bit_count
return flat_bit_count == type_.get_total_bit_count()
if isinstance(type_, EnumType) and value.is_bits():
return (type_.signed == (value.tag == Tag.SBITS) and
type_.get_total_bit_count() == value.get_bit_count())
raise NotImplementedError(type_, value)
def concrete_type_accepts_value(module: ast.Module, type_: ConcreteType,
value: Value) -> bool:
"""Returns whether 'value' conforms to this concrete type."""
if value.tag == Tag.UBITS:
return (isinstance(type_, BitsType) and not type_.signed and
value.bits_payload.bit_count == type_.get_total_bit_count())
if value.tag == Tag.SBITS:
return (isinstance(type_, BitsType) and type_.signed and
value.bits_payload.bit_count == type_.get_total_bit_count())
if value.tag in (Tag.ARRAY, Tag.TUPLE, Tag.ENUM):
return _value_compatible_with_type(module, type_, value)
raise NotImplementedError(type_, value)
def concrete_type_convert_value(module: ast.Module, type_: ConcreteType,
value: Value, span: Span,
enum_values: Optional[Tuple[Value, ...]],
enum_signed: Optional[bool]) -> Value:
"""Converts 'value' into a value of this concrete type."""
logging.vlog(3, 'Converting value %s to type %s', value, type_)
if value.tag == Tag.UBITS and isinstance(type_, ArrayType):
bits_per_element = type_.get_element_type().get_total_bit_count().value
bits = value.bits_payload
return Value.make_array(
tuple(bit_slice_value_at_index(i) for i in range(type_.size.value)))
if (isinstance(type_, EnumType) and
value.tag in (Tag.UBITS, Tag.SBITS, Tag.ENUM) and
value.get_bit_count() == type_.get_total_bit_count()):
# Check that the bits we're converting from are present in the enum type
# we're converting to.
nominal_type = type_.get_nominal_type(module)
for enum_value in enum_values:
if value.bits_payload == enum_value.bits_payload:
break
else:
raise FailureError(
span,
'Value is not valid for enum {}: {}'.format(nominal_type.identifier,
value))
return Value.make_enum(value.bits_payload, nominal_type)
if (value.tag == Tag.ENUM and isinstance(type_, BitsType) and
type_.get_total_bit_count() == value.get_bit_count()):
constructor = Value.make_sbits if type_.signed else Value.make_ubits
bit_count = type_.get_total_bit_count().value
return constructor(bit_count, value.bits_payload.value)
if value.tag == Tag.UBITS:
return zero_ext()
if value.tag == Tag.SBITS:
return sign_ext()
if value.tag == Tag.ENUM:
assert enum_signed is not None
return sign_ext() if enum_signed else zero_ext()
# If we're converting an array into bits, flatten the array payload.
if value.tag == Tag.ARRAY and isinstance(type_, BitsType):
return value.array_payload.flatten()
if concrete_type_accepts_value(module, type_, value): # Vacuous conversion.
return value
raise FailureError(
span,
'Interpreter failure: cannot convert value %s (of type %s) to type %s' %
(value, concrete_type_from_value(value), type_))
| 39.216216 | 80 | 0.713104 |
83655e2b69ea8d94a79a740f034c0045712e2d9d | 97 | py | Python | ABC/131/a.py | fumiyanll23/AtCoder | 362ca9fcacb5415c1458bc8dee5326ba2cc70b65 | [
"MIT"
] | null | null | null | ABC/131/a.py | fumiyanll23/AtCoder | 362ca9fcacb5415c1458bc8dee5326ba2cc70b65 | [
"MIT"
] | null | null | null | ABC/131/a.py | fumiyanll23/AtCoder | 362ca9fcacb5415c1458bc8dee5326ba2cc70b65 | [
"MIT"
] | null | null | null | S = str(input())
if S[0]==S[1] or S[1]==S[2] or S[2]==S[3]:
print("Bad")
else:
print("Good") | 16.166667 | 42 | 0.494845 |
83663c35c9a7b7d5e9b6087f0826f94225c82bb6 | 15,354 | py | Python | model_neu/optimized/hyperutils.py | lelange/cu-ssp | 9f1a7abf79a2fb6ef2ae0f37de79469c2dc3488f | [
"MIT"
] | null | null | null | model_neu/optimized/hyperutils.py | lelange/cu-ssp | 9f1a7abf79a2fb6ef2ae0f37de79469c2dc3488f | [
"MIT"
] | null | null | null | model_neu/optimized/hyperutils.py | lelange/cu-ssp | 9f1a7abf79a2fb6ef2ae0f37de79469c2dc3488f | [
"MIT"
] | null | null | null | from bson import json_util
import json
import os
import numpy as np
import tensorflow as tf
from keras.layers.core import K #import keras.backend as K
import time
import pandas as pd
import multiprocessing
#
from keras.preprocessing import text, sequence
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
RESULTS_DIR = "results/"
MAXLEN_SEQ = 700
data_root = '/nosave/lange/cu-ssp/data/'
residue_list = list('ACEDGFIHKMLNQPSRTWVYX') + ['NoSeq']
q8_list = list('LBEGIHST') + ['NoSeq']
"""Json utils to print, save and load training results."""
def print_json(result):
"""Pretty-print a jsonable structure (e.g.: result)."""
print(json.dumps(
result,
default=json_util.default, sort_keys=True,
indent=4, separators=(',', ': ')
))
def save_json_result(model_name, result):
"""Save json to a directory and a filename."""
result_name = '{}.txt.json'.format(model_name)
if not os.path.exists(RESULTS_DIR):
os.makedirs(RESULTS_DIR)
with open(os.path.join(RESULTS_DIR, result_name), 'w') as f:
json.dump(
result, f,
default=json_util.default, sort_keys=True,
indent=4, separators=(',', ': ')
)
def load_json_result(best_result_name):
"""Load json from a path (directory + filename)."""
result_path = os.path.join(RESULTS_DIR, best_result_name)
with open(result_path, 'r') as f:
return json.JSONDecoder().decode(
f.read()
# default=json_util.default,
# separators=(',', ': ')
)
# transformations for pssm:
# transformations for hmm:
# for both:
# Computes and returns the n-grams of a particular sequence, defaults to trigrams
## metrics for this task:
# The custom accuracy metric used for this task
def kullback_leibler_divergence(y_true, y_pred):
'''Calculates the Kullback-Leibler (KL) divergence between prediction
and target values.
'''
y_true = K.clip(y_true, K.epsilon(), 1)
y_pred = K.clip(y_pred, K.epsilon(), 1)
return K.sum(y_true * K.log(y_true / y_pred), axis=-1)
def matthews_correlation(y_true, y_pred):
'''Calculates the Matthews correlation coefficient measure for quality
of binary classification problems.
'''
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
y_pred_neg = 1 - y_pred_pos
y_pos = K.round(K.clip(y_true, 0, 1))
y_neg = 1 - y_pos
tp = K.sum(y_pos * y_pred_pos)
tn = K.sum(y_neg * y_pred_neg)
fp = K.sum(y_neg * y_pred_pos)
fn = K.sum(y_pos * y_pred_neg)
numerator = (tp * tn - fp * fn)
denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
return numerator / (denominator + K.epsilon())
def precision(y_true, y_pred):
'''Calculates the precision, a metric for multi-label classification of
how many selected items are relevant.
'''
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def recall(y_true, y_pred):
'''Calculates the recall, a metric for multi-label classification of
how many relevant items are selected.
'''
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def fbeta_score(y_true, y_pred, beta=1):
'''Calculates the F score, the weighted harmonic mean of precision and recall.
This is useful for multi-label classification, where input samples can be
classified as sets of labels. By only using accuracy (precision) a model
would achieve a perfect score by simply assigning every class to every
input. In order to avoid this, a metric should penalize incorrect class
assignments as well (recall). The F-beta score (ranged from 0.0 to 1.0)
computes this, as a weighted mean of the proportion of correct class
assignments vs. the proportion of incorrect class assignments.
With beta = 1, this is equivalent to a F-measure. With beta < 1, assigning
correct classes becomes more important, and with beta > 1 the metric is
instead weighted towards penalizing incorrect class assignments.
'''
if beta < 0:
raise ValueError('The lowest choosable beta is zero (only precision).')
# If there are no true positives, fix the F score at 0 like sklearn.
if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
return 0
p = precision(y_true, y_pred)
r = recall(y_true, y_pred)
bb = beta ** 2
fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
return fbeta_score
# losses:
def nll(y_true, y_pred):
""" Negative log likelihood. """
# keras.losses.binary_crossentropy give the mean
# over the last axis. we require the sum
return K.sum(K.binary_crossentropy(y_true, y_pred), axis=-1)
'''
def get_data(npy_path, normalize_profiles):
# daten durcheinander wrfeln?
data = np.load(npy_path+'.npy')
max_len = 700
data_reshape = data.reshape(data.shape[0], 700, -1)
residue_onehot = data_reshape[:,:,0:22]
residue_q8_onehot = data_reshape[:,:,22:31]
profile = data_reshape[:,:,35:57]
#pad profiles to same length
zero_arr = np.zeros((profile.shape[0], max_len - profile.shape[1], profile.shape[2]))
profile_padded = np.concatenate([profile, zero_arr], axis=1)
residue_array = np.array(residue_list)[residue_onehot.argmax(2)]
q8_array = np.array(q8_list)[residue_q8_onehot.argmax(2)]
residue_str_list = []
q8_str_list = []
for vec in residue_array:
x = ''.join(vec[vec != 'NoSeq'])
residue_str_list.append(x)
for vec in q8_array:
x = ''.join(vec[vec != 'NoSeq'])
q8_str_list.append(x)
id_list = np.arange(1, len(residue_array) + 1)
len_list = np.array([len(x) for x in residue_str_list])
train_df = pd.DataFrame({'id': id_list, 'len': len_list, 'input': residue_str_list, 'expected': q8_str_list})
input_one_hot = residue_onehot
q8_onehot = residue_q8_onehot
train_input_seqs, train_target_seqs= train_df[['input', 'expected']][(train_df.len <= 700)].values.T
input_seqs
input_pssm = profile_padded
#SPTERE::
#nput_hmm = None
#rsa_onehot = None; output_data = [q8_onehot, rsa_onehot]
#input_data = [input_one_hot, input_seqs, input_pssm, input_hmm]
input_data = [input_one_hot, input_seqs, input_pssm]
output_data = q8_onehot
return input_data, output_data
'''
# fit_on_texts Updates internal vocabulary based on a list of texts
# texts_to_sequences Transforms each text in texts to a sequence of integers, 0 is reserved for padding
#fertig, nur get_data noch machen
def load_6133_filted():
'''
TRAIN data Cullpdb+profile_6133_filtered
Test data CB513\CASP10\CASP11
'''
print("Loading train data (Cullpdb_filted)...")
data = np.load()
data = np.reshape(data, (-1, 700, 57))
# print data.shape
datahot = data[:, :, 0:21] # sequence feature
# print 'sequence feature',dataonehot[1,:3,:]
datapssm = data[:, :, 35:56] # profile feature
# print 'profile feature',datapssm[1,:3,:]
labels = data[:, :, 22:30] # secondary struture label , 8-d
# shuffle data
# np.random.seed(2018)
num_seqs, seqlen, feature_dim = np.shape(data)
num_classes = labels.shape[2]
seq_index = np.arange(0, num_seqs) #
np.random.shuffle(seq_index)
# train data
trainhot = datahot[seq_index[:5278]] # 21
trainlabel = labels[seq_index[:5278]] # 8
trainpssm = datapssm[seq_index[:5278]] # 21
# val data
vallabel = labels[seq_index[5278:5534]] # 8
valpssm = datapssm[seq_index[5278:5534]] # 21
valhot = datahot[seq_index[5278:5534]] # 21
train_hot = np.ones((trainhot.shape[0], trainhot.shape[1]))
for i in xrange(trainhot.shape[0]):
for j in xrange(trainhot.shape[1]):
if np.sum(trainhot[i, j, :]) != 0:
train_hot[i, j] = np.argmax(trainhot[i, j, :])
val_hot = np.ones((valhot.shape[0], valhot.shape[1]))
for i in xrange(valhot.shape[0]):
for j in xrange(valhot.shape[1]):
if np.sum(valhot[i, j, :]) != 0:
val_hot[i, j] = np.argmax(valhot[i, j, :])
solvindex = range(33, 35)
trainsolvlabel = data[:5600, :, solvindex]
trainsolvvalue = trainsolvlabel[:, :, 0] * 2 + trainsolvlabel[:, :, 1]
trainsolvlabel = np.zeros((trainsolvvalue.shape[0], trainsolvvalue.shape[1], 4))
for i in xrange(trainsolvvalue.shape[0]):
for j in xrange(trainsolvvalue.shape[1]):
if np.sum(trainlabel[i, j, :]) != 0:
trainsolvlabel[i, j, trainsolvvalue[i, j]] = 1
return train_hot, trainpssm, trainlabel, val_hot, valpssm, vallabel
| 36.557143 | 127 | 0.681972 |
8369920fc0165d90314e66e5b7970c7cffdf56d6 | 106 | py | Python | spark_application/transformations/__init__.py | ketanvatsalya/pyspark_project_template | 72f6cc843ce04cbbf15eaf49c2435b7f31366194 | [
"MIT"
] | null | null | null | spark_application/transformations/__init__.py | ketanvatsalya/pyspark_project_template | 72f6cc843ce04cbbf15eaf49c2435b7f31366194 | [
"MIT"
] | null | null | null | spark_application/transformations/__init__.py | ketanvatsalya/pyspark_project_template | 72f6cc843ce04cbbf15eaf49c2435b7f31366194 | [
"MIT"
] | null | null | null | """
Package to hold the Transformation Classes
"""
from . import base
from . import spend_per_department
| 15.142857 | 42 | 0.764151 |
836a1f95f9bc7256c74547e4b46165f7f107b034 | 286 | py | Python | test_service.py | jgawrilo/qcr_ci | bd4c192444e03a551e3c5f4f0a275a4c029294de | [
"Apache-2.0"
] | 1 | 2020-03-05T13:27:39.000Z | 2020-03-05T13:27:39.000Z | test_service.py | jgawrilo/qcr_ci | bd4c192444e03a551e3c5f4f0a275a4c029294de | [
"Apache-2.0"
] | null | null | null | test_service.py | jgawrilo/qcr_ci | bd4c192444e03a551e3c5f4f0a275a4c029294de | [
"Apache-2.0"
] | null | null | null | import requests
import json
headers = {'Content-Type': 'application/json'}
data = json.load(open("./test_input2.json"))
url = "http://localhost:5001/api/impact"
response = requests.post(url,data=json.dumps({"data":data}),headers=headers)
print json.dumps(response.json(),indent=2)
| 22 | 76 | 0.727273 |
836a92d066a5c850634a4179920f5c67049059c7 | 16,969 | py | Python | google/appengine/ext/datastore_admin/backup_pb2.py | vladushakov987/appengine_python3 | 0dd481c73e2537a50ee10f1b79cd65938087e555 | [
"Apache-2.0"
] | null | null | null | google/appengine/ext/datastore_admin/backup_pb2.py | vladushakov987/appengine_python3 | 0dd481c73e2537a50ee10f1b79cd65938087e555 | [
"Apache-2.0"
] | null | null | null | google/appengine/ext/datastore_admin/backup_pb2.py | vladushakov987/appengine_python3 | 0dd481c73e2537a50ee10f1b79cd65938087e555 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
import google
from google.net.proto2.python.public import descriptor as _descriptor
from google.net.proto2.python.public import message as _message
from google.net.proto2.python.public import reflection as _reflection
from google.net.proto2.python.public import symbol_database as _symbol_database
from google.net.proto2.proto import descriptor_pb2
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='apphosting/ext/datastore_admin/backup.proto',
package='apphosting.ext.datastore_admin',
serialized_pb=_b('\n+apphosting/ext/datastore_admin/backup.proto\x12\x1e\x61pphosting.ext.datastore_admin\"\x8c\x01\n\x06\x42\x61\x63kup\x12?\n\x0b\x62\x61\x63kup_info\x18\x01 \x01(\x0b\x32*.apphosting.ext.datastore_admin.BackupInfo\x12\x41\n\tkind_info\x18\x02 \x03(\x0b\x32..apphosting.ext.datastore_admin.KindBackupInfo\"Q\n\nBackupInfo\x12\x13\n\x0b\x62\x61\x63kup_name\x18\x01 \x01(\t\x12\x17\n\x0fstart_timestamp\x18\x02 \x01(\x03\x12\x15\n\rend_timestamp\x18\x03 \x01(\x03\"\x8c\x01\n\x0eKindBackupInfo\x12\x0c\n\x04kind\x18\x01 \x02(\t\x12\x0c\n\x04\x66ile\x18\x02 \x03(\t\x12\x43\n\rentity_schema\x18\x03 \x01(\x0b\x32,.apphosting.ext.datastore_admin.EntitySchema\x12\x19\n\nis_partial\x18\x04 \x01(\x08:\x05\x66\x61lse\"\x90\x05\n\x0c\x45ntitySchema\x12\x0c\n\x04kind\x18\x01 \x01(\t\x12\x41\n\x05\x66ield\x18\x02 \x03(\x0b\x32\x32.apphosting.ext.datastore_admin.EntitySchema.Field\x1a\xb2\x01\n\x04Type\x12\x0f\n\x07is_list\x18\x01 \x01(\x08\x12R\n\x0eprimitive_type\x18\x02 \x03(\x0e\x32:.apphosting.ext.datastore_admin.EntitySchema.PrimitiveType\x12\x45\n\x0f\x65mbedded_schema\x18\x03 \x03(\x0b\x32,.apphosting.ext.datastore_admin.EntitySchema\x1aj\n\x05\x46ield\x12\x0c\n\x04name\x18\x01 \x02(\t\x12?\n\x04type\x18\x02 \x03(\x0b\x32\x31.apphosting.ext.datastore_admin.EntitySchema.Type\x12\x12\n\nfield_name\x18\x03 \x01(\t\"\x8d\x02\n\rPrimitiveType\x12\t\n\x05\x46LOAT\x10\x00\x12\x0b\n\x07INTEGER\x10\x01\x12\x0b\n\x07\x42OOLEAN\x10\x02\x12\n\n\x06STRING\x10\x03\x12\r\n\tDATE_TIME\x10\x04\x12\n\n\x06RATING\x10\x05\x12\x08\n\x04LINK\x10\x06\x12\x0c\n\x08\x43\x41TEGORY\x10\x07\x12\x10\n\x0cPHONE_NUMBER\x10\x08\x12\x12\n\x0ePOSTAL_ADDRESS\x10\t\x12\t\n\x05\x45MAIL\x10\n\x12\r\n\tIM_HANDLE\x10\x0b\x12\x0c\n\x08\x42LOB_KEY\x10\x0c\x12\x08\n\x04TEXT\x10\r\x12\x08\n\x04\x42LOB\x10\x0e\x12\x0e\n\nSHORT_BLOB\x10\x0f\x12\x08\n\x04USER\x10\x10\x12\r\n\tGEO_POINT\x10\x11\x12\r\n\tREFERENCE\x10\x12\x42\x14\x10\x02 \x02(\x02\x42\x0c\x42\x61\x63kupProtos')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ENTITYSCHEMA_PRIMITIVETYPE = _descriptor.EnumDescriptor(
name='PrimitiveType',
full_name='apphosting.ext.datastore_admin.EntitySchema.PrimitiveType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='FLOAT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTEGER', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BOOLEAN', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STRING', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATE_TIME', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RATING', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LINK', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CATEGORY', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PHONE_NUMBER', index=8, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POSTAL_ADDRESS', index=9, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EMAIL', index=10, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IM_HANDLE', index=11, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BLOB_KEY', index=12, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TEXT', index=13, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BLOB', index=14, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SHORT_BLOB', index=15, number=15,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='USER', index=16, number=16,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GEO_POINT', index=17, number=17,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REFERENCE', index=18, number=18,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=836,
serialized_end=1105,
)
_sym_db.RegisterEnumDescriptor(_ENTITYSCHEMA_PRIMITIVETYPE)
_BACKUP = _descriptor.Descriptor(
name='Backup',
full_name='apphosting.ext.datastore_admin.Backup',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='backup_info', full_name='apphosting.ext.datastore_admin.Backup.backup_info', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kind_info', full_name='apphosting.ext.datastore_admin.Backup.kind_info', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=80,
serialized_end=220,
)
_BACKUPINFO = _descriptor.Descriptor(
name='BackupInfo',
full_name='apphosting.ext.datastore_admin.BackupInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='backup_name', full_name='apphosting.ext.datastore_admin.BackupInfo.backup_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='start_timestamp', full_name='apphosting.ext.datastore_admin.BackupInfo.start_timestamp', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='end_timestamp', full_name='apphosting.ext.datastore_admin.BackupInfo.end_timestamp', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=222,
serialized_end=303,
)
_KINDBACKUPINFO = _descriptor.Descriptor(
name='KindBackupInfo',
full_name='apphosting.ext.datastore_admin.KindBackupInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='kind', full_name='apphosting.ext.datastore_admin.KindBackupInfo.kind', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='file', full_name='apphosting.ext.datastore_admin.KindBackupInfo.file', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='entity_schema', full_name='apphosting.ext.datastore_admin.KindBackupInfo.entity_schema', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_partial', full_name='apphosting.ext.datastore_admin.KindBackupInfo.is_partial', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=306,
serialized_end=446,
)
_ENTITYSCHEMA_TYPE = _descriptor.Descriptor(
name='Type',
full_name='apphosting.ext.datastore_admin.EntitySchema.Type',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='is_list', full_name='apphosting.ext.datastore_admin.EntitySchema.Type.is_list', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='primitive_type', full_name='apphosting.ext.datastore_admin.EntitySchema.Type.primitive_type', index=1,
number=2, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='embedded_schema', full_name='apphosting.ext.datastore_admin.EntitySchema.Type.embedded_schema', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=547,
serialized_end=725,
)
_ENTITYSCHEMA_FIELD = _descriptor.Descriptor(
name='Field',
full_name='apphosting.ext.datastore_admin.EntitySchema.Field',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='apphosting.ext.datastore_admin.EntitySchema.Field.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='apphosting.ext.datastore_admin.EntitySchema.Field.type', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='field_name', full_name='apphosting.ext.datastore_admin.EntitySchema.Field.field_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=727,
serialized_end=833,
)
_ENTITYSCHEMA = _descriptor.Descriptor(
name='EntitySchema',
full_name='apphosting.ext.datastore_admin.EntitySchema',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='kind', full_name='apphosting.ext.datastore_admin.EntitySchema.kind', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='field', full_name='apphosting.ext.datastore_admin.EntitySchema.field', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_ENTITYSCHEMA_TYPE, _ENTITYSCHEMA_FIELD, ],
enum_types=[
_ENTITYSCHEMA_PRIMITIVETYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=449,
serialized_end=1105,
)
_BACKUP.fields_by_name['backup_info'].message_type = _BACKUPINFO
_BACKUP.fields_by_name['kind_info'].message_type = _KINDBACKUPINFO
_KINDBACKUPINFO.fields_by_name['entity_schema'].message_type = _ENTITYSCHEMA
_ENTITYSCHEMA_TYPE.fields_by_name['primitive_type'].enum_type = _ENTITYSCHEMA_PRIMITIVETYPE
_ENTITYSCHEMA_TYPE.fields_by_name['embedded_schema'].message_type = _ENTITYSCHEMA
_ENTITYSCHEMA_TYPE.containing_type = _ENTITYSCHEMA
_ENTITYSCHEMA_FIELD.fields_by_name['type'].message_type = _ENTITYSCHEMA_TYPE
_ENTITYSCHEMA_FIELD.containing_type = _ENTITYSCHEMA
_ENTITYSCHEMA.fields_by_name['field'].message_type = _ENTITYSCHEMA_FIELD
_ENTITYSCHEMA_PRIMITIVETYPE.containing_type = _ENTITYSCHEMA
DESCRIPTOR.message_types_by_name['Backup'] = _BACKUP
DESCRIPTOR.message_types_by_name['BackupInfo'] = _BACKUPINFO
DESCRIPTOR.message_types_by_name['KindBackupInfo'] = _KINDBACKUPINFO
DESCRIPTOR.message_types_by_name['EntitySchema'] = _ENTITYSCHEMA
Backup = _reflection.GeneratedProtocolMessageType('Backup', (_message.Message,), dict(
DESCRIPTOR = _BACKUP,
__module__ = 'google.appengine.ext.datastore_admin.backup_pb2'
))
_sym_db.RegisterMessage(Backup)
BackupInfo = _reflection.GeneratedProtocolMessageType('BackupInfo', (_message.Message,), dict(
DESCRIPTOR = _BACKUPINFO,
__module__ = 'google.appengine.ext.datastore_admin.backup_pb2'
))
_sym_db.RegisterMessage(BackupInfo)
KindBackupInfo = _reflection.GeneratedProtocolMessageType('KindBackupInfo', (_message.Message,), dict(
DESCRIPTOR = _KINDBACKUPINFO,
__module__ = 'google.appengine.ext.datastore_admin.backup_pb2'
))
_sym_db.RegisterMessage(KindBackupInfo)
EntitySchema = _reflection.GeneratedProtocolMessageType('EntitySchema', (_message.Message,), dict(
Type = _reflection.GeneratedProtocolMessageType('Type', (_message.Message,), dict(
DESCRIPTOR = _ENTITYSCHEMA_TYPE,
__module__ = 'google.appengine.ext.datastore_admin.backup_pb2'
))
,
Field = _reflection.GeneratedProtocolMessageType('Field', (_message.Message,), dict(
DESCRIPTOR = _ENTITYSCHEMA_FIELD,
__module__ = 'google.appengine.ext.datastore_admin.backup_pb2'
))
,
DESCRIPTOR = _ENTITYSCHEMA,
__module__ = 'google.appengine.ext.datastore_admin.backup_pb2'
))
_sym_db.RegisterMessage(EntitySchema)
_sym_db.RegisterMessage(EntitySchema.Type)
_sym_db.RegisterMessage(EntitySchema.Field)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\020\002 \002(\002B\014BackupProtos'))
| 37.376652 | 1,971 | 0.736755 |
836acb8a4b706f8933f3b1012b5068f029201a8e | 11,254 | py | Python | PSBChart_support.py | georgepruitt/PSBChart | ee31497ffb12f818bab7ec750425f9fc7259c0f8 | [
"Apache-2.0"
] | 1 | 2019-08-02T06:36:05.000Z | 2019-08-02T06:36:05.000Z | PSBChart_support.py | schkr/PSBChart | bf19c2632491f18ba6ee6b3337bcb118350b9b3e | [
"Apache-2.0"
] | 1 | 2018-02-07T21:20:43.000Z | 2018-02-07T21:20:43.000Z | PSBChart_support.py | schkr/PSBChart | bf19c2632491f18ba6ee6b3337bcb118350b9b3e | [
"Apache-2.0"
] | 1 | 2019-08-02T06:35:30.000Z | 2019-08-02T06:35:30.000Z | #! /usr/bin/env python
#
# Support module generated by PAGE version 4.10
# In conjunction with Tcl version 8.6
# Jan 12, 2018 04:09:34 PM
import turtle
from turtle import TurtleScreen, RawTurtle, TK
from tkinter.filedialog import askopenfilename
import tkinter as tk
import os.path
import datetime
import csv
import sys
from PSBChart import ManageTrades
try:
from Tkinter import *
except ImportError:
from tkinter import *
try:
import ttk
py3 = 0
except ImportError:
import tkinter.ttk as ttk
py3 = 1
d = list()
dt = list()
o = list()
h = list()
l = list()
c = list()
v = list()
oi = list()
tradeDate = list()
tradeVal1 = list()
tradeType = list()
tradeSize = list()
tradeNtryOrXit = list()
tradePrice = list()
highestHigh = 0
lowestLow = 99999999
root = tk.Tk()
#root.withdraw()
##s = tk.ScrollBar(root)
T = tk.Text(root,height=10,width=50)
##s.pack(side=tk.RIGHT, fill = tk.Y)
T.pack(side=tk.RIGHT, fill = tk.Y)
##s.config(command=T.yview)
##T.config(yscrollcommand.set)
if __name__ == '__main__':
import PSBChart
PSBChart.vp_start_gui()
| 34.521472 | 150 | 0.494846 |
55c1580b2b075823f72830e0bcd2511007db68b9 | 9,790 | py | Python | test/low_use_test/test_reporter.py | KeithWhitley/LUAU | d7df6836e7c9c0ddc4099b9a17f7e0727eeeb179 | [
"Apache-2.0"
] | 1 | 2020-10-16T13:02:36.000Z | 2020-10-16T13:02:36.000Z | test/low_use_test/test_reporter.py | KeithWhitley/LUAU | d7df6836e7c9c0ddc4099b9a17f7e0727eeeb179 | [
"Apache-2.0"
] | 3 | 2019-02-04T11:44:06.000Z | 2019-02-05T14:09:04.000Z | test/low_use_test/test_reporter.py | KeithWhitley/LUAU | d7df6836e7c9c0ddc4099b9a17f7e0727eeeb179 | [
"Apache-2.0"
] | 1 | 2021-05-26T12:00:06.000Z | 2021-05-26T12:00:06.000Z | import unittest
import boto3
from moto import mock_dynamodb2, mock_ec2
from low_use.reporter import LowUseReporter
from util.aws import EC2Wrapper, DynamoWrapper
import os
def test_get_creator_report(self):
self.reporter.low_use_instances = [
{
'Creator': 'test1',
'InstanceID': 'test_id_1'
},
{
'Creator': 'test2',
'InstanceID': 'test_id_2'
}
]
self.reporter.instances_scheduled_for_deletion = [
{
'Creator': 'test1',
'InstanceID': 'test_id_1_delete'
},
{
'Creator': 'test2',
'InstanceID': 'test_id_2_delete'
}
]
expected_creator_reports = [
{
'creator': 'test1',
'low_use': [{
'Creator': 'test1',
'InstanceID': 'test_id_1'
}],
'scheduled_for_deletion': [{
'Creator': 'test1',
'InstanceID': 'test_id_1_delete'
}]},
{
'creator': 'test2',
'low_use': [{
'Creator': 'test2',
'InstanceID': 'test_id_2'
}],
'scheduled_for_deletion': [{
'Creator': 'test2',
'InstanceID': 'test_id_2_delete'
}]}
]
result = list(self.reporter.get_creator_report())
self.assertCountEqual(expected_creator_reports, result)
def test_start(self):
pass
| 33.758621 | 82 | 0.544637 |
55c1a75a2d6e9fa1c5acdea024449b58927aff23 | 1,009 | py | Python | splot/tests/test_viz_libpysal_mpl.py | renanxcortes/splot | c29e9b5cc92be4c4deee0358c1f462b60b0fe9f7 | [
"BSD-3-Clause"
] | null | null | null | splot/tests/test_viz_libpysal_mpl.py | renanxcortes/splot | c29e9b5cc92be4c4deee0358c1f462b60b0fe9f7 | [
"BSD-3-Clause"
] | null | null | null | splot/tests/test_viz_libpysal_mpl.py | renanxcortes/splot | c29e9b5cc92be4c4deee0358c1f462b60b0fe9f7 | [
"BSD-3-Clause"
] | null | null | null | from libpysal.weights.contiguity import Queen
import libpysal
from libpysal import examples
import matplotlib.pyplot as plt
import geopandas as gpd
from splot.libpysal import plot_spatial_weights | 33.633333 | 86 | 0.737364 |
55c1a9520e720c583feab19a26044ebc037a17c8 | 17,245 | py | Python | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/tests/test_ir.py | BadDevCode/lumberyard | 3d688932f919dbf5821f0cb8a210ce24abe39e9e | [
"AML"
] | 1,738 | 2017-09-21T10:59:12.000Z | 2022-03-31T21:05:46.000Z | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/tests/test_ir.py | olivier-be/lumberyard | 3d688932f919dbf5821f0cb8a210ce24abe39e9e | [
"AML"
] | 427 | 2017-09-29T22:54:36.000Z | 2022-02-15T19:26:50.000Z | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/tests/test_ir.py | olivier-be/lumberyard | 3d688932f919dbf5821f0cb8a210ce24abe39e9e | [
"AML"
] | 671 | 2017-09-21T08:04:01.000Z | 2022-03-29T14:30:07.000Z | from __future__ import print_function
import numba.unittest_support as unittest
from numba import compiler, ir, objmode
import numpy as np
# used later
_GLOBAL = 1234
if __name__ == '__main__':
unittest.main()
| 36.458774 | 80 | 0.534126 |
55c37842e6305ac81f748c98ec0be9fc4a30c176 | 13,629 | py | Python | pyannote/audio/applications/base.py | Ruslanmlnkv/pyannote-audio | b678920057ace936c8900c62d2975e958903fae2 | [
"MIT"
] | 2 | 2018-10-25T19:32:27.000Z | 2021-06-19T15:14:16.000Z | pyannote/audio/applications/base.py | Ruslanmlnkv/pyannote-audio | b678920057ace936c8900c62d2975e958903fae2 | [
"MIT"
] | null | null | null | pyannote/audio/applications/base.py | Ruslanmlnkv/pyannote-audio | b678920057ace936c8900c62d2975e958903fae2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2017 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Herv BREDIN - http://herve.niderb.fr
import time
import yaml
from os.path import dirname, basename
import numpy as np
from tqdm import tqdm
from glob import glob
from pyannote.database import FileFinder
from pyannote.database import get_protocol
from pyannote.audio.util import mkdir_p
from sortedcontainers import SortedDict
import tensorboardX
from functools import partial
def load_model(self, epoch, train_dir=None):
"""Load pretrained model
Parameters
----------
epoch : int
Which epoch to load.
train_dir : str, optional
Path to train directory. Defaults to self.train_dir_.
"""
if train_dir is None:
train_dir = self.train_dir_
import torch
weights_pt = self.WEIGHTS_PT.format(
train_dir=train_dir, epoch=epoch)
self.model_.load_state_dict(torch.load(weights_pt))
return self.model_
def get_number_of_epochs(self, train_dir=None, return_first=False):
"""Get information about completed epochs
Parameters
----------
train_dir : str, optional
Training directory. Defaults to self.train_dir_
return_first : bool, optional
Defaults (False) to return number of epochs.
Set to True to also return index of first epoch.
"""
if train_dir is None:
train_dir = self.train_dir_
directory = self.WEIGHTS_PT.format(train_dir=train_dir, epoch=0)[:-7]
weights = sorted(glob(directory + '*[0-9][0-9][0-9][0-9].pt'))
if not weights:
number_of_epochs = 0
first_epoch = None
else:
number_of_epochs = int(basename(weights[-1])[:-3]) + 1
first_epoch = int(basename(weights[0])[:-3])
return (number_of_epochs, first_epoch) if return_first \
else number_of_epochs
| 38.176471 | 81 | 0.595935 |
55c46dbffcc8bf64a692ba3c182ecb46d711b58d | 9,359 | py | Python | cogs/games/checkers.py | itsVale/Vale.py | 6b3cac68d53e8d814ee969a959aae4de52beda80 | [
"MIT"
] | 14 | 2018-08-06T06:45:19.000Z | 2018-12-28T14:20:33.000Z | cogs/games/checkers.py | Mystic-Alchemy/Vale.py | b4cc964d34672444c65e2801a15f37d774c5e6e3 | [
"MIT"
] | 10 | 2018-10-06T10:52:08.000Z | 2018-12-28T14:21:47.000Z | cogs/games/checkers.py | Mystic-Alchemy/Vale.py | b4cc964d34672444c65e2801a15f37d774c5e6e3 | [
"MIT"
] | 13 | 2018-09-23T20:13:10.000Z | 2019-01-26T11:02:37.000Z | import itertools
import random
import re
import discord
from more_itertools import chunked, pairwise, sliced, spy
from .base import Status, TwoPlayerGameCog, TwoPlayerSession
from utils.misc import emoji_url
BLACK, WHITE = False, True
PIECES = BK_PIECE, WH_PIECE = 'bw'
KINGS = BK_KING, WH_KING = 'BW'
CHECKERS_BLACK_KING = '\N{HEAVY BLACK HEART}'
CHECKERS_WHITE_KING = '\N{BLUE HEART}'
CHECKERS_BLACK_LAST_MOVE = ''
CHECKERS_WHITE_LAST_MOVE = ''
_is_king = str.isupper
_STARTING_BOARD = [
' ', BK_PIECE, ' ', BK_PIECE, ' ', BK_PIECE, ' ', BK_PIECE,
BK_PIECE, ' ', BK_PIECE, ' ', BK_PIECE, ' ', BK_PIECE, ' ',
' ', BK_PIECE, ' ', BK_PIECE, ' ', BK_PIECE, ' ', BK_PIECE,
' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',
' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',
WH_PIECE, ' ', WH_PIECE, ' ', WH_PIECE, ' ', WH_PIECE, ' ',
' ', WH_PIECE, ' ', WH_PIECE, ' ', WH_PIECE, ' ', WH_PIECE,
WH_PIECE, ' ', WH_PIECE, ' ', WH_PIECE, ' ', WH_PIECE, ' ',
]
X = 'abcdefgh'
Y = '87654321'
_STARTING_BOARD = [' '] * 64
_STARTING_BOARD[_to_i(3, 4)] = BK_PIECE
_STARTING_BOARD[_to_i(4, 3)] = WH_PIECE
# Generate lookup table for moves
_MOVES = _make_dict(_moves)
_CAPTURES = _make_dict(_captures)
# Below is the game logic. If you just want to copy the board, Ignore this.
_VALID_MOVE_REGEX = re.compile(r'^([a-h][1-8]\s?)+', re.IGNORECASE)
_MESSAGES = {
Status.PLAYING: 'Your turn, {user}',
Status.END: '{user} wins!',
Status.QUIT: '{user} ragequitted.',
Status.TIMEOUT: '{user} ran out of time.',
}
def setup(bot):
bot.add_cog(Checkers(bot))
| 30.093248 | 96 | 0.567048 |
55c5a244138d1f9a3e5a9c72e37cf112606b9cae | 767 | py | Python | setup.py | Fohlen/yente | bcba9ef3f766fea115de7eb381d7ad1b385d8df8 | [
"MIT"
] | null | null | null | setup.py | Fohlen/yente | bcba9ef3f766fea115de7eb381d7ad1b385d8df8 | [
"MIT"
] | null | null | null | setup.py | Fohlen/yente | bcba9ef3f766fea115de7eb381d7ad1b385d8df8 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
with open("README.md") as f:
long_description = f.read()
setup(
name="yente",
version="1.3.5",
url="https://opensanctions.org/docs/api/",
long_description=long_description,
long_description_content_type="text/markdown",
license="MIT",
author="OpenSanctions",
author_email="info@opensanctions.org",
packages=find_packages(exclude=["examples", "test"]),
namespace_packages=[],
extras_require={
"dev": [
"pip>=10.0.0",
"bump2version",
"wheel>=0.29.0",
"twine",
"mypy",
"pytest",
"pytest-cov",
"flake8>=2.6.0",
"black",
],
},
zip_safe=False,
)
| 23.242424 | 57 | 0.548892 |
55c74a48da6996ad1f49dfbcbd9bd447049566b8 | 451 | py | Python | python-pulseaudio-master/setup.py | rrbutani/SoundAndColor | 44992fa188c109a3b11b2df137b9272a0b6203d8 | [
"Unlicense"
] | null | null | null | python-pulseaudio-master/setup.py | rrbutani/SoundAndColor | 44992fa188c109a3b11b2df137b9272a0b6203d8 | [
"Unlicense"
] | null | null | null | python-pulseaudio-master/setup.py | rrbutani/SoundAndColor | 44992fa188c109a3b11b2df137b9272a0b6203d8 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
from distutils.core import setup
setup(name='libpulseaudio',
version='1.1',
description='simple libpulseaudio bindings',
author='Valodim',
author_email='valodim@mugenguild.com',
license='LGPL',
url='http://github.com/valodim/python-pulseaudio',
packages=['pulseaudio'],
provides=['libpulseaudio'],
download_url='http://datatomb.de/~valodim/libpulseaudio-1.1.tar.gz'
)
| 28.1875 | 73 | 0.662971 |
55c807db743e48332bd230ddf2d2f732bbf1c1d4 | 2,006 | py | Python | vectorization.py | creadal/articles-classifier | d7b7df5687e57da91fae2bb095f1617d729a00a2 | [
"MIT"
] | null | null | null | vectorization.py | creadal/articles-classifier | d7b7df5687e57da91fae2bb095f1617d729a00a2 | [
"MIT"
] | null | null | null | vectorization.py | creadal/articles-classifier | d7b7df5687e57da91fae2bb095f1617d729a00a2 | [
"MIT"
] | null | null | null | import codecs
import numpy as np
import random
categories = ['science', 'style', 'culture', 'life', 'economics', 'business', 'travel', 'forces', 'media', 'sport']
dict_file = codecs.open('processed/dictionary.txt', 'r', 'utf_8_sig')
dictionary = []
for line in dict_file:
line = line[: len(line) - 1]
dictionary.append(line)
train_file = codecs.open('news_train.txt', 'r', 'utf_8_sig')
input_vectors = []
outputs = []
for line in train_file:
label, name, content = line.split('\t')
vector = line2vec(name, dictionary)
output = [0]*10
output[categories.index(label)] = 1
input_vectors.append(vector)
outputs.append(output)
train_vectors_i = codecs.open('processed/train_vectors_input.txt', 'w+', 'utf_8_sig')
train_vectors_o = codecs.open('processed/train_vectors_outputs.txt', 'w+', 'utf_8_sig')
for i in input_vectors:
train_vectors_i.write(str(i) + '\n')
for i in outputs:
train_vectors_o.write(str(i) +'\n')
print('text processed') | 25.717949 | 164 | 0.565803 |
55c88b114fda250da3b41e3041303ef9275c30e5 | 4,734 | py | Python | data/spca/preprocess.py | energydatalab/mrs | f2088fd25594ff0c67faac89013c2f1c58942485 | [
"MIT"
] | null | null | null | data/spca/preprocess.py | energydatalab/mrs | f2088fd25594ff0c67faac89013c2f1c58942485 | [
"MIT"
] | null | null | null | data/spca/preprocess.py | energydatalab/mrs | f2088fd25594ff0c67faac89013c2f1c58942485 | [
"MIT"
] | null | null | null | # Built-in
import os
from glob import glob
# Libs
import numpy as np
from tqdm import tqdm
from natsort import natsorted
# Own modules
from data import data_utils
from mrs_utils import misc_utils, process_block
# Settings
DS_NAME = 'spca'
if __name__ == '__main__':
img_files = natsorted(glob(os.path.join(r'/home/wh145/data/caemo', '*RGB.jpg')))
np.random.seed(931004)
ps = 512
ol = 0
pd = 0
create_dataset(data_dir=r'/home/wh145/data/caemo',
save_dir=r'/home/wh145/data/caemo/ps_512_ol_0', patch_size=(ps, ps), pad=pd, overlap=ol, visualize=False, valid_percent=0.1)
# val = get_stats_pb(r'/media/ei-edl01/data/uab_datasets/spca/data/Original_Tiles')[0]
# data_utils.patches_to_hdf5(r'/hdd/mrs/spca', r'/hdd/mrs/spca/ps512_pd0_ol0_hdf5')
| 44.660377 | 145 | 0.667934 |
55c8ccd7b221f69f74c7f2b403781f9c5546f908 | 3,182 | py | Python | tests/test_json_util.py | okutane/yandex-taxi-testsuite | 7e2e3dd5a65869ecbf37bf3f79cba7bb4e782b0c | [
"MIT"
] | 128 | 2020-03-10T09:13:41.000Z | 2022-02-11T20:16:16.000Z | tests/test_json_util.py | okutane/yandex-taxi-testsuite | 7e2e3dd5a65869ecbf37bf3f79cba7bb4e782b0c | [
"MIT"
] | 3 | 2021-11-01T12:31:27.000Z | 2022-02-11T13:08:38.000Z | tests/test_json_util.py | okutane/yandex-taxi-testsuite | 7e2e3dd5a65869ecbf37bf3f79cba7bb4e782b0c | [
"MIT"
] | 22 | 2020-03-05T07:13:12.000Z | 2022-03-15T10:30:58.000Z | import dateutil
import pytest
from testsuite.plugins import mockserver
from testsuite.utils import json_util
NOW = dateutil.parser.parse('2019-09-19-13:04:00.000000')
MOCKSERVER_INFO = mockserver.MockserverInfo(
'localhost', 123, 'http://localhost:123/', None,
)
MOCKSERVER_SSL_INFO = mockserver.MockserverInfo(
'localhost',
456,
'https://localhost:456/',
mockserver.SslInfo('/some_dir/cert.cert', '/some_dir/cert.key'),
)
| 30.596154 | 79 | 0.511942 |
55c8ce13de36aa35d1ea8a967ade5c81bd88fbbc | 1,066 | py | Python | Level/__init__.py | PyRectangle/GreyRectangle | 21c19002f52563a096566e9166040815005b830b | [
"MIT"
] | 3 | 2017-09-28T16:53:09.000Z | 2018-03-18T20:01:41.000Z | Level/__init__.py | PyRectangle/GreyRectangle | 21c19002f52563a096566e9166040815005b830b | [
"MIT"
] | null | null | null | Level/__init__.py | PyRectangle/GreyRectangle | 21c19002f52563a096566e9166040815005b830b | [
"MIT"
] | null | null | null | from Level.Render import Render
from Level.Data import Data
from Constants import *
import os
| 26 | 61 | 0.562852 |
55cc899799689985629d17decc9d13ef5c737a0d | 1,252 | py | Python | preparedstatement.py | shgysk8zer0/pyutils | f7fa2ea7717740f05ea739d20cd8a21701835800 | [
"MIT"
] | null | null | null | preparedstatement.py | shgysk8zer0/pyutils | f7fa2ea7717740f05ea739d20cd8a21701835800 | [
"MIT"
] | null | null | null | preparedstatement.py | shgysk8zer0/pyutils | f7fa2ea7717740f05ea739d20cd8a21701835800 | [
"MIT"
] | null | null | null | import sqlite3
| 26.083333 | 77 | 0.610224 |
55cce6a5f51b48ac0a3f7fb58d81fade424bd086 | 2,787 | py | Python | python/communitymanager/lib/basicauthpolicy.py | OpenCIOC/communityrepo | 63199a7b620f5c08624e534faf771e5dd2243adb | [
"Apache-2.0"
] | 2 | 2016-01-25T14:40:44.000Z | 2018-01-31T04:30:23.000Z | python/communitymanager/lib/basicauthpolicy.py | OpenCIOC/communityrepo | 63199a7b620f5c08624e534faf771e5dd2243adb | [
"Apache-2.0"
] | 5 | 2018-02-07T20:16:49.000Z | 2021-12-13T19:41:43.000Z | python/communitymanager/lib/basicauthpolicy.py | OpenCIOC/communityrepo | 63199a7b620f5c08624e534faf771e5dd2243adb | [
"Apache-2.0"
] | 1 | 2018-02-07T20:37:52.000Z | 2018-02-07T20:37:52.000Z | # From the Pyramid Cookbook:
# http://pyramid-cookbook.readthedocs.org/en/latest/auth/basic.html
import binascii
import base64
from paste.httpheaders import AUTHORIZATION
from paste.httpheaders import WWW_AUTHENTICATE
from pyramid.security import Everyone
from pyramid.security import Authenticated
| 30.626374 | 81 | 0.657696 |
55cd25162b525efcbd0ec6570ea61ed0a8074922 | 4,709 | py | Python | eventsourcing/examples/searchabletimestamps/postgres.py | ParikhKadam/eventsourcing | 8d7f8d28c527d7df47a631b009b19b5fdb53740b | [
"BSD-3-Clause"
] | 107 | 2021-10-30T14:47:19.000Z | 2022-03-31T10:52:42.000Z | eventsourcing/examples/searchabletimestamps/postgres.py | ParikhKadam/eventsourcing | 8d7f8d28c527d7df47a631b009b19b5fdb53740b | [
"BSD-3-Clause"
] | 12 | 2021-11-02T05:52:42.000Z | 2022-03-08T14:49:09.000Z | eventsourcing/examples/searchabletimestamps/postgres.py | ParikhKadam/eventsourcing | 8d7f8d28c527d7df47a631b009b19b5fdb53740b | [
"BSD-3-Clause"
] | 8 | 2021-10-29T22:35:54.000Z | 2022-03-03T04:16:17.000Z | from datetime import datetime
from typing import Any, List, Optional, Sequence, Tuple, cast
from uuid import UUID
from eventsourcing.domain import Aggregate
from eventsourcing.examples.searchabletimestamps.persistence import (
SearchableTimestampsRecorder,
)
from eventsourcing.persistence import ApplicationRecorder, StoredEvent
from eventsourcing.postgres import (
Factory,
PostgresApplicationRecorder,
PostgresConnection,
PostgresCursor,
PostgresDatastore,
)
del Factory
| 36.789063 | 87 | 0.647484 |
55cdd7e5e8bf1de41967431dfc57603e40486db0 | 313 | py | Python | complete/01 - 10/Problem6/main.py | this-jacob/project-euler | 8f9e700e2875e84d081eade44fd2107db0a0ae12 | [
"MIT"
] | null | null | null | complete/01 - 10/Problem6/main.py | this-jacob/project-euler | 8f9e700e2875e84d081eade44fd2107db0a0ae12 | [
"MIT"
] | null | null | null | complete/01 - 10/Problem6/main.py | this-jacob/project-euler | 8f9e700e2875e84d081eade44fd2107db0a0ae12 | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
main()
| 19.5625 | 54 | 0.533546 |
55ce2377676e46ea6ca7f0b0a8a26da468d757a5 | 1,861 | py | Python | Sudoko.py | abirbhattacharya82/Sudoko-Solver | 36ea15d16561fe5031548ed3f4c58757280117f6 | [
"MIT"
] | 1 | 2021-07-25T03:02:39.000Z | 2021-07-25T03:02:39.000Z | Sudoko.py | abirbhattacharya82/Sudoku-Solver | 36ea15d16561fe5031548ed3f4c58757280117f6 | [
"MIT"
] | null | null | null | Sudoko.py | abirbhattacharya82/Sudoku-Solver | 36ea15d16561fe5031548ed3f4c58757280117f6 | [
"MIT"
] | null | null | null | board=[]
enter_datas(board)
show(board)
solve(board)
print("\n\n")
show(board)
'''
Enter the Datas in a Row
7 8 0 4 0 0 1 2 0
Enter the Datas in a Row
6 0 0 0 7 5 0 0 9
Enter the Datas in a Row
0 0 0 6 0 1 0 7 8
Enter the Datas in a Row
0 0 7 0 4 0 2 6 0
Enter the Datas in a Row
0 0 1 0 5 0 9 3 0
Enter the Datas in a Row
9 0 4 0 6 0 0 0 5
Enter the Datas in a Row
0 7 0 3 0 0 0 1 2
Enter the Datas in a Row
1 2 0 0 0 7 4 0 0
Enter the Datas in a Row
0 4 9 2 0 6 0 0 7
''' | 23.2625 | 51 | 0.476088 |
55cf3d9d9f70b37e8b09330bf7dcbd0d8aeb3b5f | 2,341 | py | Python | gbkfit_web/utility/display_names.py | ADACS-Australia/ADACS-GBKFIT | 20c7cafcabb6e75d8c287df06efb43113fdabd25 | [
"MIT"
] | null | null | null | gbkfit_web/utility/display_names.py | ADACS-Australia/ADACS-GBKFIT | 20c7cafcabb6e75d8c287df06efb43113fdabd25 | [
"MIT"
] | null | null | null | gbkfit_web/utility/display_names.py | ADACS-Australia/ADACS-GBKFIT | 20c7cafcabb6e75d8c287df06efb43113fdabd25 | [
"MIT"
] | null | null | null | """
Distributed under the MIT License. See LICENSE.txt for more info.
"""
# VARIABLES of this file must be unique
from django_hpc_job_controller.client.scheduler.status import JobStatus
# Dictionary to map names and corresponding display names (for UI)
DISPLAY_NAME_MAP = dict()
DISPLAY_NAME_MAP_HPC_JOB = dict()
# Job Status
NONE = 'none'
NONE_DISPLAY = 'None'
DRAFT = 'draft'
DRAFT_DISPLAY = 'Draft'
PENDING = 'pending'
PENDING_DISPLAY = 'Pending'
SUBMITTING = 'submitting'
SUBMITTING_DISPLAY = 'Submitting'
SUBMITTED = 'submitted'
SUBMITTED_DISPLAY = 'Submitted'
QUEUED = 'queued'
QUEUED_DISPLAY = 'Queued'
IN_PROGRESS = 'in_progress'
IN_PROGRESS_DISPLAY = 'In Progress'
CANCELLING = 'cancelling'
CANCELLING_DISPLAY = 'Cancelling'
CANCELLED = 'cancelled'
CANCELLED_DISPLAY = 'Cancelled'
ERROR = 'error'
ERROR_DISPLAY = 'Error'
WALL_TIME_EXCEEDED = 'wall_time_exceeded'
WALL_TIME_EXCEEDED_DISPLAY = 'Wall Time Exceeded'
OUT_OF_MEMORY = 'out_of_memory'
OUT_OF_MEMORY_DISPLAY = 'Out of Memory'
COMPLETED = 'completed'
COMPLETED_DISPLAY = 'Completed'
SAVED = 'saved'
SAVED_DISPLAY = 'Saved'
DELETING = 'deleting'
DELETING_DISPLAY = 'Deleting'
DELETED = 'deleted'
DELETED_DISPLAY = 'Deleted'
PUBLIC = 'public'
PUBLIC_DISPLAY = 'Public'
DISPLAY_NAME_MAP.update({
DRAFT: DRAFT_DISPLAY,
PENDING: PENDING_DISPLAY,
SUBMITTING: SUBMITTING_DISPLAY,
SUBMITTED: SUBMITTED_DISPLAY,
QUEUED: QUEUED_DISPLAY,
IN_PROGRESS: IN_PROGRESS_DISPLAY,
CANCELLING: CANCELLING_DISPLAY,
CANCELLED: CANCELLED_DISPLAY,
ERROR: ERROR_DISPLAY,
WALL_TIME_EXCEEDED: WALL_TIME_EXCEEDED_DISPLAY,
OUT_OF_MEMORY: OUT_OF_MEMORY_DISPLAY,
COMPLETED: COMPLETED_DISPLAY,
SAVED: SAVED_DISPLAY,
DELETING: DELETING_DISPLAY,
DELETED: DELETED_DISPLAY,
PUBLIC: PUBLIC_DISPLAY,
})
DISPLAY_NAME_MAP_HPC_JOB.update({
JobStatus.DRAFT: DRAFT,
JobStatus.PENDING: PENDING,
JobStatus.SUBMITTING: SUBMITTING,
JobStatus.SUBMITTED: SUBMITTED,
JobStatus.QUEUED: QUEUED,
JobStatus.RUNNING: IN_PROGRESS,
JobStatus.CANCELLING: CANCELLING,
JobStatus.CANCELLED: CANCELLED,
JobStatus.ERROR: ERROR,
JobStatus.WALL_TIME_EXCEEDED: WALL_TIME_EXCEEDED,
JobStatus.OUT_OF_MEMORY: OUT_OF_MEMORY,
JobStatus.DELETING: DELETING,
JobStatus.DELETED: DELETED,
JobStatus.COMPLETED: COMPLETED,
})
| 27.541176 | 71 | 0.76463 |
55d01698d8da5e9ff89aaf1c3a856cf2b9f42f2c | 5,227 | py | Python | heap/heap.py | xyycha/data-struct | 0a0d46bf6666681be2e4d5a2664b333dd9fb3a95 | [
"Apache-2.0"
] | 4 | 2020-03-10T07:45:44.000Z | 2020-03-12T02:00:32.000Z | heap/heap.py | xyycha/data-struct | 0a0d46bf6666681be2e4d5a2664b333dd9fb3a95 | [
"Apache-2.0"
] | 1 | 2020-03-14T01:32:19.000Z | 2020-03-14T03:06:34.000Z | heap/heap.py | xyycha/data-struct | 0a0d46bf6666681be2e4d5a2664b333dd9fb3a95 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
import random
from graphviz import Digraph
def test1():
h = Heap(cap=20)
for i in range(20):
value = random.randint(0, 100)
info = {"value": value, "key": str(value)}
node = HeapNode(value=value, info=info)
h.insert(node=node)
h.show(file_name="")
h.pop()
h.show(file_name="pop")
h.pop()
h.show(file_name="pop")
h.pop()
h.show(file_name="pop")
def test2():
node_list = []
pre_res = []
for i in range(20):
value = random.randint(0, 100)
pre_res.append(value)
info = {"value": value, "key": str(value)}
node = HeapNode(value=value, info=info)
node_list.append(node)
print(pre_res)
h = Heap(cap=20)
h.build_heap(node_list)
h.show(file_name="")
print("end")
if __name__ == "__main__":
test2()
| 32.067485 | 168 | 0.575091 |
55d1d3ad368bdd500bd5c9d98aeb00a9d5dd603d | 1,899 | py | Python | python/federatedml/param/encrypted_mode_calculation_param.py | QuantumA/FATE | 89a3dd593252128c1bf86fb1014b25a629bdb31a | [
"Apache-2.0"
] | 1 | 2022-02-07T06:23:15.000Z | 2022-02-07T06:23:15.000Z | python/federatedml/param/encrypted_mode_calculation_param.py | JavaGreenHands/FATE | ea1e94b6be50c70c354d1861093187e523af32f2 | [
"Apache-2.0"
] | 11 | 2020-10-09T09:53:50.000Z | 2021-12-06T16:14:51.000Z | python/federatedml/param/encrypted_mode_calculation_param.py | JavaGreenHands/FATE | ea1e94b6be50c70c354d1861093187e523af32f2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.param.base_param import BaseParam
| 35.830189 | 120 | 0.646656 |
55d2968cb14aa637fc9c4bccc7dba59fba67c074 | 5,832 | py | Python | optimization/solution.py | silx-kit/silx-training | 1e24d4fe383263e3466c029073190ed8bb70bb1f | [
"CC-BY-4.0"
] | 7 | 2017-05-02T10:03:12.000Z | 2021-06-28T14:11:32.000Z | optimization/solution.py | silx-kit/silx-training | 1e24d4fe383263e3466c029073190ed8bb70bb1f | [
"CC-BY-4.0"
] | 23 | 2016-11-21T17:55:11.000Z | 2021-11-24T13:43:13.000Z | optimization/solution.py | silx-kit/silx-training | 1e24d4fe383263e3466c029073190ed8bb70bb1f | [
"CC-BY-4.0"
] | 13 | 2016-11-17T10:47:22.000Z | 2022-02-07T09:38:47.000Z | """Solution of the exercises of Optimization of compute bound Python code"""
import math
import cmath
import numpy as np
import numexpr as ne
import numba as nb
# Needed here since it is used as global variables
# Maximum strain at surface
e0 = 0.01
# Width of the strain profile below the surface
w = 5.0
# Python: Circular crystal ###
# Alternative using Python `sum`
# Python: Circular strained crystal ###
# Alternative computing list of strained position
# numpy ###
# numexpr ###
# numba ###
| 32.4 | 98 | 0.477195 |
55d367bc88c080acffb11c453ca1f70ffffc2a4c | 9,300 | py | Python | examples/SSTDemoWeightedClauses_Interpret.py | jivitesh-sharma/Drop-Clause-Interpretable-TM | 4fb4d4be0f24a0c30f13fbcca974390889d7fe84 | [
"MIT"
] | null | null | null | examples/SSTDemoWeightedClauses_Interpret.py | jivitesh-sharma/Drop-Clause-Interpretable-TM | 4fb4d4be0f24a0c30f13fbcca974390889d7fe84 | [
"MIT"
] | null | null | null | examples/SSTDemoWeightedClauses_Interpret.py | jivitesh-sharma/Drop-Clause-Interpretable-TM | 4fb4d4be0f24a0c30f13fbcca974390889d7fe84 | [
"MIT"
] | null | null | null | import re
import string
import nltk
from nltk.tokenize import word_tokenize
from string import punctuation
from nltk.corpus import stopwords
nltk.download('punkt')
nltk.download('stopwords')
import pandas as pd
from nltk.stem import PorterStemmer
from nltk import FreqDist
from nltk.tokenize import RegexpTokenizer
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import metrics
from PyTsetlinMachineCUDA.tm import MultiClassTsetlinMachine
nltk.download('wordnet')
from time import time
stop_words = set(stopwords.words('english'))
tokenizerR = RegexpTokenizer(r'\w+')
from numpy import save
from nltk.stem import WordNetLemmatizer
stop_words = set(stopwords.words('english'))
alpha = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
from argparse import ArgumentParser
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
parser = ArgumentParser()
parser.add_argument('-interpret', type=bool, default=False)
parser.add_argument('-n_clauses_per_class', type=int, default=5000)
parser.add_argument('-s', type=float, default=5.0)
parser.add_argument('-T', type=int, default=80)
parser.add_argument('-drop_clause', type=float, default=0.0)
parser.add_argument('-state_bits', type=int, default=8)
parser.add_argument('-features', type=int, default=7500)
parser.add_argument('-gpus', type=int, default=1)
parser.add_argument('-stop_train', type=int, default=250)
config = parser.parse_args()
col_list = ["text", "label"]
df = pd.read_csv('sst2.csv')
label = df.iloc[:,0:1].values
textOrig = df.iloc[:,1:2].values
y = np.reshape(label, len(label))
print(textOrig.shape)
input_text = prepreocess(textOrig)
inputtext = []
for i in input_text:
ps = PorterStemmer()
temp4 = []
for m in i:
temp_temp =ps.stem(m)
temp4.append(temp_temp)
inputtext.append(temp4)
newVocab =[]
for i in inputtext:
for j in i:
newVocab.append(j)
print(len(newVocab))
fdist1 = FreqDist(newVocab)
tokens1 = fdist1.most_common(config.features)
full_token_fil = []
for i in tokens1:
full_token_fil.append(i[0])
sum1 = 0
for j in tokens1:
sum1 += j[1]
print('sum1', sum1)
vocab_unique = full_token_fil
vocab = np.asarray(full_token_fil)
np.savetxt('sst_vocab.csv', vocab, delimiter=',', fmt='%s')
X_text = binarization_text(inputtext)
print("Text length:", X_text.shape)
tt = 6920
X_train = X_text[0:tt,:]
print("X_train length:", X_train.shape)
X_test = X_text[tt:,:]
print("X_test length:", X_test.shape)
ytrain = y[0:tt]
ytest = y[tt:]
print(ytest.shape)
X_dev = X_text[tt:,:]
Y_dev = y[tt:]
tm1 = MultiClassTsetlinMachine(config.n_clauses_per_class*2, config.T*16, config.s, clause_drop_p=config.drop_clause, number_of_gpus=config.gpus, number_of_state_bits=config.state_bits)
f = open("sst_weighted_%.1f_%d_%d_%.2f_%d_aug.txt" % (s, clauses, T, drop_clause, number_of_state_bits), "w+")
r_25 = 0
r_50 = 0
max = 0.0
for i in range(config.stop_train):
start_training = time()
tm1.fit(X_train, ytrain, epochs=1, incremental=True)
stop_training = time()
start_testing = time()
result2 = 100*(tm1.predict(X_train) == ytrain).mean()
result1 = 100*(tm1.predict(X_test) == ytest).mean()
#result1 = 0
stop_testing = time()
if result1 > max:
max = result1
if i >= 350:
r_50+=result1
if i >= 375:
r_25+=result1
print("#%d AccuracyTrain: %.2f%% AccuracyTest: %.2f%% Training: %.2fs Testing: %.2fs" % (i+1, result2, result1, stop_training-start_training, stop_testing-start_testing), file=f)
print("Average Accuracy last 25 epochs: %.2f \n" %(r_25/25), file=f)
print("Average Accuracy last 50 epochs: %.2f \n" %(r_50/50), file=f)
print("Max Accuracy: %.2f \n" %(max), file=f)
    # Interpretability pass: inspect which clauses fired for one sample and
    # which input features (literals) those clauses include, then render a
    # word heatmap from the most frequent negated features.
    if config.interpret:
        # Single example at row 4245; the 4245:4246 slice keeps it 2-D.
        print('predicted Class: ', tm1.predict(X_train[4245:4246,:]))
        triggClause = tm1.transform(X_train[4245:4246,:])
        # Indices of every clause that was triggered for this example.
        clauseIndex = []
        for i in range(len(triggClause[0])):
            if triggClause[0][i] ==1:
                clauseIndex.append(i)
        import nltk
        from nltk.probability import FreqDist
        originalFeatures = []
        negatedFeatures = []
        number_of_features = 1000
        # Scan every even-numbered clause; literals k < number_of_features are
        # original features, the rest are their negations.
        for j in range(0, 1500, 2):
            #print("Clause #%d (%d): " % (j, tm1.get_weight(1, j)), end=' ')
            l = []
            for k in range(number_of_features*2):
                if tm1.ta_action(0, j, k) == 1:
                    if k < number_of_features:
                        l.append(" x%d" % (k))
                        originalFeatures.append(k)
                    else:
                        l.append("x%d" % (k-number_of_features))
                        negatedFeatures.append(k-number_of_features)
            #print(" ".join(l))
        # Frequency ranking over ALL clauses ("full" word lists).
        fdist1 = FreqDist(negatedFeatures)
        negatedWords = fdist1.most_common(200)
        fdist2 = FreqDist(originalFeatures)
        originalWords = fdist2.most_common(20)
        print('full original word')
        fulloriginalword=[]
        for i in originalWords:
            fulloriginalword.append(i[0])
        fullnegatedword =[]
        print('full negated word')
        for i in negatedWords:
            fullnegatedword.append(i[0])
        # Same scan restricted to the clauses that fired for this example
        # ("needed" word lists).
        originalFeatures2 = []
        negatedFeatures2= []
        for j in clauseIndex:
            if j < 1500 and j%2==0:
                #print("Clause #%d (%d): " % (j, tm1.get_weight(1, j)), end=' ')
                l = []
                for k in range(number_of_features*2):
                    if tm1.ta_action(0, j, k) == 1:
                        if k < number_of_features:
                            l.append(" x%d" % (k))
                            originalFeatures2.append(k)
                        else:
                            l.append("x%d" % (k-number_of_features))
                            negatedFeatures2.append(k-number_of_features)
        fdist3 = FreqDist(negatedFeatures2)
        negatedWords2 = fdist3.most_common(100)
        fdist4 = FreqDist(originalFeatures2)
        originalWords2 = fdist4.most_common(10)
        neededoriginalword =[]
        print('needed original word')
        for i in originalWords2:
            neededoriginalword.append(i[0])
        needednegatedword =[]
        print('needed negated word')
        for i in negatedWords2:
            needednegatedword.append(i[0])
        #Save fulloriginalword, fullnegatedword, neededoriginalword, or needednegatedword (Preferred needednegatedword for interpretability)
        interpretList = np.asarray(needednegatedword)
        np.savetxt('interpretFile.csv', interpretList, fmt='%s')
        # Round-trip through CSV so the words come back as a DataFrame.
        df = pd.read_csv('interpretFile.csv', dtype=str, header=None)
        df1 = df.iloc[:,:]
        full1 = df.iloc[:,:].values
        #full1= np.reshape(full1,(10,20))
        # NOTE(review): this mapping assumes exactly 100 saved words (arange(100)
        # and the (10,10) reshapes below) - confirm most_common(100) always
        # returns 100 entries here.
        index = np.arange(100)
        letter2num = {}
        for i in range(len(index)):
            letter2num[full1[i][0]] =i
        print(letter2num)
        # Numeric rank matrix aligned with the word matrix, used as the heatmap.
        df2 = pd.DataFrame(np.array( [letter2num[i] for i in df1.values.flat] ).reshape(df1.shape))
        print(df2)
        colors = ["white"] # use hex colors here, if desired.
        # NOTE(review): 'cmap' is built but never used - imshow below uses the
        # 'YlOrBr_r' named colormap instead.
        cmap = ListedColormap(colors)
        full2 = df.iloc[:,:].values
        full2= np.reshape(full2,(10,10))
        full3 = df2.iloc[:,:].values
        full3= np.reshape(full3,(10,10))
        fig, ax = plt.subplots()
        ax.imshow(full3,cmap='YlOrBr_r')
        # Overlay each word on its heatmap cell.
        for i in range(len(full2)):
            for j in range(10):
                ax.text(j,i, full2[i,j], ha="center", va="center")
        plt.axis('off')
        ax.set_aspect(0.3)
        plt.grid(True)
plt.show() | 30.097087 | 186 | 0.576882 |
55d3a610da3467d16c45533e5d12b2a9f0ad38ba | 1,457 | py | Python | adbc/zql/builders/core.py | aleontiev/apg | c6a10a9b0a576913c63ed4f093e2a0fa7469af87 | [
"MIT"
] | 2 | 2020-07-17T16:33:42.000Z | 2020-07-21T04:48:38.000Z | adbc/zql/builders/core.py | aleontiev/apg | c6a10a9b0a576913c63ed4f093e2a0fa7469af87 | [
"MIT"
] | null | null | null | adbc/zql/builders/core.py | aleontiev/apg | c6a10a9b0a576913c63ed4f093e2a0fa7469af87 | [
"MIT"
] | null | null | null | from adbc.zql.validator import Validator
| 18.922078 | 52 | 0.315031 |
55d3b92efdbe3c9a4d84e47ec3fda8ecb4588bca | 426 | py | Python | setup.py | InTheMorning/python-bme280 | 47af2784c937bed429d8986b5205b495e03d74f2 | [
"MIT"
] | null | null | null | setup.py | InTheMorning/python-bme280 | 47af2784c937bed429d8986b5205b495e03d74f2 | [
"MIT"
] | null | null | null | setup.py | InTheMorning/python-bme280 | 47af2784c937bed429d8986b5205b495e03d74f2 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(name='bme280',
version='1.0.0',
packages=['bme280'],
install_requires=['smbus2'],
python_requires='>=2.7',
url='https://dev.mycrobase.de/gitea/cn/python-bme280',
author='Christian Nicolai',
description='A python library for accessing the BME280 combined humidity and pressure sensor from Bosch.',
long_description=open('README.md').read())
| 30.428571 | 112 | 0.671362 |
55d3d277d3db0f3730f055eade9ab037ac954a49 | 1,190 | py | Python | List/learnlist.py | shahasifbashir/LearnPython | 4ce6b81d66ea7bbf0a40427871daa4e563b6a184 | [
"MIT"
] | null | null | null | List/learnlist.py | shahasifbashir/LearnPython | 4ce6b81d66ea7bbf0a40427871daa4e563b6a184 | [
"MIT"
] | null | null | null | List/learnlist.py | shahasifbashir/LearnPython | 4ce6b81d66ea7bbf0a40427871daa4e563b6a184 | [
"MIT"
] | null | null | null | # A simple list
myList = [10,20,4,5,6,2,9,10,2,3,34,14]
#print the whole list
print("The List is {}".format(myList))
# printing elemts of the list one by one
print("printing elemts of the list one by one")
for elements in myList:
print(elements)
print("")
#printing elements that are greater than 10 only
print("printing elements that are greater than 10 only")
for elements in myList:
if(elements>10):
print(elements)
#printing elements that are greater that 10 but by using a list and appending the elements on it
newList = []
for elements in myList:
if(elements <10):
newList.append(elements)
print("")
print("Print the new List \n{}".format(newList))
#print the above list part using a single line
print(" The list is {}".format([item for item in myList if item < 10]))
# here [item { This is the out put} for item { the is the for part} in myList {This Is the input list} if item <10 {This is the condition}]
#Ask the user for an input and print the elemets of list less than that number
print("Input a number : ")
num = int(input())
print(" The elemnts of the list less that {} are {}".format(num,[item for item in myList if item < num]))
| 25.869565 | 139 | 0.696639 |
55d68de8c22f2deefdb481f4a73d47295a2e3b27 | 870 | py | Python | pmapi/app.py | jbushman/primemirror-api | 4844d57b5581a2d537996c77eec65956ef5f1dc9 | [
"Apache-2.0"
] | null | null | null | pmapi/app.py | jbushman/primemirror-api | 4844d57b5581a2d537996c77eec65956ef5f1dc9 | [
"Apache-2.0"
] | null | null | null | pmapi/app.py | jbushman/primemirror-api | 4844d57b5581a2d537996c77eec65956ef5f1dc9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
from pmapi.config import Config, get_logger
import os
import logging
import requests
import connexion
from flask import Flask, request
logger = get_logger()
# if not Config.TOKEN:
# data = {
# "hostname": Config.HOSTNAME,
# "ip": Config.IP,
# "state": Config.STATE,
# "url": Config.URL,
# "service_type": Config.SERVICE_TYPE,
# "roles": "'service', 'primemirror'",
# }
# logging.info("Registering Service: ".format(data))
# r = requests.post("{}/register/service".format(Config.DEPLOYMENT_API_URI), json=data, verify=False)
# resp = r.json()
# if "TOKEN" in resp:
# update_env("TOKEN", resp["TOKEN"])
flask_app = connexion.FlaskApp(__name__)
flask_app.add_api("openapi.yaml", validate_responses=True, strict_validation=True)
app = flask_app.app
app.config.from_object(Config)
| 24.857143 | 104 | 0.670115 |
55d7d78c6937d21c0eddc062cc73761c958ba202 | 1,175 | py | Python | python/setup.py | chrisdembia/StateMint | 53fdaabc7ba83fb477523ae9b79ccc964e791080 | [
"BSD-3-Clause"
] | null | null | null | python/setup.py | chrisdembia/StateMint | 53fdaabc7ba83fb477523ae9b79ccc964e791080 | [
"BSD-3-Clause"
] | null | null | null | python/setup.py | chrisdembia/StateMint | 53fdaabc7ba83fb477523ae9b79ccc964e791080 | [
"BSD-3-Clause"
] | null | null | null | import setuptools
# Use the README as the long description shown on PyPI.
with open('README.md') as f:
    long_description=f.read()
# Packaging metadata for the StateMint library (sympy is the only dependency).
setuptools.setup(
	name="StateMint",
	version="1.0.0",
	author="Cameron Devine",
	author_email="camdev@uw.edu",
	description="A library for finding State Space models of dynamical systems.",
	long_description=long_description,
	long_description_content_type='text/markdown',
	url="https://github.com/CameronDevine/StateMint",
	packages=setuptools.find_packages(),
	python_requires=">=2.7",
	install_requires=("sympy>=0.7.3",),
	classifiers=(
		"Development Status :: 4 - Beta",
		"Framework :: Jupyter",
		"Intended Audience :: Education",
		"Intended Audience :: Science/Research",
		"License :: OSI Approved :: BSD License",
		"Programming Language :: Python :: 2",
		"Programming Language :: Python :: 2.7",
		"Programming Language :: Python :: 3",
		"Programming Language :: Python :: 3.0",
		"Programming Language :: Python :: 3.1",
		"Programming Language :: Python :: 3.2",
		"Programming Language :: Python :: 3.3",
		"Programming Language :: Python :: 3.4",
		"Programming Language :: Python :: 3.5",
		"Programming Language :: Python :: 3.6",
		"Operating System :: OS Independent",
	),
)
55d8e1c6fdbebec334001ecd1716470ce185570d | 1,001 | py | Python | cha_bebe/presente/migrations/0001_initial.py | intelektos/Cha_bebe | 23df4af3901413c9c50e73bd305ade165c81001b | [
"MIT"
] | null | null | null | cha_bebe/presente/migrations/0001_initial.py | intelektos/Cha_bebe | 23df4af3901413c9c50e73bd305ade165c81001b | [
"MIT"
] | 9 | 2020-06-08T03:31:08.000Z | 2022-01-13T02:44:42.000Z | cha_bebe/presente/migrations/0001_initial.py | intelektos/Cha_bebe | 23df4af3901413c9c50e73bd305ade165c81001b | [
"MIT"
] | 1 | 2020-06-01T17:43:20.000Z | 2020-06-01T17:43:20.000Z | # Generated by Django 3.0.6 on 2020-05-14 18:13
from django.db import migrations, models
| 33.366667 | 114 | 0.566434 |
55da18f8f5bba77168080eaa5260eeadfe4bb7f4 | 2,376 | py | Python | src/rekognition_online_action_detection/models/feature_head.py | amazon-research/long-short-term-transformer | a425be4b52ab68fddd85c91d26571e4cdfe8379a | [
"Apache-2.0"
] | 52 | 2021-11-19T01:35:10.000Z | 2022-03-24T11:48:10.000Z | src/rekognition_online_action_detection/models/feature_head.py | amazon-research/long-short-term-transformer | a425be4b52ab68fddd85c91d26571e4cdfe8379a | [
"Apache-2.0"
] | 9 | 2021-11-24T18:50:13.000Z | 2022-03-10T05:13:53.000Z | src/rekognition_online_action_detection/models/feature_head.py | amazon-research/long-short-term-transformer | a425be4b52ab68fddd85c91d26571e4cdfe8379a | [
"Apache-2.0"
] | 8 | 2022-01-15T08:01:33.000Z | 2022-03-20T22:08:29.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
__all__ = ['build_feature_head']
import torch
import torch.nn as nn
from rekognition_online_action_detection.utils.registry import Registry
# Registry of feature-head builders; implementations elsewhere register
# themselves here (presumably keyed by head name - see build_feature_head).
FEATURE_HEADS = Registry()
# Output feature dimensionality of each supported backbone/modality
# combination (rgb vs. optical flow, per pretraining dataset).
FEATURE_SIZES = {
    'rgb_anet_resnet50': 2048,
    'flow_anet_resnet50': 2048,
    'rgb_kinetics_bninception': 1024,
    'flow_kinetics_bninception': 1024,
    'rgb_kinetics_resnet50': 2048,
    'flow_kinetics_resnet50': 2048,
}
| 33.942857 | 83 | 0.673822 |
55dae12ae7fedf07888052fca21d9aabf3ce95df | 1,367 | py | Python | main.py | klarman-cell-observatory/cirrocumulus-app-engine | 52997ae790773364591ab8d7c747e4505700373b | [
"BSD-3-Clause"
] | null | null | null | main.py | klarman-cell-observatory/cirrocumulus-app-engine | 52997ae790773364591ab8d7c747e4505700373b | [
"BSD-3-Clause"
] | 1 | 2021-04-13T14:52:39.000Z | 2021-04-13T15:53:34.000Z | main.py | klarman-cell-observatory/cirrocumulus-app-engine | 52997ae790773364591ab8d7c747e4505700373b | [
"BSD-3-Clause"
] | null | null | null | import os
import sys
sys.path.append('lib')
from flask import Flask, send_from_directory
import cirrocumulus
from cirrocumulus.cloud_firestore_native import CloudFireStoreNative
from cirrocumulus.api import blueprint
from cirrocumulus.envir import CIRRO_AUTH_CLIENT_ID, CIRRO_AUTH, CIRRO_DATABASE, CIRRO_DATASET_PROVIDERS
from cirrocumulus.google_auth import GoogleAuth
from cirrocumulus.no_auth import NoAuth
from cirrocumulus.util import add_dataset_providers
# Serve the bundled front-end from the cirrocumulus package directory.
client_path = os.path.join(cirrocumulus.__path__[0], 'client')
# If `entrypoint` is not defined in app.yaml, App Engine will look for an app
# called `app` in `main.py`.
app = Flask(__name__, static_folder=client_path, static_url_path='')
app.register_blueprint(blueprint, url_prefix='/api')
# Use Google OAuth when a client id is configured, otherwise run with no auth.
if os.environ.get(CIRRO_AUTH_CLIENT_ID) is not None:
    app.config[CIRRO_AUTH] = GoogleAuth(os.environ.get(CIRRO_AUTH_CLIENT_ID))
else:
    app.config[CIRRO_AUTH] = NoAuth()
# Native-mode Firestore is the backing database on App Engine.
app.config[CIRRO_DATABASE] = CloudFireStoreNative()
# Advertise the dataset backends, then let the util module wire them up.
os.environ[CIRRO_DATASET_PROVIDERS] = ','.join(['cirrocumulus.zarr_dataset.ZarrDataset',
                                                'cirrocumulus.parquet_dataset.ParquetDataset'])
add_dataset_providers()
# Local development entry point only; App Engine ignores this guard.
if __name__ == '__main__':
    app.run(host='127.0.0.1', port=5000, debug=True)
55db43f69d53783216fd36c9fb7e70e68c557460 | 823 | py | Python | utils/load_externals.py | uvasrg/FeatureSqueezing | 8448fbff07bf03ff81a52dbd7e014d5733035f56 | [
"MIT"
] | 56 | 2017-05-19T23:30:13.000Z | 2021-11-16T09:15:48.000Z | utils/load_externals.py | pengpengqiao/FeatureSqueezing | 5ca04dc704dda578df53f5234f4dabbfc3e3ec62 | [
"MIT"
] | 1 | 2018-03-12T03:47:45.000Z | 2018-03-12T03:47:45.000Z | utils/load_externals.py | pengpengqiao/FeatureSqueezing | 5ca04dc704dda578df53f5234f4dabbfc3e3ec62 | [
"MIT"
] | 19 | 2017-06-11T08:33:19.000Z | 2022-01-03T09:46:44.000Z | import sys, os
external_libs = {'Cleverhans v1.0.0': "externals/cleverhans",
'Tensorflow-Model-Resnet': "externals/tensorflow-models",
}
project_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
for lib_name, lib_path in external_libs.iteritems():
lib_path = os.path.join(project_path, lib_path)
if os.listdir(lib_path) == []:
cmd = "git submodule update --init --recursive"
print("Fetching external libraries...")
os.system(cmd)
if lib_name == 'Tensorflow-Model-Resnet':
lib_token_fpath = os.path.join(lib_path, 'resnet', '__init__.py')
if not os.path.isfile(lib_token_fpath):
open(lib_token_fpath, 'a').close()
sys.path.append(lib_path)
print("Located %s" % lib_name)
# print (sys.path)
| 32.92 | 75 | 0.64277 |
55dc16af3929e96db5e96a0d381158d79e762fbd | 2,333 | py | Python | research/seq_flow_lite/utils/misc_utils.py | hjkim-haga/TF-OD-API | 22ac477ff4dfb93fe7a32c94b5f0b1e74330902b | [
"Apache-2.0"
] | null | null | null | research/seq_flow_lite/utils/misc_utils.py | hjkim-haga/TF-OD-API | 22ac477ff4dfb93fe7a32c94b5f0b1e74330902b | [
"Apache-2.0"
] | null | null | null | research/seq_flow_lite/utils/misc_utils.py | hjkim-haga/TF-OD-API | 22ac477ff4dfb93fe7a32c94b5f0b1e74330902b | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""A module for miscelaneous utils."""
import tensorflow as tf
def random_substr(str_tensor, max_words):
  """Select random substring if the input has more than max_words.

  Each string in the batch is whitespace-split into words; examples longer
  than max_words keep a randomly positioned contiguous window of max_words
  words, shorter examples are returned unchanged (re-joined with spaces).
  """
  # Ragged split: row_splits delimit each example's words within 'words'.
  word_batch_r = tf.strings.split(str_tensor)
  row_splits = word_batch_r.row_splits
  words = word_batch_r.values
  start_idx = row_splits[:-1]
  end_idx = row_splits[1:]
  words_per_example = end_idx - start_idx
  ones = tf.ones_like(end_idx)
  # Number of valid window start offsets per example, clamped to >= 1.
  max_val = tf.maximum(ones, words_per_example - max_words)
  max_words_batch = tf.reduce_max(words_per_example)
  # One random draw per example; floormod maps it into [0, max_val).
  rnd = tf.random.uniform(
      tf.shape(start_idx), minval=0, maxval=max_words_batch, dtype=tf.int64)
  off_start_idx = tf.math.floormod(rnd, max_val)
  # max_val == 1 marks examples with <= max_words + 1 words: keep them whole.
  new_words_per_example = tf.where(
      tf.equal(max_val, 1), words_per_example, ones * max_words)
  new_start_idx = start_idx + off_start_idx
  new_end_idx = new_start_idx + new_words_per_example
  # Flat word indices falling inside any example's selected window.
  indices = tf.expand_dims(tf.range(tf.size(words), dtype=tf.int64), axis=0)
  within_limit = tf.logical_and(
      tf.greater_equal(indices, tf.expand_dims(new_start_idx, axis=1)),
      tf.less(indices, tf.expand_dims(new_end_idx, axis=1)))
  keep_indices = tf.reduce_any(within_limit, axis=0)
  keep_indices = tf.cast(keep_indices, dtype=tf.int32)
  # Partition 1 holds the kept words, in their original order.
  _, selected_words = tf.dynamic_partition(words, keep_indices, 2)
  # Rebuild ragged row boundaries from the new per-example lengths.
  row_splits = tf.math.cumsum(new_words_per_example)
  row_splits = tf.concat([[0], row_splits], axis=0)
  new_tensor = tf.RaggedTensor.from_row_splits(
      values=selected_words, row_splits=row_splits)
  return tf.strings.reduce_join(new_tensor, axis=1, separator=" ")
| 46.66 | 81 | 0.714102 |
55dc932db8d55326783afe7c9ef113e659643f67 | 2,503 | py | Python | parc/pra__/incomplete_13910.py | KwanHoo/Data-Structure__Algorithm | b985f8b41a366b9c028da711ea43a643151268e2 | [
"MIT"
] | null | null | null | parc/pra__/incomplete_13910.py | KwanHoo/Data-Structure__Algorithm | b985f8b41a366b9c028da711ea43a643151268e2 | [
"MIT"
] | null | null | null | parc/pra__/incomplete_13910.py | KwanHoo/Data-Structure__Algorithm | b985f8b41a366b9c028da711ea43a643151268e2 | [
"MIT"
] | null | null | null | ## 13910
##
##
## ( )
'''
##! ex) N = 4, 5 X, 4 3 X => 4 4
##* ex) N = 5, 1,3 / first : 1+3 = 4 , second : 1 => 5 --> 2
##* , =>
# In1 ) N M : ( ) N | ( ) M
# In2 ) S : S M ( )
# out ) | -1
'''
'''
ex1I) 5 2
ex1I) 1 3
out ) 2
ex2I) 6 2
ex2I) 1 3
out ) 2
5 2
2 4
=> 4|1 1<2 : -1
13 3
'''
import sys
##
## 1 , 2
##
if __name__ == "__main__":
print('hello')
N, M = map(int, sys.stdin.readline().split())
wig = list(map(int, sys.stdin.readline().split()))
wig.sort()#
# print(wig)
# print(cooking(N, M, wig)) ##
# print(cooking2(N,M,wig)) ## 2
print(cooking4(N,M,wig)) ## | 20.68595 | 83 | 0.411506 |
55dce36c7d1bd205aea80744f2bd0ceb8afc6832 | 1,169 | py | Python | manage/db_logger.py | ReanGD/web-home-manage | bbc5377a1f7fde002442fee7720e4ab9e9ad22b3 | [
"Apache-2.0"
] | null | null | null | manage/db_logger.py | ReanGD/web-home-manage | bbc5377a1f7fde002442fee7720e4ab9e9ad22b3 | [
"Apache-2.0"
] | null | null | null | manage/db_logger.py | ReanGD/web-home-manage | bbc5377a1f7fde002442fee7720e4ab9e9ad22b3 | [
"Apache-2.0"
] | null | null | null | import sys
import traceback
from manage.models import LoadLog
| 25.977778 | 79 | 0.597092 |
55dcf3dd3bd27fb171fb592911ad357dd0bb432c | 5,623 | py | Python | api/src/result_handler.py | Aragos/tichu-tournament | 4cdf727a30af8820ad56fe3097ec9a8e84892068 | [
"MIT"
] | 7 | 2016-12-12T02:29:42.000Z | 2020-05-12T21:21:21.000Z | api/src/result_handler.py | Aragos/tichu-tournament | 4cdf727a30af8820ad56fe3097ec9a8e84892068 | [
"MIT"
] | 31 | 2017-01-05T06:07:28.000Z | 2018-05-27T13:13:06.000Z | api/src/result_handler.py | Aragos/tichu-tournament | 4cdf727a30af8820ad56fe3097ec9a8e84892068 | [
"MIT"
] | 3 | 2017-12-21T23:30:12.000Z | 2019-01-03T20:51:52.000Z | import webapp2
import json
from generic_handler import GenericHandler
from python.calculator import Calculate
from python.calculator import GetMaxRounds
from google.appengine.api import users
from handler_utils import BuildMovementAndMaybeSetStatus
from handler_utils import CheckUserOwnsTournamentAndMaybeReturnStatus
from handler_utils import GetTourneyWithIdAndMaybeReturnStatus
from handler_utils import SetErrorStatus
from python.jsonio import ReadJSONInput
from python.jsonio import OutputJSON
from python.xlsxio import WriteResultsToXlsx
from python.xlsxio import OutputWorkbookAsBytesIO
from models import PlayerPair
from models import Tournament
def GetPlayerListForTourney(tourney):
  ''' Returns a list of (name1, name2) tuples of player names for every pair.

  Slots with no PlayerPair record (or with unparseable player data) keep their
  1-based pair number as a placeholder, matching the original behavior.
  '''
  # list(...) keeps this working on Python 3, where range() is a lazy
  # sequence that does not support item assignment.
  name_list = list(range(1, tourney.no_pairs + 1))
  for player_pair in PlayerPair.query(ancestor=tourney.key).fetch():
    if player_pair.players:
      player_list = player_pair.player_list()
      if not player_list:
        # Empty parse result: leave the numeric placeholder in place.
        continue
      elif len(player_list) == 1:
        name_list[player_pair.pair_no - 1] = (player_list[0].get("name"),
                                              None)
      else:
        # Two or more players recorded: only the first two names are used.
        name_list[player_pair.pair_no - 1] = (player_list[0].get("name"),
                                              player_list[1].get("name"))
    else:
      name_list[player_pair.pair_no - 1] = (None, None)
  return name_list
| 38.251701 | 111 | 0.686111 |
55de8a6657e59552d97157f0e3318b5e7abae0d2 | 323 | py | Python | electsysApi/shared/exception.py | yuxiqian/electsys-api | 52b42729e797f8bdf6a0827e9d62a50919d56d65 | [
"MIT"
] | 5 | 2019-01-21T00:44:33.000Z | 2022-01-03T16:45:25.000Z | electsysApi/shared/exception.py | yuxiqian/electsys-api | 52b42729e797f8bdf6a0827e9d62a50919d56d65 | [
"MIT"
] | 1 | 2021-10-24T00:46:59.000Z | 2021-10-24T00:46:59.000Z | electsysApi/shared/exception.py | yuxiqian/electsys-api | 52b42729e797f8bdf6a0827e9d62a50919d56d65 | [
"MIT"
] | 2 | 2019-01-12T03:18:33.000Z | 2021-06-16T11:19:49.000Z | #!/usr/bin/env python
# encoding: utf-8
'''
@author: yuxiqian
@license: MIT
@contact: akaza_akari@sjtu.edu.cn
@software: electsys-api
@file: electsysApi/shared/exception.py
@time: 2019/1/9
'''
| 14.043478 | 38 | 0.721362 |
55ded0b36a3a4b147484ae30e7276b05b17dc456 | 2,375 | py | Python | src/CryptoPlus/Cipher/ARC2.py | voytecPL/pycryptoplus | 86905bbb8661e00cfb2afdc4461d4a79b6429d8a | [
"MIT"
] | 1 | 2022-02-27T17:46:18.000Z | 2022-02-27T17:46:18.000Z | src/CryptoPlus/Cipher/ARC2.py | voytecPL/pycryptoplus | 86905bbb8661e00cfb2afdc4461d4a79b6429d8a | [
"MIT"
] | null | null | null | src/CryptoPlus/Cipher/ARC2.py | voytecPL/pycryptoplus | 86905bbb8661e00cfb2afdc4461d4a79b6429d8a | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from .blockcipher import *
import Crypto.Cipher.ARC2
import Crypto
from pkg_resources import parse_version
def new(key,mode=MODE_ECB,IV=None,counter=None,segment_size=None,effective_keylen=None):
    """Create a new cipher object
    ARC2 using pycrypto for algo and pycryptoplus for ciphermode
    key = raw string containing the keys
    mode = python_AES.MODE_ECB/CBC/CFB/OFB/CTR/CMAC, default is ECB
    IV = IV as a raw string, default is "all zero" IV
        -> only needed for CBC mode
    counter = counter object (CryptoPlus.Util.util.Counter)
        -> only needed for CTR mode
    segment_size = amount of bits to use from the keystream in each chain part
        -> supported values: multiple of 8 between 8 and the blocksize
           of the cipher (only per byte access possible), default is 8
        -> only needed for CFB mode
    effective_keylen = how much bits to effectively use from the supplied key
        -> will only be used when the pycrypto version on your system is >2.0.1
    EXAMPLES:
    **********
    IMPORTING:
    -----------
    >>> import codecs
    >>> from CryptoPlus.Cipher import ARC2
    http://www.ietf.org/rfc/rfc2268.txt
    Doctest will fail when using pycrypto 2.0.1 and older
    ------------------------------------
    >>> key = codecs.decode("0000000000000000", 'hex')
    >>> plaintext = codecs.decode("0000000000000000", 'hex')
    >>> ek = 63
    >>> cipher = ARC2.new(key,ARC2.MODE_ECB,effective_keylen=ek)
    >>> codecs.encode(cipher.encrypt(plaintext), 'hex')
    b'ebb773f993278eff'
    """
    # NOTE(review): 'ARC2' here is not the imported Crypto.Cipher.ARC2 module
    # but a wrapper class presumably defined later in this module (not visible
    # in this excerpt). The argument order (effective_keylen before
    # segment_size) deliberately differs from this function's signature -
    # confirm it matches the class constructor.
    return ARC2(key,mode,IV,counter,effective_keylen,segment_size)
# Run the embedded doctests when executed directly; _test is presumably the
# doctest driver defined elsewhere in this module (not visible here).
if __name__ == "__main__":
    _test()
| 38.306452 | 88 | 0.656421 |
55e1293b8209552c67ecb749af45c55f2d9be6aa | 1,121 | py | Python | extensions/roles.py | iLuiizUHD/Expertise-Bot-v2 | 2b5264804d14d74ce1c0511dede434b7225683e0 | [
"MIT"
] | 2 | 2020-11-01T02:44:58.000Z | 2021-02-21T18:05:39.000Z | extensions/roles.py | iLuiizUHD/Expertise-Bot-v2 | 2b5264804d14d74ce1c0511dede434b7225683e0 | [
"MIT"
] | 1 | 2020-09-13T20:53:26.000Z | 2020-09-13T20:53:26.000Z | extensions/roles.py | iLuiizUHD/ExpertiseBot2 | 2b5264804d14d74ce1c0511dede434b7225683e0 | [
"MIT"
] | null | null | null | # Utilities
import json
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# Imports
from discord.ext import commands
from discord import Guild, Role
# Loading config file...
# Parse the bot configuration at import time. The "./config.json" path is
# relative, so loading depends on the process's current working directory.
with open("./config.json", "r", encoding="utf-8") as config:
    configFile = json.load(config)
| 26.069767 | 66 | 0.64942 |
55e14400b4aed5430ec4803712092997b45a1d19 | 4,076 | py | Python | amun/measure_accuracy.py | Elkoumy/amun | db07129450979cb8dd95b086b8e4187facb85bb8 | [
"Apache-2.0"
] | 10 | 2020-12-03T08:30:51.000Z | 2021-12-12T11:03:47.000Z | amun/measure_accuracy.py | Elkoumy/amun | db07129450979cb8dd95b086b8e4187facb85bb8 | [
"Apache-2.0"
] | 1 | 2021-10-01T09:52:26.000Z | 2021-10-07T08:52:46.000Z | amun/measure_accuracy.py | Elkoumy/amun | db07129450979cb8dd95b086b8e4187facb85bb8 | [
"Apache-2.0"
] | null | null | null | """
In this module, we implement the accuracy measures to evaluate the effect of differential privacy injection.
In this module, we support the following measures:
* F1-score.
* Earth Mover's distance.
"""
from scipy.stats import wasserstein_distance
from pm4py.algo.discovery.inductive import factory as inductive_miner
from pm4py.evaluation.replay_fitness import factory as replay_factory
from math import fabs
import pandas as pd
| 41.591837 | 177 | 0.707802 |
55e1eb5bf2eb00d7ba492fd1c7a964baab5327be | 10,845 | py | Python | mkt/translations/models.py | ngokevin/zamboni | a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69 | [
"BSD-3-Clause"
] | null | null | null | mkt/translations/models.py | ngokevin/zamboni | a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69 | [
"BSD-3-Clause"
] | null | null | null | mkt/translations/models.py | ngokevin/zamboni | a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69 | [
"BSD-3-Clause"
] | null | null | null | import collections
from itertools import groupby
from django.db import connections, models, router
from django.db.models.deletion import Collector
from django.utils import encoding
import bleach
import commonware.log
from mkt.site.models import ManagerBase, ModelBase
from mkt.site.utils import linkify_with_outgoing
from . import utils
log = commonware.log.getLogger('z.translations')
def delete_translation(obj, fieldname):
    """Null out the translated field ``fieldname`` on ``obj`` and delete the
    Translation row it pointed at, if any."""
    model_field = obj._meta.get_field(fieldname)
    translation_pk = getattr(obj, model_field.attname)
    # Detach the reference first, then remove the now-orphaned Translation.
    obj.update(**{model_field.name: None})
    if translation_pk:
        Translation.objects.filter(id=translation_pk).delete()
def attach_trans_dict(model, objs):
    """Put all translations into a translations dict.

    Fetches every Translation row referenced by the translated fields of
    *objs* in one query and attaches an ``obj.translations`` defaultdict
    mapping translation id -> list of (locale, unicode string) tuples.
    """
    # Get the ids of all the translations we need to fetch.
    fields = model._meta.translated_fields
    ids = [getattr(obj, f.attname) for f in fields
           for obj in objs if getattr(obj, f.attname, None) is not None]
    # Get translations in a dict, ids will be the keys. It's important to
    # consume the result of groupby, which is an iterator.
    # (_sorted_groupby is a helper defined elsewhere in this module.)
    qs = Translation.objects.filter(id__in=ids, localized_string__isnull=False)
    all_translations = dict((k, list(v)) for k, v in
                            _sorted_groupby(qs, lambda trans: trans.id))
    def get_locale_and_string(translation, new_class):
        """Convert the translation to new_class (making PurifiedTranslations
        and LinkifiedTranslations work) and return locale / string tuple."""
        # Re-class the row by copying its __dict__ so new_class's __unicode__
        # (e.g. sanitizing/linkifying) is used when rendering.
        converted_translation = new_class()
        converted_translation.__dict__ = translation.__dict__
        return (converted_translation.locale.lower(),
                unicode(converted_translation))
    # Build and attach translations for each field on each object.
    for obj in objs:
        obj.translations = collections.defaultdict(list)
        for field in fields:
            t_id = getattr(obj, field.attname, None)
            field_translations = all_translations.get(t_id, None)
            if not t_id or field_translations is None:
                continue
            # field.rel.to is the field's Translation subclass (Django 1.x API).
            obj.translations[t_id] = [get_locale_and_string(t, field.rel.to)
                                      for t in field_translations]
| 36.0299 | 100 | 0.643799 |
55e27b739ace5413321cb8d38b36117252a799e4 | 2,564 | py | Python | flow/sequential.py | altosaar/hierarchical-variational-models-physics | 611d91e0281664d7d5ba1679bec7adfb3aac41e2 | [
"MIT"
] | 14 | 2020-05-10T20:44:49.000Z | 2022-01-12T23:06:24.000Z | flow/sequential.py | altosaar/hierarchical-variational-models-physics | 611d91e0281664d7d5ba1679bec7adfb3aac41e2 | [
"MIT"
] | null | null | null | flow/sequential.py | altosaar/hierarchical-variational-models-physics | 611d91e0281664d7d5ba1679bec7adfb3aac41e2 | [
"MIT"
] | null | null | null | import torch
from torch import nn
| 35.123288 | 74 | 0.710608 |
55e3a6acd9ba82563797c1dceb04e6f788b6036d | 3,827 | py | Python | inmoov/scripts/animation_executor.py | mish3albaiz/Robotics_ECE579 | efb654040015671a0656eaee4c78ec085d862996 | [
"BSD-3-Clause"
] | 1 | 2020-02-13T21:13:08.000Z | 2020-02-13T21:13:08.000Z | inmoov/scripts/animation_executor.py | mish3albaiz/Robotics_ECE579 | efb654040015671a0656eaee4c78ec085d862996 | [
"BSD-3-Clause"
] | null | null | null | inmoov/scripts/animation_executor.py | mish3albaiz/Robotics_ECE579 | efb654040015671a0656eaee4c78ec085d862996 | [
"BSD-3-Clause"
] | null | null | null |
import time
from os.path import join, dirname
import sys
whereami = dirname(__file__)
scripts_dir= join(whereami, "../scripts/")
sys.path.append(scripts_dir)
from json_parsing import read_json
import Inmoov
# JSON files describing poses and animations, resolved relative to this file.
filename_pose = join(whereami, '../json/pose.json')
filename_animation = join(whereami, '../json/animations.json')
# global objects that hold the json file contents
# so i can control when/how often to read the json file
# in the inmoov object, when it receives messages, it only needs to update at bootup. json will not change after bootup.
# in the gui, it should update each time it tries to run, because the gui is editing the files.
global_poses = None
global_animations = None
# TODO: if we are keeping the killlist idea, make it cleaner & easy to remove when transferring to a robot that doesn't need it
# TODO: be more intelligent about when we need to read the animation/pose json files
#killtime = 1
# Servos to exclude/disable on this particular robot (see TODO above).
killlist = ["left_shoulder_lift_front","left_arm_rotate","right_arm_rotate","right_shoulder_lift_front"]
# Manual test driver. NOTE(review): do_animation is not imported or defined in
# this excerpt - presumably defined elsewhere in this file; verify.
if __name__ == '__main__':
    this_inmoov = Inmoov.Inmoov()
    do_animation(this_inmoov, 'rps_paper')
    time.sleep(5)
    exit()
    # NOTE(review): everything below is unreachable because of exit() above -
    # it looks like a temporarily disabled head-turn test sequence.
    do_animation(this_inmoov, 'headright_anim')
    time.sleep(5)
    do_animation(this_inmoov, 'headleft_anim')
    time.sleep(5)
    do_animation(this_inmoov, 'headright_anim')
    time.sleep(5)
| 37.891089 | 127 | 0.686961 |
55e3e019d60ec9acd28cad6159176037b75aa670 | 930 | py | Python | Python/1629.py | GeneralLi95/leetcode | f42392f2283e19ec76273d81b2912944f9039568 | [
"MIT"
] | null | null | null | Python/1629.py | GeneralLi95/leetcode | f42392f2283e19ec76273d81b2912944f9039568 | [
"MIT"
] | null | null | null | Python/1629.py | GeneralLi95/leetcode | f42392f2283e19ec76273d81b2912944f9039568 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from typing import List, Optional
from collections import defaultdict, deque
from itertools import product,combinations,permutations
# -------------------------
# -------------------------
a = Solution()
b = [9,29,49,50]
c = "cbcd"
b2 = [19,22,28,29,66,81,93,97]
c2 = "fnfaaxha"
b3 = [12,23,36,46,62]
c3 = "spuda"
print(Solution.slowestKey(a, b3, c3)) | 20.217391 | 72 | 0.615054 |
55e424ce8e62dc85462716ba6efd8eff1ffa1fd9 | 530 | py | Python | hexrd/sglite/setup.py | glemaitre/hexrd | b68b1ba72e0f480d29bdaae2adbd6c6e2380cc7c | [
"BSD-3-Clause"
] | null | null | null | hexrd/sglite/setup.py | glemaitre/hexrd | b68b1ba72e0f480d29bdaae2adbd6c6e2380cc7c | [
"BSD-3-Clause"
] | null | null | null | hexrd/sglite/setup.py | glemaitre/hexrd | b68b1ba72e0f480d29bdaae2adbd6c6e2380cc7c | [
"BSD-3-Clause"
] | null | null | null | from distutils.core import setup, Extension
srclist = ['sgglobal.c','sgcb.c','sgcharmx.c','sgfile.c',
'sggen.c','sghall.c','sghkl.c','sgltr.c','sgmath.c','sgmetric.c',
'sgnorm.c','sgprop.c','sgss.c','sgstr.c','sgsymbols.c',
'sgtidy.c','sgtype.c','sgutil.c','runtests.c','sglitemodule.c']
module = Extension('sglite', sources=srclist,
define_macros = [('PythonTypes', 1)])
setup (name='sglite',
description = 'space group info',
ext_modules = [module]
)
| 33.125 | 76 | 0.584906 |
55e48ca73e642e82cfdfccf386ed40c0b2fba12d | 725 | py | Python | app/blogging/routes.py | Sjors/patron | a496097ad0821b677c8e710e8aceb587928be31c | [
"MIT"
] | 114 | 2018-12-30T20:43:37.000Z | 2022-03-21T18:57:47.000Z | app/blogging/routes.py | Sjors/patron | a496097ad0821b677c8e710e8aceb587928be31c | [
"MIT"
] | 17 | 2019-04-25T20:20:57.000Z | 2022-03-29T21:48:35.000Z | app/blogging/routes.py | Sjors/patron | a496097ad0821b677c8e710e8aceb587928be31c | [
"MIT"
] | 17 | 2019-01-02T06:37:11.000Z | 2022-03-29T22:22:40.000Z | from app.blogging import bp
from datetime import datetime
from flask import flash, redirect, url_for
from flask_login import current_user
| 31.521739 | 68 | 0.666207 |
55e5362057afc71bf0071723cb854344bbc9e957 | 409 | py | Python | mini_cluster_07.py | jgpattis/Desres-sars-cov-2-apo-mpro | 90c07414040c0ea0bf54028e2f194d6509c8f526 | [
"MIT"
] | null | null | null | mini_cluster_07.py | jgpattis/Desres-sars-cov-2-apo-mpro | 90c07414040c0ea0bf54028e2f194d6509c8f526 | [
"MIT"
] | null | null | null | mini_cluster_07.py | jgpattis/Desres-sars-cov-2-apo-mpro | 90c07414040c0ea0bf54028e2f194d6509c8f526 | [
"MIT"
] | null | null | null | #cluster data into a small amount of clusters to later pull out structures
import pyemma.coordinates as coor
import numpy as np
sys = 'back'
tica_data = coor.load('tica_data_05/back_tica_data.h5')
n_clusters = 50
cl = coor.cluster_kmeans(tica_data, k=n_clusters, max_iter=50)
cl.save(f'{sys}_{n_clusters}_mini_cluster_object.h5', overwrite=True)
cl.write_to_hdf5(f'{sys}_{n_clusters}_cluster_dtrajs.h5')
| 27.266667 | 74 | 0.787286 |
55e68ec4c6def4aa1f467b3936144273058e5304 | 698 | py | Python | pydaily/images/tests/test_color.py | codingPingjun/pydaily | 966b96db05b3170f926aeb830ca6f81093a5371a | [
"Apache-2.0"
] | null | null | null | pydaily/images/tests/test_color.py | codingPingjun/pydaily | 966b96db05b3170f926aeb830ca6f81093a5371a | [
"Apache-2.0"
] | null | null | null | pydaily/images/tests/test_color.py | codingPingjun/pydaily | 966b96db05b3170f926aeb830ca6f81093a5371a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os, sys, pdb
from pydaily.images import graymask2rgb
from pydaily import DATA_DIR
import numpy as np
from scipy import misc
import matplotlib.pyplot as plt
if __name__ == '__main__':
test_graymask2rgb()
| 24.068966 | 85 | 0.694842 |
55e92561b0ff7599f7ae6a6d6d8a27dbdab535a8 | 63 | py | Python | reqinstall/commands/freeze/__init__.py | QualiSystems/reqinstall | 57268b185428b31368cb7246a20a6c7548fb44dc | [
"MIT"
] | null | null | null | reqinstall/commands/freeze/__init__.py | QualiSystems/reqinstall | 57268b185428b31368cb7246a20a6c7548fb44dc | [
"MIT"
] | null | null | null | reqinstall/commands/freeze/__init__.py | QualiSystems/reqinstall | 57268b185428b31368cb7246a20a6c7548fb44dc | [
"MIT"
] | null | null | null | from reqinstall.commands.freeze.freeze import PipFreezeCommand
| 31.5 | 62 | 0.888889 |
55ea56448f1d5c8396e0645cb61cbcf3e70761cc | 1,784 | py | Python | scripts/configure.py | materialdigital/pmd-server | fdc12fe3865e7783046ab5c50f00b71aceb07ebd | [
"BSD-3-Clause"
] | 1 | 2021-07-05T21:54:44.000Z | 2021-07-05T21:54:44.000Z | scripts/configure.py | materialdigital/pmd-server | fdc12fe3865e7783046ab5c50f00b71aceb07ebd | [
"BSD-3-Clause"
] | 8 | 2021-06-14T15:03:06.000Z | 2022-01-26T15:48:03.000Z | scripts/configure.py | materialdigital/pmd-server | fdc12fe3865e7783046ab5c50f00b71aceb07ebd | [
"BSD-3-Clause"
] | 3 | 2021-10-01T12:07:50.000Z | 2021-11-22T10:59:44.000Z | #! /usr/bin/env python3
import json, sys, argparse
from os.path import isfile
# ******************************************************************************
parser = argparse.ArgumentParser(description='Reads config.json and writes out docker-environment files.')
parser.add_argument('file', nargs='?', help='optional input file, if omitted, read from stdin', default='-')
parser.add_argument('-v', '--verbose', action='store_true', help="be verbose")
args = parser.parse_args()
# ******************************************************************************
if __name__ == '__main__':
config = load_config('static.json')
# prevents script from trying to read interactively from tty, only "proper" pipe allowed
if (args.file == '-' and sys.stdin.isatty()):
print ("Won't read input from tty (please use -h for help)", file=sys.stderr)
exit(1)
else:
filename = args.file
for env_file, entry in load_config(filename).items():
if env_file in config:
config[env_file].update(entry)
else:
config[env_file] = entry
shared_vars = config.pop('shared', dict())
for env_file, entry in config.items():
with open(env_file, 'w') as fh:
lines = [ "{}={}\n".format(key, get_value(val)) for key, val in entry.items()]
print("### Writing {}...". format(env_file))
fh.writelines(lines)
| 33.037037 | 108 | 0.570628 |
55eab24c8b73ac11d50c210b2451b3c1e941b6bd | 676 | py | Python | src/lib/jianshu_parser/jianshuparser.py | eebook/jianshu2e-book | d638fb8c2f47cf8e91e9f74e2e1e5f61f3c98a48 | [
"MIT"
] | 7 | 2019-01-02T14:52:48.000Z | 2021-11-05T06:11:46.000Z | src/lib/jianshu_parser/jianshuparser.py | knarfeh/jianshu2e-book | d638fb8c2f47cf8e91e9f74e2e1e5f61f3c98a48 | [
"MIT"
] | 2 | 2021-03-22T17:11:32.000Z | 2021-12-13T19:36:17.000Z | src/lib/jianshu_parser/jianshuparser.py | ee-book/jianshu2e-book | d638fb8c2f47cf8e91e9f74e2e1e5f61f3c98a48 | [
"MIT"
] | 2 | 2019-04-18T05:44:24.000Z | 2021-06-10T09:35:44.000Z | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
from src.lib.jianshu_parser.base import BaseParser
from src.lib.jianshu_parser.content.JianshuAuthor import JianshuAuthorInfo
from src.lib.jianshu_parser.content.JianshuArticle import JianshuArticle
| 28.166667 | 74 | 0.724852 |
55ebf274b2c9e17190671385e32d419938db93a1 | 306 | py | Python | vox/utils/__init__.py | DSciLab/voxpy | 4d06ffc9a52f4a2ae1eaacda7da998e75d0cc4aa | [
"MIT"
] | null | null | null | vox/utils/__init__.py | DSciLab/voxpy | 4d06ffc9a52f4a2ae1eaacda7da998e75d0cc4aa | [
"MIT"
] | null | null | null | vox/utils/__init__.py | DSciLab/voxpy | 4d06ffc9a52f4a2ae1eaacda7da998e75d0cc4aa | [
"MIT"
] | null | null | null | import numpy as np
from .one_hot import one_hot
from .rescale import LinearNormRescale255, \
CentralNormRescale255, \
GeneralNormRescale255
| 23.538462 | 45 | 0.611111 |
55ec22a317bb062a3d79bbd46b18d734b28581cf | 58 | py | Python | minimally_sufficient_pandas/__init__.py | dexplo/minimally_sufficient_pandas | d07710f03daa757f5778aa66ee68952d03467809 | [
"BSD-3-Clause"
] | null | null | null | minimally_sufficient_pandas/__init__.py | dexplo/minimally_sufficient_pandas | d07710f03daa757f5778aa66ee68952d03467809 | [
"BSD-3-Clause"
] | null | null | null | minimally_sufficient_pandas/__init__.py | dexplo/minimally_sufficient_pandas | d07710f03daa757f5778aa66ee68952d03467809 | [
"BSD-3-Clause"
] | null | null | null | from ._pandas_accessor import _MSP
__version__ = '0.0.1'
| 14.5 | 34 | 0.758621 |
55ecaf06199d8ec889aab34a7ac5ad6a8dc82793 | 350 | py | Python | src/rl/genotypes.py | xkp793003821/nas-segm-pytorch | c4b59ab56bd539bf08493c6d85072849213a3d62 | [
"BSD-2-Clause"
] | null | null | null | src/rl/genotypes.py | xkp793003821/nas-segm-pytorch | c4b59ab56bd539bf08493c6d85072849213a3d62 | [
"BSD-2-Clause"
] | null | null | null | src/rl/genotypes.py | xkp793003821/nas-segm-pytorch | c4b59ab56bd539bf08493c6d85072849213a3d62 | [
"BSD-2-Clause"
] | null | null | null | """List of operations"""
from collections import namedtuple
Genotype = namedtuple('Genotype', 'encoder decoder')
OP_NAMES = [
'conv1x1',
'conv3x3',
'sep_conv_3x3',
'sep_conv_5x5',
'global_average_pool',
'conv3x3_dil3',
'conv3x3_dil12',
'sep_conv_3x3_dil3',
'sep_conv_5x5_dil6',
'skip_connect',
'none'
]
| 17.5 | 52 | 0.648571 |
55ed312dab5a46153b2af52c1c2cf41104214f04 | 2,284 | py | Python | tools/download_typed_ast.py | hugovk/typed_ast | 8eed936014f81a55a3e17310629c40c0203327c3 | [
"Apache-2.0"
] | null | null | null | tools/download_typed_ast.py | hugovk/typed_ast | 8eed936014f81a55a3e17310629c40c0203327c3 | [
"Apache-2.0"
] | null | null | null | tools/download_typed_ast.py | hugovk/typed_ast | 8eed936014f81a55a3e17310629c40c0203327c3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Hacky script to download linux and windows typed_ast wheels from appveyor and gcloud
import os
import os.path
import json
import sys
from urllib.request import urlopen
# Appveyor download for windows wheels
api_url = 'https://ci.appveyor.com/api/'
# gcloud downloads for linux wehels
MIN_VER = 5
MAX_VER = 9
GCLOUD_URL = "https://storage.googleapis.com/typed-ast/typed_ast-{version}-cp3{pyver}-cp3{pyver}{abi_tag}-{platform}.whl"
if __name__ == '__main__':
main(sys.argv)
| 28.911392 | 121 | 0.652802 |
55ee2be125f56e9339bd29f2a5e248d4c0042d7f | 220 | py | Python | Contest/Keyence2021/a/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | Contest/Keyence2021/a/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | Contest/Keyence2021/a/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
(n,), a, b = [[*map(int, o.split())] for o in open(0)]
from itertools import*
*A, = accumulate(a, max)
print(ans := a[0] * b[0])
for i in range(1, n):
ans = max(ans, A[i] * b[i])
print(ans) | 27.5 | 54 | 0.554545 |
55efeb23d40cb01ba113e0e658a5c2e41b236597 | 10,879 | py | Python | service.py | ViscaElAyush/CSE598 | 8e95436015d466d168005846473e9e3978423913 | [
"MIT"
] | 35 | 2020-10-31T20:21:01.000Z | 2022-01-29T18:28:44.000Z | service.py | ViscaElAyush/CSE598 | 8e95436015d466d168005846473e9e3978423913 | [
"MIT"
] | null | null | null | service.py | ViscaElAyush/CSE598 | 8e95436015d466d168005846473e9e3978423913 | [
"MIT"
] | 10 | 2021-01-10T18:40:03.000Z | 2022-02-09T04:19:27.000Z | #!/usr/bin/env python
# @author Simon Stepputtis <sstepput@asu.edu>, Interactive Robotics Lab, Arizona State University
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import rclpy
from policy_translation.srv import NetworkPT, TuneNetwork
from model_src.model import PolicyTranslationModel
from utils.network import Network
from utils.tf_util import trainOnCPU, limitGPUMemory
from utils.intprim.gaussian_model import GaussianModel
import tensorflow as tf
import numpy as np
import re
from cv_bridge import CvBridge, CvBridgeError
import cv2
import matplotlib.pyplot as plt
from utils.intprim.gaussian_model import GaussianModel
import glob
import json
import pickle
import copy
# Force TensorFlow to use the CPU
FORCE_CPU = True
# Use dropout at run-time for stochastif-forward passes
USE_DROPOUT = True
# Where can we find the trained model?
MODEL_PATH = "../GDrive/model/policy_translation"
# Where is a pre-trained faster-rcnn?
FRCNN_PATH = "../GDrive/rcnn"
# Where are the GloVe word embeddings?
GLOVE_PATH = "../GDrive/glove.6B.50d.txt"
# Where is the normalization of the dataset?
NORM_PATH = "../GDrive/normalization_v2.pkl"
if FORCE_CPU:
trainOnCPU()
else:
limitGPUMemory()
print("Running Policy Translation Model")
model = PolicyTranslationModel(
od_path=FRCNN_PATH,
glove_path=GLOVE_PATH,
special=None
)
bs = 2
model((
np.ones((bs, 15), dtype=np.int64),
np.ones((bs, 6, 5), dtype=np.float32),
np.ones((bs, 500, 7), dtype=np.float32)
))
model.load_weights(MODEL_PATH)
model.summary()
if __name__ == "__main__":
ot = NetworkService()
ot.runNode() | 40.593284 | 165 | 0.603364 |
55f05ed10bf6e796822641491b85dc1b12b2b7ba | 375 | py | Python | model/pet_breed.py | IDRISSOUM/hospital_management | 56a768f29269a77bc890d40479a8aacb90867189 | [
"Unlicense"
] | null | null | null | model/pet_breed.py | IDRISSOUM/hospital_management | 56a768f29269a77bc890d40479a8aacb90867189 | [
"Unlicense"
] | null | null | null | model/pet_breed.py | IDRISSOUM/hospital_management | 56a768f29269a77bc890d40479a8aacb90867189 | [
"Unlicense"
] | null | null | null | # # -*- coding: utf-8 -*-
# # Part of BrowseInfo. See LICENSE file for full copyright and licensing details.
#
# from odoo import api, fields, models, _
#
# class pet_breed(models.Model):
# _name = 'pet.breed'
#
# name = fields.Char('Name', required = True)
# code = fields.Char('Code')
#
#
# # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | 28.846154 | 82 | 0.653333 |
55f120e7cddd6dd7d7bb9b4780eee99d7d17ddcc | 797 | py | Python | src/fireo/utils/utils.py | jshep23/FireO | f4ccac8461bcf821ae9665a942847aa9f28ee92b | [
"Apache-2.0"
] | null | null | null | src/fireo/utils/utils.py | jshep23/FireO | f4ccac8461bcf821ae9665a942847aa9f28ee92b | [
"Apache-2.0"
] | null | null | null | src/fireo/utils/utils.py | jshep23/FireO | f4ccac8461bcf821ae9665a942847aa9f28ee92b | [
"Apache-2.0"
] | null | null | null | import re
from google.cloud import firestore
| 18.97619 | 76 | 0.61606 |
55f43053f0d67231d40b9280a1fec18d43d92658 | 169 | py | Python | src/rlib/debug.py | SOM-st/PySOM | 65ef72f44252439b724a7429408dac7f8d1b1d98 | [
"MIT"
] | 22 | 2015-10-29T05:11:06.000Z | 2022-03-01T11:18:45.000Z | src/rlib/debug.py | smarr/PySOM | 65ef72f44252439b724a7429408dac7f8d1b1d98 | [
"MIT"
] | 16 | 2021-03-07T22:09:33.000Z | 2021-08-24T12:36:15.000Z | src/rlib/debug.py | SOM-st/PySOM | 65ef72f44252439b724a7429408dac7f8d1b1d98 | [
"MIT"
] | 5 | 2015-01-02T03:51:29.000Z | 2020-10-02T07:05:46.000Z | try:
from rpython.rlib.debug import make_sure_not_resized # pylint: disable=W
except ImportError:
"NOT_RPYTHON"
| 21.125 | 77 | 0.715976 |
55f5635ca095ac94e1e398b32c7f23cd1b5b52ae | 12,173 | py | Python | emr_eks_cdk/studio_live_stack.py | aws-samples/aws-cdk-for-emr-on-eks | 20c51b8c845172ea77ee4e1dbde7ffd41cad427a | [
"MIT-0"
] | 9 | 2021-03-23T06:01:32.000Z | 2021-12-28T09:01:45.000Z | emr_eks_cdk/studio_live_stack.py | aws-samples/aws-cdk-for-emr-on-eks | 20c51b8c845172ea77ee4e1dbde7ffd41cad427a | [
"MIT-0"
] | 2 | 2021-07-27T09:53:04.000Z | 2021-08-05T04:55:15.000Z | emr_eks_cdk/studio_live_stack.py | aws-samples/aws-cdk-for-emr-on-eks | 20c51b8c845172ea77ee4e1dbde7ffd41cad427a | [
"MIT-0"
] | 9 | 2021-03-23T06:01:31.000Z | 2021-12-29T14:03:14.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
from aws_cdk import aws_ec2 as ec2, aws_eks as eks, core, aws_emrcontainers as emrc, aws_iam as iam, aws_s3 as s3, custom_resources as custom, aws_acmpca as acmpca, aws_emr as emr
"""
This stack deploys the following:
- EMR Studio
""" | 43.78777 | 179 | 0.559271 |
55f657ac810bd7adff3d28ddcf6b426dbce9f289 | 291 | py | Python | dev/user-agent-stacktrace/lib/utils.py | Katharine/apisnoop | 46c0e101c6e1e13a783f5022a6f77787c0824032 | [
"Apache-2.0"
] | null | null | null | dev/user-agent-stacktrace/lib/utils.py | Katharine/apisnoop | 46c0e101c6e1e13a783f5022a6f77787c0824032 | [
"Apache-2.0"
] | 13 | 2018-08-21T04:00:44.000Z | 2019-07-03T22:36:07.000Z | dev/user-agent-stacktrace/lib/utils.py | Katharine/apisnoop | 46c0e101c6e1e13a783f5022a6f77787c0824032 | [
"Apache-2.0"
] | 1 | 2019-05-09T18:47:22.000Z | 2019-05-09T18:47:22.000Z | from collections import defaultdict
| 22.384615 | 45 | 0.639175 |
55f6b77678597fe15229ac3cf620e327925c88f6 | 1,217 | py | Python | WebMirror/management/rss_parser_funcs/feed_parse_extractKaedesan721TumblrCom.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 193 | 2016-08-02T22:04:35.000Z | 2022-03-09T20:45:41.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractKaedesan721TumblrCom.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 533 | 2016-08-23T20:48:23.000Z | 2022-03-28T15:55:13.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractKaedesan721TumblrCom.py | rrosajp/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 19 | 2015-08-13T18:01:08.000Z | 2021-07-12T17:13:09.000Z | def extractKaedesan721TumblrCom(item):
'''
Parser for 'kaedesan721.tumblr.com'
'''
bad_tags = [
'FanArt',
"htr asks",
'Spanish translations',
'htr anime','my thoughts',
'Cats',
'answered',
'ask meme',
'relay convos',
'translation related post',
'nightmare fuel',
'htr manga',
'memes',
'htrweek',
'Video Games',
'Animation',
'replies',
'jazz',
'Music',
]
if any([bad in item['tags'] for bad in bad_tags]):
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
if "my translations" in item['tags']:
tagmap = [
('Hakata Tonkotsu Ramens', 'Hakata Tonkotsu Ramens', 'translated'),
('hakata tonktosu ramens', 'Hakata Tonkotsu Ramens', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | 26.456522 | 105 | 0.576828 |
55f710f1ba87dd022df6c57369e502a39ab22bee | 8,244 | py | Python | l0bnb/tree.py | rahulmaz/L0BnB | 72c262581dd2d7e1489668c2fb2052214b6bbcdd | [
"MIT"
] | 1 | 2020-04-16T03:40:36.000Z | 2020-04-16T03:40:36.000Z | l0bnb/tree.py | rahulmaz/L0BnB | 72c262581dd2d7e1489668c2fb2052214b6bbcdd | [
"MIT"
] | 1 | 2020-04-16T04:12:12.000Z | 2020-04-16T04:12:12.000Z | l0bnb/tree.py | rahulmaz/L0BnB | 72c262581dd2d7e1489668c2fb2052214b6bbcdd | [
"MIT"
] | 1 | 2020-04-16T03:42:19.000Z | 2020-04-16T03:42:19.000Z | import time
import queue
import sys
import numpy as np
from scipy import optimize as sci_opt
from .node import Node
from .utilities import branch, is_integral
| 37.135135 | 81 | 0.533236 |
55f78570dc2c54902bbba417e6ce4621cf9434e6 | 1,819 | py | Python | miniGithub/migrations/0003_auto_20200119_0955.py | stefan096/UKS | aeabe6a9995143c006ad4143e8e876a102e9d69b | [
"MIT"
] | null | null | null | miniGithub/migrations/0003_auto_20200119_0955.py | stefan096/UKS | aeabe6a9995143c006ad4143e8e876a102e9d69b | [
"MIT"
] | 36 | 2020-01-12T17:00:23.000Z | 2020-03-21T13:25:28.000Z | miniGithub/migrations/0003_auto_20200119_0955.py | stefan096/UKS | aeabe6a9995143c006ad4143e8e876a102e9d69b | [
"MIT"
] | null | null | null | # Generated by Django 3.0.2 on 2020-01-19 09:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 41.340909 | 206 | 0.632216 |
55f7fc91f85571caa12221e2e54d28b60ea32a14 | 4,468 | py | Python | megatron/model/gpt_model.py | vat99/Megatron-LM | fd61ae95aa8f3f41aa970cb86e943a7e5bfe0d1a | [
"MIT"
] | 1 | 2022-03-29T09:16:39.000Z | 2022-03-29T09:16:39.000Z | megatron/model/gpt_model.py | vat99/Megatron-LM | fd61ae95aa8f3f41aa970cb86e943a7e5bfe0d1a | [
"MIT"
] | 5 | 2022-01-20T08:06:03.000Z | 2022-03-10T10:01:32.000Z | megatron/model/gpt_model.py | vat99/Megatron-LM | fd61ae95aa8f3f41aa970cb86e943a7e5bfe0d1a | [
"MIT"
] | 1 | 2022-03-25T12:00:47.000Z | 2022-03-25T12:00:47.000Z | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GPT-2 model."""
import torch
from megatron import get_args
from megatron import mpu
from .module import MegatronModule
from .enums import AttnMaskType
from .language_model import parallel_lm_logits
from .language_model import get_language_model
from .utils import init_method_normal
from .utils import scaled_init_method_normal
| 35.744 | 81 | 0.660922 |
55f89e67422221688251900fa69112d9cc2e2083 | 5,324 | py | Python | tests/utest/test_default_config.py | ngoan1608/robotframework-robocop | 3444bbc98102f74ebae08dcb26cd63346f9ed03e | [
"Apache-2.0"
] | 2 | 2021-12-22T01:50:52.000Z | 2022-01-05T06:32:27.000Z | tests/utest/test_default_config.py | marcel-veselka/robotframework-robocop | 4711c0dd389baa2d0346e62e1fda3c02c2dcc73b | [
"Apache-2.0"
] | null | null | null | tests/utest/test_default_config.py | marcel-veselka/robotframework-robocop | 4711c0dd389baa2d0346e62e1fda3c02c2dcc73b | [
"Apache-2.0"
] | 1 | 2021-06-30T11:01:51.000Z | 2021-06-30T11:01:51.000Z | import os
import sys
import importlib
from pathlib import Path
from unittest.mock import patch
import pytest
import robocop.config
from robocop.exceptions import InvalidArgumentError
class TestDefaultConfig:
def test_find_project_root_same_dir(self, path_to_test_data, config):
src = path_to_test_data / 'default_config'
os.chdir(str(src))
root = config.find_file_in_project_root('.robocop')
assert root == src / '.robocop'
| 42.592 | 116 | 0.646506 |
55f8affa309482626692f2a65c9326ebb9be7625 | 646 | py | Python | tests/test_forms.py | haoziyeung/elasticstack | 1fb4eb46317b402e0617badbc9034fb411a39992 | [
"BSD-2-Clause"
] | 2 | 2020-11-23T11:03:03.000Z | 2020-11-23T11:03:31.000Z | tests/test_forms.py | haoziyeung/elasticstack | 1fb4eb46317b402e0617badbc9034fb411a39992 | [
"BSD-2-Clause"
] | null | null | null | tests/test_forms.py | haoziyeung/elasticstack | 1fb4eb46317b402e0617badbc9034fb411a39992 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_elasticstack
------------
Tests for `elasticstack` forms module.
"""
from django import forms
from django.test import TestCase
from elasticstack.forms import SearchForm
| 21.533333 | 62 | 0.633127 |
55fa09f3a8c3fad0ee952c33bd12012b56fb9d68 | 668 | py | Python | AnkiIn/notetypes/ListCloze.py | Clouder0/AnkiIn | ca944bb9f79ce49bc2db62a0bfaeffe7908b48da | [
"MIT"
] | 1 | 2021-07-04T08:10:53.000Z | 2021-07-04T08:10:53.000Z | AnkiIn/notetypes/ListCloze.py | Clouder0/AnkiIn | ca944bb9f79ce49bc2db62a0bfaeffe7908b48da | [
"MIT"
] | 35 | 2021-07-03T10:50:20.000Z | 2022-01-09T09:33:17.000Z | AnkiIn/notetypes/ListCloze.py | Clouder0/AnkiIn | ca944bb9f79ce49bc2db62a0bfaeffe7908b48da | [
"MIT"
] | 2 | 2021-08-21T11:33:00.000Z | 2021-10-15T18:59:33.000Z | from .Cloze import get as cget
from ..config import dict as conf
from ..config import config_updater
notetype_name = "ListCloze"
if notetype_name not in conf["notetype"]:
conf["notetype"][notetype_name] = {}
settings = conf["notetype"][notetype_name]
priority = None
config_updater.append((update_list_cloze_config, 15))
| 23.034483 | 66 | 0.712575 |
55fadfd4280d478b35858e331edea1ce48c5383a | 9,697 | py | Python | app/routes.py | ptkaczyk/Ithacartists | 0d8effafe64b29ae1756169cac1eb4d6bc980c1d | [
"MIT"
] | null | null | null | app/routes.py | ptkaczyk/Ithacartists | 0d8effafe64b29ae1756169cac1eb4d6bc980c1d | [
"MIT"
] | null | null | null | app/routes.py | ptkaczyk/Ithacartists | 0d8effafe64b29ae1756169cac1eb4d6bc980c1d | [
"MIT"
] | null | null | null | from flask import render_template, Flask, flash, redirect, url_for, abort, request
from flask_login import login_user, logout_user, login_required
from werkzeug.urls import url_parse
from app import app, db
from app.forms import *
from app.models import *
| 42.530702 | 191 | 0.662267 |
55fb46ee1813e2c980cdc6a6a49ca860bf41a84e | 2,861 | py | Python | src/bloombox/schema/services/devices/v1beta1/DevicesService_Beta1_pb2_grpc.py | Bloombox/Python | 1b125fbdf54efb390afe12aaa966f093218c4387 | [
"Apache-2.0"
] | 4 | 2018-01-23T20:13:11.000Z | 2018-07-28T22:36:09.000Z | src/bloombox/schema/services/devices/v1beta1/DevicesService_Beta1_pb2_grpc.py | Bloombox/Python | 1b125fbdf54efb390afe12aaa966f093218c4387 | [
"Apache-2.0"
] | 159 | 2018-02-02T09:55:52.000Z | 2021-07-21T23:41:59.000Z | src/bloombox/schema/services/devices/v1beta1/DevicesService_Beta1_pb2_grpc.py | Bloombox/Python | 1b125fbdf54efb390afe12aaa966f093218c4387 | [
"Apache-2.0"
] | 3 | 2018-01-23T20:13:15.000Z | 2020-01-17T01:07:53.000Z | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from devices.v1beta1 import DevicesService_Beta1_pb2 as devices_dot_v1beta1_dot_DevicesService__Beta1__pb2
def add_DevicesServicer_to_server(servicer, server):
rpc_method_handlers = {
'Ping': grpc.unary_unary_rpc_method_handler(
servicer.Ping,
request_deserializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Ping.Request.FromString,
response_serializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Ping.Response.SerializeToString,
),
'Activate': grpc.unary_unary_rpc_method_handler(
servicer.Activate,
request_deserializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Activation.Request.FromString,
response_serializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Activation.Response.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'bloombox.schema.services.devices.v1beta1.Devices', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| 42.701493 | 119 | 0.774205 |
55fb9d49fcf1a873c80991e0f909fcb04543c2ba | 10,052 | py | Python | oslo-modules/oslo_messaging/_drivers/amqp.py | esse-io/zen-common | 8ede82ab81bad53c3b947084b812c44e329f159b | [
"Apache-2.0"
] | 1 | 2021-02-17T15:30:45.000Z | 2021-02-17T15:30:45.000Z | oslo-modules/oslo_messaging/_drivers/amqp.py | esse-io/zen-common | 8ede82ab81bad53c3b947084b812c44e329f159b | [
"Apache-2.0"
] | null | null | null | oslo-modules/oslo_messaging/_drivers/amqp.py | esse-io/zen-common | 8ede82ab81bad53c3b947084b812c44e329f159b | [
"Apache-2.0"
] | 2 | 2015-11-03T03:21:55.000Z | 2015-12-01T08:56:14.000Z | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implementations based on
AMQP. Specifically, this includes impl_kombu and impl_qpid. impl_carrot also
uses AMQP, but is deprecated and predates this code.
"""
import collections
import logging
import uuid
from oslo_config import cfg
import six
from oslo_messaging._drivers import common as rpc_common
from oslo_messaging._drivers import pool
deprecated_durable_opts = [
cfg.DeprecatedOpt('amqp_durable_queues',
group='DEFAULT'),
cfg.DeprecatedOpt('rabbit_durable_queues',
group='DEFAULT')
]
amqp_opts = [
cfg.BoolOpt('amqp_durable_queues',
default=False,
deprecated_opts=deprecated_durable_opts,
help='Use durable queues in AMQP.'),
cfg.BoolOpt('amqp_auto_delete',
default=False,
deprecated_group='DEFAULT',
help='Auto-delete queues in AMQP.'),
cfg.BoolOpt('send_single_reply',
default=False,
help='Send a single AMQP reply to call message. The current '
'behaviour since oslo-incubator is to send two AMQP '
'replies - first one with the payload, a second one to '
'ensure the other have finish to send the payload. We '
'are going to remove it in the N release, but we must '
'keep backward compatible at the same time. This option '
'provides such compatibility - it defaults to False in '
'Liberty and can be turned on for early adopters with a '
'new installations or for testing. Please note, that '
'this option will be removed in the Mitaka release.')
]
UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)
# NOTE(sileht): Even if rabbit/qpid have only one Connection class,
# this connection can be used for two purposes:
# * wait and receive amqp messages (only do read stuffs on the socket)
# * send messages to the broker (only do write stuffs on the socket)
# The code inside a connection class is not concurrency safe.
# Using one Connection class instance for doing both, will result
# of eventlet complaining of multiple greenthreads that read/write the
# same fd concurrently... because 'send' and 'listen' run in different
# greenthread.
# So, a connection cannot be shared between thread/greenthread and
# this two variables permit to define the purpose of the connection
# to allow drivers to add special handling if needed (like heatbeat).
# amqp drivers create 3 kind of connections:
# * driver.listen*(): each call create a new 'PURPOSE_LISTEN' connection
# * driver.send*(): a pool of 'PURPOSE_SEND' connections is used
# * driver internally have another 'PURPOSE_LISTEN' connection dedicated
# to wait replies of rpc call
PURPOSE_LISTEN = 'listen'
PURPOSE_SEND = 'send'
def unpack_context(conf, msg):
"""Unpack context from msg."""
context_dict = {}
for key in list(msg.keys()):
key = six.text_type(key)
if key.startswith('_context_'):
value = msg.pop(key)
context_dict[key[9:]] = value
context_dict['msg_id'] = msg.pop('_msg_id', None)
context_dict['reply_q'] = msg.pop('_reply_q', None)
context_dict['conf'] = conf
return RpcContext.from_dict(context_dict)
def pack_context(msg, context):
"""Pack context into msg.
Values for message keys need to be less than 255 chars, so we pull
context out into a bunch of separate keys. If we want to support
more arguments in rabbit messages, we may want to do the same
for args at some point.
"""
if isinstance(context, dict):
context_d = six.iteritems(context)
else:
context_d = six.iteritems(context.to_dict())
msg.update(('_context_%s' % key, value)
for (key, value) in context_d)
def _add_unique_id(msg):
"""Add unique_id for checking duplicate messages."""
unique_id = uuid.uuid4().hex
msg.update({UNIQUE_ID: unique_id})
| 37.092251 | 78 | 0.643355 |
55fbb1e9d0d4e9b678c1d12c81f6b84f0a9bebb8 | 1,551 | py | Python | scripts/agenda.py | benjaminogles/vim-head | be3e01b53d314b6f7e0d72a736fe40f38de2cf5f | [
"MIT"
] | 3 | 2020-04-13T17:47:05.000Z | 2020-05-11T17:23:02.000Z | scripts/agenda.py | benjaminogles/vim-head | be3e01b53d314b6f7e0d72a736fe40f38de2cf5f | [
"MIT"
] | 3 | 2020-04-13T16:51:27.000Z | 2020-04-13T16:53:54.000Z | scripts/agenda.py | benjaminogles/vim-head | be3e01b53d314b6f7e0d72a736fe40f38de2cf5f | [
"MIT"
] | null | null | null | #!/bin/python3
import datetime
import itertools
import sys
from heading import *
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
if __name__ == '__main__':
import argparse
inputs = from_fields_file(sys.stdin)
todos = filter(has_date, inputs)
todos = filter(is_pending, todos)
todos = sorted(todos, key=date_key)
todos = itertools.groupby(todos, key=date_key)
today = datetime.date.today()
warned = False
for date, todo_group in todos:
if date < today and not warned:
warned = True
print('\n! Overdue !')
elif date == today:
print ('\n= Today =')
elif date > today:
print('\n= %s %s =' % (days[date.weekday()], date))
prioritized = sorted(todo_group, key=priority_key())
for todo in prioritized:
print(todo)
| 27.210526 | 110 | 0.617666 |
55fbb54a4881fb0eed71b1a082583ae85646db84 | 5,635 | py | Python | clusterpy/core/toolboxes/cluster/componentsAlg/areamanager.py | CentroGeo/clusterpy_python3 | 5c2600b048836e54495dc5997a250af72f72f6e7 | [
"BSD-3-Clause"
] | 3 | 2019-09-29T15:27:57.000Z | 2021-01-23T02:05:07.000Z | clusterpy/core/toolboxes/cluster/componentsAlg/areamanager.py | CentroGeo/clusterpy_python3 | 5c2600b048836e54495dc5997a250af72f72f6e7 | [
"BSD-3-Clause"
] | null | null | null | clusterpy/core/toolboxes/cluster/componentsAlg/areamanager.py | CentroGeo/clusterpy_python3 | 5c2600b048836e54495dc5997a250af72f72f6e7 | [
"BSD-3-Clause"
] | null | null | null | # encoding: latin2
"""Algorithm utilities
G{packagetree core}
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from builtins import object
from past.utils import old_div
__author__ = "Juan C. Duque"
__credits__ = "Copyright (c) 2009-11 Juan C. Duque"
__license__ = "New BSD License"
__version__ = "1.0.0"
__maintainer__ = "RiSE Group"
__email__ = "contacto@rise-group.org"
from .areacl import AreaCl
from .dist2Regions import distanceStatDispatcher
| 33.343195 | 132 | 0.546584 |