hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
496b6450d30926a47dd2ad486b0b0f71fa6e56dd | 10,910 | py | Python | train_margin.py | youmingdeng/DMLPlayground | 37070c10278597a4413303061d60d69ce2c4f87e | [
"Apache-2.0"
] | 1 | 2021-11-11T16:05:56.000Z | 2021-11-11T16:05:56.000Z | train_margin.py | youmingdeng/DMLPlayground | 37070c10278597a4413303061d60d69ce2c4f87e | [
"Apache-2.0"
] | null | null | null | train_margin.py | youmingdeng/DMLPlayground | 37070c10278597a4413303061d60d69ce2c4f87e | [
"Apache-2.0"
] | 1 | 2020-04-01T04:50:36.000Z | 2020-04-01T04:50:36.000Z | from __future__ import division
import logging
import mxnet as mx
import numpy as np
from mxnet import autograd as ag, nd
from mxnet import gluon
from tqdm import tqdm
from common.evaluate import evaluate
from common.parser import TrainingParser
from common.utils import average_results, format_results, get_context, parse_steps, get_lr, append_postfix
from dataset import get_dataset_iterator, get_dataset
from dataset.dataloader import DatasetIterator
from models import get_feature_model
from models.marginmodels import MarginNet, MarginLoss
def validate(net, val_data, ctx, use_threads=True):
"""Test a model."""
outputs = []
labels = []
ctx_cpu = mx.cpu()
for batch in tqdm(val_data, desc='Computing test embeddings'):
data = mx.gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
label = mx.gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
for x in data:
outputs.append(net(x).as_in_context(ctx_cpu))
labels += [l.as_in_context(ctx_cpu) for l in label]
outputs = mx.nd.concatenate(outputs, axis=0)
labels = mx.nd.concatenate(labels, axis=0)
return evaluate(outputs, labels, val_data._dataset.num_classes(), use_threads=use_threads)
def train(net, beta, opt, train_dataloader, val_dataloader, batch_size, context, run_id):
"""Training function."""
if not opt.skip_pretrain_validation:
validation_results = validate(net, val_dataloader, context, use_threads=opt.num_workers > 0)
for name, val_acc in validation_results:
logging.info('Pre-train validation: %s=%f' % (name, val_acc))
steps = parse_steps(opt.steps, opt.epochs, logging)
opt_options = {'learning_rate': opt.lr, 'wd': opt.wd}
if opt.optimizer == 'sgd':
opt_options['momentum'] = 0.9
if opt.optimizer == 'adam':
opt_options['epsilon'] = 1e-7
trainer = gluon.Trainer(net.collect_params(), opt.optimizer, opt_options, kvstore=opt.kvstore)
train_beta = not isinstance(beta, float)
if train_beta:
# Jointly train class-specific beta
beta.initialize(mx.init.Constant(opt.beta), ctx=context)
trainer_beta = gluon.Trainer(beta.collect_params(), 'sgd',
{'learning_rate': opt.lr_beta, 'momentum': 0.9}, kvstore=opt.kvstore)
loss = MarginLoss(batch_size, opt.batch_k, beta, margin=opt.margin, nu=opt.nu, train_beta=train_beta)
if not opt.disable_hybridize:
loss.hybridize()
best_results = [] # R@1, NMI
for epoch in range(1, opt.epochs + 1):
prev_loss, cumulative_loss = 0.0, 0.0
# Learning rate schedule.
trainer.set_learning_rate(get_lr(opt.lr, epoch, steps, opt.factor))
logging.info('Epoch %d learning rate=%f', epoch, trainer.learning_rate)
if train_beta:
trainer_beta.set_learning_rate(get_lr(opt.lr_beta, epoch, steps, opt.factor))
logging.info('Epoch %d beta learning rate=%f', epoch, trainer_beta.learning_rate)
p_bar = tqdm(train_dataloader, desc='[Run %d/%d] Epoch %d' % (run_id, opt.number_of_runs, epoch),
total=opt.iteration_per_epoch)
for batch in p_bar:
data = gluon.utils.split_and_load(batch[0][0], ctx_list=context, batch_axis=0)
label = gluon.utils.split_and_load(batch[1][0].astype('float32'), ctx_list=context, batch_axis=0)
Ls = []
with ag.record():
for x, y in zip(data, label):
embedings = net(x)
L = loss(embedings, y)
Ls.append(L)
cumulative_loss += nd.mean(L).asscalar()
for L in Ls:
L.backward()
trainer.step(batch[0].shape[1])
if opt.lr_beta > 0.0:
trainer_beta.step(batch[0].shape[1])
p_bar.set_postfix({'loss': cumulative_loss - prev_loss})
prev_loss = cumulative_loss
logging.info('[Epoch %d] training loss=%f' % (epoch, cumulative_loss))
validation_results = validate(net, val_dataloader, context, use_threads=opt.num_workers > 0)
for name, val_acc in validation_results:
logging.info('[Epoch %d] validation: %s=%f' % (epoch, name, val_acc))
if (len(best_results) == 0) or (validation_results[0][1] > best_results[0][1]):
best_results = validation_results
if opt.save_model_prefix.lower() != 'none':
filename = '%s.params' % opt.save_model_prefix
logging.info('Saving %s.' % filename)
net.save_parameters(filename)
logging.info('New best validation: R@1: %f NMI: %f' % (best_results[0][1], best_results[-1][1]))
return best_results
if __name__ == '__main__':
train_margin(parse_args())
| 44.530612 | 120 | 0.634372 |
496d5dcb74bbf5f2fa198d1e5b24c0ea5fec7ece | 6,187 | py | Python | doc/tools/doc_merge.py | N0hbdy/godot | d4a222cd9d849a63f0535f70cbf78700bc5c815b | [
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | 39 | 2018-12-17T07:11:37.000Z | 2021-09-28T10:02:45.000Z | doc/tools/doc_merge.py | N0hbdy/godot | d4a222cd9d849a63f0535f70cbf78700bc5c815b | [
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | 38 | 2021-07-29T01:15:35.000Z | 2022-03-20T01:01:28.000Z | doc/tools/doc_merge.py | N0hbdy/godot | d4a222cd9d849a63f0535f70cbf78700bc5c815b | [
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | 3 | 2021-09-06T18:28:23.000Z | 2021-09-11T11:59:54.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import xml.etree.ElementTree as ET
tree = ET.parse(sys.argv[1])
old_doc = tree.getroot()
tree = ET.parse(sys.argv[2])
new_doc = tree.getroot()
f = file(sys.argv[3], "wb")
tab = 0
old_classes = {}
write_string(f, '<?xml version="1.0" encoding="UTF-8" ?>')
write_string(f, '<doc version="' + new_doc.attrib["version"] + '">')
for c in list(old_doc):
old_classes[c.attrib["name"]] = c
for c in list(new_doc):
write_class(c)
write_string(f, '</doc>\n')
| 28.643519 | 170 | 0.537902 |
496d668dab143daad188848fbd26c751e580633a | 357 | py | Python | contentcuration/contentcuration/migrations/0059_merge.py | Tlazypanda/studio | cd1c2f169c705027cdd808cbbcae907d0a9b21d2 | [
"MIT"
] | 1 | 2019-03-30T18:14:25.000Z | 2019-03-30T18:14:25.000Z | contentcuration/contentcuration/migrations/0059_merge.py | Tlazypanda/studio | cd1c2f169c705027cdd808cbbcae907d0a9b21d2 | [
"MIT"
] | 4 | 2016-05-06T17:19:30.000Z | 2019-03-15T01:51:24.000Z | contentcuration/contentcuration/migrations/0059_merge.py | Tlazypanda/studio | cd1c2f169c705027cdd808cbbcae907d0a9b21d2 | [
"MIT"
] | 4 | 2016-10-18T22:49:08.000Z | 2019-09-17T11:20:51.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-03-29 19:12
from __future__ import unicode_literals
from django.db import migrations
| 21 | 59 | 0.680672 |
496df5ac2b816d0d93ed95d0c8119c0af62b55d9 | 91 | py | Python | controller/ORCA_CLEAN/execute.py | nestorcalvo/Backend-AudioClean | 7edb373c518193bc5643e9524d78d9ba32163b3f | [
"MIT"
] | null | null | null | controller/ORCA_CLEAN/execute.py | nestorcalvo/Backend-AudioClean | 7edb373c518193bc5643e9524d78d9ba32163b3f | [
"MIT"
] | null | null | null | controller/ORCA_CLEAN/execute.py | nestorcalvo/Backend-AudioClean | 7edb373c518193bc5643e9524d78d9ba32163b3f | [
"MIT"
] | null | null | null | from predict import predict
if __name__ == "__main__":
# predict()
print("A ")
| 15.166667 | 27 | 0.604396 |
496f6fa945313ae8eb812d0256476b19fbb908f6 | 174 | py | Python | fperms_iscore/main.py | druids/django-fperms-iscore | 8e919cdc70ed57e0eb6407469de9ef2441ae06ad | [
"MIT"
] | 1 | 2019-10-07T12:40:38.000Z | 2019-10-07T12:40:38.000Z | fperms_iscore/main.py | druids/django-fperms-iscore | 8e919cdc70ed57e0eb6407469de9ef2441ae06ad | [
"MIT"
] | 3 | 2019-08-09T14:10:21.000Z | 2022-02-01T13:48:01.000Z | fperms_iscore/main.py | druids/django-fperms-iscore | 8e919cdc70ed57e0eb6407469de9ef2441ae06ad | [
"MIT"
] | null | null | null | from is_core.main import DjangoUiRestCore
from fperms_iscore.mixins import PermCoreMixin
| 19.333333 | 60 | 0.833333 |
496f9fb09ed8ca073a1b323b69ca4902f734d230 | 1,269 | py | Python | distanceCalc.py | jmoehler/CityDistance | 0a7eb898db8ea0dbada43239652ae4aad935dda3 | [
"MIT"
] | null | null | null | distanceCalc.py | jmoehler/CityDistance | 0a7eb898db8ea0dbada43239652ae4aad935dda3 | [
"MIT"
] | null | null | null | distanceCalc.py | jmoehler/CityDistance | 0a7eb898db8ea0dbada43239652ae4aad935dda3 | [
"MIT"
] | null | null | null | from math import cos, acos, pi, sqrt, sin | 34.297297 | 149 | 0.624901 |
496fe4328017b0a5588279aa7e57db6731bb4964 | 95 | py | Python | zoo/auditing/apps.py | uliana291/the-zoo | a15a4162c39553abe91224f4feff5d3b66f9413e | [
"MIT"
] | 90 | 2018-11-20T10:58:24.000Z | 2022-02-19T16:12:46.000Z | zoo/auditing/apps.py | uliana291/the-zoo | a15a4162c39553abe91224f4feff5d3b66f9413e | [
"MIT"
] | 348 | 2018-11-21T09:22:31.000Z | 2021-11-03T13:45:08.000Z | zoo/auditing/apps.py | aexvir/the-zoo | 7816afb9a0a26c6058b030b4a987c73e952d92bd | [
"MIT"
] | 11 | 2018-12-08T18:42:07.000Z | 2021-02-21T06:27:58.000Z | from django.apps import AppConfig
| 15.833333 | 33 | 0.757895 |
49706257061fd5cb42e071e2e21ada1c26eefe8c | 593 | py | Python | graviteeio_cli/commands/apim/apis/definition.py | Shaker5191/graviteeio-cli | 318748bb8e631743ea58afaee24333249ca3d227 | [
"Apache-2.0"
] | null | null | null | graviteeio_cli/commands/apim/apis/definition.py | Shaker5191/graviteeio-cli | 318748bb8e631743ea58afaee24333249ca3d227 | [
"Apache-2.0"
] | null | null | null | graviteeio_cli/commands/apim/apis/definition.py | Shaker5191/graviteeio-cli | 318748bb8e631743ea58afaee24333249ca3d227 | [
"Apache-2.0"
] | null | null | null | import click
from .definition_group.apply import apply
from .definition_group.diff import diff
from .definition_group.generate import generate
from .definition_group.create import create
# from .definition_group.lint import lint
definition.add_command(apply)
definition.add_command(diff)
definition.add_command(create)
definition.add_command(generate)
# definition.add_command(lint)
| 26.954545 | 91 | 0.819562 |
497231bff7e8e9d345553a23f55adb1bd3c5a759 | 1,761 | py | Python | graphx.py | clever-username/baseball-card-inventory | 9940ba746072892961b7ade586e63f7deb26d2e6 | [
"MIT"
] | 1 | 2021-05-18T21:32:43.000Z | 2021-05-18T21:32:43.000Z | graphx.py | clever-username/baseball-card-inventory | 9940ba746072892961b7ade586e63f7deb26d2e6 | [
"MIT"
] | null | null | null | graphx.py | clever-username/baseball-card-inventory | 9940ba746072892961b7ade586e63f7deb26d2e6 | [
"MIT"
] | 2 | 2015-05-18T14:52:01.000Z | 2015-05-19T18:21:51.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Simple color picker program."""
BANNER = """ .::::::::::::::::::::::::::::::::::::::::::::::::.
.. .... ..
.. ...... ... ..
|S .F.Cards.. F.G ia
nt sS.F.Gi||BASE BAL LB
AS EBALLBASEBA||S .F. Gi
an tsS.F.Giants S. F.
Gi ||BASEBALLBA SE BA
LL BASEBA||N.Y.Yankees.F .Gia nt
sS .F.Gi||BASEBALLBASEBALLBASEB A|
|S .F.MetsS.F.GiantsS.F.Gi||BASE BA
LL BA SEBALLBASEBA||S.T.L.Cards.Reds S.
F. Gi||B ASEBALLBASEBALLBASEBA||S.F.GiantsS.F .G
ia nt sS.F.Gi||BASEBALLBASEBALLBASEBA||S.F .G
ia ntsT.B.Rayss.F.Gi||BASEBALL BA
S EBALLBASEBA|'`''''''''''' S
:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
____ ____ ____ _ ____ ____ ____
| __ )| __ ) / ___| / \ | _ \| _ \/ ___|
| _ \| _ \ _____| | / _ \ | |_) | | | \___ \
| |_) | |_) |_____| |___ / ___ \| _ <| |_| |___) |
|____/|____/ \____/_/ \_|_| \_|____/|____/
"""
| 60.724138 | 76 | 0.244747 |
4972f556700ff0374ba0d495d120ef3679c33357 | 1,176 | py | Python | src/examples/colors.py | schneiderfelipe/kay | a7bf69e3bbd1b845286667b20eb1fba88faf9ea4 | [
"MIT"
] | 14 | 2021-11-18T14:56:48.000Z | 2022-03-26T08:02:13.000Z | src/examples/colors.py | getcuia/cuia | 685d258b3cb366d40100e6a563661b307aef5ae3 | [
"MIT"
] | 8 | 2021-11-25T13:47:12.000Z | 2022-03-25T12:01:09.000Z | src/examples/colors.py | schneiderfelipe/kay | a7bf69e3bbd1b845286667b20eb1fba88faf9ea4 | [
"MIT"
] | null | null | null | """An example of using colors module."""
import asyncio
from typing import Text
import cuia
if __name__ == "__main__":
asyncio.run(main())
| 26.727273 | 68 | 0.536565 |
49733958b3756fb3220a69c0ceb6f8c4a2dd5ef8 | 2,571 | py | Python | app/voxity/channel.py | voxity/vox-ui-api | 9da442a2ae8e5fec92485cf7dc4adf1a560aa8f5 | [
"MIT"
] | null | null | null | app/voxity/channel.py | voxity/vox-ui-api | 9da442a2ae8e5fec92485cf7dc4adf1a560aa8f5 | [
"MIT"
] | null | null | null | app/voxity/channel.py | voxity/vox-ui-api | 9da442a2ae8e5fec92485cf7dc4adf1a560aa8f5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
from flask import current_app
from . import connectors, check_respons, pager_dict
from .objects.channel import Channel
def get(ret_object=False, **kwargs):
"""
:retyp: list
:return: device list
"""
con = connectors()
if con:
resp = con.get(get_base_url(), params=kwargs)
if check_respons(resp):
ret = resp.json().get('result', [])
ret = Channel.litst_obj_from_list(ret, **kwargs)
if not ret_object:
r = []
for c in ret:
r.append(c.to_dict())
return r
else:
return ret
return None
def get_id(d_id, ret_object=False):
"""
:param str d_ind: device id
:retype: dict|Channel
:return: one device
"""
con = connectors()
if con:
resp = con.get(get_base_url() + d_id)
if check_respons(resp):
ret = resp.json().get('data', [])
if not ret_object:
return ret
else:
return Channel(**ret)
return None | 25.205882 | 78 | 0.525088 |
49736e792f64fdf62a5e05e4cdd1a7fca2758ba4 | 1,637 | py | Python | chapter 2 - linked list/2.7.py | anuraagdjain/cracking_the_coding_interview | 09083b4c464f41d5752c7ca3d27ab7c992793619 | [
"MIT"
] | null | null | null | chapter 2 - linked list/2.7.py | anuraagdjain/cracking_the_coding_interview | 09083b4c464f41d5752c7ca3d27ab7c992793619 | [
"MIT"
] | null | null | null | chapter 2 - linked list/2.7.py | anuraagdjain/cracking_the_coding_interview | 09083b4c464f41d5752c7ca3d27ab7c992793619 | [
"MIT"
] | null | null | null | from linkedlist import LinkedList
from node import Node
if __name__ == "__main__":
a = Node(1)
b = Node(2)
c = Node(7)
d = Node(6)
e = Node(4)
f = Node(9)
g = Node(5)
h = Node(1)
i = Node(3)
x = Node(1)
y = Node(2)
z = Node(7)
z.next = y
y.next = x
i.next = h
h.next = g
g.next = f
f.next = c # with intersection
# f.next = z # without intersection
e.next = d
d.next = c
c.next = b
b.next = a
result = intersection(i, e)
if result.result:
print("Intersection found at node instance: " + str(result.node))
else:
print("No intersection")
| 18.602273 | 73 | 0.542456 |
4973e2d2ceab6b66fabf235caf79153e33be991a | 2,307 | py | Python | app_core/api/comments.py | Great-Li-Xin/LiCMS | 9d7f78647766b49a325123f4b5ad59d6a1808eb7 | [
"MIT"
] | 9 | 2020-02-18T01:50:17.000Z | 2020-05-26T09:25:41.000Z | app_core/api/comments.py | realJustinLee/LiCMS | 9d7f78647766b49a325123f4b5ad59d6a1808eb7 | [
"MIT"
] | 1 | 2021-04-19T15:26:20.000Z | 2021-04-19T15:26:20.000Z | app_core/api/comments.py | Great-Li-Xin/LiCMS | 9d7f78647766b49a325123f4b5ad59d6a1808eb7 | [
"MIT"
] | 5 | 2020-02-18T01:50:19.000Z | 2020-05-26T09:25:45.000Z | from flask import jsonify, request, g, url_for, current_app
from app_core import db
from app_core.api import api
from app_core.api.decorators import permission_required
from app_core.models import Post, Permission, Comment
| 33.926471 | 107 | 0.686173 |
4974d4d303e4a516e97419ba5b4f79eb5a463128 | 2,557 | py | Python | ipyhop/state.py | YashBansod/IPyHOP | f3b75b420e470c693606a67cc70bdcb24eccda62 | [
"BSD-3-Clause"
] | null | null | null | ipyhop/state.py | YashBansod/IPyHOP | f3b75b420e470c693606a67cc70bdcb24eccda62 | [
"BSD-3-Clause"
] | null | null | null | ipyhop/state.py | YashBansod/IPyHOP | f3b75b420e470c693606a67cc70bdcb24eccda62 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
File Description: File used for definition of State Class.
"""
# ****************************************** Libraries to be imported ****************************************** #
from copy import deepcopy
# ****************************************** Class Declaration Start ****************************************** #
# ****************************************** Class Declaration End ****************************************** #
# ****************************************** Demo / Test Routine ****************************************** #
if __name__ == '__main__':
print("Test instantiation of State class ...")
test_state = State('test_state')
test_state.test_var_1 = {'key1': 'val1'}
test_state.test_var_2 = {'key1': 0}
test_state.test_var_3 = {'key2': {'key3': 5}, 'key3': {'key2': 5}}
print(test_state)
"""
Author(s): Yash Bansod
Repository: https://github.com/YashBansod/IPyHOP
"""
| 39.953125 | 120 | 0.431756 |
4974d677e63b39744893c4f6fa71c6ce00ac7913 | 2,240 | py | Python | ckanext/scheming/logic.py | vrk-kpa/ckanext-scheming | b82e20e04acdc4a71163675f843ac9be74f29d41 | [
"MIT"
] | null | null | null | ckanext/scheming/logic.py | vrk-kpa/ckanext-scheming | b82e20e04acdc4a71163675f843ac9be74f29d41 | [
"MIT"
] | null | null | null | ckanext/scheming/logic.py | vrk-kpa/ckanext-scheming | b82e20e04acdc4a71163675f843ac9be74f29d41 | [
"MIT"
] | 1 | 2021-12-15T12:50:40.000Z | 2021-12-15T12:50:40.000Z | from ckantoolkit import get_or_bust, side_effect_free, ObjectNotFound
from ckanext.scheming.helpers import (
scheming_dataset_schemas, scheming_get_dataset_schema,
scheming_group_schemas, scheming_get_group_schema,
scheming_organization_schemas, scheming_get_organization_schema,
)
| 28 | 78 | 0.729018 |
49755e37e2029b777679857be7a2f1b70a206d0d | 2,700 | py | Python | omnithinker/api/nytimes.py | stuycs-softdev-fall-2013/proj2-pd6-04-omnithinker | 53bf397ce2f67e7d5c5689486ab75475e99b0eba | [
"MIT",
"BSD-3-Clause"
] | 1 | 2022-01-18T02:03:15.000Z | 2022-01-18T02:03:15.000Z | omnithinker/api/nytimes.py | stuycs-softdev-fall-2013/proj2-pd6-04-omnithinker | 53bf397ce2f67e7d5c5689486ab75475e99b0eba | [
"MIT",
"BSD-3-Clause"
] | null | null | null | omnithinker/api/nytimes.py | stuycs-softdev-fall-2013/proj2-pd6-04-omnithinker | 53bf397ce2f67e7d5c5689486ab75475e99b0eba | [
"MIT",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
import json
from urllib import urlopen
# http://api.nytimes.com/svc/search/v2/articlesearch.json?fq=Obama&FACET_FIELD=day_of_week&BEGIN_DATE=19000101
# &API-KEY=5772CD9A42F195C96DA0E930A7182688:14:68439177
# The original link is above. What happens is because we don't specify an end date, the panda article, which was
# coincidentally published today, becomes the first article that we see and gives us keywords like zoo.
# If we add an end date before then, then we can filter it out.
| 35.526316 | 153 | 0.605185 |
4975838c1788d4788a4a9397bb1062a6a910a29e | 694 | py | Python | tests/test_pydantic_integration.py | bsnacks000/yearmonth | c6a6084931e6cc4696de5f8a7f8e48ceca83b944 | [
"MIT"
] | null | null | null | tests/test_pydantic_integration.py | bsnacks000/yearmonth | c6a6084931e6cc4696de5f8a7f8e48ceca83b944 | [
"MIT"
] | null | null | null | tests/test_pydantic_integration.py | bsnacks000/yearmonth | c6a6084931e6cc4696de5f8a7f8e48ceca83b944 | [
"MIT"
] | null | null | null | from typing import List
from yearmonth.yearmonth import YearMonth
import pydantic
| 22.387097 | 71 | 0.665706 |
4975be83811ebc74df1baade17e5a1895d1cf649 | 353 | py | Python | C_D_Playlist.py | fairoz-ahmed/Casper_Player | f71a26002907e474a9274771565ce781beddcca4 | [
"MIT"
] | null | null | null | C_D_Playlist.py | fairoz-ahmed/Casper_Player | f71a26002907e474a9274771565ce781beddcca4 | [
"MIT"
] | null | null | null | C_D_Playlist.py | fairoz-ahmed/Casper_Player | f71a26002907e474a9274771565ce781beddcca4 | [
"MIT"
] | null | null | null | import tkinter.messagebox
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
import threading
from pygame import mixer
from mutagen.mp3 import MP3
import os
import easygui
import time
import playlist_window as pw
import Main as main
#from PIL import ImageTk,Image
| 20.764706 | 31 | 0.78187 |
49777977c495be3e64d10459c0324e75b00b5f3b | 569 | py | Python | docker-image/render-template.py | osism/generics | 2dd914f2338c2d60d1595d7cdc4db0c107a9fb47 | [
"Apache-2.0"
] | null | null | null | docker-image/render-template.py | osism/generics | 2dd914f2338c2d60d1595d7cdc4db0c107a9fb47 | [
"Apache-2.0"
] | 3 | 2020-12-10T09:57:02.000Z | 2020-12-10T09:57:17.000Z | docker-image/render-template.py | osism/travis | 2dd914f2338c2d60d1595d7cdc4db0c107a9fb47 | [
"Apache-2.0"
] | null | null | null | import os
import sys
import jinja2
import yaml
with open(".information.yml") as fp:
information = yaml.safe_load(fp)
loader = jinja2.FileSystemLoader(searchpath="")
environment = jinja2.Environment(loader=loader, keep_trailing_newline=True)
template = environment.get_template(sys.argv[1])
result = template.render({
"docker_image_name": information.get("docker_image_name", "NONE"),
"readme_note": information.get("readme_note", None),
"versions": information.get("versions", ["latest"])
})
with open(sys.argv[1], "w+") as fp:
fp.write(result)
| 27.095238 | 75 | 0.72935 |
49788254641401f0ac3bea81c52abecf9425c9b7 | 58 | py | Python | test/__init__.py | stungkit/tfidf_matcher | 24182504d21f1eb978839b700f1c402c6288df2f | [
"MIT"
] | 13 | 2020-02-24T18:29:15.000Z | 2021-12-28T09:41:35.000Z | test/__init__.py | stungkit/tfidf_matcher | 24182504d21f1eb978839b700f1c402c6288df2f | [
"MIT"
] | null | null | null | test/__init__.py | stungkit/tfidf_matcher | 24182504d21f1eb978839b700f1c402c6288df2f | [
"MIT"
] | 3 | 2020-07-21T04:32:45.000Z | 2021-10-21T11:00:56.000Z | # AUTHOR: Louis Tsiattalou
# DESCRIPTION: Init for Tests.
| 19.333333 | 30 | 0.758621 |
4978db654876ffc9e3f0801f73bab29baba94038 | 29,541 | py | Python | isitek.py | will-bainbridge/ISITEK | 53e90e0511bbd7cd08614b943c1286c56adbee5e | [
"MIT"
] | 3 | 2018-06-26T15:04:46.000Z | 2019-09-14T09:23:44.000Z | isitek.py | will-bainbridge/ISITEK | 53e90e0511bbd7cd08614b943c1286c56adbee5e | [
"MIT"
] | null | null | null | isitek.py | will-bainbridge/ISITEK | 53e90e0511bbd7cd08614b943c1286c56adbee5e | [
"MIT"
] | 3 | 2016-11-28T12:19:37.000Z | 2020-02-04T00:18:56.000Z | #!/usr/bin/python
################################################################################
import numpy
import os
import cPickle as pickle
import scipy.misc
import scipy.sparse
import scipy.sparse.linalg
import scipy.special
import sys
import time
################################################################################
def nodegrid(a,b):
return [ x.T for x in numpy.meshgrid(a,b) ]
def dot_sequence(*args):
if len(args) == 1: return args[0]
else: return numpy.dot( args[0] , dot_sequence(*args[1:]) )
def string_multiple_replace(string,dict):
for s,r in dict.iteritems():
string = string.replace(s,r)
return string
################################################################################
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
################################################################################
path = sys.argv[1]
action = sys.argv[2].lower()
directory = os.path.dirname(path)
name = os.path.basename(path)
input_filename = directory + os.sep + name + '.input'
data_filename = directory + os.sep + name + '.data'
display_filename = directory + os.sep + name + '.display'
do = Struct(pre = 'p' in action , re = 'r' in action , init = 'i' in action , solve = 's' in action , display = 'd' in action )
#------------------------------------------------------------------------------#
if not do.pre:
with Timer('reading data from "%s"' % data_filename):
node,face,element,boundary,u,order = read_data_file(data_filename)
with Timer('reading input from "%s"' % input_filename):
input_data = read_input_file(input_filename)
if do.pre:
geometry_filename = directory + os.sep + input_data[0]
order = input_data[1]
if do.pre or do.re:
boundary = input_data[2]
if do.init:
initial = input_data[3]
if do.solve:
for i in range(0,len(boundary)): boundary[i].value = input_data[2][i].value
term = input_data[4]
wind = input_data[5]
iterations = input_data[6]
if do.display:
mesh_size = input_data[7]
with Timer('generating constants'):
(gauss_locations,gauss_weights,
hammer_locations,hammer_weights,
taylor_coefficients,taylor_powers,powers_taylor,
factorial) = generate_constants(order)
if do.pre:
with Timer('reading and processing geometry from "%s"' % geometry_filename):
node,face,element = read_geometry(geometry_filename)
with Timer('generating unknowns'):
u = generate_unknowns()
if do.pre or do.re:
with Timer('assigning boundaries to faces'):
assign_boundaries()
with Timer('calculating element matrices'):
calculate_element_matrices()
if do.init:
with Timer('initialising the unknowns'):
initialise_unknowns()
if do.solve:
with Timer('iterating',True):
index = [ numpy.zeros(u.shape,dtype=bool) for v in range(0,len(order)) ]
for e in range(0,len(element)):
for v in range(0,len(order)):
index[v][element[e].unknown[v]] = True
for i in range(0,iterations):
J,f = generate_system()
print ' ' + ' '.join([ '%.4e' % numpy.max(numpy.abs(f[i])) for i in index ])
u += scipy.sparse.linalg.spsolve(J,-f)
if do.display:
with Timer('saving display to "%s"' % display_filename):
write_display_file(display_filename,mesh_size)
if do.pre or do.re or do.init or do.solve:
with Timer('saving data to "%s"' % data_filename):
write_data_file(data_filename)
################################################################################
| 34.35 | 173 | 0.597982 |
497a5c9b65658e4fea7858123fdca1c39b46407f | 2,343 | py | Python | holobot/framework/kernel.py | rexor12/holobot | 89b7b416403d13ccfeee117ef942426b08d3651d | [
"MIT"
] | 1 | 2021-05-24T00:17:46.000Z | 2021-05-24T00:17:46.000Z | holobot/framework/kernel.py | rexor12/holobot | 89b7b416403d13ccfeee117ef942426b08d3651d | [
"MIT"
] | 41 | 2021-03-24T22:50:09.000Z | 2021-12-17T12:15:13.000Z | holobot/framework/kernel.py | rexor12/holobot | 89b7b416403d13ccfeee117ef942426b08d3651d | [
"MIT"
] | null | null | null | from holobot.framework.lifecycle import LifecycleManagerInterface
from holobot.sdk import KernelInterface
from holobot.sdk.database import DatabaseManagerInterface
from holobot.sdk.integration import IntegrationInterface
from holobot.sdk.ioc.decorators import injectable
from holobot.sdk.logging import LogInterface
from holobot.sdk.system import EnvironmentInterface
from holobot.sdk.utils import when_all
from typing import Tuple
import asyncio
| 45.057692 | 126 | 0.722151 |
497a5f4c2e39ef62c200675216c42fbc21c52436 | 34 | py | Python | tests/snmp/test_base.py | zohassadar/netdisc | 9ce4d5c2b0f30d36e71118ffbe6b7ffd93e0dfc8 | [
"MIT"
] | null | null | null | tests/snmp/test_base.py | zohassadar/netdisc | 9ce4d5c2b0f30d36e71118ffbe6b7ffd93e0dfc8 | [
"MIT"
] | null | null | null | tests/snmp/test_base.py | zohassadar/netdisc | 9ce4d5c2b0f30d36e71118ffbe6b7ffd93e0dfc8 | [
"MIT"
] | null | null | null | from netdisc.snmp import snmpbase
| 17 | 33 | 0.852941 |
497aef1b3a2cad12da85ea306e770352bb104646 | 13,063 | py | Python | venv/lib/python2.7/site-packages/ansible/modules/storage/netapp/na_ontap_svm.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 37 | 2017-08-15T15:02:43.000Z | 2021-07-23T03:44:31.000Z | venv/lib/python2.7/site-packages/ansible/modules/storage/netapp/na_ontap_svm.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 12 | 2018-01-10T05:25:25.000Z | 2021-11-28T06:55:48.000Z | venv/lib/python2.7/site-packages/ansible/modules/storage/netapp/na_ontap_svm.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 49 | 2017-08-15T09:52:13.000Z | 2022-03-21T17:11:54.000Z | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_ontap_svm
short_description: Manage NetApp Ontap svm
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: Sumit Kumar (sumit4@netapp.com), Archana Ganesan (garchana@netapp.com)
description:
- Create, modify or delete svm on NetApp Ontap
options:
state:
description:
- Whether the specified SVM should exist or not.
choices: ['present', 'absent']
default: 'present'
name:
description:
- The name of the SVM to manage.
required: true
new_name:
description:
- New name of the SVM to be renamed
root_volume:
description:
- Root volume of the SVM. Required when C(state=present).
root_volume_aggregate:
description:
- The aggregate on which the root volume will be created.
- Required when C(state=present).
root_volume_security_style:
description:
- Security Style of the root volume.
- When specified as part of the vserver-create,
this field represents the security style for the Vserver root volume.
- When specified as part of vserver-get-iter call,
this will return the list of matching Vservers.
- The 'unified' security style, which applies only to Infinite Volumes,
cannot be applied to a Vserver's root volume.
- Required when C(state=present)
choices: ['unix', 'ntfs', 'mixed', 'unified']
allowed_protocols:
description:
- Allowed Protocols.
- When specified as part of a vserver-create,
this field represent the list of protocols allowed on the Vserver.
- When part of vserver-get-iter call,
this will return the list of Vservers
which have any of the protocols specified
as part of the allowed-protocols.
- When part of vserver-modify,
this field should include the existing list
along with new protocol list to be added to prevent data disruptions.
- Possible values
- nfs NFS protocol,
- cifs CIFS protocol,
- fcp FCP protocol,
- iscsi iSCSI protocol,
- ndmp NDMP protocol,
- http HTTP protocol,
- nvme NVMe protocol
aggr_list:
description:
- List of aggregates assigned for volume operations.
- These aggregates could be shared for use with other Vservers.
- When specified as part of a vserver-create,
this field represents the list of aggregates
that are assigned to the Vserver for volume operations.
- When part of vserver-get-iter call,
this will return the list of Vservers
which have any of the aggregates specified as part of the aggr-list.
'''
EXAMPLES = """
- name: Create SVM
na_ontap_svm:
state: present
name: ansibleVServer
root_volume: vol1
root_volume_aggregate: aggr1
root_volume_security_style: mixed
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
if __name__ == '__main__':
main()
| 37.645533 | 86 | 0.580724 |
497d558f6807d6cee34934135fc08d3e5e24fbf5 | 487 | py | Python | server/apps/api/notice/migrations/0003_alter_event_priority.py | NikitaGrishchenko/csp-tender-hack-server | 56055f51bf472f0f1e56b419a48d993cc91e0f3a | [
"MIT"
] | null | null | null | server/apps/api/notice/migrations/0003_alter_event_priority.py | NikitaGrishchenko/csp-tender-hack-server | 56055f51bf472f0f1e56b419a48d993cc91e0f3a | [
"MIT"
] | null | null | null | server/apps/api/notice/migrations/0003_alter_event_priority.py | NikitaGrishchenko/csp-tender-hack-server | 56055f51bf472f0f1e56b419a48d993cc91e0f3a | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2021-11-27 12:21
from django.db import migrations, models
| 25.631579 | 151 | 0.616016 |
497e1c5d29374050c770b786c91bc5c1ccabcd85 | 650 | py | Python | gdpr_assist/app_settings.py | mserrano07/django-gdpr-assist | 3c23d0aadadc676c128ef57aebc36570f3936ff1 | [
"BSD-3-Clause"
] | null | null | null | gdpr_assist/app_settings.py | mserrano07/django-gdpr-assist | 3c23d0aadadc676c128ef57aebc36570f3936ff1 | [
"BSD-3-Clause"
] | null | null | null | gdpr_assist/app_settings.py | mserrano07/django-gdpr-assist | 3c23d0aadadc676c128ef57aebc36570f3936ff1 | [
"BSD-3-Clause"
] | null | null | null | """
Settings
"""
from yaa_settings import AppSettings
| 28.26087 | 79 | 0.755385 |
497f0f54faebc451ce2dc9315e86227db65fd970 | 2,382 | py | Python | config-tests/test_server_details.py | mozilla-services/kinto-integration-tests | ec5199f5e9c7452c78d8f6fb41dcaa02504f34f7 | [
"Apache-2.0"
] | 2 | 2017-09-01T19:41:43.000Z | 2018-11-08T14:42:00.000Z | config-tests/test_server_details.py | Kinto/kinto-integration-tests | ec5199f5e9c7452c78d8f6fb41dcaa02504f34f7 | [
"Apache-2.0"
] | 89 | 2017-01-25T21:44:26.000Z | 2021-01-01T08:39:07.000Z | config-tests/test_server_details.py | mozilla-services/kinto-integration-tests | ec5199f5e9c7452c78d8f6fb41dcaa02504f34f7 | [
"Apache-2.0"
] | 6 | 2017-03-14T13:40:38.000Z | 2020-04-03T15:32:57.000Z | import pytest
import requests
def aslist_cronly(value):
""" Split the input on lines if it's a valid string type"""
if isinstance(value, str):
value = filter(None, [x.strip() for x in value.splitlines()])
return list(value)
def aslist(value, flatten=True):
""" Return a list of strings, separating the input based on newlines
and, if flatten=True (the default), also split on spaces within
each line."""
values = aslist_cronly(value)
if not flatten:
return values
result = []
for value in values:
subvalues = value.split()
result.extend(subvalues)
return result
| 25.891304 | 72 | 0.665407 |
49803c62b083c02f67f3cea8900cbba0f19179c1 | 635 | py | Python | tests/db/test_factory.py | albertteoh/data_pipeline | a99f1c7412375b3e9f4115108fcdde517b2e71a6 | [
"Apache-2.0"
] | null | null | null | tests/db/test_factory.py | albertteoh/data_pipeline | a99f1c7412375b3e9f4115108fcdde517b2e71a6 | [
"Apache-2.0"
] | null | null | null | tests/db/test_factory.py | albertteoh/data_pipeline | a99f1c7412375b3e9f4115108fcdde517b2e71a6 | [
"Apache-2.0"
] | null | null | null | import pytest
import data_pipeline.db.factory as dbfactory
import data_pipeline.constants.const as const
from data_pipeline.db.exceptions import UnsupportedDbTypeError
| 30.238095 | 62 | 0.738583 |
49806a87d676d3fa46db3e3b6f5f01048f4d408e | 5,142 | py | Python | etl/data_extraction/scrapers/sozialeinsatz.py | Betadinho/einander-helfen | 272f11397d80ab5267f39a7b36734495f1c00b0c | [
"MIT"
] | 7 | 2020-04-23T20:16:11.000Z | 2022-01-04T14:57:16.000Z | etl/data_extraction/scrapers/sozialeinsatz.py | Betadinho/einander-helfen | 272f11397d80ab5267f39a7b36734495f1c00b0c | [
"MIT"
] | 361 | 2020-04-23T17:20:14.000Z | 2022-03-02T11:29:45.000Z | etl/data_extraction/scrapers/sozialeinsatz.py | Betadinho/einander-helfen | 272f11397d80ab5267f39a7b36734495f1c00b0c | [
"MIT"
] | 1 | 2021-11-29T06:02:52.000Z | 2021-11-29T06:02:52.000Z | import math
import re
from data_extraction.scraper import Scraper
| 36.992806 | 110 | 0.556593 |
4980cf418b1fec3383b451b2c9e98a8148676569 | 1,671 | py | Python | fitbenchmarking/parsing/base_parser.py | arm61/fitbenchmarking | c745c684e3ca4895a666eb863426746d8f06636c | [
"BSD-3-Clause"
] | null | null | null | fitbenchmarking/parsing/base_parser.py | arm61/fitbenchmarking | c745c684e3ca4895a666eb863426746d8f06636c | [
"BSD-3-Clause"
] | null | null | null | fitbenchmarking/parsing/base_parser.py | arm61/fitbenchmarking | c745c684e3ca4895a666eb863426746d8f06636c | [
"BSD-3-Clause"
] | null | null | null | """
Implements the base Parser as a Context Manager.
"""
from abc import ABCMeta, abstractmethod
| 26.109375 | 78 | 0.581089 |
498189a8b987526464b2fd92c5dba221e497e78b | 10,223 | py | Python | src/offline/news/item-feature-update-batch/src/item-feature-update-batch.py | shenshaoyong/recommender-system-dev-workshop-code | ce422627181472ad513f473b65bf42410c46304a | [
"Apache-2.0"
] | 1 | 2021-07-14T09:15:40.000Z | 2021-07-14T09:15:40.000Z | src/offline/news/item-feature-update-batch/src/item-feature-update-batch.py | shenshaoyong/recommender-system-dev-workshop-code | ce422627181472ad513f473b65bf42410c46304a | [
"Apache-2.0"
] | null | null | null | src/offline/news/item-feature-update-batch/src/item-feature-update-batch.py | shenshaoyong/recommender-system-dev-workshop-code | ce422627181472ad513f473b65bf42410c46304a | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
# from tqdm import tqdm
import argparse
import glob
import os
import pickle
import boto3
import numpy as np
import pandas as pd
import encoding
import kg
# tqdm.pandas()
# pandarallel.initialize(progress_bar=True)
# bucket = os.environ.get("BUCKET_NAME", " ")
# raw_data_folder = os.environ.get("RAW_DATA", " ")
# logger = logging.getLogger()
# logger.setLevel(logging.INFO)
# tqdm_notebook().pandas()
########################################
# s3
########################################
parser = argparse.ArgumentParser()
parser.add_argument('--bucket', type=str)
parser.add_argument('--prefix', type=str)
parser.add_argument("--region", type=str, help="aws region")
args, _ = parser.parse_known_args()
print("args:", args)
region = None
if args.region:
region = args.region
print("region:", args.region)
boto3.setup_default_session(region_name=args.region)
bucket = args.bucket
prefix = args.prefix
print("bucket={}".format(bucket))
print("prefix='{}'".format(prefix))
s3client = boto3.client('s3')
out_s3_path = "s3://{}/{}/feature/content/inverted-list".format(bucket, prefix)
local_folder = 'info'
if not os.path.exists(local_folder):
os.makedirs(local_folder)
file_name_list = ['complete_dkn_word_embedding.npy']
s3_folder = '{}/model/rank/content/dkn_embedding_latest/'.format(prefix)
sync_s3(file_name_list, s3_folder, local_folder)
file_name_list = ['item.csv']
s3_folder = '{}/system/item-data'.format(prefix)
sync_s3(file_name_list, s3_folder, local_folder)
file_name_list = ['entities_dbpedia.dict', 'relations_dbpedia.dict',
'kg_dbpedia.txt', 'entities_dbpedia_train.dict',
'relations_dbpedia_train.dict', 'kg_dbpedia_train.txt',
]
s3_folder = '{}/model/meta_files/'.format(prefix)
sync_s3(file_name_list, s3_folder, local_folder)
df_filter_item = pd.read_csv('info/item.csv', sep='_!_',
names=['news_id', 'type_code', 'type', 'title', 'keywords', 'popularity', 'new'])
complete_dkn_word_embed = np.load("info/complete_dkn_word_embedding.npy")
# prepare model for batch process
meta_file_prefix = "{}/model/meta_files".format(prefix)
os.environ['GRAPH_BUCKET'] = bucket
os.environ['KG_DBPEDIA_KEY'] = '{}/kg_dbpedia.txt'.format(meta_file_prefix)
os.environ['KG_ENTITY_KEY'] = '{}/entities_dbpedia.dict'.format(
meta_file_prefix)
os.environ['KG_RELATION_KEY'] = '{}/relations_dbpedia.dict'.format(
meta_file_prefix)
os.environ['KG_DBPEDIA_TRAIN_KEY'] = '{}/kg_dbpedia_train.txt'.format(
meta_file_prefix)
os.environ['KG_ENTITY_TRAIN_KEY'] = '{}/entities_dbpedia_train.dict'.format(
meta_file_prefix)
os.environ['KG_RELATION_TRAIN_KEY'] = '{}/relations_dbpedia_train.dict'.format(
meta_file_prefix)
os.environ['KG_ENTITY_INDUSTRY_KEY'] = '{}/entity_industry.txt'.format(
meta_file_prefix)
os.environ['KG_VOCAB_KEY'] = '{}/vocab.json'.format(meta_file_prefix)
os.environ['DATA_INPUT_KEY'] = ''
os.environ['TRAIN_OUTPUT_KEY'] = '{}/model/rank/content/dkn_embedding_latest/'.format(
prefix)
kg_path = os.environ['GRAPH_BUCKET']
dbpedia_key = os.environ['KG_DBPEDIA_KEY']
entity_key = os.environ['KG_ENTITY_KEY']
relation_key = os.environ['KG_RELATION_KEY']
dbpedia_train_key = os.environ['KG_DBPEDIA_TRAIN_KEY']
entity_train_key = os.environ['KG_ENTITY_TRAIN_KEY']
relation_train_key = os.environ['KG_RELATION_TRAIN_KEY']
entity_industry_key = os.environ['KG_ENTITY_INDUSTRY_KEY']
vocab_key = os.environ['KG_VOCAB_KEY']
data_input_key = os.environ['DATA_INPUT_KEY']
train_output_key = os.environ['TRAIN_OUTPUT_KEY']
env = {
'GRAPH_BUCKET': kg_path,
'KG_DBPEDIA_KEY': dbpedia_key,
'KG_ENTITY_KEY': entity_key,
'KG_RELATION_KEY': relation_key,
'KG_DBPEDIA_TRAIN_KEY': dbpedia_train_key,
'KG_ENTITY_TRAIN_KEY': entity_train_key,
'KG_RELATION_TRAIN_KEY': relation_train_key,
'KG_ENTITY_INDUSTRY_KEY': entity_industry_key,
'KG_VOCAB_KEY': vocab_key,
'DATA_INPUT_KEY': data_input_key,
'TRAIN_OUTPUT_KEY': train_output_key
}
print("Kg env: {}".format(env))
graph = kg.Kg(env, region=region) # Where we keep the model when it's loaded
model = encoding.encoding(graph, env, region=region)
news_id_news_feature_dict = {}
map_words = {}
map_entities = {}
for row in df_filter_item.iterrows():
item_row = row[1]
program_id = str(item_row['news_id'])
title_result = model[item_row['title']]
current_words = title_result[0]
current_entities = title_result[1]
filter_words = []
filter_entities = []
analyze_map(current_words, map_words, filter_words)
analyze_map(current_entities, map_entities, filter_entities)
# filter entities & filter words
program_dict = {
'entities': filter_entities,
'words': filter_words
}
news_id_news_feature_dict[program_id] = program_dict
# clean data for graph train
# path = '/home/ec2-user/workplace/recommender-system-solution/src/offline/news/item-feature-update-batch/aws-gcr-rs-sol-demo-ap-southeast-1-522244679887/sample-data/model/meta_files'
path = "info"
entities_dbpedia = os.path.join(path, 'entities_dbpedia.dict')
relations_dbpedia = os.path.join(path, 'relations_dbpedia.dict')
kg_dbpedia = os.path.join(path, 'kg_dbpedia.txt')
entities_dbpedia_train_path = os.path.join(path, 'entities_dbpedia_train.dict')
relations_dbpedia_train_path = os.path.join(
path, 'relations_dbpedia_train.dict')
kg_dbpedia_train_path = os.path.join(path, 'kg_dbpedia_train.txt')
entities_dbpedia_f = pd.read_csv(
entities_dbpedia, header=None, names=['e', 'e_name'])
relations_dbpedia_f = pd.read_csv(
relations_dbpedia, header=None, names=['e', 'e_name'])
kg_dbpedia_f = pd.read_csv(kg_dbpedia, delimiter='\t',
header=None, names=['h', 'r', 't'])
# map_entities -> train_entites
# constrcut from entites:
entities_dbpedia_slim = {}
relations_dbpedia_slim = {}
entities_dbpedia_train = {}
relations_dbpedia_train = {}
entities_dbpedia_train[0] = '0'
relations_dbpedia_train[0] = '0'
new_list_kg = []
for raw_entity, new_idx in map_entities.items():
entity_id = raw_entity
map_head_id = analyze_map_hrt(
entity_id, entities_dbpedia_slim, entities_dbpedia_f, entities_dbpedia_train)
kg_found_pd = kg_dbpedia_f[kg_dbpedia_f.h == entity_id]
# print(kg_found_pd)
for found_row in kg_found_pd.iterrows():
relation_id = found_row[1]['r']
tail_id = found_row[1]['t']
map_relation_id = analyze_map_hrt(relation_id, relations_dbpedia_slim, relations_dbpedia_f,
relations_dbpedia_train)
map_tail_id = analyze_map_hrt(
tail_id, entities_dbpedia_slim, entities_dbpedia_f, entities_dbpedia_train)
# create new kg : h-r-t
kg_row = {}
kg_row['h'] = map_head_id
kg_row['r'] = map_relation_id
kg_row['t'] = map_tail_id
new_list_kg.append(kg_row)
kg_dbpedia_slim = pd.DataFrame(new_list_kg)
kg_dbpedia_slim.to_csv(kg_dbpedia_train_path, sep='\t',
header=False, index=False)
with open(entities_dbpedia_train_path, 'w') as f:
for key in entities_dbpedia_train.keys():
f.write("%s,%s\n" % (key, entities_dbpedia_train[key]))
with open(relations_dbpedia_train_path, 'w') as f:
for key in relations_dbpedia_train.keys():
f.write("%s,%s\n" % (key, relations_dbpedia_train[key]))
# slim version
list_word_embedding = []
list_word_embedding.append([0] * 300)
for raw_key, map_v in map_words.items():
list_word_embedding.append(complete_dkn_word_embed[raw_key])
file_name = 'info/dkn_word_embedding.npy'
with open(file_name, "wb") as f:
np.save(f, np.array(list_word_embedding))
write_to_s3(file_name,
bucket,
'{}/model/rank/content/dkn_embedding_latest/dkn_word_embedding.npy'.format(prefix))
write_to_s3(kg_dbpedia_train_path,
bucket,
'{}/kg_dbpedia_train.txt'.format(meta_file_prefix))
write_to_s3(entities_dbpedia_train_path,
bucket,
'{}/entities_dbpedia_train.dict'.format(meta_file_prefix))
write_to_s3(relations_dbpedia_train_path,
bucket,
'{}/relations_dbpedia_train.dict'.format(meta_file_prefix))
file_name = 'info/news_id_news_feature_dict.pickle'
out_file = open(file_name, 'wb')
pickle.dump(news_id_news_feature_dict, out_file)
out_file.close()
# s3_url = S3Uploader.upload(file_name, out_s3_path)
s3_url = write_to_s3(file_name, bucket,
'{}/feature/content/inverted-list/news_id_news_feature_dict.pickle'.format(prefix))
| 35.010274 | 183 | 0.702142 |
498246054897849d72b07dc078d8b150091d7c85 | 5,054 | py | Python | wirepas_backend_client/tools/utils.py | bencorrado/backend-client | 628c9999f8d98b0c1e56d87bfd2dbf1ca1ea138c | [
"Apache-2.0"
] | null | null | null | wirepas_backend_client/tools/utils.py | bencorrado/backend-client | 628c9999f8d98b0c1e56d87bfd2dbf1ca1ea138c | [
"Apache-2.0"
] | null | null | null | wirepas_backend_client/tools/utils.py | bencorrado/backend-client | 628c9999f8d98b0c1e56d87bfd2dbf1ca1ea138c | [
"Apache-2.0"
] | 1 | 2021-03-12T17:20:56.000Z | 2021-03-12T17:20:56.000Z | """
Utils
=======
Contains multipurpose utilities for serializing objects and obtaining
arguments from the command line.
.. Copyright:
Copyright 2019 Wirepas Ltd under Apache License, Version 2.0.
See file LICENSE for full license details.
"""
import binascii
import datetime
import json
import threading
from google.protobuf import json_format
def deferred_thread(fn):
"""
Decorator to handle a request on its own Thread
to avoid blocking the calling Thread on I/O.
It creates a new Thread but it shouldn't impact the performances
as requests are not supposed to be really frequent (few per seconds)
"""
return wrapper
def flatten(input_dict, separator="/", prefix=""):
"""
Flattens a dictionary with nested dictionaries and lists
into a single dictionary.
The key compression is done using the chosen separator.
"""
output_dict = {}
step(input_dict)
return output_dict
def chunker(seq, size) -> list():
"""
Splits a sequence in multiple parts
Args:
seq ([]) : an array
size (int) : length of each array part
Returns:
array ([]) : a chunk of SEQ with given SIZE
"""
return (seq[pos : pos + size] for pos in range(0, len(seq), size))
| 25.917949 | 75 | 0.565295 |
4984d7b37bc39c03cdb2148c437346639993c3a9 | 25,733 | py | Python | pysph/base/tree/point_tree.py | nauaneed/pysph-nav | 66589021f453f25b77549f6f102b6afcc89e338d | [
"BSD-3-Clause"
] | 1 | 2022-03-15T11:48:17.000Z | 2022-03-15T11:48:17.000Z | pysph/base/tree/point_tree.py | nauaneed/pysph-nav | 66589021f453f25b77549f6f102b6afcc89e338d | [
"BSD-3-Clause"
] | null | null | null | pysph/base/tree/point_tree.py | nauaneed/pysph-nav | 66589021f453f25b77549f6f102b6afcc89e338d | [
"BSD-3-Clause"
] | null | null | null | from pysph.base.tree.tree import Tree
from pysph.base.tree.helpers import ParticleArrayWrapper, get_helper, \
make_vec_dict, ctype_to_dtype, get_vector_dtype
from compyle.opencl import profile_kernel, DeviceWGSException, get_queue, \
named_profile, get_context
from compyle.array import Array
from pytools import memoize
import sys
import numpy as np
import pyopencl as cl
from pyopencl.scan import GenericScanKernel
import pyopencl.tools
from mako.template import Template
# Support for 1D
def register_custom_pyopencl_ctypes():
cl.tools.get_or_register_dtype('float1', np.dtype([('s0', np.float32)]))
cl.tools.get_or_register_dtype('double1', np.dtype([('s0', np.float64)]))
register_custom_pyopencl_ctypes()
| 38.350224 | 80 | 0.522636 |
49850af7a6ca8eea66c58c865c235297d9610189 | 2,815 | py | Python | senti_analysis/data.py | hotbaby/sentiment-analysis | efb880870d905c4c02528d7d242ba06b90f0e259 | [
"MIT"
] | null | null | null | senti_analysis/data.py | hotbaby/sentiment-analysis | efb880870d905c4c02528d7d242ba06b90f0e259 | [
"MIT"
] | 2 | 2020-09-25T21:17:58.000Z | 2022-02-10T00:28:19.000Z | senti_analysis/data.py | hotbaby/sentiment-analysis | efb880870d905c4c02528d7d242ba06b90f0e259 | [
"MIT"
] | null | null | null | # encoding: utf8
import numpy as np
import pandas as pd
from collections import OrderedDict
from senti_analysis import config
from senti_analysis import constants
from senti_analysis.preprocess import (load_tokenizer, load_sentences,
encode_sentence, label_transform)
def load_data_set():
"""
Load data set.
:return: train_data_set, validation_data_set, test_data_set
"""
train_data_set = pd.read_csv(config.TRAIN_SET_PATH)
validation_data_set = pd.read_csv(config.VALIDATION_SET_PATH)
test_data_set = pd.read_csv(config.TEST_SET_PATH)
return train_data_set, validation_data_set, test_data_set
def y_data():
"""
generate y label data.
:return: train_label_data dict, validation_label_data dict
"""
train_set = pd.read_csv(config.TRAIN_SET_PATH)
val_set = pd.read_csv(config.VALIDATION_SET_PATH)
y_train, y_val = transform_y_data(train_set, val_set, constants.COLS)
return y_train, y_val
| 30.597826 | 99 | 0.713677 |
4985efb3cec903d0cb0d0b5c74721d37a531530f | 93 | py | Python | pyball/models/config/stats_group.py | SebastianDang/PyBall | d1965aa01477b5ee0db9c0463ec584a7e3997395 | [
"MIT"
] | 74 | 2018-03-04T22:58:46.000Z | 2021-07-06T12:28:50.000Z | pyball/models/config/stats_group.py | SebastianDang/PyBall | d1965aa01477b5ee0db9c0463ec584a7e3997395 | [
"MIT"
] | 18 | 2018-03-10T19:17:54.000Z | 2020-01-04T15:42:47.000Z | pyball/models/config/stats_group.py | SebastianDang/PyBall | d1965aa01477b5ee0db9c0463ec584a7e3997395 | [
"MIT"
] | 13 | 2018-03-06T02:39:38.000Z | 2020-01-17T04:38:53.000Z | from dataclasses import dataclass
| 13.285714 | 33 | 0.774194 |
498724366b10f885fa79f500eaf773989a21c6f1 | 358 | py | Python | tests/test_skeleton_says.py | thomascobb/skeleton-says | e2ea189e075a0847a6679dc066bad47ced5d397a | [
"Apache-2.0"
] | null | null | null | tests/test_skeleton_says.py | thomascobb/skeleton-says | e2ea189e075a0847a6679dc066bad47ced5d397a | [
"Apache-2.0"
] | null | null | null | tests/test_skeleton_says.py | thomascobb/skeleton-says | e2ea189e075a0847a6679dc066bad47ced5d397a | [
"Apache-2.0"
] | null | null | null | from skeleton_says import say
skeleton_saying_hello = r"""
-------
( Hello )
-------
\
\ .-.
\(o.o)
|=|
__|__
//.=|=.\\
// .=|=. \\
\\ .=|=. //
\\(_=_)//
(:| |:)
|| ||
() ()
|| ||
|| ||
l42 ==' '==
"""
| 13.259259 | 52 | 0.379888 |
49880bf16640eed07e42f1ea42b7368e4b515269 | 1,710 | py | Python | open_connect/accounts/tests/test_tasks.py | lpatmo/actionify_the_news | 998d8ca6b35d0ef1b16efca70f50e59503f5a62d | [
"MIT"
] | 66 | 2015-11-30T20:35:38.000Z | 2019-06-12T17:40:32.000Z | open_connect/accounts/tests/test_tasks.py | lpatmo/actionify_the_news | 998d8ca6b35d0ef1b16efca70f50e59503f5a62d | [
"MIT"
] | 18 | 2015-11-30T22:03:05.000Z | 2019-07-02T00:50:29.000Z | open_connect/accounts/tests/test_tasks.py | lpatmo/actionify_the_news | 998d8ca6b35d0ef1b16efca70f50e59503f5a62d | [
"MIT"
] | 11 | 2015-11-30T20:56:01.000Z | 2019-07-01T17:06:09.000Z | """Tests for accounts tasks."""
from datetime import datetime
from unittest import TestCase
from django.conf import settings
from django.utils.timezone import now
from mock import patch
from model_mommy import mommy
from open_connect.accounts.models import Invite
from open_connect.accounts.tasks import (
render_and_send_invite_email
)
from open_connect.mailer.utils import unsubscribe_url
| 38 | 78 | 0.724561 |
49882b0d53f39e7e8ebf679902e5c955c3e1b55f | 944 | py | Python | tests/inputs/config.py | hsh-nids/python-betterproto | f5d3b48b1aa49fd64513907ed70124b32758ad3e | [
"MIT"
] | 708 | 2019-10-11T06:23:40.000Z | 2022-03-31T09:39:08.000Z | tests/inputs/config.py | hsh-nids/python-betterproto | f5d3b48b1aa49fd64513907ed70124b32758ad3e | [
"MIT"
] | 302 | 2019-11-11T22:09:21.000Z | 2022-03-29T11:21:04.000Z | tests/inputs/config.py | hsh-nids/python-betterproto | f5d3b48b1aa49fd64513907ed70124b32758ad3e | [
"MIT"
] | 122 | 2019-12-04T16:22:53.000Z | 2022-03-20T09:31:10.000Z | # Test cases that are expected to fail, e.g. unimplemented features or bug-fixes.
# Remove from list when fixed.
xfail = {
"namespace_keywords", # 70
"googletypes_struct", # 9
"googletypes_value", # 9
"import_capitalized_package",
"example", # This is the example in the readme. Not a test.
}
services = {
"googletypes_response",
"googletypes_response_embedded",
"service",
"service_separate_packages",
"import_service_input_message",
"googletypes_service_returns_empty",
"googletypes_service_returns_googletype",
"example_service",
"empty_service",
}
# Indicate json sample messages to skip when testing that json (de)serialization
# is symmetrical becuase some cases legitimately are not symmetrical.
# Each key references the name of the test scenario and the values in the tuple
# Are the names of the json files.
non_symmetrical_json = {"empty_repeated": ("empty_repeated",)}
| 32.551724 | 81 | 0.733051 |
4989cd340b09d2674ba44f9caf4ca76681a1034f | 1,476 | py | Python | examples/wagsley/wagsley/urls.py | Blogsley/blogsley | 0ca17397af5d53c2fac3affb5eacec2f8d941d37 | [
"MIT"
] | null | null | null | examples/wagsley/wagsley/urls.py | Blogsley/blogsley | 0ca17397af5d53c2fac3affb5eacec2f8d941d37 | [
"MIT"
] | null | null | null | examples/wagsley/wagsley/urls.py | Blogsley/blogsley | 0ca17397af5d53c2fac3affb5eacec2f8d941d37 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.urls import include, path, re_path
from django.contrib import admin
from ariadne_django.views import GraphQLView
from wagtail.admin import urls as wagtailadmin_urls
from wagtail.core import urls as wagtail_urls
from wagtail.documents import urls as wagtaildocs_urls
from puput import urls as puput_urls
from search import views as search_views
from wagsley.schema import schema
print(schema)
urlpatterns = [
path('django-admin/', admin.site.urls),
path('admin/', include(wagtailadmin_urls)),
path('documents/', include(wagtaildocs_urls)),
#path('search/', search_views.search, name='search'),
]
if settings.DEBUG:
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Serve static and media files from development server
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns = urlpatterns + [
path('graphql/', GraphQLView.as_view(schema=schema), name='graphql'),
path('accounts/', include('accounts.urls')),
path('accounts/', include('django.contrib.auth.urls')),
path('accounts/', include('allauth.urls')),
path('events/', include('events.urls')),
re_path(r'^comments/', include('django_comments_xtd.urls')),
path("", include(puput_urls)),
path("", include(wagtail_urls)),
path('', include('home.urls')),
] | 28.941176 | 80 | 0.735095 |
4989d46fdda2f05efd221caf77a2291b849c31f5 | 1,311 | py | Python | tests/unit/core/test_certify_timestamp.py | sys-git/certifiable | a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8 | [
"MIT"
] | null | null | null | tests/unit/core/test_certify_timestamp.py | sys-git/certifiable | a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8 | [
"MIT"
] | 311 | 2017-09-14T22:34:21.000Z | 2022-03-27T18:30:17.000Z | tests/unit/core/test_certify_timestamp.py | sys-git/certifiable | a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `certifiable.core.certify_timestamp` method."""
import datetime
import unittest
from decimal import Decimal
from certifiable import CertifierTypeError
from certifiable.core import certify_timestamp
if __name__ == '__main__':
unittest.main()
| 22.220339 | 64 | 0.514111 |
498b4c183ee96795b8b620014ec7c0080e178c36 | 1,477 | py | Python | rtc_handle_example/replace/com_replace_impl.py | takashi-suehiro/rtmtools | 56ee92d3b3f2ea73d7fa78dfabe6a098e06f6215 | [
"MIT"
] | null | null | null | rtc_handle_example/replace/com_replace_impl.py | takashi-suehiro/rtmtools | 56ee92d3b3f2ea73d7fa78dfabe6a098e06f6215 | [
"MIT"
] | null | null | null | rtc_handle_example/replace/com_replace_impl.py | takashi-suehiro/rtmtools | 56ee92d3b3f2ea73d7fa78dfabe6a098e06f6215 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- Python -*-
"""
\file com_replace_idl_examplefile.py
\brief Python example implementations generated from com_replace.idl
\date $Date$
"""
import omniORB
from omniORB import CORBA, PortableServer
import _GlobalIDL, _GlobalIDL__POA
if __name__ == "__main__":
import sys
# Initialise the ORB
orb = CORBA.ORB_init(sys.argv)
# As an example, we activate an object in the Root POA
poa = orb.resolve_initial_references("RootPOA")
# Create an instance of a servant class
servant = ComReplace_i()
# Activate it in the Root POA
poa.activate_object(servant)
# Get the object reference to the object
objref = servant._this()
# Print a stringified IOR for it
print( orb.object_to_string(objref))
# Activate the Root POA's manager
poa._get_the_POAManager().activate()
# Run the ORB, blocking this thread
orb.run()
| 22.723077 | 69 | 0.666892 |
498d4cc3d6311bb103e45b049930a347b5d6e562 | 588 | py | Python | pyknp_eventgraph/utils.py | ku-nlp/pyknp-eventgraph | 927128ac41098bc45637b02a3c2420d345a41347 | [
"BSD-3-Clause"
] | 7 | 2019-11-23T10:57:35.000Z | 2021-01-03T22:40:13.000Z | pyknp_eventgraph/utils.py | ku-nlp/pyknp-eventgraph | 927128ac41098bc45637b02a3c2420d345a41347 | [
"BSD-3-Clause"
] | 1 | 2021-11-05T02:19:17.000Z | 2021-11-05T02:19:17.000Z | pyknp_eventgraph/utils.py | ku-nlp/pyknp-eventgraph | 927128ac41098bc45637b02a3c2420d345a41347 | [
"BSD-3-Clause"
] | null | null | null | from io import open
from typing import List
from pyknp import KNP, BList
def read_knp_result_file(filename: str) -> List[BList]:
"""Read a KNP result file.
Args:
filename: A filename.
Returns:
A list of :class:`pyknp.knp.blist.BList` objects.
"""
knp = KNP()
blists = []
with open(filename, "rt", encoding="utf-8", errors="replace") as f:
chunk = ""
for line in f:
chunk += line
if line.strip() == "EOS":
blists.append(knp.result(chunk))
chunk = ""
return blists
| 22.615385 | 71 | 0.55102 |
498d7bdbff51b3b458f67d9c20042b421a42d945 | 2,272 | py | Python | freshlybuiltimagebol/OCR_Printed_Text.py | komal3120/freshlybuiltimagebol | fc46f687e326d53ec485e74a943e45b786dad36d | [
"MIT"
] | 3 | 2020-08-01T10:27:58.000Z | 2020-08-09T20:56:49.000Z | freshlybuiltimagebol/OCR_Printed_Text.py | komal3120/freshlybuiltimagebol | fc46f687e326d53ec485e74a943e45b786dad36d | [
"MIT"
] | null | null | null | freshlybuiltimagebol/OCR_Printed_Text.py | komal3120/freshlybuiltimagebol | fc46f687e326d53ec485e74a943e45b786dad36d | [
"MIT"
] | 1 | 2020-06-28T18:02:52.000Z | 2020-06-28T18:02:52.000Z | from cv2 import fastNlMeansDenoisingColored
from cv2 import cvtColor
from cv2 import bitwise_not,threshold,getRotationMatrix2D
from cv2 import warpAffine,filter2D,imread
from cv2 import THRESH_BINARY,COLOR_BGR2GRAY,THRESH_OTSU
from cv2 import INTER_CUBIC,BORDER_REPLICATE,minAreaRect
from numpy import column_stack,array,where
from matplotlib.pyplot import imshow,xticks,yticks
from pytesseract import image_to_string,pytesseract
from PIL import Image
| 38.508475 | 92 | 0.676496 |
498dafdb0fb28c8d01da1b1b893e4aaeb5ff08f2 | 5,944 | py | Python | program/tests/integration_tests_output/graph/graph.py | alienbri/audaces-perps | aa5b0e14eae4944dd0a18af60a72b119ff17fd84 | [
"MIT"
] | null | null | null | program/tests/integration_tests_output/graph/graph.py | alienbri/audaces-perps | aa5b0e14eae4944dd0a18af60a72b119ff17fd84 | [
"MIT"
] | null | null | null | program/tests/integration_tests_output/graph/graph.py | alienbri/audaces-perps | aa5b0e14eae4944dd0a18af60a72b119ff17fd84 | [
"MIT"
] | null | null | null | import yaml
import matplotlib.pyplot as plt
import math
from jsonmerge import merge
from datetime import datetime
import plotly as ply
import pandas as pd
import plotly.express as px
TRANSFORM = False
PLOT_MEMORY = True
NB_INSTRUCTIONS = 1000
f_value_props = {
# [Color, MinOffset, MaxOffset]
"total_collateral": ["", 0, 1],
"total_fee_balance": ["", 0, 1],
"rebalancing_funds": ["#99cc99", 0, 0.5], #
# "rebalanced_v_coin": ["", 0, 1],
"v_coin_amount": ["", 0, 1],
"v_pc_amount": ["", 0, 1],
"open_shorts_v_coin": ["", 0, 1],
"open_longs_v_coin": ["", 0, 1], #
"insurance_fund": ["#808080", 0.2, 1.2],
"market_price": ["#008080", 0.5, 1.5],
"oracle_price": ["#99cc99", 0.5, 1.5],
"equilibrium_price": ["#ff8000", 0.5, 1], #
# "signer_nonce",
# "market_symbol",
# "oracle_address",
# "admin_address",
# "vault_address",
# "quote_decimals",
# "coin_decimals",
# "total_user_balances",
# "last_funding_timestamp",
# "last_recording_timestamp",
# "funding_samples_offset",
# "funding_samples",
# "funding_history_offset",
# "funding_history",
# "funding_balancing_factors",
# "number_of_instances",
}
m_value_props = {
"gc_list_lengths",
"page_full_ratios",
"longs_depths",
"shorts_depths"
}
market_state_line_header = "INFO - MarketDataPoint"
date_time = datetime.now().strftime("%d-%m-%Y_%H-%M-%S")
infile = open("../log/output.log")
outfile = open(
"../log/formatted_output_{}.log".format(date_time), "a")
market_data_json = []
for line in infile:
if (market_state_line_header in line) or ("DEBUG - Program" in line) or ("DEBUG - tx error:" in line) or ("INFO - Tree:" in line) or ("INFO - Initial Conditions:" in line) or ("INFO - Seed for this run:" in line):
outfile.write(line)
if market_state_line_header in line:
market_state_datapoint_str = line[len(
market_state_line_header):].replace("Instance", "").replace("PageInfo", "") # Stripping header
line_json = yaml.load(market_state_datapoint_str)
market_data_json.append(line_json)
# Extract
market_data = {}
value_names = list(f_value_props.keys())
for key in market_data_json[0]:
if key in value_names:
market_data[key] = [data_point[key] for data_point in market_data_json]
# Normalize
if TRANSFORM:
max_per_value = [max(market_data[key]) for key in value_names]
min_per_value = [min(market_data[key]) for key in value_names]
max_per_value[value_names.index(
"market_price")] = max_per_value[value_names.index("oracle_price")]
min_per_value[value_names.index(
"market_price")] = min_per_value[value_names.index("oracle_price")]
scaled_market_data = [[((1 - f_value_props[value_names[i]][1]) * (data_value_point - min_per_value[i]) / abs((max_per_value[i] / f_value_props[value_names[i]][2]) - min_per_value[i])) + f_value_props[value_names[i]][1] for data_value_point in market_data[value_names[i]]]
for i in range(len(value_names))]
else:
max_per_value = [max(market_data[key]) for key in value_names]
total_max = max(max_per_value)
scaling_factors = [int(round(math.log10(total_max / value_max)))
if value_max != 0 else 1 for value_max in max_per_value]
scaled_market_data = [[(10 ** scaling_factors[i]) * data_value_point for data_value_point in market_data[value_names[i]]]
for i in range(len(value_names))]
# Plotting
if PLOT_MEMORY:
nb_lines = min(len(market_data_json), NB_INSTRUCTIONS)
df = pd.DataFrame(market_data_json)
print(df.columns)
print(df.shape)
df["shorts_depths"] = [k[0] for k in df["shorts_depths"]]
df["longs_depths"] = [k[0] for k in df["longs_depths"]]
df["gc_list_lengths"] = [k[0] for k in df["gc_list_lengths"]]
for k in range(len(df["page_full_ratios"][0][0])):
df[f"page_{k}_full_ratio"] = [l[0][k] for l in df["page_full_ratios"]]
df.drop("page_full_ratios", axis=1)
df = df.stack().reset_index()
print(df)
fig = px.line(df, x="level_0", y=0, color="level_1")
fig.show()
# print([len(m["page_full_ratios"]) for m in market_data_json])
page_full_ratios = [
market_data_json[i]["page_full_ratios"][0] for i in range(nb_lines)]
longs_depths = [
market_data_json[i]["longs_depths"] for i in range(nb_lines)
]
shorts_depths = [
market_data_json[i]["shorts_depths"] for i in range(nb_lines)
]
for k in range(len(market_data_json[0]["page_full_ratios"][0])):
plt.plot([page_full_ratios[i][k] for i in range(nb_lines)], label=(
"page_full_ratios for page " + str(k)))
plt.plot()
gc_list_lenghts = [
market_data_json[i]["gc_list_lengths"][0] for i in range(nb_lines)] # TODO Mult instances
# plt.plot([gc_list_lenghts[i] for i in range(nb_lines)], label=(
# "gc_list_length"))
plt.plot(longs_depths, label=("longs_depths"))
plt.plot(shorts_depths, label=("shorts_depths"))
elif TRANSFORM:
for (i, key) in enumerate(value_names):
if f_value_props[key][0] != "":
plt.plot(scaled_market_data[i][:NB_INSTRUCTIONS], label=(
key + " x1e"), color=f_value_props[key][0])
else:
plt.plot(scaled_market_data[i][:NB_INSTRUCTIONS], label=(
key + " x1e"))
else:
for (i, key) in enumerate(value_names):
if f_value_props[key][0] != "":
plt.plot(scaled_market_data[i], label=(
key + " x1e" + str(scaling_factors[i])), color=f_value_props[key][0])
else:
plt.plot(scaled_market_data[i], label=(
key + " x1e"))
plt.legend(prop={'size': 15})
plt.show() # block=False)
# plt.savefig("../log/graph_{}.png".format(date_time), dpi=440)
# gc_list_lengths: [0], page_full_ratios: [[], [0.0, 0.0, 0.0, 0.0, 0.0]]
| 37.383648 | 275 | 0.640646 |
498ebed60829fc81050f096acf226151f138af86 | 525 | py | Python | oTree/consent/__init__.py | jleutgeb/privilege | 2a4f15c98d94d9f1dbf1c4685c5e96d018d58abc | [
"MIT"
] | null | null | null | oTree/consent/__init__.py | jleutgeb/privilege | 2a4f15c98d94d9f1dbf1c4685c5e96d018d58abc | [
"MIT"
] | 11 | 2021-05-06T09:45:30.000Z | 2022-03-01T17:48:35.000Z | oTree/consent/__init__.py | jleutgeb/privilege | 2a4f15c98d94d9f1dbf1c4685c5e96d018d58abc | [
"MIT"
] | null | null | null | from otree.api import *
c = Currency
doc = """
Simple Consent App
Players may only continue after clicking the consent button.
"""
# PAGES
page_sequence = [Consent]
| 14.583333 | 61 | 0.693333 |
498efc2d71a44fd1bc6d2b0987f9eff5df4001b1 | 1,192 | py | Python | src/pytornado/_util.py | airinnova/pytornado | 6127f45af60ab05f15b441bc134089a7e7a59669 | [
"Linux-OpenIB"
] | 16 | 2019-08-13T18:49:14.000Z | 2022-01-11T15:41:12.000Z | src/pytornado/_util.py | airinnova/pytornado | 6127f45af60ab05f15b441bc134089a7e7a59669 | [
"Linux-OpenIB"
] | 24 | 2019-09-11T14:48:01.000Z | 2022-03-18T08:17:52.000Z | src/pytornado/_util.py | airinnova/pytornado | 6127f45af60ab05f15b441bc134089a7e7a59669 | [
"Linux-OpenIB"
] | 5 | 2019-09-20T18:45:45.000Z | 2020-12-08T01:44:43.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright 2019-2020 Airinnova AB and the FramAT authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------
"""
Utils
"""
from numbers import Number
| 34.057143 | 80 | 0.59396 |
498f0ce62fa86447888328db5c5d83ceabc8b302 | 587 | py | Python | test/application/test_auth.py | Ashaba/API-Monitor | 533eb6698fcb5decb48f746784af6894844b3c69 | [
"MIT"
] | null | null | null | test/application/test_auth.py | Ashaba/API-Monitor | 533eb6698fcb5decb48f746784af6894844b3c69 | [
"MIT"
] | 22 | 2018-02-06T19:53:11.000Z | 2021-04-30T20:35:01.000Z | test/application/test_auth.py | Ashaba/API-Monitor | 533eb6698fcb5decb48f746784af6894844b3c69 | [
"MIT"
] | null | null | null | from test.base import BaseTestCase, user_payload
import json
| 32.611111 | 102 | 0.76661 |
498fe8e984fc4170d05d05875ef23082a63dec00 | 5,918 | py | Python | JumpscaleCore/core/generator/JSGenerator.py | grimpy/jumpscaleX_core | c24d6d47fccc0801e578fedb376ef110f7a00bad | [
"Apache-2.0"
] | null | null | null | JumpscaleCore/core/generator/JSGenerator.py | grimpy/jumpscaleX_core | c24d6d47fccc0801e578fedb376ef110f7a00bad | [
"Apache-2.0"
] | null | null | null | JumpscaleCore/core/generator/JSGenerator.py | grimpy/jumpscaleX_core | c24d6d47fccc0801e578fedb376ef110f7a00bad | [
"Apache-2.0"
] | null | null | null | import os
import fnmatch
from pathlib import Path
from jinja2 import Template
from .Metadata import Metadata
| 34.811765 | 109 | 0.535992 |
49905454a4a778d8f4095622f9b3c6a78a737493 | 76,810 | py | Python | h1/api/recovery_project_plan_api.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
] | null | null | null | h1/api/recovery_project_plan_api.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
] | null | null | null | h1/api/recovery_project_plan_api.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
] | null | null | null | """
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from h1.api_client import ApiClient, Endpoint as _Endpoint
from h1.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from h1.model.event import Event
from h1.model.inline_response400 import InlineResponse400
from h1.model.plan import Plan
from h1.model.recovery_project_plan_create import RecoveryProjectPlanCreate
from h1.model.recovery_project_plan_update import RecoveryProjectPlanUpdate
from h1.model.resource_service import ResourceService
from h1.model.tag import Tag
from h1.model.tag_array import TagArray
| 36.524013 | 137 | 0.442833 |
4992878d55b3a8da195170f6eea9d69be14347a9 | 2,059 | py | Python | days/day5.py | vanHavel/AdventOfCode2021 | a83ee21cffff56ba3f49de7af5113bf0b11fea7a | [
"MIT"
] | null | null | null | days/day5.py | vanHavel/AdventOfCode2021 | a83ee21cffff56ba3f49de7af5113bf0b11fea7a | [
"MIT"
] | null | null | null | days/day5.py | vanHavel/AdventOfCode2021 | a83ee21cffff56ba3f49de7af5113bf0b11fea7a | [
"MIT"
] | null | null | null | from collections import defaultdict
from typing import List, Tuple
from aocd import get_data, submit
DAY = 5
YEAR = 2021
if __name__ == '__main__':
input_data = get_data(day=DAY, year=YEAR)
ans1 = part1(input_data)
print(ans1)
#submit(answer=ans1, day=DAY, year=YEAR, part=1)
ans2 = part2(input_data)
print(ans2)
submit(answer=ans2, day=DAY, year=YEAR, part=2)
| 27.092105 | 61 | 0.4949 |
49936fb891f5aa506d6883922c089dfe1817eb4b | 1,108 | py | Python | 215.kthLargestElementInAnArray2.py | ColinTing/Algorithm | 02c8087503298f050deb0fbee6cb887b3aeb6592 | [
"MIT"
] | null | null | null | 215.kthLargestElementInAnArray2.py | ColinTing/Algorithm | 02c8087503298f050deb0fbee6cb887b3aeb6592 | [
"MIT"
] | null | null | null | 215.kthLargestElementInAnArray2.py | ColinTing/Algorithm | 02c8087503298f050deb0fbee6cb887b3aeb6592 | [
"MIT"
] | null | null | null | import random
list = [3,2,3,1,2,4,5,5,6]
k = 4
s = Solution()
print(s.findKthLargest(list,k)) | 25.767442 | 51 | 0.445848 |
4994b9856023b95cccc4144927c2909950d9bad5 | 383 | gyp | Python | binding.gyp | mceSystems/node-windows-pac-resolver | a1eaaa6b74d4e82218e6d975582aab121e12da6f | [
"MIT"
] | 1 | 2021-11-14T01:26:45.000Z | 2021-11-14T01:26:45.000Z | binding.gyp | mceSystems/node-windows-pac-resolver | a1eaaa6b74d4e82218e6d975582aab121e12da6f | [
"MIT"
] | 1 | 2021-08-31T21:38:42.000Z | 2021-08-31T21:38:42.000Z | binding.gyp | mceSystems/node-windows-pac-resolver | a1eaaa6b74d4e82218e6d975582aab121e12da6f | [
"MIT"
] | 1 | 2021-11-14T01:26:12.000Z | 2021-11-14T01:26:12.000Z | {
"targets": [
{
"target_name": "binding",
"sources": [
"native\\winhttpBindings.cpp"
],
"include_dirs": [
"<!@(node -p \"require('node-addon-api').include\")"
],
"libraries": [
"WinHTTP.lib",
"-DelayLoad:node.exe"
],
"msbuild_settings": {
"ClCompile": {
"RuntimeLibrary": "MultiThreaded"
}
}
}
]
}
| 16.652174 | 68 | 0.48564 |
4994cdca869fe06dd8910a681063b2822b7a3d86 | 2,122 | py | Python | diplom_test/data_reader.py | CrackedSTone/algorithm-detects-liver-pathology | d52d08e4e6931b3502f083f20d6332f7b6839a3b | [
"Apache-2.0"
] | 8 | 2019-04-09T07:11:26.000Z | 2020-02-27T16:51:26.000Z | diplom_test/data_reader.py | il-yanko/algorithm-detects-liver-pathology | d52d08e4e6931b3502f083f20d6332f7b6839a3b | [
"Apache-2.0"
] | null | null | null | diplom_test/data_reader.py | il-yanko/algorithm-detects-liver-pathology | d52d08e4e6931b3502f083f20d6332f7b6839a3b | [
"Apache-2.0"
] | 2 | 2019-04-04T07:13:02.000Z | 2020-02-06T04:58:34.000Z | import glob
import numpy as np
#import cv2
from PIL import Image
#import os.path
# ALTERNATIVE LOADER:
'''
# process RGB/grayscale
def rgb_to_gray(rgb):
# scalar product of colors with certain theoretical coefficients according to the YUV system
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114]).round(3).astype(int)
# download folder BMP
def get_all_bmp(full_dir):
# to calculate number of files in the folder
file_number = len(next(os.walk(full_dir))[2])
# print(fileNumber, "files were found")
img_arr = list()
for i in range(1, file_number + 1):
img_arr.append(cv2.imread(full_dir + '/' + str(i) + ".bmp"))
print(len(img_arr), "images were downloaded")
return img_arr
def get_all_img_make_gray(cwd, folder_name):
path = cwd + "/" + folder_name
print("\nPath = ", path)
img_arr = get_all_bmp(path)
for i in range(len(img_arr)):
img_arr[i] = rgb_to_gray(img_arr[i])
return img_arr
'''
# test load .csv
'''
import os.path
cwd = os.getcwd()
a = cwd + "/glcm/auh/csv/"
data = DataReader.read_directory(a)
print(data[0])
''' | 29.068493 | 111 | 0.615928 |
49983ba3d7a780b5fb33eabb069b3531df6c3624 | 3,351 | py | Python | docs/conf.py | arashbm/dag-python | a62761d516daf3a129f6a75359e1b09047ede6f2 | [
"MIT"
] | null | null | null | docs/conf.py | arashbm/dag-python | a62761d516daf3a129f6a75359e1b09047ede6f2 | [
"MIT"
] | null | null | null | docs/conf.py | arashbm/dag-python | a62761d516daf3a129f6a75359e1b09047ede6f2 | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Reticula'
copyright = '2022'
author = 'Arash Badie-Modiri'
# The full version, including alpha/beta/rc tags
release = '0.0.4'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinxcontrib.bibtex'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
primary_domain = None
nitpicky = True
rst_prolog = """
.. role:: py(code)
:language: python
:class: highlight
.. role:: cpp(code)
:language: cpp
:class: highlight
"""
# REs for Python signatures with types
import re
typed_py_re = re.compile(
r'''^ ([\w.]*\.)? # class name(s)
(\w+(?: \[[^\]]+\])?) \s* # thing name
(?: \(\s*(.*)\s*\) # optional: arguments
(?:\s* -> \s* (.*))? # return annotation
)? $ # and nothing more
''', re.VERBOSE)
import sphinx.domains.python
sphinx.domains.python.py_sig_re = typed_py_re
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'furo'
pygments_style = "sphinx"
pygments_dark_style = "monokai"
html_title = "Reticula"
import os.path
html_theme_options = {
"source_repository": "https://github.com/reticula-network/reticula-python",
"source_branch": "main",
"source_directory": "docs/",
"footer_icons": [
{
"name": "GitHub",
"url": "https://github.com/reticula-network",
"html": read_icon("github.svg"),
"class": "",
}, {
"name": "PyPi",
"url": "https://pypi.org/project/reticula/",
"html": read_icon("pypi.svg"),
"class": "",
},
],
}
bibtex_bibfiles = ['references.bib']
bibtex_default_style = 'unsrt'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 28.887931 | 79 | 0.617428 |
4998582ea46c71688c285dfd2591280666ab63f8 | 1,455 | py | Python | libs/cloner.py | Rookout/log-scanner | bd8b940660a9f40068151dfca514e85aa730bfc0 | [
"Apache-2.0"
] | null | null | null | libs/cloner.py | Rookout/log-scanner | bd8b940660a9f40068151dfca514e85aa730bfc0 | [
"Apache-2.0"
] | 3 | 2021-05-05T18:30:21.000Z | 2022-03-10T11:32:52.000Z | libs/cloner.py | Rookout/log-scanner | bd8b940660a9f40068151dfca514e85aa730bfc0 | [
"Apache-2.0"
] | 1 | 2019-12-16T22:27:45.000Z | 2019-12-16T22:27:45.000Z | import os
import sys
import shutil
import random
import stat
import string
import logging
from git import Repo
try: # macOS
BASE_CLONE_LOCATION = os.path.join(os.path.dirname(sys.modules['__main__'].__file__), "current_clone")
except: # Windows
BASE_CLONE_LOCATION = os.path.join(os.getcwd(), "current_clone")
try:
GITHUB_TOKEN = os.environ["GITHUB_TOKEN"]
except:
logging.basicConfig(level=logging.INFO, format="%(levelname)s - %(message)s")
logging.error("GITHUB_TOKEN must be supplied as environment variable")
quit()
# handles deleting readonly files with shutil
| 30.957447 | 106 | 0.754639 |
499a41cfbffd9bf9473869aaf693707dd595ba03 | 6,671 | py | Python | tests/test_formDef.py | swhume/odmlib | 597f71c60f4c6bd8639c92e9fc0ae71b8a5416a7 | [
"MIT"
] | 9 | 2021-09-15T12:26:30.000Z | 2022-03-30T10:14:14.000Z | tests/test_formDef.py | swhume/odmlib | 597f71c60f4c6bd8639c92e9fc0ae71b8a5416a7 | [
"MIT"
] | 1 | 2021-09-28T09:05:01.000Z | 2021-09-28T09:05:01.000Z | tests/test_formDef.py | swhume/odmlib | 597f71c60f4c6bd8639c92e9fc0ae71b8a5416a7 | [
"MIT"
] | 1 | 2021-09-29T04:50:23.000Z | 2021-09-29T04:50:23.000Z | from unittest import TestCase
import json
import odmlib.odm_1_3_2.model as ODM
| 57.017094 | 122 | 0.665867 |
499a70e266d8579796d64d1f4d58f86d8e09e3c3 | 143 | py | Python | src/Utilities/__init__.py | sigseg5/nometa-tg | 7d0d9f0cf5d8fd98a3808c07a5c44d30f1b13032 | [
"MIT"
] | 3 | 2020-12-15T07:44:58.000Z | 2022-03-11T18:57:44.000Z | src/Utilities/__init__.py | sigseg5/nometa-tg | 7d0d9f0cf5d8fd98a3808c07a5c44d30f1b13032 | [
"MIT"
] | null | null | null | src/Utilities/__init__.py | sigseg5/nometa-tg | 7d0d9f0cf5d8fd98a3808c07a5c44d30f1b13032 | [
"MIT"
] | null | null | null | from src.Utilities import cmd_logger
from src.Utilities import metadata_worker
from src.Utilities import misc
from src.Utilities import runner
| 28.6 | 41 | 0.86014 |
499c8c68960d9d5e2ecf3da660784d02e54b3419 | 9,062 | py | Python | db_eplusout_reader/processing/esofile_time.py | DesignBuilderSoftware/db-esofile-reader | a5a80a8069e7eeb30af8ceeca28eb33e9e4f7a50 | [
"MIT"
] | 1 | 2021-07-15T14:16:10.000Z | 2021-07-15T14:16:10.000Z | db_eplusout_reader/processing/esofile_time.py | DesignBuilderSoftware/db-esofile-reader | a5a80a8069e7eeb30af8ceeca28eb33e9e4f7a50 | [
"MIT"
] | 1 | 2022-03-02T08:30:20.000Z | 2022-03-08T07:57:57.000Z | db_eplusout_reader/processing/esofile_time.py | DesignBuilderSoftware/db-esofile-reader | a5a80a8069e7eeb30af8ceeca28eb33e9e4f7a50 | [
"MIT"
] | null | null | null | import calendar
import logging
from collections import namedtuple
from datetime import datetime, timedelta
from db_eplusout_reader.constants import RP, TS, A, D, H, M
from db_eplusout_reader.exceptions import LeapYearMismatch, StartDayMismatch
EsoTimestamp = namedtuple("EsoTimestamp", "month day hour end_minute")
def parse_eso_timestamp(year, month, day, hour, end_minute):
"""
Convert E+ time format to format acceptable by datetime module.
EnergyPlus date and time format is not compatible with
datetime.datetime module. This because hourly information
can be '24' and end minute can be '60' - which is not
allowed.
To get around the issue, logic is in place to
convert raw input into format as required for datetime
(or datetime like) module.
"""
if hour == 24 and end_minute == 60:
shifted_datetime = datetime(year, month, day, hour - 1)
corrected_datetime = shifted_datetime + timedelta(hours=1)
elif end_minute == 60:
# Convert last timestep of an hour
corrected_datetime = datetime(year, month, day, hour, 0)
elif hour == 0:
corrected_datetime = datetime(year, month, day, hour, end_minute)
else:
corrected_datetime = datetime(year, month, day, hour - 1, end_minute)
return corrected_datetime
def get_month_n_days_from_cumulative(monthly_cumulative_days):
"""
Transform consecutive number of days in monthly data to actual number of days.
EnergyPlus monthly results report a total consecutive number of days for each day.
Raw data reports table as 31, 59..., this function calculates and returns
actual number of days for each month 31, 28...
"""
old_num = monthly_cumulative_days.pop(0)
m_actual_days = [old_num]
for num in monthly_cumulative_days:
new_num = num - old_num
m_actual_days.append(new_num)
old_num += new_num
return m_actual_days
def find_num_of_days_annual(ann_num_of_days, rp_num_of_days):
"""Use runperiod data to calculate number of days for each annual period."""
days = rp_num_of_days[0] // len(ann_num_of_days)
return [days for _ in ann_num_of_days]
def get_num_of_days(cumulative_days):
"""Split num of days and date."""
num_of_days = {}
for table, values in cumulative_days.items():
if table == M:
# calculate actual number of days for monthly table
num_of_days[M] = get_month_n_days_from_cumulative(values)
else:
num_of_days[table] = values
# calculate number of days for annual table for
# an incomplete year run or multi year analysis
if A in cumulative_days.keys() and RP in cumulative_days.keys():
num_of_days[A] = find_num_of_days_annual(num_of_days[A], num_of_days[RP])
return num_of_days
def check_year_increment(first_step_data, current_step_data):
"""Check if year value should be incremented inside environment table."""
if first_step_data is current_step_data:
# do not increment first step
return False
return first_step_data >= current_step_data
def generate_datetime_dates(raw_dates, year):
"""Generate datetime index for a given period."""
dates = []
for i in range(0, len(raw_dates)):
# based on the first, current and previous
# steps decide if the year should be incremented
if check_year_increment(raw_dates[0], raw_dates[i]):
year += 1
# year can be incremented automatically when converting to datetime
date = parse_eso_timestamp(year, *raw_dates[i])
dates.append(date)
return dates
def update_start_dates(dates):
"""Set accurate first date for monthly+ tables."""
timestep_to_monthly_dates = {k: dates[k] for k in dates if k in [TS, H, D, M]}
if timestep_to_monthly_dates:
for frequency in (M, A, RP):
if frequency in dates:
dates[frequency] = set_start_date(
dates[frequency], timestep_to_monthly_dates
)
return dates
def get_n_days_from_cumulative(cumulative_days):
"""Convert cumulative days to number of days pers step."""
if cumulative_days:
# Separate number of days data if any M to RP table is available
num_of_days = get_num_of_days(cumulative_days)
else:
num_of_days = None
return num_of_days
def validate_year(year, is_leap, date, day):
"""Check if date for given and day corresponds to specified year."""
if calendar.isleap(year) is is_leap:
test_datetime = datetime(year, date.month, date.day)
test_day = test_datetime.strftime("%A")
if day != test_day and day not in (
"SummerDesignDay",
"WinterDesignDay",
):
max_year = datetime.now().year + 10 # give some choices from future
suitable_years = get_allowed_years(
is_leap, date, day, max_year, n_samples=3
)
formatted_day = test_datetime.strftime("%Y-%m-%d")
raise StartDayMismatch(
"Start day '{}' for given day '{}'"
" does not correspond to real calendar day '{}'!"
"\nEither set 'year' kwarg as 'None' to identify year automatically"
" or use one of '{}'.".format(
day, formatted_day, test_day, suitable_years
)
)
else:
raise LeapYearMismatch(
"Specified year '{0}' does not match expected calendar data!"
" Outputs are reported for {1} year"
" but given year '{0}' is {2}."
" Either set 'year' kwarg as 'None' to seek year automatically"
" or use {1} year.".format(
year,
"leap" if is_leap else "standard",
"standard" if is_leap else "leap",
)
)
def is_leap_year_ts_to_d(raw_dates_arr):
"""Check if first year is leap based on timestep, hourly or daily data."""
for tup in raw_dates_arr:
if (tup.month, tup.day) == (2, 29):
return True
if check_year_increment(raw_dates_arr[0], tup):
# stop once first year is covered
return False
return False
def seek_year(is_leap, date, day, max_year):
"""Find first year matching given criteria."""
for year in range(max_year, 0, -1):
if day in ("SummerDesignDay", "WinterDesignDay"):
logging.info("Sizing simulation, setting year to 2002.")
year = 2002
break
if calendar.isleap(year) is is_leap:
test_datetime = datetime(year, date.month, date.day)
test_start_day = test_datetime.strftime("%A")
if day == test_start_day:
break
else:
raise ValueError(
"Failed to automatically find year for following arguments"
" is_leap='{}', date='{}' and day='{}'."
" It seems that there ins't a year between 0 - {} matching"
" date and day of week combination.".format(is_leap, date, day, max_year)
)
return year
def get_allowed_years(
is_leap,
first_date,
first_day,
max_year,
n_samples=4,
):
"""Get a sample of allowed years for given conditions."""
allowed_years = []
for _ in range(n_samples):
year = seek_year(is_leap, first_date, first_day, max_year)
max_year = year - 1
allowed_years.append(year)
return allowed_years
def get_lowest_frequency(all_frequencies):
"""Find the shortest frequency from given ones."""
return next((freq for freq in (TS, H, D, M, A, RP) if freq in all_frequencies))
def convert_raw_dates(raw_dates, year):
"""Transform raw E+ date and time data into datetime.datetime objects."""
dates = {}
for frequency, value in raw_dates.items():
dates[frequency] = generate_datetime_dates(value, year)
return dates
def convert_raw_date_data(
raw_dates, #: Dict[str, List[EsoTimestamp]],
days_of_week, #: Dict[str, List[str]],
year, #: Optional[int],
): # -> Dict[str, List[datetime]]:
"""Convert EnergyPlus dates into standard datetime format."""
lowest_frequency = get_lowest_frequency(list(raw_dates.keys()))
if lowest_frequency in {TS, H, D}:
lowest_frequency_values = raw_dates[lowest_frequency]
is_leap = is_leap_year_ts_to_d(lowest_frequency_values)
first_date = lowest_frequency_values[0]
first_day = days_of_week[lowest_frequency][0]
if year is None:
year = seek_year(is_leap, first_date, first_day, 2020)
else:
validate_year(year, is_leap, first_date, first_day)
else:
# allow any year defined or set EnergyPlus default 2002
year = year if year else 2002
dates = convert_raw_dates(raw_dates, year)
return update_start_dates(dates)
| 36.688259 | 86 | 0.647318 |
499ce59557a4ca3973fb3d83ed14750b0515612a | 772 | py | Python | setup.py | EliRibble/parentopticon | 8593d7f72fac9706f1bd8e8326ac932f5af95a32 | [
"MIT"
] | null | null | null | setup.py | EliRibble/parentopticon | 8593d7f72fac9706f1bd8e8326ac932f5af95a32 | [
"MIT"
] | null | null | null | setup.py | EliRibble/parentopticon | 8593d7f72fac9706f1bd8e8326ac932f5af95a32 | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="parentopticon",
version="0.0.1",
author="Eli Ribble",
author_email="junk@theribbles.org",
description="A system for controlling kids access to computers.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/eliribble/parentopticon",
packages=setuptools.find_packages(),
install_requires = [
"arrow==0.15.5",
"chryso==2.1",
"flask==1.1.2",
"flask-login==0.5.0",
"Jinja2==2.11.1",
"psutil==5.6.6",
"requests==2.23.0",
"toml==0.10.0",
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 24.125 | 66 | 0.682642 |
499cfa9ec9626bc8ee462071e912f59d22f18419 | 11,701 | py | Python | src/race/src/my_lane_detection/slidewindow_ver2.py | young43/ISCC_2020 | 2a7187410bceca901bd87b753a91fd35b73ca036 | [
"MIT"
] | 3 | 2020-11-13T04:59:27.000Z | 2021-04-02T06:36:03.000Z | src/race/src/my_lane_detection/slidewindow_ver2.py | yongbeomkwak/ISCC_2021 | 7e7e5a8a14b9ed88e1cfbe2ee585fe24e4701015 | [
"MIT"
] | null | null | null | src/race/src/my_lane_detection/slidewindow_ver2.py | yongbeomkwak/ISCC_2021 | 7e7e5a8a14b9ed88e1cfbe2ee585fe24e4701015 | [
"MIT"
] | 5 | 2020-09-13T09:06:16.000Z | 2021-06-19T02:31:23.000Z | import cv2
import numpy as np
import matplotlib.pyplot as plt
from findpoint import FindPoint
| 52.470852 | 124 | 0.557217 |
499d165572daf46e08305c7a946da82bbf43582f | 767 | py | Python | broadcasts/managers.py | foolwealth/django-site-broadcasts | f870fbf96cde7ea29fc8179e71ab738d2192628f | [
"MIT"
] | 5 | 2016-08-08T07:31:53.000Z | 2020-01-21T00:10:22.000Z | broadcasts/managers.py | foolwealth/django-site-broadcasts | f870fbf96cde7ea29fc8179e71ab738d2192628f | [
"MIT"
] | 2 | 2015-05-22T00:47:14.000Z | 2018-08-15T19:07:21.000Z | broadcasts/managers.py | bennylope/django-site-broadcasts | 0c7556462e7aa09a48ccce4ca8d0b4827a2ce190 | [
"MIT"
] | 2 | 2015-05-21T23:23:16.000Z | 2018-08-15T17:03:51.000Z | from django.db import models
from django.db.models import Q
from django.utils import timezone
| 29.5 | 73 | 0.65189 |
499e17c024651f588861f4597a8d8cf5d56a914e | 11,114 | py | Python | google/cloud/gkehub_v1/types/membership.py | googleapis/python-gke-hub | 9f620c83af1da8f27fc6933716142164d26647f2 | [
"Apache-2.0"
] | 3 | 2021-06-04T06:10:44.000Z | 2021-12-30T02:19:30.000Z | google/cloud/gkehub_v1/types/membership.py | renovate-bot/python-gke-hub | 9f620c83af1da8f27fc6933716142164d26647f2 | [
"Apache-2.0"
] | 43 | 2021-03-16T14:10:35.000Z | 2022-03-07T16:07:33.000Z | google/cloud/gkehub_v1/types/membership.py | renovate-bot/python-gke-hub | 9f620c83af1da8f27fc6933716142164d26647f2 | [
"Apache-2.0"
] | 3 | 2021-03-15T20:46:05.000Z | 2022-01-29T08:11:13.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.gkehub.v1",
manifest={
"Membership",
"MembershipEndpoint",
"GkeCluster",
"KubernetesMetadata",
"MembershipState",
"Authority",
},
)
__all__ = tuple(sorted(__protobuf__.manifest))
| 40.414545 | 110 | 0.65557 |
499e67a21d0dc3cde30c8234f79e3aae5c8b02f5 | 1,728 | py | Python | tests/test_tasks.py | alarig/peeringdb-py | 917cda69f7bc05be008faa66875827d408328609 | [
"Apache-2.0"
] | 71 | 2015-11-10T04:55:54.000Z | 2022-02-25T20:03:48.000Z | tests/test_tasks.py | alarig/peeringdb-py | 917cda69f7bc05be008faa66875827d408328609 | [
"Apache-2.0"
] | 53 | 2016-01-29T12:15:38.000Z | 2022-03-04T07:03:41.000Z | tests/test_tasks.py | alarig/peeringdb-py | 917cda69f7bc05be008faa66875827d408328609 | [
"Apache-2.0"
] | 28 | 2016-02-03T07:59:34.000Z | 2022-02-27T19:25:06.000Z | # Units tests to directly cover both task wrapper modules -
# not possible with pytest parametrization
import pytest
import sys
from collections import defaultdict
from peeringdb import _tasks_sequential
TASKS_MODS = [_tasks_sequential]
# pre-async compat. import
if sys.version_info >= (3, 5):
from peeringdb import _tasks_async
TASKS_MODS.append(_tasks_async)
# dummy resources for task objects
DATA_EXPECTED = {ResOne: [1, 2], ResTwo: [1, 2]}
# dummy context classes parameterized on tasks module
| 24.338028 | 73 | 0.622106 |
499e8f87034a01b4664449514e2ad3632e9bb2a1 | 1,074 | py | Python | dp/kadane.py | williamsmj/prakhar1989-algorithms | 82e64ce9d451b33c1bce64a63276d6341a1f13b0 | [
"WTFPL"
] | 2,797 | 2015-01-01T15:52:13.000Z | 2022-03-28T20:52:37.000Z | dp/kadane.py | williamsmj/prakhar1989-algorithms | 82e64ce9d451b33c1bce64a63276d6341a1f13b0 | [
"WTFPL"
] | 35 | 2015-01-07T03:11:18.000Z | 2021-06-27T09:09:55.000Z | dp/kadane.py | williamsmj/prakhar1989-algorithms | 82e64ce9d451b33c1bce64a63276d6341a1f13b0 | [
"WTFPL"
] | 887 | 2015-01-02T06:38:19.000Z | 2022-03-26T20:33:11.000Z | """
Problem: The maximum subarray problem is the task of finding the
contiguous subarray within a one-dimensional array of numbers
(containing at least one positive number) which has the largest sum.
Solution:
The recurrence relation that we solve at each step is the following -
Let S[i] = be the max value contigous subsequence till the ith element
of the array.
Then S[i] = max(A[i], A[i] + S[i - 1])
At each step, we have two options
1) We add the ith element to the sum till the i-1th elem
2) We start a new array starting at i
We take a max of both these options and accordingly build up the array.
"""
if __name__ == "__main__":
x = [-2, -3, 4, -1, -2, 1, 5, -3]
y = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
z = [-1, 3, -5, 4, 6, -1, 2, -7, 13, -3]
print map(max_value_contigous_subsequence, [x, y, z])
| 33.5625 | 71 | 0.645251 |
499ea6990d99f7681e517c981073364d93c42de3 | 3,215 | py | Python | online_recommend/full_main.py | hfhfn/db_recommend | 3a9f03157bb81e295f8cff30fbc7ad2a8cfdf963 | [
"MIT"
] | null | null | null | online_recommend/full_main.py | hfhfn/db_recommend | 3a9f03157bb81e295f8cff30fbc7ad2a8cfdf963 | [
"MIT"
] | null | null | null | online_recommend/full_main.py | hfhfn/db_recommend | 3a9f03157bb81e295f8cff30fbc7ad2a8cfdf963 | [
"MIT"
] | null | null | null | from user_portrait import SaveUserProfile
from action_profile_recall import save_inverted_table, SaveUserRecall
from movie_recall import SaveMovieRecall
from movie_portrait import save_topic_weights_normal, save_predata, save_textrank, save_cut_words, save_tfidf, \
save_topK_idf_textrank, save_topK_tfidf_textrank, save_keyword_weights, save_topic_words, save_movie_profile, \
save_topic_weights, get_cv_idf_model
from stat_factor import save_movie_hot_sort, save_movie_hot_factor, save_movie_time, save_movie_year_factor, \
save_movie_score_factor
from action_similar_recall import SaveUserSimilarRecall
from utils import user_recall_db
from content_recall import Update
if __name__ == '__main__':
# merge_action 3
# movie_protrait_run()
# filter_factor_run()
# movie_recall_run()
user_profile_run()
# user_profile_recall_run()
# user_similar_recall_run()
pass
| 33.489583 | 115 | 0.765163 |
499ebc213eb730a6668f7fe2c42632f4551f69a9 | 1,962 | py | Python | libcst/codemod/commands/strip_strings_from_types.py | rowillia/LibCST | 621d9a949a57a9100b7f2d1465ebd32aaeddb05c | [
"Apache-2.0"
] | null | null | null | libcst/codemod/commands/strip_strings_from_types.py | rowillia/LibCST | 621d9a949a57a9100b7f2d1465ebd32aaeddb05c | [
"Apache-2.0"
] | null | null | null | libcst/codemod/commands/strip_strings_from_types.py | rowillia/LibCST | 621d9a949a57a9100b7f2d1465ebd32aaeddb05c | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# pyre-strict
from typing import Union
import libcst
import libcst.matchers as m
from libcst import parse_expression
from libcst.codemod import VisitorBasedCodemodCommand
from libcst.codemod.visitors import AddImportsVisitor
from libcst.metadata import QualifiedNameProvider
| 38.470588 | 95 | 0.700306 |
49a041d58cf1e03640f9ec85a2adef02ee0d008f | 1,309 | py | Python | nasa_fevo/InMemoryCache.py | lradomski10m/nasa-fevo | 92cc11097766e94346bc2b0b0819e9191f8b04bf | [
"MIT"
] | null | null | null | nasa_fevo/InMemoryCache.py | lradomski10m/nasa-fevo | 92cc11097766e94346bc2b0b0819e9191f8b04bf | [
"MIT"
] | null | null | null | nasa_fevo/InMemoryCache.py | lradomski10m/nasa-fevo | 92cc11097766e94346bc2b0b0819e9191f8b04bf | [
"MIT"
] | null | null | null | from typing import Dict, Union
from nasa_fevo.Cache import Cache
from datetime import datetime
CACHE_EXPIRATION_TIMER_MINUTES = 10
# very simple in-memory cache
# meant for small # of items | 29.088889 | 82 | 0.571429 |
49a08ee15b6bd0370e65813bd6b2e298574e430e | 5,079 | py | Python | get_embeddings.py | PauPerezT/WEBERT | e189f84de14de6d4bae785e48c8a36eb1afaa46f | [
"Apache-1.1"
] | 3 | 2020-07-28T10:00:44.000Z | 2021-01-25T17:48:01.000Z | get_embeddings.py | PauPerezT/WEBERT | e189f84de14de6d4bae785e48c8a36eb1afaa46f | [
"Apache-1.1"
] | 3 | 2020-12-07T18:45:16.000Z | 2020-12-07T18:45:27.000Z | get_embeddings.py | PauPerezT/WEBERT | e189f84de14de6d4bae785e48c8a36eb1afaa46f | [
"Apache-1.1"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 27 20:45:40 2020
@author: P.A. Perez-Toro
"""
#%%Libraries
import argparse
from utils import create_fold,str2bool
import csv
from tqdm import tqdm
import os
import gc
import numpy as np
import pandas as pd
from WEBERT import BERT, BETO, SciBERT
#%%
if __name__ == '__main__':
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-f','--files_path', default='./texts/',help='File folder of the set of documents', action="store")
parser.add_argument('-sv','--save_path', default='./bert_embeddings/',help='Path to save the embeddings', action="store")
parser.add_argument('-bm','--bert_model', default='Bert',help='Choose between three different BERT models: Bert, Beto and SciBert. By default BERT', choices=('Bert','Beto', 'SciBert'))
parser.add_argument('-d','--dynamic', type=str2bool, nargs='?',const=False, default=True, help='Boolean value to get dynamic features= True. By default True.', choices=(True, False))
parser.add_argument('-st','--static', type=str2bool, nargs='?',const=True, default=False, help='Boolean value to get static features= True from the embeddings such as mean, standard deviation, kurtosis, skeweness, min and max. By default False.', choices=(True, False))
parser.add_argument('-l','--language', default='english',help='Chosen language (only available for BERT model). Here is available only english or spanish. By default english.', choices=('english', 'spanish'))
parser.add_argument('-sw','--stopwords', type=str2bool, nargs='?',const=True, default=False, help='Boolean value, set True if you want to remove stopwords, By default False.' , choices=(True, False))
parser.add_argument('-m','--model', default='base', help='Bert models, two options base and large. By default base.', choices=('base', 'large'))
parser.add_argument('-ca','--cased', type=str2bool, nargs='?',const=True, default=False, help='Boolean value for cased= True o lower-cased= False models. By defaul False.', choices=(True, False))
parser.add_argument('-cu','--cuda', type=str2bool, nargs='?', const=True, default=False, help='Boolean value for using cuda to compute the embeddings (True). By defaul False.', choices=(True, False))
#parser.print_help()
args = parser.parse_args()
files_path=args.files_path
save_path=args.save_path
bert_model=str(args.bert_model)
language=str(args.language)
stopwords=args.stopwords
model=str(args.model)
cased=args.cased
dynamic=args.dynamic
static=args.static
cuda=args.cuda
files=np.hstack(sorted([f for f in os.listdir(files_path) if f.endswith('.txt')]))
file_names=np.hstack([".".join(f.split(".")[:-1]) for f in files ])
folder_path_static=save_path+'/Static/'
folder_path=save_path+'/Dynamic/'
create_fold(folder_path)
create_fold(folder_path_static)
j=0
neurons=768
if (model=='large') & (bert_model!='SciBert'):
neurons=1024
if static:
labelstf=[]
labelstf.append('File')
for n in range (neurons):
labelstf.append('Avg Neuron'+str(n+1))
for n in range (neurons):
labelstf.append('STD Neuron'+str(n+1))
for n in range (neurons):
labelstf.append('Skew Neuron'+str(n+1))
for n in range (neurons):
labelstf.append('Kurt Neuron'+str(n+1))
for n in range (neurons):
labelstf.append('Min Neuron'+str(n+1))
for n in range (neurons):
labelstf.append('Max Neuron'+str(n+1))
with open(folder_path_static+bert_model+'_Static_Features.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(labelstf)
pbar=tqdm(files)
for file in pbar:
pbar.set_description("Processing %s" % file)
data = pd.read_csv(files_path+'/'+file, sep='\t', header=None)
file_name=file_names[j]
data_input=list(data[0])
if bert_model=='Bert':
bert=BERT(data_input,file_name, language=language, stopwords=stopwords,
model=model, cased=cased, cuda=cuda)
elif bert_model=='Beto':
bert=BETO(data_input,file_name, stopwords=stopwords,
model=model, cased=cased, cuda=cuda)
elif bert_model=='SciBert':
bert=SciBERT(data_input,file_name, stopwords=stopwords,
cased=cased, cuda=cuda)
j+=1
if static:
data_stat=bert.get_bert_embeddings(folder_path, dynamic=dynamic, static=static)
with open(folder_path_static+bert_model+'_Static_Features.csv', 'a') as f:
writer = csv.writer(f)
writer.writerow(np.hstack((file_name, data_stat)))
gc.collect()
else:
bert.get_bert_embeddings(folder_path, dynamic=dynamic, static=static)
gc.collect() | 40.309524 | 273 | 0.638905 |
49a17ebec39db4cc9cf78ab25d40d4459000d689 | 264 | py | Python | AiSD_03/Zadanie_7.py | DLQuake/Algorytmy_i_struktury_danych | 210d0b4e868e5cc9d6aa730a2297d8074e4d52a1 | [
"MIT"
] | null | null | null | AiSD_03/Zadanie_7.py | DLQuake/Algorytmy_i_struktury_danych | 210d0b4e868e5cc9d6aa730a2297d8074e4d52a1 | [
"MIT"
] | null | null | null | AiSD_03/Zadanie_7.py | DLQuake/Algorytmy_i_struktury_danych | 210d0b4e868e5cc9d6aa730a2297d8074e4d52a1 | [
"MIT"
] | null | null | null | # Zaimplementowa funkcj n_sums(n: int) -> listint, ktra zwrci wszystkie n-cyfrowe liczby o takich samych sumach na indeksach parzystych i nieparzystych. Przykadowo, dla 3 cyfr bd to liczby m.in. 198, 220, 891, 990
print(n_sums(3)) | 44 | 220 | 0.75 |
49a34879fe64e92596a7c6eaecaaa74f1636d0c6 | 2,327 | py | Python | wsgiservice/xmlserializer.py | beekpr/wsgiservice | 9ba21060ff19cbff984424b184a5b2829fe644bb | [
"BSD-2-Clause"
] | 1 | 2018-01-19T10:44:15.000Z | 2018-01-19T10:44:15.000Z | wsgiservice/xmlserializer.py | beekpr/wsgiservice | 9ba21060ff19cbff984424b184a5b2829fe644bb | [
"BSD-2-Clause"
] | 2 | 2015-10-12T07:53:57.000Z | 2016-06-17T11:13:08.000Z | wsgiservice/xmlserializer.py | beekpr/wsgiservice | 9ba21060ff19cbff984424b184a5b2829fe644bb | [
"BSD-2-Clause"
] | null | null | null | """Helper to convert Python data structures into XML. Used so we can return
intuitive data from resource methods which are usable as JSON but can also be
returned as XML.
"""
import re
from xml.sax.saxutils import escape as xml_escape
# Regular expression matching all the illegal XML characters.
RE_ILLEGAL_XML = re.compile(
u'([\u0000-\u0008\u000b-\u000c\u000e-\u001f\ufffe-\uffff])|([%s-%s][^%s-%s])|([^%s-%s][%s-%s])|([%s-%s]$)|(^[%s-%s])' % \
(unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff),
unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff),
unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff)))
def dumps(obj, root_tag):
"""Serialize :arg:`obj` to an XML :class:`str`.
"""
xml = _get_xml_value(obj)
if xml:
# Remove invalid XML
xml = RE_ILLEGAL_XML.sub('', xml)
if root_tag is None:
return xml
else:
root = root_tag
return '<' + root + '>' + xml + '</' + root + '>'
def _get_xml_value(value):
"""Convert an individual value to an XML string. Calls itself
recursively for dictionaries and lists.
Uses some heuristics to convert the data to XML:
- In dictionaries, the keys become the tag name.
- In lists the tag name is 'child' with an order-attribute giving
the list index.
- All other values are included as is.
All values are escaped to fit into the XML document.
:param value: The value to convert to XML.
:type value: Any valid Python value
:rtype: string
"""
retval = []
if isinstance(value, dict):
for key, value in value.iteritems():
retval.append('<' + xml_escape(str(key)) + '>')
retval.append(_get_xml_value(value))
retval.append('</' + xml_escape(str(key)) + '>')
elif isinstance(value, list):
for key, value in enumerate(value):
retval.append('<child order="' + xml_escape(str(key)) + '">')
retval.append(_get_xml_value(value))
retval.append('</child>')
elif isinstance(value, bool):
retval.append(xml_escape(str(value).lower()))
elif isinstance(value, unicode):
retval.append(xml_escape(value.encode('utf-8')))
else:
retval.append(xml_escape(str(value)))
return "".join(retval)
| 35.8 | 125 | 0.627417 |
b8c51a5a3052b41343351c2e050b600648c80729 | 45,700 | py | Python | sql/query.py | real-fire/archer | 8e9e82a51125859c61d23496ad0cab0a4bbc5181 | [
"Apache-2.0"
] | null | null | null | sql/query.py | real-fire/archer | 8e9e82a51125859c61d23496ad0cab0a4bbc5181 | [
"Apache-2.0"
] | null | null | null | sql/query.py | real-fire/archer | 8e9e82a51125859c61d23496ad0cab0a4bbc5181 | [
"Apache-2.0"
] | null | null | null | import re
import simplejson as json
from django.core.urlresolvers import reverse
from django.db.models import Q, Min, F, Sum
from django.db import connection
from django.conf import settings
from django.db.models.functions import Concat
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.core import serializers
from django.db import transaction
from datetime import date
from django.db.models import Value as V
import datetime
import time
from sql.extend_json_encoder import ExtendJSONEncoder
from .aes_decryptor import Prpcrypt
from .sendmail import MailSender
from .dao import Dao
from .const import WorkflowDict
from .inception import InceptionDao
from .models import users, master_config, slave_config, QueryPrivilegesApply, QueryPrivileges, QueryLog, SlowQuery, \
SlowQueryHistory
from .data_masking import Masking
from .workflow import Workflow
from .permission import role_required, superuser_required
if settings.ALIYUN_RDS_MANAGE:
from .aliyun_function import slowquery_review as aliyun_rds_slowquery_review, \
slowquery_review_history as aliyun_rds_slowquery_review_history
dao = Dao()
prpCryptor = Prpcrypt()
inceptionDao = InceptionDao()
datamasking = Masking()
workflowOb = Workflow()
mailSenderOb = MailSender()
#
#
#
#
#
#
#
#
#
#
#
# SQL
# sql
# SQL
# SQL
# SQL
#
| 44.197292 | 136 | 0.596521 |
b8c640f9283d5b83c08e12647497d33055a9e83f | 13,671 | py | Python | pyTooling/CLIAbstraction/__init__.py | pyTooling/pyTooling.CLIAbstraction | 3b17490ae729e126799328198a814b6c741b1ac7 | [
"Apache-2.0"
] | null | null | null | pyTooling/CLIAbstraction/__init__.py | pyTooling/pyTooling.CLIAbstraction | 3b17490ae729e126799328198a814b6c741b1ac7 | [
"Apache-2.0"
] | 8 | 2021-12-19T19:58:31.000Z | 2022-03-02T10:45:16.000Z | pyTooling/CLIAbstraction/__init__.py | pyTooling/pyTooling.CLIAbstraction | 3b17490ae729e126799328198a814b6c741b1ac7 | [
"Apache-2.0"
] | null | null | null | # ==================================================================================================================== #
# _____ _ _ ____ _ ___ _ _ _ _ _ #
# _ __ _ |_ _|__ ___ | (_)_ __ __ _ / ___| | |_ _| / \ | |__ ___| |_ _ __ __ _ ___| |_(_) ___ _ __ #
# | '_ \| | | || |/ _ \ / _ \| | | '_ \ / _` || | | | | | / _ \ | '_ \/ __| __| '__/ _` |/ __| __| |/ _ \| '_ \ #
# | |_) | |_| || | (_) | (_) | | | | | | (_| || |___| |___ | | / ___ \| |_) \__ \ |_| | | (_| | (__| |_| | (_) | | | | #
# | .__/ \__, ||_|\___/ \___/|_|_|_| |_|\__, (_)____|_____|___/_/ \_\_.__/|___/\__|_| \__,_|\___|\__|_|\___/|_| |_| #
# |_| |___/ |___/ #
# ==================================================================================================================== #
# Authors: #
# Patrick Lehmann #
# #
# License: #
# ==================================================================================================================== #
# Copyright 2017-2022 Patrick Lehmann - Btzingen, Germany #
# Copyright 2007-2016 Technische Universitt Dresden - Germany, Chair of VLSI-Design, Diagnostics and Architecture #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
# SPDX-License-Identifier: Apache-2.0 #
# ==================================================================================================================== #
#
"""Basic abstraction layer for executables."""
__author__ = "Patrick Lehmann"
__email__ = "Paebbels@gmail.com"
__copyright__ = "2014-2022, Patrick Lehmann"
__license__ = "Apache License, Version 2.0"
__version__ = "0.4.0"
__keywords__ = ["abstract", "executable", "cli", "cli arguments"]
from pathlib import Path
from platform import system
from shutil import which as shutil_which
from subprocess import (
Popen as Subprocess_Popen,
PIPE as Subprocess_Pipe,
STDOUT as Subprocess_StdOut
)
from typing import Dict, Optional, ClassVar, Type, List, Tuple, Iterator, Generator
from pyTooling.Decorators import export
from pyTooling.Exceptions import ExceptionBase, PlatformNotSupportedException
from pyAttributes import Attribute
from .Argument import (
CommandLineArgument, ExecutableArgument,
NamedAndValuedArgument, ValuedArgument, PathArgument,
PathListArgument, NamedTupledArgument
)
from .ValuedFlag import ValuedFlag
# @export
# class Environment:
# def __init__(self):
# self.Variables = {}
| 44.676471 | 147 | 0.575744 |
b8c7afa99f880ad851ed3d1e2b329906d0d376a5 | 1,601 | py | Python | ingest_to_dynamodb/lambda_function.py | fladdimir/csa-simulation-based-sc-forecast | 80f176a783496f8859609f63b56c6199a73d9909 | [
"MIT"
] | 2 | 2020-11-04T17:34:38.000Z | 2021-08-13T07:55:23.000Z | ingest_to_dynamodb/lambda_function.py | fladdimir/csa-simulation-based-sc-forecast | 80f176a783496f8859609f63b56c6199a73d9909 | [
"MIT"
] | null | null | null | ingest_to_dynamodb/lambda_function.py | fladdimir/csa-simulation-based-sc-forecast | 80f176a783496f8859609f63b56c6199a73d9909 | [
"MIT"
] | 2 | 2021-05-28T02:55:44.000Z | 2021-08-03T13:56:10.000Z | import base64
import json
import logging
import os
from decimal import Decimal
import boto3
"""
environment variables:
export AWS_ENDPOINT=http://localhost:4566
export TABLE_NAME=table_xy
# for direct local execution:
export AWS_DEFAULT_REGION=localhost
export AWS_ACCESS_KEY_ID=access_key_id
export AWS_SECRET_ACCESS_KEY=secret_access_key
"""
AWS_ENDPOINT = os.getenv("AWS_ENDPOINT")
TABLE_NAME = os.getenv("TABLE_NAME")
# localstack specific url processing
LOCALSTACK_HOSTNAME = "LOCALSTACK_HOSTNAME"
if LOCALSTACK_HOSTNAME in AWS_ENDPOINT:
localstack_hostname = os.getenv(LOCALSTACK_HOSTNAME, "localstack_main")
AWS_ENDPOINT = AWS_ENDPOINT.replace(LOCALSTACK_HOSTNAME, localstack_hostname)
dynamodb = boto3.resource("dynamodb", endpoint_url=AWS_ENDPOINT)
table = dynamodb.Table(TABLE_NAME)
| 30.788462 | 112 | 0.715178 |
b8c91e32bf4a536211d6e1b856f0e33473d42a4f | 3,816 | py | Python | modules/sfp_psbdmp.py | IronFireFA/spiderfoot | e75428e7584666de52a20b0d2f1fb80dffd6f39c | [
"MIT"
] | null | null | null | modules/sfp_psbdmp.py | IronFireFA/spiderfoot | e75428e7584666de52a20b0d2f1fb80dffd6f39c | [
"MIT"
] | null | null | null | modules/sfp_psbdmp.py | IronFireFA/spiderfoot | e75428e7584666de52a20b0d2f1fb80dffd6f39c | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------------
# Name: sfp_psbdmp
# Purpose: Query psbdmp.cc for potentially hacked e-mail addresses.
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 21/11/2016
# Copyright: (c) Steve Micallef
# Licence: MIT
# -------------------------------------------------------------------------------
import json
import re
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
# End of sfp_psbdmp class
| 28.058824 | 97 | 0.508124 |
b8c9483c89fccb1526f7a1b94d89843858f14cf3 | 3,216 | py | Python | dcr/scenarios/agent-bvt/test_agent_basics.py | sshedi/WALinuxAgent | 99d07d29b7843293588bec4b961e4ef2d1daabb2 | [
"Apache-2.0"
] | null | null | null | dcr/scenarios/agent-bvt/test_agent_basics.py | sshedi/WALinuxAgent | 99d07d29b7843293588bec4b961e4ef2d1daabb2 | [
"Apache-2.0"
] | null | null | null | dcr/scenarios/agent-bvt/test_agent_basics.py | sshedi/WALinuxAgent | 99d07d29b7843293588bec4b961e4ef2d1daabb2 | [
"Apache-2.0"
] | null | null | null | import os
import re
import socket
from dotenv import load_dotenv
from dcr.scenario_utils.common_utils import execute_command_and_raise_on_error
from dcr.scenario_utils.models import get_vm_data_from_env
| 30.923077 | 109 | 0.661692 |
b8ca7c27c5d04fb6e63bdc64ba80458973c7d303 | 9,033 | py | Python | src/DrawingEpisodes.py | Benykoz/simcom | ffe1c3636ef65a037a34e71d5cbcdb2e483d5b93 | [
"MIT"
] | null | null | null | src/DrawingEpisodes.py | Benykoz/simcom | ffe1c3636ef65a037a34e71d5cbcdb2e483d5b93 | [
"MIT"
] | null | null | null | src/DrawingEpisodes.py | Benykoz/simcom | ffe1c3636ef65a037a34e71d5cbcdb2e483d5b93 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# This file includes mainly a class "randomEpisode" that:
# - draws localization of vehicle
# - draws number of rocks
# - draws position of each rock
# - save in a json file
# Author: Michele
# Project: SmartLoader - Innovation
import json
import random
from geometry_msgs.msg import PoseStamped, Quaternion, Vector3
import math
from math import pi as pi
import src.Unity2RealWorld as toRW
import os
if __name__ == '__main__':
for j in range(3):
scenario = recorderEpisode(j)
| 35.14786 | 94 | 0.469169 |
b8cbd20dcd81315e2ca364311bd80d356f50ed2d | 587 | py | Python | gimmemotifs/commands/logo.py | littleblackfish/gimmemotifs | 913a6e5db378493155273e2c0f8ab0dc11ab219e | [
"MIT"
] | null | null | null | gimmemotifs/commands/logo.py | littleblackfish/gimmemotifs | 913a6e5db378493155273e2c0f8ab0dc11ab219e | [
"MIT"
] | null | null | null | gimmemotifs/commands/logo.py | littleblackfish/gimmemotifs | 913a6e5db378493155273e2c0f8ab0dc11ab219e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2009-2016 Simon van Heeringen <simon.vanheeringen@gmail.com>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
from gimmemotifs.motif import pwmfile_to_motifs
| 29.35 | 79 | 0.688245 |
b8cbfca6de86ee3ef9fe472b32eb107264c928c8 | 1,671 | py | Python | EDA/src/utils/main_flask.py | paleomau/MGOL_BOOTCAMP | 8c2b018f49fd12a255ea6f323141260d04d4421d | [
"MIT"
] | null | null | null | EDA/src/utils/main_flask.py | paleomau/MGOL_BOOTCAMP | 8c2b018f49fd12a255ea6f323141260d04d4421d | [
"MIT"
] | null | null | null | EDA/src/utils/main_flask.py | paleomau/MGOL_BOOTCAMP | 8c2b018f49fd12a255ea6f323141260d04d4421d | [
"MIT"
] | null | null | null | from flask import Flask, request, render_template
from functions import read_json
import os
# Mandatory
app = Flask(__name__) # __name__ --> __main__
# ---------- Flask functions ----------
# localhost:6060/give_me_id?password=12345
# ---------- Other functions ----------
def main():
print("---------STARTING PROCESS---------")
print(__file__)
# Get the settings fullpath
# \\ --> WINDOWS
# / --> UNIX
# Para ambos: os.sep
settings_file = os.path.dirname(__file__) + os.sep + "settings.json"
print(settings_file)
# Load json from file
json_readed = read_json(fullpath=settings_file)
# Load variables from jsons
DEBUG = json_readed["debug"]
HOST = json_readed["host"]
PORT_NUM = json_readed["port"]
# Dos posibilidades:
# HOST = "0.0.0.0"
# HOST = "127.0.0.1" --> localhost
app.run(debug=DEBUG, host=HOST, port=PORT_NUM)
if __name__ == "__main__":
main() | 25.318182 | 72 | 0.625972 |
b8ccc7bb85dc9dad61097e465ec52bcbf128cb34 | 1,473 | py | Python | opta/core/secrets.py | pecigonzalo/opta | 0259f128ad3cfc4a96fe1f578833de28b2f19602 | [
"Apache-2.0"
] | null | null | null | opta/core/secrets.py | pecigonzalo/opta | 0259f128ad3cfc4a96fe1f578833de28b2f19602 | [
"Apache-2.0"
] | null | null | null | opta/core/secrets.py | pecigonzalo/opta | 0259f128ad3cfc4a96fe1f578833de28b2f19602 | [
"Apache-2.0"
] | null | null | null | import os
from dotenv import dotenv_values
from opta.core.kubernetes import get_namespaced_secrets, update_secrets
from opta.exceptions import UserErrors
from opta.utils import deep_merge, logger
MANUAL_SECRET_NAME = "manual-secrets" # nosec
LINKED_SECRET_NAME = "secret" # nosec
def get_secrets(namespace: str, manual_secret_name: str) -> dict:
""":return: manual and linked secrets"""
manual_secrets = get_namespaced_secrets(namespace, manual_secret_name)
linked_secrets = get_namespaced_secrets(
namespace, LINKED_SECRET_NAME
) # Helm charts don't have linked secrets, but it'll just return an empty dict so no worries
for secret_name in manual_secrets.keys():
if secret_name in linked_secrets:
logger.warning(
f"# Secret {secret_name} found manually overwritten from linked value."
)
del linked_secrets[secret_name]
return deep_merge(manual_secrets, linked_secrets)
def bulk_update_manual_secrets(
namespace: str, manual_secret_name: str, env_file: str
) -> None:
"""
append the values from the env file to the existing data for this manual secret.
create the secret if it doesn't exist yet.
:raises UserErrors: if env_file is not found
"""
if not os.path.exists(env_file):
raise UserErrors(f"Could not find file {env_file}")
new_values = dotenv_values(env_file)
update_secrets(namespace, manual_secret_name, new_values)
| 35.071429 | 97 | 0.728445 |
b8cdf4dde7f1aa6655db7010276c1247756180f9 | 5,114 | py | Python | venv/Lib/site-packages/mpl_toolkits/axes_grid1/axes_rgb.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 603 | 2020-12-23T13:49:32.000Z | 2022-03-31T23:38:03.000Z | venv/Lib/site-packages/mpl_toolkits/axes_grid1/axes_rgb.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 387 | 2020-12-15T14:54:04.000Z | 2022-03-31T07:00:21.000Z | venv/Lib/site-packages/mpl_toolkits/axes_grid1/axes_rgb.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 35 | 2021-03-26T03:12:04.000Z | 2022-03-23T10:15:10.000Z | import numpy as np
from matplotlib import _api
from .axes_divider import make_axes_locatable, Size
from .mpl_axes import Axes
class RGBAxes:
"""
4-panel imshow (RGB, R, G, B).
Layout:
+---------------+-----+
| | R |
+ +-----+
| RGB | G |
+ +-----+
| | B |
+---------------+-----+
Subclasses can override the ``_defaultAxesClass`` attribute.
Attributes
----------
RGB : ``_defaultAxesClass``
The axes object for the three-channel imshow.
R : ``_defaultAxesClass``
The axes object for the red channel imshow.
G : ``_defaultAxesClass``
The axes object for the green channel imshow.
B : ``_defaultAxesClass``
The axes object for the blue channel imshow.
"""
_defaultAxesClass = Axes
def imshow_rgb(self, r, g, b, **kwargs):
"""
Create the four images {rgb, r, g, b}.
Parameters
----------
r, g, b : array-like
The red, green, and blue arrays.
kwargs : imshow kwargs
kwargs get unpacked into the imshow calls for the four images.
Returns
-------
rgb : matplotlib.image.AxesImage
r : matplotlib.image.AxesImage
g : matplotlib.image.AxesImage
b : matplotlib.image.AxesImage
"""
if not (r.shape == g.shape == b.shape):
raise ValueError(
f'Input shapes ({r.shape}, {g.shape}, {b.shape}) do not match')
RGB = np.dstack([r, g, b])
R = np.zeros_like(RGB)
R[:, :, 0] = r
G = np.zeros_like(RGB)
G[:, :, 1] = g
B = np.zeros_like(RGB)
B[:, :, 2] = b
im_rgb = self.RGB.imshow(RGB, **kwargs)
im_r = self.R.imshow(R, **kwargs)
im_g = self.G.imshow(G, **kwargs)
im_b = self.B.imshow(B, **kwargs)
return im_rgb, im_r, im_g, im_b
| 30.260355 | 79 | 0.550841 |
b8ce37a154e212778f695fcf9135c3e96507ff09 | 88 | py | Python | app/admin/controllers/__init__.py | aries-zhang/flask-template | 369d77f2910f653f46668dd9bda735954b6c145e | [
"MIT"
] | null | null | null | app/admin/controllers/__init__.py | aries-zhang/flask-template | 369d77f2910f653f46668dd9bda735954b6c145e | [
"MIT"
] | null | null | null | app/admin/controllers/__init__.py | aries-zhang/flask-template | 369d77f2910f653f46668dd9bda735954b6c145e | [
"MIT"
] | null | null | null | from flask import Blueprint
admin = Blueprint('admin', __name__, url_prefix='/manage')
| 22 | 58 | 0.761364 |
b8d03933a76fe421eb780621a4114e528f2cddbc | 535 | py | Python | first.py | wmoulin/chatterbot | 075a4651227ad159e58a36fca5ea7456d9153653 | [
"MIT"
] | null | null | null | first.py | wmoulin/chatterbot | 075a4651227ad159e58a36fca5ea7456d9153653 | [
"MIT"
] | null | null | null | first.py | wmoulin/chatterbot | 075a4651227ad159e58a36fca5ea7456d9153653 | [
"MIT"
] | null | null | null | from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
# The only required parameter for the ChatBot is a name. This can be anything you want.
chatbot = ChatBot("My First Chatbot")
# Training your ChatBot
conversation = [
"Hello",
"Hi there!",
"How are you doing?",
"I'm doing great.",
"That is good to hear",
"Thank you.",
"You're welcome."
]
trainer = ListTrainer(chatbot)
trainer.train(conversation)
# Get a response
response = chatbot.get_response("Good morning!")
print(response) | 24.318182 | 87 | 0.708411 |
b8d0ad22e9f860e320dd54fc175dce04ecd1af3d | 7,405 | py | Python | runpandas/types/summary.py | pnposch/runpandas | 25388c18b52dfcc168e81922b8ba20ca93adad20 | [
"MIT"
] | 11 | 2020-12-04T20:43:23.000Z | 2022-03-16T19:19:12.000Z | runpandas/types/summary.py | pnposch/runpandas | 25388c18b52dfcc168e81922b8ba20ca93adad20 | [
"MIT"
] | 45 | 2020-06-23T02:50:31.000Z | 2022-02-15T16:56:00.000Z | runpandas/types/summary.py | pnposch/runpandas | 25388c18b52dfcc168e81922b8ba20ca93adad20 | [
"MIT"
] | 4 | 2021-11-11T15:23:04.000Z | 2022-02-02T13:02:12.000Z | """
Helper module for evaluation and display of the summary of training sessions.
"""
import numpy as np
import pandas as pd
from runpandas._utils import convert_pace_secmeters2minkms
def _build_summary_statistics(obj):
"""
Generate session statistics from a given DataFrame.
Parameters
----------
obj: The DataFrame to generate basic commute statistics from.
Returns:
--------
A Dictionary containing the following statistics:
- Total moving time
- Average speed
- Max speed
- Average moving speed
- Average cadence running
- Average cadence running moving
- Max cadence
- Average heart rate
- Average heart rate moving
- Max heart rate
- Average pace (per 1 km)
- Average pace moving (per 1 km)
- Max pace
- Average temperature
- Max temperature
- Min temperature
- Total distance
- Total ellapsed time
"""
start = obj.start
try:
moving_time = obj.moving_time
except AttributeError:
moving_time = np.nan
try:
mean_speed = obj.mean_speed()
max_speed = obj["speed"].max()
mean_pace = convert_pace_secmeters2minkms(obj.mean_pace().total_seconds())
max_pace = convert_pace_secmeters2minkms(
obj["speed"].to_pace().min().total_seconds()
)
except AttributeError:
mean_speed = np.nan
max_speed = np.nan
mean_pace = np.nan
try:
mean_moving_speed = obj.mean_speed(only_moving=True)
mean_moving_pace = convert_pace_secmeters2minkms(
obj.mean_pace(only_moving=True).total_seconds()
)
except (AttributeError, KeyError):
mean_moving_speed = np.nan
mean_moving_pace = np.nan
try:
mean_cadence = obj.mean_cadence()
max_cadence = obj["cad"].max()
except AttributeError:
mean_cadence = np.nan
max_cadence = np.nan
try:
mean_moving_cadence = obj.mean_cadence(only_moving=True)
except (AttributeError, KeyError):
mean_moving_cadence = np.nan
try:
mean_heart_rate = obj.mean_heart_rate()
max_heart_rate = obj["hr"].max()
except AttributeError:
mean_heart_rate = np.nan
max_heart_rate = np.nan
try:
mean_moving_heart_rate = obj.mean_heart_rate(only_moving=True)
except (AttributeError, KeyError):
mean_moving_heart_rate = np.nan
try:
mean_temperature = obj["temp"].mean()
min_temperature = obj["temp"].min()
max_temperature = obj["temp"].max()
except KeyError:
mean_temperature = np.nan
min_temperature = np.nan
max_temperature = np.nan
total_distance = obj.distance
ellapsed_time = obj.ellapsed_time
row = {k: v for k, v in locals().items() if not k.startswith("__") and k != "obj"}
return row
def _build_session_statistics(obj):
    """
    Build a one-row ``pandas.DataFrame`` of summary statistics for a session.

    Parameters
    ----------
    obj: The DataFrame to generate basic commute statistics from.

    Returns:
    --------
    A ``pandas.DataFrame`` with a single row, indexed by the session start
    time, containing the same statistics produced by
    ``_build_summary_statistics`` (moving time, average/max speed, cadence,
    heart rate, pace, temperature, total distance, total ellapsed time, ...).
    """
    summary = _build_summary_statistics(obj)
    # Wrap each scalar in a one-element list so pandas builds a single-row
    # frame instead of raising on scalar values without an index.
    single_row = {name: [stat] for name, stat in summary.items()}
    frame = pd.DataFrame(single_row)
    return frame.set_index("start")
def _build_activity_statistics(obj):
    """
    Generate basic statistics from a given runpandas activity frame.

    Parameters
    ----------
    obj: The DataFrame to generate basic commute statistics from.

    Returns:
    --------
    A ``pandas.Series`` (indexed by human-readable labels) containing:
        - Session times
        - Total distance
        - Total elapsed time
        - Total moving time
        - Average speed / average moving speed (km/h)
        - Average pace / average moving pace (per 1 km)
        - Average cadence / average moving cadence
        - Average heart rate / average moving heart rate
        - Average temperature
    """
    stats = _build_summary_statistics(obj)
    # The dict keys double as the Series index: dicts are insertion-ordered
    # (Python 3.7+) and pandas preserves that order, so the previous explicit
    # ``index=`` list that duplicated every key verbatim was redundant and a
    # maintenance hazard (the two lists could silently drift apart).
    # NOTE: the labels below are part of the public output — including the
    # historical "ellapsed" spelling — and must not be changed.
    rows = {
        "Session": "Running: %s" % stats["start"].strftime("%d-%m-%Y %H:%M:%S"),
        "Total distance (meters)": stats["total_distance"],
        "Total ellapsed time": stats["ellapsed_time"],
        "Total moving time": stats["moving_time"],
        # speeds are in m/s; multiply by 3.6 to report km/h
        "Average speed (km/h)": stats["mean_speed"] * 3.6,
        "Average moving speed (km/h)": stats["mean_moving_speed"] * 3.6,
        "Average pace (per 1 km)": stats["mean_pace"],
        "Average pace moving (per 1 km)": stats["mean_moving_pace"],
        "Average cadence": stats["mean_cadence"],
        "Average moving cadence": stats["mean_moving_cadence"],
        "Average heart rate": stats["mean_heart_rate"],
        "Average moving heart rate": stats["mean_moving_heart_rate"],
        "Average temperature": stats["mean_temperature"],
    }
    return pd.Series(rows)
def activity_summary(activity):
    """
    Compute the common basic statistics for a single activity.

    Parameters
    ----------
    activity: runpandas.types.Activity. Runpandas Activity to be computed the statistics

    Returns
    -------
    The summary statistics (total distance covered, total duration, time
    spent moving, and many others) as produced by
    ``_build_activity_statistics``, transposed.
    """
    return _build_activity_statistics(activity).T
def session_summary(session):
    """
    Compute the common basic statistics for every activity in a session.

    Parameters
    ----------
    session: runpandas.types.Activity. Runpandas Activity with pandas.MultiIndex
    to be computed the statistics

    Returns
    -------
    pandas.Dataframe: one row per activity (indexed and sorted by start
    time), each row holding the summary statistics — estimated total
    distance covered, total duration, time spent moving, and many others.
    """
    frames = []
    for session_start in session.index.unique(level="start"):
        activity = session.xs(session_start, level=0)
        # Re-attach the start time dropped by the cross-section so the
        # statistics builder can read it back via ``obj.start``.
        # NOTE(review): plain attribute assignment on a pandas object —
        # presumably ``start`` is declared runpandas metadata; confirm.
        activity.start = session_start
        frames.append(_build_session_statistics(activity))
    summary = pd.concat(frames, axis=0, verify_integrity=True)
    return summary.sort_index()
| 28.480769 | 89 | 0.637677 |
b8d180754d7fc90d954cb1d916a92cd2b5b1aea1 | 589 | py | Python | dribdat/decorators.py | gonzalocasas/dribdat | f8c326c96e851be199eb9f61daed6c8780e3bc27 | [
"MIT"
] | 21 | 2015-10-25T23:22:04.000Z | 2019-04-01T06:42:54.000Z | dribdat/decorators.py | gonzalocasas/dribdat | f8c326c96e851be199eb9f61daed6c8780e3bc27 | [
"MIT"
] | 108 | 2020-02-11T10:07:53.000Z | 2021-06-19T20:30:03.000Z | dribdat/decorators.py | OpendataCH/dribdat | 90d95a12c782dea7d284a4c454a06481e67c1e37 | [
"MIT"
] | 12 | 2016-09-02T03:12:28.000Z | 2021-06-02T07:58:48.000Z | # -*- coding: utf-8 -*-
from functools import wraps
from flask import abort, jsonify
from flask_login import current_user
| 25.608696 | 64 | 0.657046 |
b8d3d6eef9923c53e2c72ef3ffa4d51959b6e188 | 263 | py | Python | run_perf_benchmarks.py | alirezajahani60/FabFlee | e2cfdb6efc758281e123f6acc1b06f93176dd756 | [
"BSD-3-Clause"
] | null | null | null | run_perf_benchmarks.py | alirezajahani60/FabFlee | e2cfdb6efc758281e123f6acc1b06f93176dd756 | [
"BSD-3-Clause"
] | null | null | null | run_perf_benchmarks.py | alirezajahani60/FabFlee | e2cfdb6efc758281e123f6acc1b06f93176dd756 | [
"BSD-3-Clause"
] | null | null | null | from base.fab import *
from plugins.FabFlee.FabFlee import *
| 29.222222 | 79 | 0.703422 |
b8d3d895be119a8b71cde792e94daf1fc8fa955b | 479 | py | Python | vwgconnect/account.py | Farfar/vwgbroker | 9acc9f1a259e26aa830a9534a6dea3cee21c09ff | [
"Apache-2.0"
] | null | null | null | vwgconnect/account.py | Farfar/vwgbroker | 9acc9f1a259e26aa830a9534a6dea3cee21c09ff | [
"Apache-2.0"
] | null | null | null | vwgconnect/account.py | Farfar/vwgbroker | 9acc9f1a259e26aa830a9534a6dea3cee21c09ff | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import time
import logging
import asyncio
import hashlib
import jwt
| 18.423077 | 61 | 0.611691 |
b8d7cf7888021a157102a64b5a55477b57bc5fa9 | 3,263 | py | Python | src/project_02/project2_b.py | group7BSE1/BSE-2021 | 2553b12e5fd5d1015af4746bcf84a8ee7c1cb8e0 | [
"MIT"
] | null | null | null | src/project_02/project2_b.py | group7BSE1/BSE-2021 | 2553b12e5fd5d1015af4746bcf84a8ee7c1cb8e0 | [
"MIT"
] | null | null | null | src/project_02/project2_b.py | group7BSE1/BSE-2021 | 2553b12e5fd5d1015af4746bcf84a8ee7c1cb8e0 | [
"MIT"
] | 1 | 2021-04-07T14:49:04.000Z | 2021-04-07T14:49:04.000Z |
main()
| 37.079545 | 131 | 0.599142 |
b8d7d6b700479d42df11c33ef276f3c562f44f38 | 159 | py | Python | basic_algorithms/primeiro_ultimo_nome.py | Yta-ux/python_algorithms | 62dd2d897e2f2de8783e68df3022170a86e9132e | [
"MIT"
] | 1 | 2022-01-26T22:15:17.000Z | 2022-01-26T22:15:17.000Z | basic_algorithms/primeiro_ultimo_nome.py | Yta-ux/python_algorithms | 62dd2d897e2f2de8783e68df3022170a86e9132e | [
"MIT"
] | null | null | null | basic_algorithms/primeiro_ultimo_nome.py | Yta-ux/python_algorithms | 62dd2d897e2f2de8783e68df3022170a86e9132e | [
"MIT"
] | null | null | null | nome = input('Nome Completo:').title().strip().split()
print(f"""Prazer em Conhece-lo
Seu Primeiro Nome e: {nome[0]}
Seu Ultimo Nome e: {nome[len(nome)-1]}""") | 39.75 | 54 | 0.666667 |
b8d7f25bc4dac9b169ae8981214f8ae8040f25ce | 3,193 | py | Python | magnum/conductor/k8s_api.py | vivian-rook/magnum | 7acc6eeda44ce6ffcca8b7fc2e682f80403ac4b7 | [
"Apache-2.0"
] | null | null | null | magnum/conductor/k8s_api.py | vivian-rook/magnum | 7acc6eeda44ce6ffcca8b7fc2e682f80403ac4b7 | [
"Apache-2.0"
] | null | null | null | magnum/conductor/k8s_api.py | vivian-rook/magnum | 7acc6eeda44ce6ffcca8b7fc2e682f80403ac4b7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
from magnum.conductor.handlers.common.cert_manager import create_client_files
| 31 | 77 | 0.630442 |
b8d95b42f671a377b5da5f2e5ac42f949f5f6c0c | 1,865 | py | Python | secret/secret.py | futurice/vault | 6da5341804509b7984d0a5817bbd13d3477fe0bc | [
"Apache-2.0"
] | 9 | 2015-10-16T12:06:35.000Z | 2020-04-03T09:05:06.000Z | secret/secret.py | futurice/vault | 6da5341804509b7984d0a5817bbd13d3477fe0bc | [
"Apache-2.0"
] | null | null | null | secret/secret.py | futurice/vault | 6da5341804509b7984d0a5817bbd13d3477fe0bc | [
"Apache-2.0"
] | 3 | 2015-10-20T09:36:53.000Z | 2021-01-18T20:49:41.000Z | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import logging, os, sys
from pprint import pprint as pp
from secret.project import get_project
from secret.cli import prepare
if sys.version_info.major == 2:
trollius_log()
from secret.storage import S3
from secret.output import prettyprint
import boto3
import trollius as asyncio
from trollius import From, Return
if __name__ == '__main__':
runner()
| 27.028986 | 69 | 0.676139 |
b8dcb2e38617c441c3331cf21108a3eb3fba7a49 | 3,094 | py | Python | test_main.py | zenranda/proj10-gcalfinal | ee32beb3ef570b23883d41f84394b28818e5a07c | [
"Artistic-2.0"
] | null | null | null | test_main.py | zenranda/proj10-gcalfinal | ee32beb3ef570b23883d41f84394b28818e5a07c | [
"Artistic-2.0"
] | 2 | 2021-02-08T20:17:57.000Z | 2021-04-30T20:38:59.000Z | test_main.py | zenranda/proj10-gcalfinal | ee32beb3ef570b23883d41f84394b28818e5a07c | [
"Artistic-2.0"
] | null | null | null | ###
#Various nose tests. If you want to adapt this for your own use, be aware that the start/end block list has a very specific formatting.
###
import get_freebusy
import arrow
from operator import itemgetter
from pymongo import MongoClient
import secrets.admin_secrets
import secrets.client_secrets
MONGO_CLIENT_URL = "mongodb://{}:{}@localhost:{}/{}".format(
secrets.client_secrets.db_user,
secrets.client_secrets.db_user_pw,
secrets.admin_secrets.port,
secrets.client_secrets.db)
try:
dbclient = MongoClient(MONGO_CLIENT_URL)
db = getattr(dbclient, secrets.client_secrets.db)
collection = db.dated
base_size = collection.count() #current size of the db, for comparison later
except:
print("Failure opening database. Is Mongo running? Correct password?")
sys.exit(1)
| 55.25 | 624 | 0.649968 |
b8dd4a9a3b779200a138616573ee9d9a08756937 | 2,664 | py | Python | examples/scripts/ct_abel_tv_admm.py | lanl/scico | 976c9e5833f8f67eed2eaa43460d89fb09bb9f78 | [
"BSD-3-Clause"
] | 18 | 2021-09-21T18:55:11.000Z | 2022-03-21T20:13:05.000Z | examples/scripts/ct_abel_tv_admm.py | lanl/scico | 976c9e5833f8f67eed2eaa43460d89fb09bb9f78 | [
"BSD-3-Clause"
] | 218 | 2021-09-21T21:45:08.000Z | 2022-03-30T18:45:27.000Z | examples/scripts/ct_abel_tv_admm.py | lanl/scico | 976c9e5833f8f67eed2eaa43460d89fb09bb9f78 | [
"BSD-3-Clause"
] | 2 | 2021-09-23T22:44:47.000Z | 2021-12-18T16:01:43.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SCICO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.

r"""
Regularized Abel Inversion
==========================

This example demonstrates a TV-regularized Abel inversion using
an Abel projector based on PyAbel :cite:`pyabel-2022`
"""

import numpy as np

import scico.numpy as snp
from scico import functional, linop, loss, metric, plot
from scico.examples import create_circular_phantom
from scico.linop.abel import AbelProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info

"""
Create a ground truth image.
"""
N = 256  # phantom size
x_gt = create_circular_phantom((N, N), [0.4 * N, 0.2 * N, 0.1 * N], [1, 0, 0.5])

"""
Set up the forward operator and create a test measurement.
"""
A = AbelProjector(x_gt.shape)
y = A @ x_gt
np.random.seed(12345)
y = y + np.random.normal(size=y.shape).astype(np.float32)
ATy = A.T @ y

"""
Set up ADMM solver object.
"""
# NOTE(review): the original non-ASCII Greek identifiers were stripped from
# this copy of the file, leaving syntax errors; restored with ASCII names.
lam = 1.9e1  # L1 norm regularization parameter
rho = 4.9e1  # ADMM penalty parameter
maxiter = 100  # number of ADMM iterations
cg_tol = 1e-4  # CG relative tolerance
cg_maxiter = 25  # maximum CG iterations per ADMM iteration

# Note the use of anisotropic TV. Isotropic TV would require use of L21Norm.
g = lam * functional.L1Norm()
C = linop.FiniteDifference(input_shape=x_gt.shape)

f = loss.SquaredL2Loss(y=y, A=A)

x_inv = A.inverse(y)
x0 = snp.clip(x_inv, 0, 1.0)

solver = ADMM(
    f=f,
    g_list=[g],
    C_list=[C],
    rho_list=[rho],
    x0=x0,
    maxiter=maxiter,
    subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}),
    itstat_options={"display": True, "period": 5},
)

"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
solver.solve()
hist = solver.itstat_object.history(transpose=True)
x_tv = snp.clip(solver.x, 0, 1.0)

"""
Show results.
"""
norm = plot.matplotlib.colors.Normalize(vmin=-0.1, vmax=1.2)
fig, ax = plot.subplots(nrows=2, ncols=2, figsize=(12, 12))
plot.imview(x_gt, title="Ground Truth", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 0], norm=norm)
plot.imview(y, title="Measurement", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 1])
plot.imview(
    x_inv,
    title="Inverse Abel: %.2f (dB)" % metric.psnr(x_gt, x_inv),
    cmap=plot.cm.Blues,
    fig=fig,
    ax=ax[1, 0],
    norm=norm,
)
plot.imview(
    x_tv,
    title="TV Regularized Inversion: %.2f (dB)" % metric.psnr(x_gt, x_tv),
    cmap=plot.cm.Blues,
    fig=fig,
    ax=ax[1, 1],
    norm=norm,
)
fig.show()


input("\nWaiting for input to close figures and exit")
| 24.897196 | 95 | 0.682432 |
b8ddae5f1b6f6079138cdb43e8d72e2e1ca77817 | 1,760 | py | Python | pyblas/level1/csrot.py | timleslie/pyblas | 9109f2cc24e674cf59a3b39f95c2d7b8116ae884 | [
"BSD-3-Clause"
] | null | null | null | pyblas/level1/csrot.py | timleslie/pyblas | 9109f2cc24e674cf59a3b39f95c2d7b8116ae884 | [
"BSD-3-Clause"
] | 1 | 2020-10-10T23:23:06.000Z | 2020-10-10T23:23:06.000Z | pyblas/level1/csrot.py | timleslie/pyblas | 9109f2cc24e674cf59a3b39f95c2d7b8116ae884 | [
"BSD-3-Clause"
] | null | null | null | from ..util import slice_
def csrot(N, CX, INCX, CY, INCY, C, S):
    """Apply a real Givens plane rotation to a pair of complex vectors, in place.

    Overwrites x and y with ``c*x + s*y`` and ``c*y - s*x`` respectively.

    Parameters
    ----------
    N : int
        Number of elements in input vector
    CX : numpy.ndarray
        A single precision complex array, dimension (1 + (`N` - 1)*abs(`INCX`))
    INCX : int
        Storage spacing between elements of `CX`
    CY : numpy.ndarray
        A single precision complex array, dimension (1 + (`N` - 1)*abs(`INCY`))
    INCY : int
        Storage spacing between elements of `CY`
    C : numpy.single
        The Givens parameter c, with value cos(theta)
    S : numpy.single
        The Givens parameter s, with value sin(theta)

    Returns
    -------
    None

    See Also
    --------
    srot : Single-precision real Givens rotation
    crot : Single-precision complex Givens rotation
    zdrot : Double-precision complex Givens rotation

    Notes
    -----
    Online PyBLAS documentation: https://nbviewer.jupyter.org/github/timleslie/pyblas/blob/main/docs/csrot.ipynb
    Reference BLAS documentation: https://github.com/Reference-LAPACK/lapack/blob/v3.9.0/BLAS/SRC/csrot.f

    Examples
    --------
    >>> x = np.array([1+2j, 2+3j, 3+4j], dtype=np.complex64)
    >>> y = np.array([6+7j, 7+8j, 8+9j], dtype=np.complex64)
    >>> csrot(len(x), x, 1, y, 1, np.cos(np.pi/2), np.sin(np.pi/2))
    >>> print(x)
    [6.+7.j 7.+8.j 8.+9.j]
    >>> print(y)
    [-1.-2.j -2.-3.j -3.-4.j]
    """
    # Quick return: BLAS convention treats non-positive N as a no-op.
    if N <= 0:
        return
    sel_x = slice_(N, INCX)
    sel_y = slice_(N, INCY)
    # Evaluate both rotated vectors before writing either back, so the
    # second expression still sees the original (pre-rotation) values.
    rotated_x = C * CX[sel_x] + S * CY[sel_y]
    rotated_y = -S * CX[sel_x] + C * CY[sel_y]
    CX[sel_x] = rotated_x
    CY[sel_y] = rotated_y
| 29.333333 | 112 | 0.580682 |
b8de8fb9e2f63a96dbca5bb30f4841f157b6ed7b | 160 | py | Python | items.py | yarnoiser/PyDungeon | c37ad314605065194732202539db50eef94ea3da | [
"BSD-3-Clause"
] | 1 | 2018-05-15T01:26:04.000Z | 2018-05-15T01:26:04.000Z | items.py | yarnoiser/PyDungeon | c37ad314605065194732202539db50eef94ea3da | [
"BSD-3-Clause"
] | null | null | null | items.py | yarnoiser/PyDungeon | c37ad314605065194732202539db50eef94ea3da | [
"BSD-3-Clause"
] | null | null | null | from dice import *
| 14.545455 | 47 | 0.69375 |
b8df7da99167063e92023aa153878ad215a2e8ff | 2,476 | py | Python | leet.py | blackcow/pytorch-cifar-master | c571c8fd7fe521907755ca2eacb6aa877abe3493 | [
"MIT"
] | null | null | null | leet.py | blackcow/pytorch-cifar-master | c571c8fd7fe521907755ca2eacb6aa877abe3493 | [
"MIT"
] | null | null | null | leet.py | blackcow/pytorch-cifar-master | c571c8fd7fe521907755ca2eacb6aa877abe3493 | [
"MIT"
] | null | null | null |
import io
import sys
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf-8')
#str = input()
#print(str)
l = [1, 3, 5, 2, 8, 7]
Solution.findMedium(l)
import io
import sys
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
# str = input()
# print(str)
text = 'abbbbcccddddddddeee'
Solution.maxStr(text)
import io
import sys
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf-8')
#str = input()
#print(str)
l = [1, -2, 4, 5, -1, 1]
Solution.findMaxArray(l) | 23.358491 | 79 | 0.468094 |
b8df9843139746c1adbc8ed57ae326c83672e193 | 1,091 | py | Python | shop_website/users/views.py | omar00070/django-shopping-website | af2741b900b60631349ea2e6de17586994e31680 | [
"MIT"
] | null | null | null | shop_website/users/views.py | omar00070/django-shopping-website | af2741b900b60631349ea2e6de17586994e31680 | [
"MIT"
] | null | null | null | shop_website/users/views.py | omar00070/django-shopping-website | af2741b900b60631349ea2e6de17586994e31680 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from .forms import RegistrationForm, UserUpdateForm, ProfileUpdateForm
from django.shortcuts import redirect
from .models import Profile
from django.contrib.auth.decorators import login_required | 34.09375 | 88 | 0.75802 |
b8e0455d33253902aeabce67886870561b85812f | 2,685 | py | Python | quantumcat/gates/custom_gates/cirq/__init__.py | Artificial-Brain/quantumcat | eff99cac7674b3a1b7e1f752e7ebed2b960f85b3 | [
"Apache-2.0"
] | 20 | 2021-05-10T07:04:41.000Z | 2021-12-13T17:12:05.000Z | quantumcat/gates/custom_gates/cirq/__init__.py | Artificial-Brain/quantumcat | eff99cac7674b3a1b7e1f752e7ebed2b960f85b3 | [
"Apache-2.0"
] | 2 | 2021-04-26T05:34:52.000Z | 2021-05-16T13:46:22.000Z | quantumcat/gates/custom_gates/cirq/__init__.py | Artificial-Brain/quantumcat | eff99cac7674b3a1b7e1f752e7ebed2b960f85b3 | [
"Apache-2.0"
] | 17 | 2021-04-02T18:09:33.000Z | 2022-02-10T16:38:57.000Z | # (C) Copyright Artificial Brain 2021.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from quantumcat.gates.custom_gates.cirq.u_gate import UGate
from quantumcat.gates.custom_gates.cirq.u1_gate import U1Gate
from quantumcat.gates.custom_gates.cirq.u2_gate import U2Gate
from quantumcat.gates.custom_gates.cirq.u3_gate import U3Gate
from quantumcat.gates.custom_gates.cirq.sdg_gate import SDGGate
from quantumcat.gates.custom_gates.cirq.sxd_gate import SXDGate
from quantumcat.gates.custom_gates.cirq.td_gate import TDGate
from quantumcat.gates.custom_gates.cirq.rxx_gate import RXXGate
from quantumcat.gates.custom_gates.cirq.r_gate import RGate
from quantumcat.gates.custom_gates.cirq.rx_gate import RXGate
from quantumcat.gates.custom_gates.cirq.ry_gate import RYGate
from quantumcat.gates.custom_gates.cirq.ryy_gate import RYYGate
from quantumcat.gates.custom_gates.cirq.rz_gate import RZGate
from quantumcat.gates.custom_gates.cirq.rccx_gate import RCCXGate
from quantumcat.gates.custom_gates.cirq.rc3x_gate import RC3XGate
from quantumcat.gates.custom_gates.cirq.rzz_gate import RZZGate
from quantumcat.gates.custom_gates.cirq.rzx_gate import RZXGate
from quantumcat.gates.custom_gates.cirq.sx_gate import SXGate
from quantumcat.gates.custom_gates.cirq.cy_gate import CYGate
from quantumcat.gates.custom_gates.cirq.p_gate import PGate
from quantumcat.gates.custom_gates.cirq.cu_gate import CUGate
from quantumcat.gates.custom_gates.cirq.cu1_gate import CU1Gate
from quantumcat.gates.custom_gates.cirq.cu3_gate import CU3Gate
from quantumcat.gates.custom_gates.cirq.crx_gate import CRXGate
from quantumcat.gates.custom_gates.cirq.cry_gate import CRYGate
from quantumcat.gates.custom_gates.cirq.crz_gate import CRZGate
from quantumcat.gates.custom_gates.cirq.dcx_gate import DCXGate
from quantumcat.gates.custom_gates.cirq.c3x_gate import C3XGate
from quantumcat.gates.custom_gates.cirq.c4x_gate import C4XGate
from quantumcat.gates.custom_gates.cirq.c3sx_gate import C3SXGate
from quantumcat.gates.custom_gates.cirq.cphase_gate import CPhaseGate
from quantumcat.gates.custom_gates.cirq.csx_gate import CSXGate
from quantumcat.gates.custom_gates.cirq.ch_gate import CHGate
| 55.9375 | 75 | 0.84581 |
b8e06a6109f1d799db4201a71cba9cf898507598 | 1,045 | py | Python | CL_Net/Referential_Game/Number_Set/info.py | MarkFzp/ToM-Pragmatics | 3de1956c36ea40f29a41e4c153c4b8cdc73afc15 | [
"MIT"
] | null | null | null | CL_Net/Referential_Game/Number_Set/info.py | MarkFzp/ToM-Pragmatics | 3de1956c36ea40f29a41e4c153c4b8cdc73afc15 | [
"MIT"
] | null | null | null | CL_Net/Referential_Game/Number_Set/info.py | MarkFzp/ToM-Pragmatics | 3de1956c36ea40f29a41e4c153c4b8cdc73afc15 | [
"MIT"
] | null | null | null | import numpy as np
import scipy.stats as sp
from concept import Concept
if __name__ == '__main__':
main()
| 30.735294 | 82 | 0.67177 |
b8e0a7c86db8162077913d429a8e44b03bb440ed | 1,695 | py | Python | commands/misc/github.py | typhonshambo/TY-BOT-v3 | eb192d495bf32ae3a56d4a60ec2aa4e1e6a7ef2c | [
"MIT"
] | null | null | null | commands/misc/github.py | typhonshambo/TY-BOT-v3 | eb192d495bf32ae3a56d4a60ec2aa4e1e6a7ef2c | [
"MIT"
] | null | null | null | commands/misc/github.py | typhonshambo/TY-BOT-v3 | eb192d495bf32ae3a56d4a60ec2aa4e1e6a7ef2c | [
"MIT"
] | null | null | null | import aiohttp
import discord
from discord.ext import commands
from discord.commands import Option, slash_command, SlashCommandGroup
import json
with open ('././config/guilds.json', 'r') as f:
data = json.load(f)
guilds = data['guilds']
with open ('././config/api.json', 'r') as f:
ApiData = json.load(f)
githubApi = ApiData['github']
| 23.219178 | 86 | 0.629499 |
b8e177cd51c2b5569754fe0293a60b5835aa4a05 | 1,126 | py | Python | raspbeeryPi/smart-home-hubs/gy30.py | zibuyu1995/Hardware | 8461ebf9b04a603b397d8396ae14b359bd89a8cf | [
"MIT"
] | 2 | 2020-05-20T03:02:01.000Z | 2020-06-14T15:38:31.000Z | raspbeeryPi/smart-home-hubs/gy30.py | zibuyu1995/Hardware | 8461ebf9b04a603b397d8396ae14b359bd89a8cf | [
"MIT"
] | 3 | 2018-08-05T04:38:56.000Z | 2019-11-25T07:02:15.000Z | raspbeeryPi/smart-home-hubs/gy30.py | zibuyu1995/Hardware | 8461ebf9b04a603b397d8396ae14b359bd89a8cf | [
"MIT"
] | 1 | 2020-07-29T03:56:41.000Z | 2020-07-29T03:56:41.000Z | import json
import time
import smbus
from paho.mqtt import client as mqtt
# BH1750FVI config
DEVICE = 0x23 # Default device I2C address
POWER_DOWN = 0x00
POWER_ON = 0x01
RESET = 0x07
CONTINUOUS_LOW_RES_MODE = 0x13
CONTINUOUS_HIGH_RES_MODE_1 = 0x10
CONTINUOUS_HIGH_RES_MODE_2 = 0x11
ONE_TIME_HIGH_RES_MODE_1 = 0x20
ONE_TIME_HIGH_RES_MODE_2 = 0x21
ONE_TIME_LOW_RES_MODE = 0x23
bus = smbus.SMBus(1)
# MQTT Broker config
broker = '127.0.0.1'
port = 1883
topic = 'smartHomeHubs/light'
if __name__ == "__main__":
run()
| 20.851852 | 68 | 0.694494 |