hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7b7563d85b1f23ad406817127e2c0f401a6930a | 2,817 | py | Python | corehq/apps/app_manager/app_schemas/casedb_schema.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 1 | 2020-07-14T13:00:23.000Z | 2020-07-14T13:00:23.000Z | corehq/apps/app_manager/app_schemas/casedb_schema.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 94 | 2020-12-11T06:57:31.000Z | 2022-03-15T10:24:06.000Z | corehq/apps/app_manager/app_schemas/casedb_schema.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | null | null | null | from corehq import toggles
from corehq.apps.app_manager.app_schemas.case_properties import (
ParentCasePropertyBuilder,
get_usercase_properties,
)
from corehq.apps.app_manager.const import USERCASE_TYPE
from corehq.apps.app_manager.util import is_usercase_in_use
from corehq.apps.data_dictionary.util import get_case_property_description_dict
def get_casedb_schema(form):
"""Get case database schema definition for vellum to display as an external data source.
This lists all case types and their properties for the given app.
"""
app = form.get_app()
base_case_type = form.get_module().case_type if form.requires_case() else None
builder = ParentCasePropertyBuilder.for_app(app, ['case_name'], include_parent_properties=False)
related = builder.get_parent_type_map(None)
map = builder.get_properties_by_case_type()
descriptions_dict = get_case_property_description_dict(app.domain)
if base_case_type:
# Generate hierarchy of case types, represented as a list of lists of strings:
# [[base_case_type], [parent_type1, parent_type2...], [grandparent_type1, grandparent_type2...]]
# Vellum case management only supports three levels
generation_names = ['case', 'parent', 'grandparent']
generations = [[] for g in generation_names]
_add_ancestors(base_case_type, 0)
# Remove any duplicate types or empty generations
generations = [set(g) for g in generations if len(g)]
else:
generations = []
subsets = [{
"id": generation_names[i],
"name": "{} ({})".format(generation_names[i], " or ".join(ctypes)) if i > 0 else base_case_type,
"structure": {
p: {"description": descriptions_dict.get(t, {}).get(p, '')}
for t in ctypes for p in map[t]},
"related": {"parent": {
"hashtag": "#case/" + generation_names[i + 1],
"subset": generation_names[i + 1],
"key": "@case_id",
}} if i < len(generations) - 1 else None,
} for i, ctypes in enumerate(generations)]
if is_usercase_in_use(app.domain):
subsets.append({
"id": USERCASE_TYPE,
"name": "user",
"key": "@case_type",
"structure": {p: {} for p in get_usercase_properties(app)[USERCASE_TYPE]},
})
return {
"id": "casedb",
"uri": "jr://instance/casedb",
"name": "case",
"path": "/casedb/case",
"structure": {},
"subsets": subsets,
}
| 39.125 | 104 | 0.635428 |
f7b75acf0297c3ab2601bc579ad2b3528994326d | 28 | py | Python | python/testData/keywordCompletion/noMatchInCondition.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | null | null | null | python/testData/keywordCompletion/noMatchInCondition.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | null | null | null | python/testData/keywordCompletion/noMatchInCondition.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | null | null | null | matches = True
if mat<caret> | 14 | 14 | 0.75 |
f7b8c19ee74b54f26fc920af5e0e656df23e85a5 | 3,597 | py | Python | bookshelf/bookshelf/model_aerospike.py | fakeskimo/as2bt | 0872192e703a2992dea7bee2bf2544727d6094ee | [
"Apache-2.0"
] | null | null | null | bookshelf/bookshelf/model_aerospike.py | fakeskimo/as2bt | 0872192e703a2992dea7bee2bf2544727d6094ee | [
"Apache-2.0"
] | null | null | null | bookshelf/bookshelf/model_aerospike.py | fakeskimo/as2bt | 0872192e703a2992dea7bee2bf2544727d6094ee | [
"Apache-2.0"
] | null | null | null | import math
import aerospike
from aerospike import predicates as p
from aerospike import exception as ex
from flask import current_app
aerospike_host = current_app.config['AEROSPIKE_HOST']
aerospike_port = current_app.config['AEROSPIKE_PORT']
namespace = current_app.config['AEROSPIKE_NAMESPACE']
set_name = current_app.config['AEROSPIKE_SET_NAME']
n_replicas = 1
config = {
'hosts': [
(aerospike_host, aerospike_port)
],
'policies': {
'timeout': 1000 # milliseconds
}
}
client = aerospike.client(config).connect()
# cannot limit the number of rows, only percent
# there is no start offset option
# https://discuss.aerospike.com/t/can-you-limit-the-number-of-returned-records/1330/2
# https://discuss.aerospike.com/t/official-as-approach-to-pagination/2532
# https://stackoverflow.com/questions/25927736/limit-number-of-records-in-aerospike-select-query
# if there is no more record, return -1 as next
# cannot limit the number of rows, only percent
# there is no start offset option
# https://discuss.aerospike.com/t/can-you-limit-the-number-of-returned-records/1330/2
# https://discuss.aerospike.com/t/official-as-approach-to-pagination/2532
# https://stackoverflow.com/questions/25927736/limit-number-of-records-in-aerospike-select-query
# if there is no more record, return -1 as next
| 22.341615 | 96 | 0.626633 |
f7b8e6d755230cb8c58e980bba16ad5edecee7d7 | 1,437 | py | Python | examples/EC2.py | nimRobotics/fnirslib | 0273c0da5f4a41d7cf4dac0fc9686c38f2c7b0cd | [
"MIT"
] | null | null | null | examples/EC2.py | nimRobotics/fnirslib | 0273c0da5f4a41d7cf4dac0fc9686c38f2c7b0cd | [
"MIT"
] | null | null | null | examples/EC2.py | nimRobotics/fnirslib | 0273c0da5f4a41d7cf4dac0fc9686c38f2c7b0cd | [
"MIT"
] | null | null | null | """
author: @nimrobotics
description: calculates the effective connectivity between regions and plots them
"""
import numpy as np
import scipy.io
import glob
import sys
sys.path.append('../utils')
from plots import plotData
dir = "./process3/" #directory of the data
outdir = 'process3/' #directory to save the plots
regions = 3 #number of regions
files = glob.glob(dir+'/*_.mat') # get all the files in the directory
for file in files:
print('Processing condition: ', file)
data = scipy.io.loadmat(file) #load data from the directory
fval = data['fval'] #fval
pval = data['pval'] #pval
sig = data['sig'] #sig
cd = data['cd'] #cd
print('fval shape: ',fval.shape)
print('\nfval \n',fval)
print('pval shape: ',pval.shape)
print('sig shape: ',sig.shape)
print('\nsig \n',sig)
print(cd.shape)
# elementwise multiplication of fval and sig(0/1)
fval_sig = np.multiply(fval, sig)
print(fval_sig.shape)
print('\nfval_sig \n',fval_sig)
# fval_sig = np.mean(fval_sig, axis=2) # average over files
# print(fval_sig.shape)
# fval = np.mean(fval, axis=2)
labels = ['PFC', 'PM-MC', 'VC'] #labels for the regions
condition = file.split('/')[-1].split('.')[0] #get the condition name
plot = plotData(fval_sig, labels, outdir, colormap='viridis', dpi=300, title='EC: '+condition, filename='EC_'+condition +'.png')
plot.matrixPlot()
plot.circularPlot()
| 31.933333 | 133 | 0.659708 |
f7b9749cf050209379cfad2f528020cbb5090d82 | 263 | py | Python | feed/models.py | Lisgevan/DJANGO-101-PROJECT-COPY | 01655b30682efd435d91e85223af0fd6186e6a59 | [
"MIT"
] | null | null | null | feed/models.py | Lisgevan/DJANGO-101-PROJECT-COPY | 01655b30682efd435d91e85223af0fd6186e6a59 | [
"MIT"
] | null | null | null | feed/models.py | Lisgevan/DJANGO-101-PROJECT-COPY | 01655b30682efd435d91e85223af0fd6186e6a59 | [
"MIT"
] | null | null | null | from django.db import models
from sorl.thumbnail import ImageField
# Create your models here. | 26.3 | 68 | 0.726236 |
f7bb92af288264a3c094d6c7636074324c8ab56d | 12,847 | py | Python | gcp/docker/infrastructure/rapids_lib.py | ethem-kinginthenorth/cloud-ml-examples | e434d2bdbf2adf058dc436f992a56585537dc8ab | [
"Apache-2.0"
] | 1 | 2022-03-23T05:10:45.000Z | 2022-03-23T05:10:45.000Z | gcp/docker/infrastructure/rapids_lib.py | ethem-kinginthenorth/cloud-ml-examples | e434d2bdbf2adf058dc436f992a56585537dc8ab | [
"Apache-2.0"
] | null | null | null | gcp/docker/infrastructure/rapids_lib.py | ethem-kinginthenorth/cloud-ml-examples | e434d2bdbf2adf058dc436f992a56585537dc8ab | [
"Apache-2.0"
] | null | null | null | # os
import sys, os, time, logging
# CPU DS stack
import pandas as pd
import numpy as np
import sklearn
# GPU DS stack [ rapids ]
import gcsfs
# scaling library
import dask
# data ingestion [ CPU ]
from pyarrow import orc as pyarrow_orc
# ML models
from sklearn import ensemble
import xgboost
# data set splits
from sklearn.model_selection import train_test_split as sklearn_train_test_split
# device query
##hack
try:
import cudf, cuml
from cuml.preprocessing.model_selection import train_test_split as cuml_train_test_split
import pynvml
import cupy
except:
print("Caught import failures -- probably missing GPU")
# memory query
import psutil
# i/o
import logging, json, pprint
default_sagemaker_paths = {
'base': '/opt/ml',
'code': '/opt/ml/code',
'data': '/opt/ml/input',
'train_data': '/opt/ml/input/data/training',
'hyperparams': '/opt/ml/input/config/hyperparameters.json',
'model': '/opt/ml/model',
'output': '/opt/ml/output',
}
# perf_counter = highest available timer resolution
'''
https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html#sklearn.ensemble.RandomForestClassifier.fit
n_estimators=100,
criterion='gini',
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features='auto',
max_leaf_nodes=None,
min_impurity_decrease=0.0,
min_impurity_split=None,
bootstrap=True,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None,
ccp_alpha=0.0,
max_samples=None
'''
| 38.57958 | 138 | 0.599829 |
f7bc4cc67a214b3d1cc41c823e3eb37e1f5d2531 | 5,011 | py | Python | docs/making_widgets_from_scratch/line_clock.py | Rahuum/glooey | 932edca1c8fdd710f1941038e47ac8d25a31a1a8 | [
"MIT"
] | 86 | 2016-11-28T12:34:28.000Z | 2022-03-17T13:49:49.000Z | docs/making_widgets_from_scratch/line_clock.py | Rahuum/glooey | 932edca1c8fdd710f1941038e47ac8d25a31a1a8 | [
"MIT"
] | 57 | 2017-03-07T10:11:52.000Z | 2022-01-16T19:35:33.000Z | docs/making_widgets_from_scratch/line_clock.py | Rahuum/glooey | 932edca1c8fdd710f1941038e47ac8d25a31a1a8 | [
"MIT"
] | 9 | 2017-03-15T18:55:50.000Z | 2022-02-17T14:52:49.000Z | #!/usr/bin/env python3
import pyglet
import glooey
import autoprop
import datetime
from pyglet.gl import *
from vecrec import Vector, Rect
window = pyglet.window.Window()
gui = glooey.Gui(window)
gui.add(LineClock())
pyglet.app.run()
| 28.151685 | 79 | 0.580124 |
f7bd078884fa7f447ad7081c6426bb1a2e21941b | 625 | py | Python | forms_builder/forms/migrations/0004_auto_20180727_1256.py | maqmigh/django-forms-builder | 1a0068d1d07498f4a2e160c46ec85b9a5f2ddd98 | [
"BSD-2-Clause"
] | null | null | null | forms_builder/forms/migrations/0004_auto_20180727_1256.py | maqmigh/django-forms-builder | 1a0068d1d07498f4a2e160c46ec85b9a5f2ddd98 | [
"BSD-2-Clause"
] | null | null | null | forms_builder/forms/migrations/0004_auto_20180727_1256.py | maqmigh/django-forms-builder | 1a0068d1d07498f4a2e160c46ec85b9a5f2ddd98 | [
"BSD-2-Clause"
] | null | null | null | # coding=utf-8
# Generated by Django 2.0.7 on 2018-07-27 10:56
from django.db import migrations, models
| 25 | 90 | 0.5968 |
f7bd2e55648aaa2a1a246e97711c0fc010416b3b | 5,711 | py | Python | scripts/sighan/generate.py | piglaker/SpecialEdition | 172688ef111e1b5c62bdb1ba0a523a2654201b90 | [
"Apache-2.0"
] | 2 | 2022-01-06T07:41:50.000Z | 2022-01-22T14:18:51.000Z | scripts/sighan/generate.py | piglaker/SpecialEdition | 172688ef111e1b5c62bdb1ba0a523a2654201b90 | [
"Apache-2.0"
] | null | null | null | scripts/sighan/generate.py | piglaker/SpecialEdition | 172688ef111e1b5c62bdb1ba0a523a2654201b90 | [
"Apache-2.0"
] | null | null | null | import os
import re
import sys
import json
#upper import
sys.path.append("../../")
from utils import levenshtein
from utils.io import load_json, write_to
def strQ2B(ustring):
""""""
rstring = ""
for uchar in ustring:
inside_code=ord(uchar)
if inside_code == 12288: #
inside_code = 32
elif (inside_code >= 65281 and inside_code <= 65374): #
inside_code -= 65248
rstring += chr(inside_code)
return rstring
def generate(need_preprocess=True):
"""
split raw data(train.json) to preprocessed target
"""
#file = open("../../data/rawdata/ctc2021/train.json", 'r', encoding='utf-8')
data = get_sighan_from_json()
train_source, train_target = json2list(data["train"], need_preprocess)
valid14_source, valid14_target = json2list(data["valid14"], need_preprocess)
valid_source, valid_target = json2list(data["valid"], need_preprocess)
print(train_source[:3], train_target[:3])
print(len(train_source), len(train_target))
print(valid_source[:3], valid_target[:3])
print(len(valid_source), len(valid_target))
need_remove = {}
# cluster all need_remove
for i, sample in enumerate(valid_source):
for j, char in enumerate(sample):
tgt = valid_target[i][j]
if char != tgt:
need_remove[ (char, tgt) ] = 0
for i, sample in enumerate(valid14_source):
for j, char in enumerate(sample):
tgt = valid14_target[i][j]
if char != tgt:
need_remove[ (char, tgt) ] = 0
#remove
remove_count = 0
new_train_source, new_train_target = [], []
for i, sample in enumerate(train_source):
skip = False
for j, char in enumerate(sample):
tgt = train_target[i][j]
if char != tgt:
key = (char, tgt)
if key in need_remove:
skip = True
remove_count += 1
break
if not skip:
new_train_source.append(sample)
new_train_target.append(train_target[i])
print("Total Skip: ", remove_count)
train_source, train_target = new_train_source, new_train_target
#f_src = levenstein.tokenize(source, vocab_file_path="vocab.txt")
train_through = levenshtein.convert_from_sentpair_through(train_source, train_target, train_source)
valid14_through = levenshtein.convert_from_sentpair_through(valid14_source, valid14_target, valid14_source)
valid_through = levenshtein.convert_from_sentpair_through(valid_source, valid_target, valid_source)
#print(train_through[0], valid_through[0])
#output_name = "enchanted"
#output_name = "raw"
output_name = "holy"
write_to("../../data/rawdata/sighan/" + output_name + "/train.src", "\n".join(train_source))
write_to("../../data/rawdata/sighan/"+output_name+"/train.tgt", "\n".join(train_target))
#write_to("../../data/rawdata/sighan/std/train.through", "\n".join(train_through))
write_to("../../data/rawdata/sighan/"+output_name+"/valid14.src", "\n".join(valid14_source))
write_to("../../data/rawdata/sighan/"+output_name+"/valid14.tgt", "\n".join(valid14_target))
#write_to("../../data/rawdata/sighan/std/valid14.through", "\n".join(valid14_through))
write_to("../../data/rawdata/sighan/"+output_name+"/test.src", "\n".join(valid_source))
write_to("../../data/rawdata/sighan/"+output_name+"/test.tgt", "\n".join(valid_target))
#write_to("../../data/rawdata/sighan/std/test.through", "\n".join(valid_through))
write_to("../../data/rawdata/sighan/"+output_name+"/valid.src", "\n".join(valid_source))
write_to("../../data/rawdata/sighan/"+output_name+"/valid.tgt", "\n".join(valid_target))
#write_to("../../data/rawdata/sighan/std/valid.through", "\n".join(valid_through[:500]))
if __name__ == "__main__":
generate()
| 33.994048 | 179 | 0.629487 |
f7bde64d861ea84f6a0483cdddf17127e95c800d | 67 | py | Python | keras_retinanet/backend/__init__.py | mj-haghighi/keras-retinanet | 644c2f8da799889a2a3f6cc833478256cbe32c23 | [
"Apache-2.0"
] | null | null | null | keras_retinanet/backend/__init__.py | mj-haghighi/keras-retinanet | 644c2f8da799889a2a3f6cc833478256cbe32c23 | [
"Apache-2.0"
] | null | null | null | keras_retinanet/backend/__init__.py | mj-haghighi/keras-retinanet | 644c2f8da799889a2a3f6cc833478256cbe32c23 | [
"Apache-2.0"
] | null | null | null | # from .backend import * # noqa: F401,F403
from .sbackend import * | 33.5 | 43 | 0.701493 |
f7bf187ba4675f05a89f42e9783052fe7bcd13c5 | 647 | py | Python | docs/_docs/bash/az3166_patch_binary.py | skolbin-ssi/azure-iot-developer-kit | 24035c8870e9c342d055bcd586529441078af0a0 | [
"MIT"
] | 43 | 2017-10-03T23:03:23.000Z | 2019-04-27T18:57:16.000Z | docs/_docs/bash/az3166_patch_binary.py | skolbin-ssi/azure-iot-developer-kit | 24035c8870e9c342d055bcd586529441078af0a0 | [
"MIT"
] | 114 | 2017-09-20T02:51:28.000Z | 2019-05-06T06:13:14.000Z | docs/_docs/bash/az3166_patch_binary.py | skolbin-ssi/azure-iot-developer-kit | 24035c8870e9c342d055bcd586529441078af0a0 | [
"MIT"
] | 48 | 2017-09-19T08:18:52.000Z | 2019-04-19T11:44:32.000Z | # ----------------------------------------------------------------------------
# Copyright (C) Microsoft. All rights reserved.
# Licensed under the MIT license.
# ----------------------------------------------------------------------------
import os
import binascii
import struct
import shutil
import inspect
import sys
if __name__ == '__main__':
binary_hook(sys.argv[1], sys.argv[2]) | 29.409091 | 78 | 0.482226 |
f7bfccc428289385cc22ed6c618de770f292647a | 590 | py | Python | setup.py | FireXStuff/firex-bundle-ci | 05ef1d9017b3553e8f4249da9a96e313f0ad7047 | [
"BSD-3-Clause"
] | 1 | 2021-01-08T19:50:33.000Z | 2021-01-08T19:50:33.000Z | setup.py | FireXStuff/firex-bundle-ci | 05ef1d9017b3553e8f4249da9a96e313f0ad7047 | [
"BSD-3-Clause"
] | null | null | null | setup.py | FireXStuff/firex-bundle-ci | 05ef1d9017b3553e8f4249da9a96e313f0ad7047 | [
"BSD-3-Clause"
] | null | null | null | import versioneer
from setuptools import setup
setup(name='firex-bundle-ci',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='FireX CI services',
url='https://github.com/FireXStuff/firex-bundle-ci.git',
author='Core FireX Team',
author_email='firex-dev@gmail.com',
license='BSD-3-Clause',
packages=['firex_bundle_ci'],
zip_safe=True,
install_requires=[
"firexapp",
"firex-keeper",
"lxml",
"xunitmerge",
"unittest-xml-reporting"
],
)
| 26.818182 | 62 | 0.60678 |
f7c03e8c3283127463ae5c11c8faf6e12bf38615 | 1,951 | py | Python | meta_middleware/meta_middleware/middleware.py | kevin-wyx/ProxyFS | 76d9478c9e87c18950f2e4659b397a397fb1ac69 | [
"Apache-2.0"
] | null | null | null | meta_middleware/meta_middleware/middleware.py | kevin-wyx/ProxyFS | 76d9478c9e87c18950f2e4659b397a397fb1ac69 | [
"Apache-2.0"
] | null | null | null | meta_middleware/meta_middleware/middleware.py | kevin-wyx/ProxyFS | 76d9478c9e87c18950f2e4659b397a397fb1ac69 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016 SwiftStack, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 34.22807 | 112 | 0.633521 |
f7c20796c689531f3a41f3738826f84aead341b4 | 1,397 | py | Python | distpy/util/__init__.py | CU-NESS/distpy | 279ba7e46726a85246566401fca19b8739d18d08 | [
"Apache-2.0"
] | null | null | null | distpy/util/__init__.py | CU-NESS/distpy | 279ba7e46726a85246566401fca19b8739d18d08 | [
"Apache-2.0"
] | null | null | null | distpy/util/__init__.py | CU-NESS/distpy | 279ba7e46726a85246566401fca19b8739d18d08 | [
"Apache-2.0"
] | null | null | null | """
Introduces utilities used throughout the package, including:
- interfaces for making objects `distpy.util.Savable.Savable` and
`distpy.util.Loadable.Loadable` in binary hdf5 files using h5py
- helper methods for using h5py to save and load variables and arrays
(`h5py_extensions`)
- type category definitions (`distpy.util.TypeCategories`)
- functions for making univariate histograms, bivariate histograms, and
triangle plots (`distpy.util.TrianglePlot`)
- a class that uses strings to represent an `distpy.util.Expression.Expression`
that can be modified and have arguments passed to it before being evaluated
- a class that represents
**File**: $DISTPY/distpy/util/\\_\\_init\\_\\_.py
**Author**: Keith Tauscher
**Date**: 14 May 2021
"""
from distpy.util.Savable import Savable
from distpy.util.Loadable import Loadable
from distpy.util.TypeCategories import bool_types, int_types, float_types,\
real_numerical_types, complex_numerical_types, numerical_types,\
sequence_types
from distpy.util.h5py_extensions import create_hdf5_dataset, get_hdf5_value,\
HDF5Link, save_dictionary, load_dictionary
from distpy.util.TrianglePlot import univariate_histogram,\
confidence_contour_2D, bivariate_histogram, triangle_plot
from distpy.util.Expression import Expression
from distpy.util.SparseSquareBlockDiagonalMatrix import\
SparseSquareBlockDiagonalMatrix
| 43.65625 | 79 | 0.800286 |
f7c31602d3ba09f1a3970f8ce071305eb086135d | 74 | py | Python | Crypto-hardRSA/flag.py | JSW2020/hsctf-2019-freshmen | 5282d6d51153aadd62f42673aa3d487f8d7ef45b | [
"MIT"
] | 16 | 2019-12-09T15:53:08.000Z | 2021-12-07T00:34:30.000Z | Crypto-hardRSA/flag.py | JSW2020/hsctf-2019-freshmen | 5282d6d51153aadd62f42673aa3d487f8d7ef45b | [
"MIT"
] | null | null | null | Crypto-hardRSA/flag.py | JSW2020/hsctf-2019-freshmen | 5282d6d51153aadd62f42673aa3d487f8d7ef45b | [
"MIT"
] | 7 | 2019-12-09T11:53:52.000Z | 2021-11-14T04:09:04.000Z | flag = "flag{b3453333-9da9-49ae-b4ed-0017c392d58e}"
e1 = 65537
e2 = 368273 | 24.666667 | 51 | 0.743243 |
f7c417316d84349935d37272663f36b5a52c49ff | 1,165 | py | Python | drogher/package/fedex.py | thisisnotmyuserid/drogher | f8ea5e34dad6a2e9f22608b4ae4a6f7032133e45 | [
"BSD-3-Clause"
] | 13 | 2017-04-24T07:49:30.000Z | 2020-09-22T13:13:13.000Z | drogher/package/fedex.py | thisisnotmyuserid/drogher | f8ea5e34dad6a2e9f22608b4ae4a6f7032133e45 | [
"BSD-3-Clause"
] | null | null | null | drogher/package/fedex.py | thisisnotmyuserid/drogher | f8ea5e34dad6a2e9f22608b4ae4a6f7032133e45 | [
"BSD-3-Clause"
] | 4 | 2018-09-08T05:31:57.000Z | 2022-02-10T17:42:31.000Z | import itertools
from .base import Package
| 25.326087 | 80 | 0.574249 |
f7c4b93a5f9fe2cd51baa68e74a1491e4f04cbf5 | 1,535 | py | Python | nipy/labs/spatial_models/tests/test_bsa_io.py | arokem/nipy | d6b2e862c65558bb5747c36140fd6261a7e1ecfe | [
"BSD-3-Clause"
] | null | null | null | nipy/labs/spatial_models/tests/test_bsa_io.py | arokem/nipy | d6b2e862c65558bb5747c36140fd6261a7e1ecfe | [
"BSD-3-Clause"
] | null | null | null | nipy/labs/spatial_models/tests/test_bsa_io.py | arokem/nipy | d6b2e862c65558bb5747c36140fd6261a7e1ecfe | [
"BSD-3-Clause"
] | null | null | null | from __future__ import with_statement
from nose.tools import assert_true
from os.path import exists
import numpy as np
from nibabel import Nifti1Image
from numpy.testing import assert_equal
from ...utils.simul_multisubject_fmri_dataset import surrogate_3d_dataset
from ..bsa_io import make_bsa_image
from nibabel.tmpdirs import InTemporaryDirectory
def test_parcel_intra_from_3d_images_list():
"""Test that a parcellation is generated, starting from a list of 3D images
"""
# Generate an image
shape = (5, 5, 5)
contrast_id = 'plop'
mask_image = Nifti1Image(np.ones(shape), np.eye(4))
#mask_images = [mask_image for _ in range(5)]
with InTemporaryDirectory() as dir_context:
data_image = ['image_%d.nii' % i for i in range(5)]
for datim in data_image:
surrogate_3d_dataset(mask=mask_image, out_image_file=datim)
#run the algo
landmark, hrois = make_bsa_image(
mask_image, data_image, threshold=10., smin=0, sigma=1.,
prevalence_threshold=0, prevalence_pval=0.5, write_dir=dir_context,
algorithm='density', contrast_id=contrast_id)
assert_equal(landmark, None)
assert_equal(len(hrois), 5)
assert_true(exists('density_%s.nii' % contrast_id))
assert_true(exists('prevalence_%s.nii' % contrast_id))
assert_true(exists('AR_%s.nii' % contrast_id))
assert_true(exists('CR_%s.nii' % contrast_id))
if __name__ == "__main__":
import nose
nose.run(argv=['', __file__])
| 34.111111 | 79 | 0.699674 |
f7c5189c4c9985714dd619cfadbc0baf92efab39 | 5,099 | py | Python | MFSDA/MFSDA_run.py | bpaniagua/MFSDA_Python | d7e439fe670d5e2731c9ec722919a74f67b01e30 | [
"Apache-2.0"
] | 3 | 2020-08-10T08:57:36.000Z | 2021-04-04T01:12:50.000Z | MFSDA/MFSDA_run.py | bpaniagua/MFSDA_Python | d7e439fe670d5e2731c9ec722919a74f67b01e30 | [
"Apache-2.0"
] | 17 | 2018-08-03T14:25:52.000Z | 2022-02-06T18:19:39.000Z | MFSDA/MFSDA_run.py | bpaniagua/MFSDA_Python | d7e439fe670d5e2731c9ec722919a74f67b01e30 | [
"Apache-2.0"
] | 13 | 2017-11-14T17:22:32.000Z | 2020-12-10T16:55:58.000Z | #!/usr/bin/env python-real
# -*- coding: utf-8 -*-
"""
Run script: multivariate functional shape data analysis (MFSDA).
Author: Chao Huang (chaohuang.stat@gmail.com)
Last update: 2017-08-14
"""
import sys,os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),os.path.join('Resources','Libraries')))
import numpy as np
from scipy import stats
from statsmodels.sandbox.stats.multicomp import fdrcorrection0
from stat_read_x import read_x
from stat_lpks import lpks
from stat_sif import sif
from stat_wald_ht import wald_ht
from stat_bstrp_pvalue import bstrp_pvalue
import MFSDA_stat as mfsda
import timeit
import vtk
import argparse
import os
import json
"""installed all the libraries above"""
def run_script(args):
"""
Run the commandline script for MFSDA.
"""
"""+++++++++++++++++++++++++++++++++++"""
"""Step 1. load dataset """
print("loading data ......")
print("+++++++Read the surface shape data+++++++")
fh = open(args.shapeData, 'rU')
y_design = []
nshape = 0
numpoints = -1
header = fh.readline()
toks = header.split(sep=',')
covs_tmp = []
for line in fh.readlines():
toks = line.strip().split(sep=',')
# Read VTK file
vtkfilename = toks[0].rstrip()
print("Reading {}".format(vtkfilename))
reader = vtk.vtkPolyDataReader()
reader.SetFileName(vtkfilename)
reader.Update()
shapedata = reader.GetOutput()
shapedatapoints = shapedata.GetPoints()
y_design.append([])
if numpoints == -1:
numpoints = shapedatapoints.GetNumberOfPoints()
if numpoints != shapedatapoints.GetNumberOfPoints():
print("WARNING! The number of points is not the same for the shape:", vtkfilename)
for i in range(shapedatapoints.GetNumberOfPoints()):
p = shapedatapoints.GetPoint(i)
y_design[nshape].append(p)
nshape += 1
# Build covariate matrix
covs_tmp.append(toks[1:])
y_design = np.array(y_design)
y_design.reshape(nshape, numpoints, 3)
y_design = np.array(y_design)
y_design.reshape(nshape, numpoints, 3)
print("The dimension of shape matrix is " + str(y_design.shape))
print("+++++++Read the sphere coordinate data+++++++")
print("Reading", args.coordData)
reader = vtk.vtkPolyDataReader()
reader.SetFileName(args.coordData)
reader.Update()
coordData = reader.GetOutput()
shapedatapoints = coordData.GetPoints()
if numpoints != shapedatapoints.GetNumberOfPoints():
print("WARNING! The template does not have the same number of points as the shapes")
coord_mat = []
for i in range(shapedatapoints.GetNumberOfPoints()):
p = shapedatapoints.GetPoint(i)
coord_mat.append(p)
coord_mat = np.array(coord_mat)
# Set up design matrix
design_data = np.array(covs_tmp,dtype=float)
# read the covariate type
var_type = getCovariateType(design_data)
"""+++++++++++++++++++++++++++++++++++"""
"""Step 2. Statistical analysis: including (1) smoothing and (2) hypothesis testing"""
gpvals, lpvals_fdr, clu_pvals, efit_beta, efity_design, efit_eta = mfsda.run_stats(y_design, coord_mat, design_data, var_type)
"""+++++++++++++++++++++++++++++++++++"""
"""Step3. Save all the results"""
if not os.path.exists(args.outputDir):
os.makedirs(args.outputDir)
pvalues = {}
pvalues['Gpvals'] = gpvals.tolist()
pvalues['clu_pvals'] = clu_pvals.tolist()
pvalues['Lpvals_fdr'] = lpvals_fdr.tolist()
with open(os.path.join(args.outputDir,'pvalues.json'), 'w') as outfile:
json.dump(pvalues, outfile)
efit = {}
efit['efitBetas'] = efit_beta.tolist()
efit['efitYdesign'] = efity_design.tolist()
efit['efitEtas'] = efit_eta.tolist()
with open(os.path.join(args.outputDir,'efit.json'), 'w') as outfile:
json.dump(efit, outfile)
if __name__ == '__main__':
main()
| 29.818713 | 130 | 0.642871 |
f7c72117e015e7f0761f5162d10f3d3cf0ddb74f | 1,671 | py | Python | modules/mongodb_atlas/mongodb_atlas.py | riddopic/opta | 25fa6435fdc7e2ea9c7963ed74100fffb0743063 | [
"Apache-2.0"
] | 595 | 2021-05-21T22:30:48.000Z | 2022-03-31T15:40:25.000Z | modules/mongodb_atlas/mongodb_atlas.py | riddopic/opta | 25fa6435fdc7e2ea9c7963ed74100fffb0743063 | [
"Apache-2.0"
] | 463 | 2021-05-24T21:32:59.000Z | 2022-03-31T17:12:33.000Z | modules/mongodb_atlas/mongodb_atlas.py | riddopic/opta | 25fa6435fdc7e2ea9c7963ed74100fffb0743063 | [
"Apache-2.0"
] | 29 | 2021-05-21T22:27:52.000Z | 2022-03-28T16:43:45.000Z | import os
from typing import TYPE_CHECKING
from modules.base import ModuleProcessor
from opta.core.terraform import get_terraform_outputs
from opta.exceptions import UserErrors
if TYPE_CHECKING:
from opta.layer import Layer
from opta.module import Module
| 37.133333 | 98 | 0.663076 |
f7c92906bdd05fb9011ed12eacbe0ac0a33b671e | 502 | py | Python | python/tests/testdata/region_HU.py | kevin-brown/python-phonenumbers | e4ae191e6fae47581eb40d3d23c7e2b7d422c326 | [
"Apache-2.0"
] | 1 | 2019-08-06T03:19:28.000Z | 2019-08-06T03:19:28.000Z | python/tests/testdata/region_HU.py | kevin-brown/python-phonenumbers | e4ae191e6fae47581eb40d3d23c7e2b7d422c326 | [
"Apache-2.0"
] | null | null | null | python/tests/testdata/region_HU.py | kevin-brown/python-phonenumbers | e4ae191e6fae47581eb40d3d23c7e2b7d422c326 | [
"Apache-2.0"
] | 2 | 2018-02-09T13:52:15.000Z | 2019-09-10T08:36:25.000Z | """Auto-generated file, do not edit by hand. HU metadata"""
from phonenumbers.phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_HU = PhoneMetadata(id='HU', country_code=36, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='30\\d{7}', possible_length=(9,)),
mobile=PhoneNumberDesc(national_number_pattern='30\\d{7}', example_number='301234567', possible_length=(9,)),
national_prefix='06',
national_prefix_for_parsing='06')
| 55.777778 | 113 | 0.776892 |
f7c994df8beeb9e54af1a6918047db78eb8494b2 | 1,389 | py | Python | lambdas/budget-handler/lambda_handler.py | weAllWeGot/personal_financial_engine | 37c89e49aa68d6db48c10d6663135f4992a72171 | [
"Apache-2.0"
] | 2 | 2018-08-18T16:41:43.000Z | 2020-12-20T21:29:49.000Z | lambdas/budget-handler/lambda_handler.py | weallwegot/personal_financial_engine | 37c89e49aa68d6db48c10d6663135f4992a72171 | [
"Apache-2.0"
] | 12 | 2018-07-25T16:56:48.000Z | 2019-10-22T01:16:23.000Z | lambdas/budget-handler/lambda_handler.py | weAllWeGot/personal_financial_engine | 37c89e49aa68d6db48c10d6663135f4992a72171 | [
"Apache-2.0"
] | 4 | 2018-12-07T23:50:12.000Z | 2021-04-16T20:49:08.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import boto3
import csv
import json
import logging
from budget_retrieval import get_budget
from budget_placement import place_budget
def lambda_handler(event: dict, context: dict) -> dict:
    '''Route an API Gateway request to the budget retrieve/place helpers.

    The action and entity are taken from the JSON request body
    (``RetrieveOrPlace`` and ``Entity`` fields); the authenticated user id
    comes from the Cognito authorizer claims on the request context.
    '''
    # NOTE(review): this value is immediately overwritten from the request
    # body below — a dead store, except that it raises KeyError when 'path'
    # is absent. Confirm which source of truth is intended.
    path = event['path']
    # Cognito subject ('sub') claim uniquely identifies the caller.
    user_uid = event['requestContext']['authorizer']['claims']['sub']
    body = json.loads(event['body'])
    # Normalize action and entity to two-way choices via suffix matching.
    path = '/retrieve' if body['RetrieveOrPlace'].endswith('retrieve') else '/place'
    entity = 'budget' if body['Entity'].endswith('budget') else 'account'
    print(path)  # debug trace of the resolved action
    if path.endswith('/retrieve'):
        response = get_budget(user_uid, entity)
    elif path.endswith('/place'):
        response = place_budget(user_uid, body, entity)
    # NOTE(review): respond() is not defined in this file as shown —
    # presumably a shared helper that builds the API Gateway response dict;
    # TODO confirm it exists in the deployed module.
    return respond(err=None, res=response)
# with open('event.json') as f:
# e = json.load(f)
# lambda_handler(e, {})
| 28.346939 | 84 | 0.645068 |
f7ca0211e8a92052407acbaa028f0ad46e74b5f9 | 1,451 | py | Python | src/documenteer/stackdocs/doxygentag.py | lsst-sqre/sphinxkit | a9475d0722b0f6f89fd1c4c54eafad0564667b0b | [
"MIT"
] | 3 | 2019-04-18T02:47:06.000Z | 2021-11-09T03:49:12.000Z | src/documenteer/stackdocs/doxygentag.py | lsst-sqre/sphinxkit | a9475d0722b0f6f89fd1c4c54eafad0564667b0b | [
"MIT"
] | 29 | 2016-12-15T01:02:05.000Z | 2022-03-07T12:06:40.000Z | src/documenteer/stackdocs/doxygentag.py | lsst-sqre/sphinxkit | a9475d0722b0f6f89fd1c4c54eafad0564667b0b | [
"MIT"
] | 2 | 2016-09-12T17:44:06.000Z | 2016-12-15T00:37:05.000Z | """Utilities for working with Doxygen tag files.
"""
__all__ = ["get_tag_entity_names"]
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import List, Optional, Sequence, Union
try:
from sphinxcontrib.doxylink import doxylink
except ImportError:
print(
"sphinxcontrib.doxylink is missing. Install documenteer with the "
"pipelines extra:\n\n pip install documenteer[pipelines]"
)
def get_tag_entity_names(
    tag_path: Union[str, Path], kinds: Optional[Sequence[str]] = None
) -> List[str]:
    """Get the list of API names in a Doxygen tag file.

    Parameters
    ----------
    tag_path : `str` or `~pathlib.Path`
        File path of the Doxygen tag file.
    kinds : sequence of `str`, optional
        If provided, only APIs of these Doxygen kinds are included.
        Doxygen kinds are: namespace, struct, class, file, define, group,
        variable, typedef, enumeration, function.

    Returns
    -------
    names : `list` of `str`
        Sorted list of API names.
    """
    tree = ET.parse(str(tag_path))
    symbol_map = doxylink.SymbolMap(tree)
    all_names = symbol_map._mapping.keys()
    if kinds:
        names = [name for name in all_names if symbol_map[name].kind in kinds]
    else:
        names = list(all_names)
    names.sort()
    return names
| 24.183333 | 74 | 0.598208 |
f7cadf89eeb52e1e8b7bf3ad6d819d4964e7f62f | 1,263 | py | Python | src/gamesbyexample/shellgame.py | skinzor/PythonStdioGames | 75f27af19d7f1d555b0fd85fbcf215f07660b93f | [
"MIT"
] | 1 | 2019-11-30T17:04:09.000Z | 2019-11-30T17:04:09.000Z | src/gamesbyexample/shellgame.py | skinzor/PythonStdioGames | 75f27af19d7f1d555b0fd85fbcf215f07660b93f | [
"MIT"
] | null | null | null | src/gamesbyexample/shellgame.py | skinzor/PythonStdioGames | 75f27af19d7f1d555b0fd85fbcf215f07660b93f | [
"MIT"
] | null | null | null | # Shell Game, by Al Sweigart al@inventwithpython.com
# A random gambling game.
import random, time, sys

# Intro banner shown once at start-up; the player confirms with Enter.
print('''SHELL GAME
By Al Sweigart al@inventwithpython.com
Try to find the diamond!
Press Enter to continue...''')
input()

CUPS = ['diamond', 'pocket lint', 'nothing']
while True:
    print()
    print('Shuffling the cups', end='')
    random.shuffle(CUPS)  # The actual shuffle is instantaneous.
    # Fake suspense: three dots, 0.3 s apart, then a final pause and newline.
    for _ in range(3):
        time.sleep(0.3)
        print('.', end='')
    time.sleep(0.3)
    print()

    # Keep prompting until the player enters a valid cup number.
    while True:
        print('Okay! Pick a cup 1-{}'.format(len(CUPS)))
        pickedCup = input()
        if pickedCup.isdecimal() and 1 <= int(pickedCup) <= len(CUPS):
            break
        print('Type a number between 1 and {}.'.format(len(CUPS)))

    print()
    # Cups are numbered from 1, the list from 0.
    chosen = CUPS[int(pickedCup) - 1]
    if chosen == 'diamond':
        print('You found the cup with the diamond!')
    else:
        print('Nope! You picked the cup that had {} in it.'.format(chosen))

    print('Would you like to play again? Y/N')
    if not input().upper().startswith('Y'):
        print('Thanks for playing!')
        sys.exit()
| 26.3125 | 93 | 0.599367 |
f7cbba72cbee5b92ee9bed0dc914113ae1d6f2e4 | 1,242 | py | Python | main.py | mathew4STAR/GPT-3_based_AI | 7c5ffcd26ebbd64ee1f6fa02ec4a8529c795b809 | [
"MIT"
] | null | null | null | main.py | mathew4STAR/GPT-3_based_AI | 7c5ffcd26ebbd64ee1f6fa02ec4a8529c795b809 | [
"MIT"
] | null | null | null | main.py | mathew4STAR/GPT-3_based_AI | 7c5ffcd26ebbd64ee1f6fa02ec4a8529c795b809 | [
"MIT"
] | null | null | null | import pyttsx3
import speech_recognition as sr
import openai as op
import os
# Configure the OpenAI client from the environment; the key is never hard-coded.
op.api_key = os.getenv("OPENAI_API_KEY")
# Text-to-speech engine: speaking rate 150 wpm, full volume, second voice.
engine = pyttsx3.init()
engine.setProperty('rate', 150)
engine.setProperty('volume', 1.0)
voices = engine.getProperty('voices')
# NOTE(review): assumes at least two TTS voices are installed — TODO confirm.
engine.setProperty('voice', voices[1].id)
# Main conversation loop: capture a query, send it to GPT-3, show the reply.
while True:
    # NOTE(review): takecommand() is not defined in this file as shown —
    # presumably a speech-to-text helper built on the speech_recognition
    # import above; confirm against the full source.
    query = takecommand()
    response = op.Completion.create(
        engine="text-davinci-001",
        prompt="The following is a conversation with an AI friend. The friend is helpful, creative, clever, and very friendly.\n\nHuman: " + query + "\nAI: ",
        temperature=0.9,
        max_tokens=150,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0.6,
    )
    # First completion choice is the assistant's reply text.
    presponse= response["choices"][0]["text"]
    print(presponse)
tell(presponse) | 24.84 | 154 | 0.638486 |
f7cdafc3fcc754a52e3ada458ff7a926e8981f1d | 71,088 | py | Python | sdk/python/pulumi_azure_native/compute/v20200930/_inputs.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/compute/v20200930/_inputs.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/compute/v20200930/_inputs.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'CreationDataArgs',
'DataDiskImageEncryptionArgs',
'DisallowedArgs',
'DiskSkuArgs',
'EncryptionImagesArgs',
'EncryptionSetIdentityArgs',
'EncryptionSettingsCollectionArgs',
'EncryptionSettingsElementArgs',
'EncryptionArgs',
'ExtendedLocationArgs',
'GalleryApplicationVersionPublishingProfileArgs',
'GalleryArtifactVersionSourceArgs',
'GalleryDataDiskImageArgs',
'GalleryImageFeatureArgs',
'GalleryImageIdentifierArgs',
'GalleryImageVersionPublishingProfileArgs',
'GalleryImageVersionStorageProfileArgs',
'GalleryOSDiskImageArgs',
'ImageDiskReferenceArgs',
'ImagePurchasePlanArgs',
'KeyForDiskEncryptionSetArgs',
'KeyVaultAndKeyReferenceArgs',
'KeyVaultAndSecretReferenceArgs',
'OSDiskImageEncryptionArgs',
'PrivateLinkServiceConnectionStateArgs',
'PurchasePlanArgs',
'RecommendedMachineConfigurationArgs',
'ResourceRangeArgs',
'SharingProfileArgs',
'SnapshotSkuArgs',
'SourceVaultArgs',
'TargetRegionArgs',
'UserArtifactManageArgs',
'UserArtifactSourceArgs',
]
| 42.138708 | 389 | 0.668763 |
f7cdc28f8dbf0a5fa40122f9a836204bf7e9435a | 500 | py | Python | bgp_adjacencies/BGP_check_job.py | KamyarZiabari/solutions_examples | 3dfa80d276ab13d1e489142a3fcbe2bd8ab0eba2 | [
"Apache-2.0"
] | 59 | 2019-03-08T15:08:14.000Z | 2021-12-23T15:59:03.000Z | bgp_adjacencies/BGP_check_job.py | CiscoTestAutomation/genie_solutions | 69c96f57dce466bcd767bd1ea6326aaf6a63fbcf | [
"Apache-2.0"
] | 8 | 2019-04-05T04:29:17.000Z | 2021-04-12T15:37:51.000Z | bgp_adjacencies/BGP_check_job.py | CiscoTestAutomation/genie_solutions | 69c96f57dce466bcd767bd1ea6326aaf6a63fbcf | [
"Apache-2.0"
] | 37 | 2019-03-15T21:35:38.000Z | 2022-03-22T01:49:59.000Z | # To run the job:
# pyats run job BGP_check_job.py --testbed-file <testbed_file.yaml>
# Description: This job file checks that all BGP neighbors are in Established state
import os
# All run() must be inside a main function
| 38.461538 | 83 | 0.708 |
f7cddf9b0d9e1e72530d863ce9c077212cea7e97 | 858 | py | Python | tvae/utils/logging.py | ReallyAnonNeurips2021/TopographicVAE | 97ba47c039f7eab05ce9e17c3faea0a6ec86f1eb | [
"MIT"
] | 57 | 2021-09-02T13:20:43.000Z | 2022-03-17T18:35:55.000Z | tvae/utils/logging.py | ReallyAnonNeurips2021/TopographicVAE | 97ba47c039f7eab05ce9e17c3faea0a6ec86f1eb | [
"MIT"
] | 2 | 2021-09-07T13:06:40.000Z | 2022-03-04T11:54:22.000Z | tvae/utils/logging.py | ReallyAnonNeurips2021/TopographicVAE | 97ba47c039f7eab05ce9e17c3faea0a6ec86f1eb | [
"MIT"
] | 8 | 2021-09-07T14:48:25.000Z | 2022-03-12T05:44:32.000Z | import os
| 24.514286 | 71 | 0.56993 |
f7cde5f2b92aa7e388bad877341add7fc6bed0cb | 521 | py | Python | create_lesson_plan/admin.py | rishabhranawat/CrowdPlatform | 1de2ad7e70fbf6cbf2e29bc9368341134b4f7e0d | [
"MIT"
] | 1 | 2020-07-23T21:35:40.000Z | 2020-07-23T21:35:40.000Z | create_lesson_plan/admin.py | rishabhranawat/CrowdPlatform | 1de2ad7e70fbf6cbf2e29bc9368341134b4f7e0d | [
"MIT"
] | 9 | 2021-02-08T20:32:35.000Z | 2022-03-02T14:58:07.000Z | create_lesson_plan/admin.py | rishabhranawat/CrowdPlatform | 1de2ad7e70fbf6cbf2e29bc9368341134b4f7e0d | [
"MIT"
] | null | null | null | from django.contrib import admin
from create_lesson_plan.models import *
# Expose every lesson-plan model in the Django admin site, in the same
# registration order as before.
for _model in (
        lesson,
        lesson_plan,
        Engage_Urls,
        Explain_Urls,
        Evaluate_Urls,
        MCQ,
        FITB,
        Engage_Images,
        Explain_Images,
        Evaluate_Images,
        Document,
        Image,
        TestScore,
        OfflineDocument,
):
    admin.site.register(_model)
| 28.944444 | 39 | 0.84261 |
f7ce40df7d33d5f39e5868a59d46a085bed7cd64 | 3,408 | py | Python | src/models/modules/visual_bert_classifier.py | inzva/emotion-recognition-drawings | 56435f42d76c10c10fa58149ccbcc8d05efccdc0 | [
"MIT"
] | 10 | 2021-11-20T19:01:08.000Z | 2022-01-16T09:06:12.000Z | src/models/modules/visual_bert_classifier.py | inzva/emotion-recognition-drawings | 56435f42d76c10c10fa58149ccbcc8d05efccdc0 | [
"MIT"
] | 2 | 2021-12-11T12:28:03.000Z | 2021-12-13T21:09:53.000Z | src/models/modules/visual_bert_classifier.py | inzva/emotion-recognition-drawings | 56435f42d76c10c10fa58149ccbcc8d05efccdc0 | [
"MIT"
] | null | null | null | import torch
from torch import nn
from transformers import BertTokenizer, VisualBertModel, VisualBertConfig
import numpy as np
if __name__ == '__main__':
    # Smoke-test driver: runs one text+visual-embedding pair through the
    # classifier. Requires a CUDA device — every tensor is moved to 'cuda'.
    bert_text_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    inputs = bert_text_tokenizer("What is the man eating?", return_tensors="pt")
    text_input_ids = inputs.data['input_ids'].to('cuda')
    text_token_type_ids = inputs.data['token_type_ids'].to('cuda')
    text_attention_mask = inputs.data['attention_mask'].to('cuda')
    # NOTE(review): machine-specific absolute path to a precomputed
    # face/body embedding (.npy) — will not exist elsewhere; parameterize.
    sample_face_body_embedding_path = "/home/gsoykan20/Desktop/self_development/emotion-recognition-drawings/data/emoreccom_face_body_embeddings_96d/train/0_3_4.jpg.npy"
    sample_face_body_embedding = np.load(sample_face_body_embedding_path)
    visual_embeds = torch.from_numpy(sample_face_body_embedding)
    visual_embeds = visual_embeds.to('cuda')
    # Add a batch dimension of 1 in front of the embedding.
    visual_embeds = torch.unsqueeze(visual_embeds, 0)
    # Token-type ids and attention mask cover every visual token (all ones).
    visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long).to('cuda')
    visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float).to('cuda')
    # NOTE(review): VisualBertClassifier is not defined in this excerpt —
    # presumably declared earlier in the original module; confirm.
    classifier = VisualBertClassifier()
    classifier.to('cuda')
    classifier.forward(text_input_ids,
                       text_token_type_ids,
                       text_attention_mask,
                       visual_embeds,
                       visual_token_type_ids,
                       visual_attention_mask)
| 46.054054 | 169 | 0.667254 |
f7cfecaa2797756809c5e754e4b6bf4f05823087 | 1,006 | py | Python | narrative2vec/logging_instance/pose.py | code-iai/narrative2vec | 948071d09838ea41ee9749325af6804427a060d2 | [
"MIT"
] | null | null | null | narrative2vec/logging_instance/pose.py | code-iai/narrative2vec | 948071d09838ea41ee9749325af6804427a060d2 | [
"MIT"
] | null | null | null | narrative2vec/logging_instance/pose.py | code-iai/narrative2vec | 948071d09838ea41ee9749325af6804427a060d2 | [
"MIT"
] | null | null | null | from narrative2vec.logging_instance.logging_instance import LoggingInstance, _get_first_rdf_query_result
from narrative2vec.logging_instance.reasoning_task import ReasoningTask
from narrative2vec.ontology.neemNarrativeDefinitions import QUATERNION
from narrative2vec.ontology.ontologyHandler import get_knowrob_uri
| 43.73913 | 104 | 0.781312 |
f7d0423ade6b86198698a9b5f2ef5a03964e0231 | 288 | py | Python | kobra/settings/development.py | karservice/kobra | 2019fd3be499c06d2527e80576fd6ff03d8fe151 | [
"MIT"
] | 4 | 2016-08-28T16:00:20.000Z | 2018-01-31T18:22:43.000Z | kobra/settings/development.py | karservice/kobra | 2019fd3be499c06d2527e80576fd6ff03d8fe151 | [
"MIT"
] | 25 | 2016-08-15T20:57:59.000Z | 2022-02-10T18:14:48.000Z | kobra/settings/development.py | karservice/kobra | 2019fd3be499c06d2527e80576fd6ff03d8fe151 | [
"MIT"
] | 1 | 2017-02-06T17:13:16.000Z | 2017-02-06T17:13:16.000Z | # -*- coding: utf-8 -*-
from . import *
# Development settings: each value may be overridden via a KOBRA_* env var.
# NOTE(review): `env` presumably comes from the base settings module via the
# star import above — confirm.
# The fallback key is deliberately labeled unsafe; production must set
# KOBRA_SECRET_KEY.
SECRET_KEY = env.str('KOBRA_SECRET_KEY',
                     'Unsafe_development_key._Never_use_in_production.')
# Debug defaults to on for local development.
DEBUG = env.bool('KOBRA_DEBUG_MODE', True)
# Falls back to a local SQLite file when no database URL is configured.
DATABASES = {
    'default': env.db_url('KOBRA_DATABASE_URL', 'sqlite:///db.sqlite3')
}
| 24 | 72 | 0.652778 |
f7d06f7dd5791848e16c5019b980180600add19a | 4,153 | py | Python | foobot_grapher.py | jpwright/foobot-slack | ffc1cf8490d08433d76bb62cbf7440c765089784 | [
"MIT"
] | 1 | 2018-02-17T14:29:41.000Z | 2018-02-17T14:29:41.000Z | foobot_grapher.py | jpwright/foobot-slack | ffc1cf8490d08433d76bb62cbf7440c765089784 | [
"MIT"
] | null | null | null | foobot_grapher.py | jpwright/foobot-slack | ffc1cf8490d08433d76bb62cbf7440c765089784 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from pyfoobot import Foobot
import requests
import matplotlib
matplotlib.use('Agg')
import matplotlib.dates
import matplotlib.pyplot
import datetime
from imgurpython import ImgurClient
import ConfigParser
if __name__ == "__main__":
getSensorReadings(True)
| 27.503311 | 179 | 0.675415 |
f7d2351d64f6c5df1c1015aaa80a18aa25236a08 | 239 | py | Python | safexl/__init__.py | ThePoetCoder/safexl | d2fb91ad45d33b6f51946e99c78e7fcf7564e82e | [
"MIT"
] | 6 | 2020-08-28T16:00:28.000Z | 2022-01-17T14:48:04.000Z | safexl/__init__.py | ThePoetCoder/safexl | d2fb91ad45d33b6f51946e99c78e7fcf7564e82e | [
"MIT"
] | null | null | null | safexl/__init__.py | ThePoetCoder/safexl | d2fb91ad45d33b6f51946e99c78e7fcf7564e82e | [
"MIT"
] | null | null | null | # Copyright (c) 2020 safexl
from safexl.toolkit import *
import safexl.xl_constants as xl_constants
import safexl.colors as colors
__author__ = "Eric Smith"
__email__ = "ThePoetCoder@gmail.com"
__license__ = "MIT"
__version__ = "0.0.7"
| 19.916667 | 42 | 0.76569 |
f7d2cd873463ee3cda95ca64c29e31dbdad2cad2 | 2,989 | py | Python | musicdb/restapi/migrations/0001_initial.py | alexebaker/django-music_database | cffa2574d894509b0eec7c71bd821cc0fd2f2cf7 | [
"MIT"
] | null | null | null | musicdb/restapi/migrations/0001_initial.py | alexebaker/django-music_database | cffa2574d894509b0eec7c71bd821cc0fd2f2cf7 | [
"MIT"
] | 7 | 2020-06-05T18:23:50.000Z | 2022-03-11T23:24:27.000Z | musicdb/restapi/migrations/0001_initial.py | alexebaker/django-music_database | cffa2574d894509b0eec7c71bd821cc0fd2f2cf7 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.4 on 2018-05-01 05:22
from django.db import migrations, models
import django.db.models.deletion
| 38.320513 | 136 | 0.556708 |
f7d39269257b5bc266bf53edfc897cb41af5201f | 402 | py | Python | ballot_source/sources/migrations/0004_auto_20200824_1444.py | Ballot-Drop/ballot-source | 5dd9692ca5e9237a6073833a81771a17ad2c1dc9 | [
"MIT"
] | 3 | 2020-09-05T06:02:08.000Z | 2020-09-28T23:44:05.000Z | ballot_source/sources/migrations/0004_auto_20200824_1444.py | Ballot-Drop/ballot-source | 5dd9692ca5e9237a6073833a81771a17ad2c1dc9 | [
"MIT"
] | 18 | 2020-08-28T18:09:54.000Z | 2020-09-19T17:36:08.000Z | ballot_source/sources/migrations/0004_auto_20200824_1444.py | Ballot-Drop/ballot-source | 5dd9692ca5e9237a6073833a81771a17ad2c1dc9 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.9 on 2020-08-24 20:44
from django.db import migrations, models
| 21.157895 | 58 | 0.606965 |
f7d411b7a1e10f51b58ab6692c180f5bbcd91a28 | 2,007 | py | Python | src/tests/Yi/tests/inner_product_between_lobatto_and_gauss.py | Idate96/Mimetic-Fem | 75ad3b982ef7ed7c6198f526d19dc460dec28f4d | [
"MIT"
] | null | null | null | src/tests/Yi/tests/inner_product_between_lobatto_and_gauss.py | Idate96/Mimetic-Fem | 75ad3b982ef7ed7c6198f526d19dc460dec28f4d | [
"MIT"
] | null | null | null | src/tests/Yi/tests/inner_product_between_lobatto_and_gauss.py | Idate96/Mimetic-Fem | 75ad3b982ef7ed7c6198f526d19dc460dec28f4d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
(SHORT NAME EXPLANATION)
>>>DOCTEST COMMANDS
(THE TEST ANSWER)
@author: Yi Zhang. Created on Mon Jul 10 20:12:27 2017
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft
#SUMMARY----------------
#INPUTS-----------------
#ESSENTIAL:
#OPTIONAL:
#OUTPUTS----------------
#EXAMPLES---------------
#NOTES------------------
"""
# -*- coding: utf-8 -*-
"""
(SHORT NAME EXPLANATION)
>>>DOCTEST COMMANDS
(THE TEST ANSWER)
@author: Yi Zhang . Created on Thu Jul 6 16:00:33 2017
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft
#SUMMARY----------------
#INPUTS-----------------
#ESSENTIAL:
#OPTIONAL:
#OUTPUTS----------------
#EXAMPLES---------------
#NOTES------------------
"""
from function_space import FunctionSpace
import numpy as np
from mesh import CrazyMesh
from forms import Form
from hodge import hodge
from coboundaries import d
from assemble import assemble
from _assembling import assemble_, integral1d_
import matplotlib.pyplot as plt
from quadrature import extended_gauss_quad
from scipy.integrate import quad
from sympy import Matrix
import scipy.io
from scipy import sparse
import scipy as sp
from inner_product import inner
# %% exact solution define
# u^{(1)} = { u, v }^T
# %% define the mesh
# Build a 2x2 element mesh on [-1, 1]^2; the last argument (0.05) is the
# mesh deformation parameter — TODO confirm against CrazyMesh's signature.
mesh = CrazyMesh( 2, (2, 2), ((-1, 1), (-1, 1)), 0.05 )
# Degree-(5,5) outer-oriented 1-form function spaces on Gauss and Lobatto
# grids over the same mesh.
func_space_gauss1 = FunctionSpace(mesh, '1-gauss', (5, 5), is_inner=False)
func_space_lobatto1 = FunctionSpace(mesh, '1-lobatto', (5, 5), is_inner=False)
form_1_gauss = Form(func_space_gauss1)
form_1_lobatto = Form(func_space_lobatto1)
# Mixed inner-product (mass) matrix between the Lobatto and Gauss bases.
M = inner(form_1_lobatto.basis,form_1_gauss.basis)
| 22.3 | 78 | 0.619332 |
f7d511ad2e6640e470287dff8220becb4fa1880a | 1,871 | py | Python | src/quality_control/bin/createSpotDetectionQCHTML.py | WoutDavid/ST-nextflow-pipeline | 8de3da218ec4f10f183e1163fe782c19fd8dd841 | [
"MIT"
] | null | null | null | src/quality_control/bin/createSpotDetectionQCHTML.py | WoutDavid/ST-nextflow-pipeline | 8de3da218ec4f10f183e1163fe782c19fd8dd841 | [
"MIT"
] | null | null | null | src/quality_control/bin/createSpotDetectionQCHTML.py | WoutDavid/ST-nextflow-pipeline | 8de3da218ec4f10f183e1163fe782c19fd8dd841 | [
"MIT"
] | null | null | null | import json
from bs4 import BeautifulSoup
import pandas as pd
import sys
# ---- Command-line arguments ------------------------------------------------
# argv[1]: HTML report template, argv[2]: recall metrics JSON,
# argv[3]: recall plot filename, argv[4:]: one precision JSON per round.
template = sys.argv[1]
recall_json = sys.argv[2]
recall_plot = sys.argv[3]
precision_jsons_list = sys.argv[4:]

# ---- Per-round precision metrics -> HTML table -----------------------------
precision_rows_list = []
for json_path in precision_jsons_list:
    with open(json_path, 'r') as json_file:
        precision_rows_list.append(json.load(json_file))
precision_df = pd.DataFrame(precision_rows_list).sort_values(by='Round #')
precision_html_table = precision_df.to_html(index=False)

# ---- Recall metrics (single JSON) -> HTML table ----------------------------
with open(recall_json, 'r') as json_file:
    recall_rows_list = [json.load(json_file)]
recall_html_table = pd.DataFrame(recall_rows_list).to_html(index=False)

# ---- Stitch tables and the recall plot into the report template ------------
with open(template, 'r') as template_file:
    template_soup = BeautifulSoup(template_file.read(), features="html.parser")
p_list = template_soup.find_all('p')

# Recall table goes right after the first paragraph placeholder.
recall_table = BeautifulSoup(recall_html_table, features="html.parser").find('table')
p_list[0].insert_after(recall_table)

# Recall plot image after the second placeholder.
image_tag = template_soup.new_tag('img')
image_tag['src'] = f"./recall/{recall_plot}"
image_tag['width'] = 700
image_tag['height'] = 500
p_list[1].insert_after(image_tag)

# Precision table after the third placeholder.
precision_table = BeautifulSoup(precision_html_table, features="html.parser").find('table')
p_list[2].insert_after(precision_table)

with open('spot_detection_qc_report.html', 'w') as result_file:
    result_file.write(str(template_soup))
| 27.115942 | 82 | 0.772314 |
f7d56596394f7bfd79f8b0a1466fae7aaa135fac | 2,104 | py | Python | test/torch/mpc/test_fss.py | NicoSerranoP/PySyft | 87fcd566c46fce4c16d363c94396dd26bd82a016 | [
"Apache-2.0"
] | 3 | 2020-11-24T05:15:57.000Z | 2020-12-07T09:52:45.000Z | test/torch/mpc/test_fss.py | NicoSerranoP/PySyft | 87fcd566c46fce4c16d363c94396dd26bd82a016 | [
"Apache-2.0"
] | 1 | 2020-09-29T00:24:31.000Z | 2020-09-29T00:24:31.000Z | test/torch/mpc/test_fss.py | NicoSerranoP/PySyft | 87fcd566c46fce4c16d363c94396dd26bd82a016 | [
"Apache-2.0"
] | 1 | 2021-09-04T16:27:41.000Z | 2021-09-04T16:27:41.000Z | import pytest
import torch as th
from syft.frameworks.torch.mpc.fss import DPF, DIF, n
| 32.875 | 70 | 0.551331 |
f7d62d0a50f28ea90ec1747700a205b806ed75b7 | 2,684 | py | Python | allennlp/tests/data/tokenizers/pretrained_transformer_tokenizer_test.py | donna-legal/allennlp | fd1e3cfaed07ec3ba03b922d12eee47f8be16837 | [
"Apache-2.0"
] | 1 | 2020-01-28T07:52:28.000Z | 2020-01-28T07:52:28.000Z | allennlp/tests/data/tokenizers/pretrained_transformer_tokenizer_test.py | donna-legal/allennlp | fd1e3cfaed07ec3ba03b922d12eee47f8be16837 | [
"Apache-2.0"
] | null | null | null | allennlp/tests/data/tokenizers/pretrained_transformer_tokenizer_test.py | donna-legal/allennlp | fd1e3cfaed07ec3ba03b922d12eee47f8be16837 | [
"Apache-2.0"
] | null | null | null | from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
| 28.252632 | 99 | 0.462742 |
f7d6ae2f3cb3eec3b7e8a4d67b500afb529fc556 | 2,928 | py | Python | openmdao/api.py | ryanfarr01/blue | a9aac98c09cce0f7cadf26cf592e3d978bf4e3ff | [
"Apache-2.0"
] | null | null | null | openmdao/api.py | ryanfarr01/blue | a9aac98c09cce0f7cadf26cf592e3d978bf4e3ff | [
"Apache-2.0"
] | null | null | null | openmdao/api.py | ryanfarr01/blue | a9aac98c09cce0f7cadf26cf592e3d978bf4e3ff | [
"Apache-2.0"
] | null | null | null | """Key OpenMDAO classes can be imported from here."""
# Core
from openmdao.core.problem import Problem
from openmdao.core.group import Group
from openmdao.core.parallel_group import ParallelGroup
from openmdao.core.explicitcomponent import ExplicitComponent
from openmdao.core.implicitcomponent import ImplicitComponent
from openmdao.core.indepvarcomp import IndepVarComp
from openmdao.core.analysis_error import AnalysisError
# Components
from openmdao.components.deprecated_component import Component
from openmdao.components.exec_comp import ExecComp
from openmdao.components.linear_system_comp import LinearSystemComp
from openmdao.components.meta_model import MetaModel
from openmdao.components.multifi_meta_model import MultiFiMetaModel
# Solvers
from openmdao.solvers.linear.linear_block_gs import LinearBlockGS
from openmdao.solvers.linear.linear_block_jac import LinearBlockJac
from openmdao.solvers.linear.direct import DirectSolver
from openmdao.solvers.linear.petsc_ksp import PetscKSP
from openmdao.solvers.linear.linear_runonce import LinearRunOnce
from openmdao.solvers.linear.scipy_iter_solver import ScipyIterativeSolver
from openmdao.solvers.linesearch.backtracking import ArmijoGoldsteinLS
from openmdao.solvers.linesearch.backtracking import BoundsEnforceLS
from openmdao.solvers.nonlinear.nonlinear_block_gs import NonlinearBlockGS
from openmdao.solvers.nonlinear.nonlinear_block_jac import NonlinearBlockJac
from openmdao.solvers.nonlinear.newton import NewtonSolver
from openmdao.solvers.nonlinear.nonlinear_runonce import NonLinearRunOnce
# Surrogate Models
from openmdao.surrogate_models.kriging import KrigingSurrogate, FloatKrigingSurrogate
from openmdao.surrogate_models.multifi_cokriging import MultiFiCoKrigingSurrogate, \
FloatMultiFiCoKrigingSurrogate
from openmdao.surrogate_models.nearest_neighbor import NearestNeighbor
from openmdao.surrogate_models.response_surface import ResponseSurface
from openmdao.surrogate_models.surrogate_model import SurrogateModel, \
MultiFiSurrogateModel
# Vectors
from openmdao.vectors.default_vector import DefaultVector
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
# Developer Tools
from openmdao.devtools.problem_viewer.problem_viewer import view_model
from openmdao.devtools.viewconns import view_connections
# Derivative Specification
from openmdao.jacobians.assembled_jacobian import AssembledJacobian, \
DenseJacobian, COOJacobian, CSRJacobian, CSCJacobian
# Drivers
try:
from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver
except ImportError:
pass
from openmdao.drivers.scipy_optimizer import ScipyOptimizer
# System-Building Tools
from openmdao.utils.options_dictionary import OptionsDictionary
# Recorders
from openmdao.recorders.sqlite_recorder import SqliteRecorder
from openmdao.recorders.openmdao_server_recorder import OpenMDAOServerRecorder
| 41.828571 | 85 | 0.873634 |
f7d8750cdaa9ce35d0790079eee8be949cbd02ee | 1,443 | py | Python | code-buddy.py | xl3ehindTim/Code-buddy | e04b7b4327a0b3ff2790d22aef93dca6fce021f4 | [
"MIT"
] | 8 | 2019-11-29T09:20:11.000Z | 2020-11-02T10:55:35.000Z | code-buddy.py | xl3ehindTim/Code-buddy | e04b7b4327a0b3ff2790d22aef93dca6fce021f4 | [
"MIT"
] | 2 | 2019-12-02T13:48:01.000Z | 2019-12-02T17:00:56.000Z | code-buddy.py | xl3ehindTim/Code-buddy | e04b7b4327a0b3ff2790d22aef93dca6fce021f4 | [
"MIT"
] | 3 | 2019-11-29T10:03:44.000Z | 2020-10-01T10:23:55.000Z | import os
from getArgs import getArgs
from modules import python, javascript, html, php, bootstrap, cca
# from folder import file
# code-buddy.py create (file type) (directory name)
# Checks for "create"
# Fix over original: the "cca" branch was written `elif projectType == "cca"`
# with no trailing colon — a SyntaxError that made the whole script unrunnable.
# The repeated branch bodies are also collapsed into one dispatch table.
if getArgs(1) == "create":
    # Second argument selects the project scaffolding to generate.
    projectType = getArgs(2)
    # Dispatch table: project type -> module function that creates the folder.
    # NOTE(review): the "bootstrap" entry calls bootstrap.createPhpProject,
    # exactly as the original code did — looks like a copy-paste name in the
    # modules package; confirm before renaming.
    creators = {
        "python": python.createPythonProject,
        "javascript": javascript.createJavascriptProject,
        "html": html.createHtmlProject,
        "php": php.createPhpProject,
        "bootstrap": bootstrap.createPhpProject,
        "cca": cca.createCcaProject,
    }
    creator = creators.get(projectType)
    if creator is not None:
        # Third argument is the directory name for the new project.
        creator(getArgs(3))
        print("Folder created succesfully")
    else:
        # Unknown project type.
        print(f"argument {getArgs(2)} is unknown, try: 'python, javascript, html, php or bootstrap'")
else:
    # First argument was not "create".
    print(f"argument {getArgs(1)} is unknown, use 'create' to create a folder")
| 33.55814 | 101 | 0.644491 |
f7d8d7b6d6bbc7f8a6c1802ec8a9bedc82cb072a | 5,799 | py | Python | compyle/tests/test_ext_module.py | manish364824/compyle | cc97dd0a0e7b12f904b3f1c0f20aa06a41779c61 | [
"BSD-3-Clause"
] | 1 | 2020-11-23T12:13:04.000Z | 2020-11-23T12:13:04.000Z | compyle/tests/test_ext_module.py | manish364824/compyle | cc97dd0a0e7b12f904b3f1c0f20aa06a41779c61 | [
"BSD-3-Clause"
] | null | null | null | compyle/tests/test_ext_module.py | manish364824/compyle | cc97dd0a0e7b12f904b3f1c0f20aa06a41779c61 | [
"BSD-3-Clause"
] | null | null | null | from contextlib import contextmanager
from distutils.sysconfig import get_config_var
from io import open as io_open
import os
from os.path import join, exists
import shutil
import sys
import tempfile
from textwrap import dedent
from multiprocessing import Pool
from unittest import TestCase, main
try:
from unittest import mock
except ImportError:
import mock
from ..ext_module import get_md5, ExtModule, get_ext_extension, get_unicode
def _check_write_source(root) -> int:
    """Used to create an ExtModule and test if a file was opened.

    It returns the number of times "open" was called.
    """
    # Mocked open(): no file is actually written to disk.
    m = mock.mock_open()
    orig_side_effect = m.side_effect
    # NOTE(review): _side_effect is not defined in this file as shown —
    # presumably a helper (possibly closing over orig_side_effect) defined
    # elsewhere in the original module; TODO confirm.
    m.side_effect = _side_effect
    # Patch the io.open used inside compyle.ext_module so write_source()
    # hits the mock instead of the filesystem.
    with mock.patch('compyle.ext_module.io.open', m, create=True):
        s = ExtModule("print('hello')", root=root)
        s.write_source()
    return m.call_count
if __name__ == '__main__':
main()
| 29.436548 | 76 | 0.607174 |
f7db3778ef11768f9b2aff72c3bc714173c0ef05 | 5,286 | py | Python | tma/collector/xhn.py | hebpmo/TMA | b07747d3112e822ff92dd2ba4589d2288adab154 | [
"MIT"
] | 2 | 2020-02-15T18:31:39.000Z | 2020-03-18T13:30:58.000Z | tma/collector/xhn.py | hebpmo/TMA | b07747d3112e822ff92dd2ba4589d2288adab154 | [
"MIT"
] | null | null | null | tma/collector/xhn.py | hebpmo/TMA | b07747d3112e822ff92dd2ba4589d2288adab154 | [
"MIT"
] | 1 | 2021-02-13T19:14:39.000Z | 2021-02-13T19:14:39.000Z | # -*- coding: UTF-8 -*-
"""
collector.xhn -
http://www.xinhuanet.com/
1.
http://qc.wa.news.cn/nodeart/list?nid=115093&pgnum=1&cnt=10000
http://www.xinhuanet.com/politics/qmtt/index.htm
====================================================================
"""
import requests
import re
from datetime import datetime
from bs4 import BeautifulSoup
from zb.crawlers.utils import get_header
import traceback
import pandas as pd
from tqdm import tqdm
import tma
home_url = "http://www.xinhuanet.com/"
def get_special_topics(pgnum=1):
    """Fetch one page of special-topic entries from the xinhuanet list API.

    :param pgnum: 1-based page number of the node-article listing.
    :return: list of dicts with keys Abstract/Author/LinkUrl/PubTime/
        Title/allPics, one per article.
    """
    url = ("http://qc.wa.news.cn/nodeart/list?"
           "nid=115093&pgnum=%s&cnt=200" % str(pgnum))
    raw = requests.get(url).text
    # The endpoint returns JSON-ish text with literal nulls; nulls are mapped
    # to '' before evaluating the payload, as the original code did.
    # NOTE(review): eval() on a network response is unsafe if the endpoint is
    # ever compromised — consider json.loads with explicit null handling.
    payload = eval(raw.replace("null", "\'\'"))
    assert payload['status'] == 0, ""
    wanted = ("Abstract", "Author", "LinkUrl", "PubTime", "Title", "allPics")
    return [{key: item[key] for key in wanted}
            for item in payload['data']['list']]
def get_article_detail(article_url):
    """Download one xinhuanet article page and extract its fields.

    :param article_url: full URL of the article page.
    :return: dict with keys url, title, pub_time, source, content.
    """
    page = requests.get(article_url, headers=get_header())
    soup = BeautifulSoup(page.content.decode('utf-8'), 'lxml')

    # Header block ("h-news"): first line is the title, second the publish
    # time, last the source name.
    header_lines = soup.find('div', {"class": "h-news"}).text.strip().split("\r\n")
    title = header_lines[0].strip()
    pub_time = header_lines[1].strip()
    source = header_lines[-1].strip()

    # Body ("p-detail"): drop full-width indentation, strip each line,
    # remove blanks, and rejoin into plain paragraphs.
    raw_body = soup.find('div', {"id": "p-detail"}).text.strip()
    raw_body = raw_body.replace("\u3000\u3000", "")
    stripped_lines = [line.strip() for line in raw_body.split("\n")]
    content = "\n".join(line for line in stripped_lines if line != "")

    return {
        "url": article_url,
        "title": title,
        "pub_time": pub_time,
        "source": source,
        "content": content
    }
| 28.26738 | 81 | 0.531782 |
f7dbb6eabf0492827bece2fbca9d7d345965609a | 995 | py | Python | tests/test_onetv.py | unlocKing/plugins | e5cee730c22a049cfd0e3873389c82e8ab5f7c41 | [
"BSD-2-Clause"
] | 2 | 2021-09-02T21:29:48.000Z | 2021-09-20T07:05:08.000Z | tests/test_onetv.py | unlocKing/plugins | e5cee730c22a049cfd0e3873389c82e8ab5f7c41 | [
"BSD-2-Clause"
] | null | null | null | tests/test_onetv.py | unlocKing/plugins | e5cee730c22a049cfd0e3873389c82e8ab5f7c41 | [
"BSD-2-Clause"
] | null | null | null | import unittest
from plugins.onetv import OneTV
| 36.851852 | 75 | 0.577889 |
f7dd193790b7ae7797daf8c7c2f3ca9a0623ed89 | 405 | py | Python | tests/test_plugins/pytester_example_dir/test_file_1.py | MORSECorp/snappiershot | acb6a8d01d4496abe0f2fe83c7e7af9cf77aac8e | [
"Apache-2.0"
] | 27 | 2020-10-15T18:36:25.000Z | 2022-03-02T19:11:44.000Z | tests/test_plugins/pytester_example_dir/test_file_1.py | MORSECorp/snappiershot | acb6a8d01d4496abe0f2fe83c7e7af9cf77aac8e | [
"Apache-2.0"
] | 33 | 2020-10-15T15:03:37.000Z | 2022-03-24T21:00:34.000Z | tests/test_plugins/pytester_example_dir/test_file_1.py | MORSECorp/snappiershot | acb6a8d01d4496abe0f2fe83c7e7af9cf77aac8e | [
"Apache-2.0"
] | 5 | 2020-10-15T16:30:00.000Z | 2022-03-30T15:07:28.000Z | """ This is a test file used for testing the pytest plugin. """
def test_function_passed(snapshot):
    """ The snapshot for this function is expected to exist. """
    # Single assertion against the previously recorded snapshot value.
    snapshot.assert_match(3 + 4j)
def test_function_new(snapshot):
    """ The snapshot for this function is expected to exist, but only one assertion is expected. """
    # Two assertions where the stored snapshot expects one -- presumably
    # exercises the plugin's handling of extra assertions; confirm intent.
    snapshot.assert_match(3 + 4j)
    snapshot.assert_match(3 + 4j)
| 31.153846 | 100 | 0.708642 |
f7de06300594a810a1f4175db45d6b833ced1a94 | 7,940 | py | Python | src/compas/geometry/pointclouds/pointcloud.py | Sam-Bouten/compas | 011c7779ded9b69bb602568b470bb0443e336f62 | [
"MIT"
] | null | null | null | src/compas/geometry/pointclouds/pointcloud.py | Sam-Bouten/compas | 011c7779ded9b69bb602568b470bb0443e336f62 | [
"MIT"
] | null | null | null | src/compas/geometry/pointclouds/pointcloud.py | Sam-Bouten/compas | 011c7779ded9b69bb602568b470bb0443e336f62 | [
"MIT"
] | null | null | null | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from random import uniform
from compas.geometry import transform_points
from compas.geometry import centroid_points
from compas.geometry import bounding_box
from compas.geometry import Primitive
from compas.geometry import Point
__all__ = ['Pointcloud']
def __eq__(self, other):
"""Is this pointcloud equal to the other pointcloud?
Two pointclouds are considered equal if they have the same number of points
and if the XYZ coordinates of the corresponding points are identical.
Parameters
----------
other : :class:`compas.geometry.Pointcloud` | list[[float, float, float] | :class:`compas.geometry.Point`]
The pointcloud to compare.
Returns
-------
bool
True if the pointclouds are equal.
False otherwise.
"""
if len(self) != len(other):
return False
A = sorted(self, key=lambda point: (point[0], point[1], point[2]))
B = sorted(other, key=lambda point: (point[0], point[1], point[2]))
return all(a == b for a, b in zip(A, B))
# ==========================================================================
# constructors
# ==========================================================================
# ==========================================================================
# methods
# ==========================================================================
def transform(self, T):
"""Apply a transformation to the pointcloud.
Parameters
----------
T : :class:`compas.geometry.Transformation`
The transformation.
Returns
-------
None
The cloud is modified in place.
"""
for index, point in enumerate(transform_points(self.points, T)):
self.points[index].x = point[0]
self.points[index].y = point[1]
self.points[index].z = point[2]
| 27.957746 | 114 | 0.500756 |
f7de36b7d46515af7a1b6676baaac3b4ccaf3705 | 4,366 | py | Python | oa/regex.py | Worteks/OrangeAssassin | 21baf0b84fbedd887f6d88e13c624f14fb0b5e06 | [
"Apache-2.0"
] | null | null | null | oa/regex.py | Worteks/OrangeAssassin | 21baf0b84fbedd887f6d88e13c624f14fb0b5e06 | [
"Apache-2.0"
] | null | null | null | oa/regex.py | Worteks/OrangeAssassin | 21baf0b84fbedd887f6d88e13c624f14fb0b5e06 | [
"Apache-2.0"
] | null | null | null | """Handle regex conversions."""
from builtins import object
import re
import operator
from functools import reduce
import oa.errors
# Map of perl flags and the corresponding re ones.
FLAGS = {
"i": re.IGNORECASE,
"s": re.DOTALL,
"m": re.MULTILINE,
"x": re.VERBOSE,
}
DELIMS = {
"/": "/",
"{": "}",
"%": "%",
"<": ">",
"'": "'",
"~": "~",
",": ",",
"!": "!",
";": ";",
}
# Regex substitution for Perl -> Python compatibility
_CONVERTS = (
(re.compile(r"""
# Python does not support local extensions so remove those. For example:
# (?i:test) becomes (?:test)
(?<=\(\?) # Look-behind and match (?
(([adlupimsx-]*?)|(\^[?^alupimsx]*?)) # Capture the extension
(?=:) # Look-ahead and match the :
""", re.VERBOSE), r""),
(re.compile(r"""
# Python doesn't have support for expression such as \b?
# Replace it with (\b)?
(\\b) # Capture group that matches \b or \B
(?=\?) # Look-ahead that matches ?
""", re.VERBOSE | re.IGNORECASE), r"(\1)"),
(re.compile(r"""
# Python doesn't have support for "independent" subexpression (?>)
# Replace those with non capturing groups (?:)
(?<=\(\?) # Look-behind and match (?
(>) # Match >
""", re.VERBOSE), r":"),
)
def perl2re(pattern, match_op="=~"):
    """Convert a Perl type regex to a Python one.

    Returns a MatchPattern for "=~" and a NotMatchPattern for "!~"
    (any other operator falls through and yields None, matching the
    historical behavior).  Raises oa.errors.InvalidRegex on bad input.
    """
    # Leading m/g/s markers (e.g. "m/.../") carry no information we need.
    pattern = pattern.strip().lstrip("mgs")
    delim = pattern[0]

    if delim not in DELIMS:
        raise oa.errors.InvalidRegex("Invalid regex delimiter %r in %r" %
                                     (delim, pattern))
    rev_delim = DELIMS[delim]

    try:
        pattern, flags_str = pattern.lstrip(delim).rsplit(rev_delim, 1)
    except ValueError:
        raise oa.errors.InvalidRegex("Invalid regex %r. Please make sure you "
                                     "have escaped all the special characters "
                                     "when you defined the regex in "
                                     "configuration file" % pattern)

    # Rewrite Perl-only constructs into Python-compatible equivalents.
    for conversion, replacement in _CONVERTS:
        pattern = conversion.sub(replacement, pattern)

    flags = 0
    for flag in flags_str:
        flags |= FLAGS.get(flag, 0)

    try:
        if match_op == "=~":
            return MatchPattern(re.compile(pattern, flags))
        if match_op == "!~":
            return NotMatchPattern(re.compile(pattern, flags))
    except re.error as e:
        raise oa.errors.InvalidRegex("Invalid regex %r: %s" % (pattern, e))
| 28.535948 | 81 | 0.574668 |
f7dec0cd3c585519d06741f3516a5564ea368e83 | 1,749 | py | Python | test_data/barometer_kalman.py | theo-brown/ahrs | cd9c9e0bbf9db7fd67a297e1aafa8518bf17050d | [
"MIT"
] | 1 | 2022-01-19T14:20:05.000Z | 2022-01-19T14:20:05.000Z | test_data/barometer_kalman.py | theo-brown/ahrs | cd9c9e0bbf9db7fd67a297e1aafa8518bf17050d | [
"MIT"
] | null | null | null | test_data/barometer_kalman.py | theo-brown/ahrs | cd9c9e0bbf9db7fd67a297e1aafa8518bf17050d | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from kalman_filter import KalmanFilter
raw_data = np.loadtxt("barometer_data.txt")
# Truncate raw data (it's super long)
raw_data = raw_data[:raw_data.size//4]
raw_data_step = np.loadtxt("barometer_data_step.txt")
# Build time axes; the 1/12.5 s step suggests a 12.5 Hz sample rate.
t1 = np.arange(0, raw_data.size/12.5, 1/12.5)
t2 = np.arange(0, raw_data_step.size/12.5, 1/12.5)
# Two panels: continuous recording (left) and step response (right).
fig1 = plt.figure("Data")
ax1 = fig1.add_subplot(121)
ax2 = fig1.add_subplot(122)
fig1.subplots_adjust(bottom=0.25)
[unfiltered_raw_line] = ax1.plot(t1, raw_data)
[unfiltered__step_line] = ax2.plot(t2, raw_data_step)
# Initial filter tuning: P0 = initial covariance, Q0 = process noise.
P0 = 2
Q0 = 1e-4
# NOTE(review): filter_data and sliders_on_changed are defined elsewhere in
# this file; filter_data presumably wraps KalmanFilter and returns
# (filtered_signal, ...) -- confirm.
[filtered_raw_line] = ax1.plot(t1, filter_data(raw_data, 0, P0, Q0, R=raw_data.var())[0])
[filtered_step_line] = ax2.plot(t2, filter_data(raw_data_step, 0, P0, Q0, R=raw_data.var())[0])
# Sliders let the user re-tune P and Q interactively.
P_slider_ax = fig1.add_axes([0.25, 0.15, 0.65, 0.03])
Q_slider_ax = fig1.add_axes([0.25, 0.1, 0.65, 0.03])
P_slider = Slider(P_slider_ax, 'P', 0.5, 5, valinit=P0)
Q_slider = Slider(Q_slider_ax, 'Q', 1e-4, 1e-3, valinit=Q0)
P_slider.on_changed(sliders_on_changed)
Q_slider.on_changed(sliders_on_changed)
plt.show()
f7df479cf0eb03f9edb6d36fe5773b716ab0594f | 1,694 | py | Python | number-of-orders-in-the-backlog/number_of_orders_in_the_backlog.py | joaojunior/hackerrank | a5ee0449e791535930b8659dfb7dddcf9e1237de | [
"MIT"
] | null | null | null | number-of-orders-in-the-backlog/number_of_orders_in_the_backlog.py | joaojunior/hackerrank | a5ee0449e791535930b8659dfb7dddcf9e1237de | [
"MIT"
] | null | null | null | number-of-orders-in-the-backlog/number_of_orders_in_the_backlog.py | joaojunior/hackerrank | a5ee0449e791535930b8659dfb7dddcf9e1237de | [
"MIT"
] | 1 | 2019-06-19T00:51:02.000Z | 2019-06-19T00:51:02.000Z | import heapq
from typing import List
| 40.333333 | 78 | 0.432113 |
f7df8183ed1dfeac2b83cb6b6b173f961a29bd8f | 2,585 | py | Python | scripts/plotresults.py | rafzi/DeepThings | d12e8e8ad9f9ebaa3b0d55f547c0b3c7f1baf636 | [
"MIT"
] | 1 | 2020-02-28T10:07:47.000Z | 2020-02-28T10:07:47.000Z | scripts/plotresults.py | rafzi/DeepThings | d12e8e8ad9f9ebaa3b0d55f547c0b3c7f1baf636 | [
"MIT"
] | null | null | null | scripts/plotresults.py | rafzi/DeepThings | d12e8e8ad9f9ebaa3b0d55f547c0b3c7f1baf636 | [
"MIT"
] | 2 | 2020-03-10T15:17:55.000Z | 2020-03-17T15:37:37.000Z | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# 1: YOLOv2, 2: AlexNet, 3: VGG-16, 4: GoogLeNet
model = 4
LINEPLOT = True

# Each sheet of t.xlsx holds the measurements for one network.
dfs = pd.read_excel("t.xlsx", sheet_name=None, header=None)
# Map the numeric selector above to the matching sheet name.
if model == 1:
    ms = "YOLOv2"
elif model == 2:
    ms = "AlexNet"
elif model == 3:
    ms = "VGG-16"
elif model == 4:
    ms = "GoogLeNet"
sh = dfs[ms]
print(sh)

# X axis: number of devices (1..6).
labels = ["1", "2", "3", "4", "5", "6"]
x = np.arange(len(labels))

plt.rcParams.update({"font.size": 11})
fig, ax = plt.subplots()
plt.subplots_adjust(top=0.95, right=0.95)
# Workaround for this: https://bugs.python.org/issue32790
def autolabel(rects):
    """Attach a text label above each bar in *rects*, displaying its height."""
    for rect in rects:
        height = rect.get_height()
        # NOTE(review): fmtFlt is defined elsewhere in this file; presumably
        # it formats `height` to 3 significant digits -- confirm.
        ax.annotate(fmtFlt(height, 3),
                    xy=(rect.get_x() + 1.2*rect.get_width() / 2, height),
                    xytext=(0, 3),  # 3 points vertical offset
                    textcoords="offset points",
                    ha='center', va='bottom', rotation=90, fontsize=9.5)
# 1: 1gbit, 2: 100mbit, 3: 10mbit
# NOTE(review): addData is defined elsewhere in this file; its second
# argument presumably toggles between two plotted series -- confirm.
addData(1, True)
addData(1, False)
addData(2, True)
addData(2, False)
addData(3, True)
addData(3, False)

#plt.ylim(plt.ylim()*1.1)
# Leave 5% headroom above the tallest bar for the rotated value labels.
ybot, ytop = plt.ylim()
plt.ylim(ybot, ytop*1.05)

ax.set_xlabel("Number of devices")
ax.set_ylabel("Run time speedup over one device")
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()

plt.savefig("plot_runtime.pdf")
plt.show()
| 26.927083 | 82 | 0.573308 |
f7e1dfd58619e2e27eaf63ac95f9bbd2215fc5c4 | 565 | py | Python | setup.py | oubiwann/myriad-worlds | bfbbab713e35c5700e37158a892c3a66a8c9f37a | [
"MIT"
] | 3 | 2015-01-29T05:24:32.000Z | 2021-05-10T01:47:36.000Z | setup.py | oubiwann/myriad-worlds | bfbbab713e35c5700e37158a892c3a66a8c9f37a | [
"MIT"
] | null | null | null | setup.py | oubiwann/myriad-worlds | bfbbab713e35c5700e37158a892c3a66a8c9f37a | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from myriad import meta
from myriad.util import dist
# Standard setuptools metadata, all sourced from myriad.meta so the package
# description lives in one place.
setup(
    name=meta.display_name,
    version=meta.version,
    description=meta.description,
    long_description=meta.long_description,
    author=meta.author,
    author_email=meta.author_email,
    url=meta.url,
    license=meta.license,
    # Ship the twisted.plugins dropin so twistd can discover the plugin.
    packages=find_packages() + ["twisted.plugins"],
    package_data={
        "twisted": ['plugins/example_server.py']
    },
    install_requires=meta.requires,
    zip_safe=False
)
# Regenerate Twisted's plugin cache so the freshly installed plugin is seen.
dist.refresh_plugin_cache()
| 21.730769 | 51 | 0.709735 |
f7e2347893dbbd12b3c90e6ec6f949cb83aa2a4f | 1,110 | py | Python | val_resnet.py | AlexKhakhlyuk/fixedconv | bf3848c3fd60af2e617f2118064ee6f551b45d95 | [
"Apache-1.1"
] | 1 | 2020-05-05T07:20:25.000Z | 2020-05-05T07:20:25.000Z | val_resnet.py | khakhlyuk/fixedconv | bf3848c3fd60af2e617f2118064ee6f551b45d95 | [
"Apache-1.1"
] | null | null | null | val_resnet.py | khakhlyuk/fixedconv | bf3848c3fd60af2e617f2118064ee6f551b45d95 | [
"Apache-1.1"
] | null | null | null | from subprocess import run
# python -u val_resnet.py
cuda = 0  # which gpu to use
dataset = 'cifar10'
logs_path = 'logs_resnet' + '_' + dataset
manualSeed = 99
workers = 0


def _validation_command(model, k=None):
    """Build the argv list for one validate_resnet.py invocation."""
    argv = [
        'python', '-u', 'validate_resnet.py',
        '--dataset=' + dataset,
        '--model=' + model,
    ]
    if k is not None:
        argv.append('-k=' + str(k))
    argv += [
        '-c=' + str(cuda),
        '--workers=' + str(workers),
        '--manualSeed=' + str(manualSeed),
        '--logs_path=' + logs_path,
    ]
    return argv


# Baseline: validate each plain model once.
for model in ['resnet20', 'preact_resnet20']:
    run(_validation_command(model))

# Sweep kernel factor k and the --ff switch for each model.
for model in ['resnet20', 'preact_resnet20']:
    for k in [1, 3]:
        for ff in [False, True]:
            argv = _validation_command(model, k)
            argv.append('-f')  # the 'f' flag was unconditionally set upstream
            if ff:
                argv.append('--ff')
            run(argv)
f7e3584c6b4d27959b077f55eb4556611369a6be | 466 | py | Python | temboo/core/Library/KhanAcademy/Badges/__init__.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 7 | 2016-03-07T02:07:21.000Z | 2022-01-21T02:22:41.000Z | temboo/core/Library/KhanAcademy/Badges/__init__.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | null | null | null | temboo/core/Library/KhanAcademy/Badges/__init__.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 8 | 2016-06-14T06:01:11.000Z | 2020-04-22T09:21:44.000Z | from temboo.Library.KhanAcademy.Badges.AllCategories import AllCategories, AllCategoriesInputSet, AllCategoriesResultSet, AllCategoriesChoreographyExecution
from temboo.Library.KhanAcademy.Badges.BadgesByCategory import BadgesByCategory, BadgesByCategoryInputSet, BadgesByCategoryResultSet, BadgesByCategoryChoreographyExecution
from temboo.Library.KhanAcademy.Badges.GetBadges import GetBadges, GetBadgesInputSet, GetBadgesResultSet, GetBadgesChoreographyExecution
| 116.5 | 171 | 0.909871 |
f7e5ec76a74f735b8085dae26118d20f0eea400d | 453 | py | Python | akagi/data_sources/spreadsheet_data_source.py | pauchan/akagi | 7cf1f5a52b8f1ebfdc74a527bf6b26254f99343b | [
"MIT"
] | 26 | 2017-05-18T11:52:04.000Z | 2018-08-25T22:03:07.000Z | akagi/data_sources/spreadsheet_data_source.py | pauchan/akagi | 7cf1f5a52b8f1ebfdc74a527bf6b26254f99343b | [
"MIT"
] | 325 | 2017-05-08T07:22:28.000Z | 2022-03-31T15:43:18.000Z | akagi/data_sources/spreadsheet_data_source.py | pauchan/akagi | 7cf1f5a52b8f1ebfdc74a527bf6b26254f99343b | [
"MIT"
] | 7 | 2017-05-02T02:06:15.000Z | 2020-04-09T05:32:11.000Z | from akagi.data_source import DataSource
from akagi.data_file import DataFile
| 28.3125 | 72 | 0.732892 |
f7e673c1a03cc4b207464e8a0e2d7bce749cb8ba | 7,401 | py | Python | stanCode_projects/my_drawing/my_drawing.py | kenhuang1204/stanCode_projects | f697a34a1c54a864c1140cb0f2f76e2d70b45698 | [
"MIT"
] | null | null | null | stanCode_projects/my_drawing/my_drawing.py | kenhuang1204/stanCode_projects | f697a34a1c54a864c1140cb0f2f76e2d70b45698 | [
"MIT"
] | null | null | null | stanCode_projects/my_drawing/my_drawing.py | kenhuang1204/stanCode_projects | f697a34a1c54a864c1140cb0f2f76e2d70b45698 | [
"MIT"
] | null | null | null | """
File: my_drawing.py
Name:
----------------------
TODO:
"""
from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc
from campy.graphics.gwindow import GWindow
def _add_filled_oval(window, width, height, x, y, fill, outline=None):
    """Create, fill, and add a GOval; set the outline color only if given."""
    oval = GOval(width, height, x=x, y=y)
    oval.filled = True
    oval.fill_color = fill
    if outline is not None:
        oval.color = outline
    window.add(oval)
    return oval


def _add_filled_polygon(window, vertices, fill, outline=None):
    """Create, fill, and add a GPolygon from a list of (x, y) vertices."""
    poly = GPolygon()
    for vertex in vertices:
        poly.add_vertex(vertex)
    poly.filled = True
    poly.fill_color = fill
    if outline is not None:
        poly.color = outline
    window.add(poly)
    return poly


def _add_label(window, text, x, y, font):
    """Create and add a GLabel with the given font."""
    label = GLabel(text, x=x, y=y)
    label.font = font
    window.add(label)
    return label


def main():
    """
    Meet Snorlax () of stanCode! He dreams of Python when he sleeps. Be like Snorlax.

    Shapes are added in back-to-front order, so the sequence of add calls
    below determines the z-order and must be preserved.
    """
    window = GWindow(width=300, height=300)

    # Head: dark outer oval, salmon inner face, dark forehead patch.
    _add_filled_oval(window, 120, 75, (window.width - 120) / 2, 50,
                     'darkcyan', 'darkcyan')
    _add_filled_oval(window, 100, 65, (window.width - 100) / 2, 60,
                     'lightsalmon', 'lightsalmon')
    _add_filled_polygon(window, [(135, 60), (165, 60), (150, 68)],
                        'darkcyan', 'darkcyan')

    # Ears.
    _add_filled_polygon(window, [(113, 35), (95, 75), (140, 50)],
                        'darkcyan', 'darkcyan')
    _add_filled_polygon(window, [(187, 35), (205, 75), (160, 50)],
                        'darkcyan', 'darkcyan')

    # Eyes, mouth, and teeth.
    window.add(GLine(120, 75, 140, 75))
    window.add(GLine(180, 75, 160, 75))
    window.add(GLine(135, 85, 165, 85))
    _add_filled_polygon(window, [(135, 84), (139, 84), (137, 80)],
                        'white', 'white')
    _add_filled_polygon(window, [(165, 84), (161, 84), (163, 80)],
                        'white', 'white')

    # Arms and body; the belly keeps the default outline color.
    _add_filled_oval(window, 100, 45, 25, 98, 'darkcyan', 'darkcyan')
    _add_filled_oval(window, 100, 45, 175, 98, 'darkcyan', 'darkcyan')
    _add_filled_oval(window, 200, 160, (window.width - 200) / 2, 95,
                     'darkcyan', 'darkcyan')
    _add_filled_oval(window, 176, 120, (window.width - 176) / 2, 95,
                     'lightsalmon')

    # Claws on both hands (white fill, default outline), right hand first.
    claws = [
        [(38, 100), (44, 102), (40, 106)],
        [(32, 102), (38, 104), (35, 108)],
        [(28, 104), (34, 106), (31, 110)],
        [(24, 109), (30, 111), (27, 115)],
        [(19, 122), (25, 121), (28, 127)],
        [(262, 100), (256, 102), (260, 106)],
        [(268, 102), (262, 104), (265, 108)],
        [(272, 104), (266, 106), (269, 110)],
        [(276, 109), (270, 111), (273, 115)],
        [(281, 122), (275, 121), (272, 127)],
    ]
    for claw in claws:
        _add_filled_polygon(window, claw, 'white')

    # Right foot: sole, pad, and toenails.
    _add_filled_oval(window, 65, 60, 50, 220, 'lightsalmon', 'lightsalmon')
    _add_filled_oval(window, 45, 40, 65, 235, 'Chocolate', 'Chocolate')
    for nail in [[(80, 210), (88, 223), (78, 224)],
                 [(52, 220), (65, 228), (57, 235)],
                 [(43, 250), (54, 248), (52, 258)]]:
        _add_filled_polygon(window, nail, 'white')

    # Left foot: sole, pad, and toenails.
    _add_filled_oval(window, 65, 60, 185, 220, 'lightsalmon', 'lightsalmon')
    _add_filled_oval(window, 45, 40, 190, 235, 'Chocolate', 'Chocolate')
    for nail in [[(220, 210), (212, 223), (222, 224)],
                 [(248, 220), (235, 228), (243, 235)],
                 [(257, 250), (246, 248), (248, 258)]]:
        _add_filled_polygon(window, nail, 'white')

    # Branding and the dream bubble.
    _add_label(window, 'stanCode', 123, 185, '-8-bold')
    window.add(GOval(10, 10, x=140, y=35))
    window.add(GOval(15, 15, x=155, y=23))
    window.add(GOval(20, 20, x=175, y=12))
    window.add(GOval(95, 85, x=200, y=5))
    _add_label(window, 'Python', 207, 50, 'Courier-18')
    _add_label(window, 'Python', 220, 80, 'Courier-13')
    _add_label(window, 'Python', 242, 60, 'Courier-8')


if __name__ == '__main__':
    main()
| 28.910156 | 88 | 0.638427 |
f7e7736eb2b76396a07e8f09a10926efaa231ede | 748 | py | Python | kivy/core/clipboard/clipboard_xsel.py | CharaD7/kivy | 85065fe6633f5ac831c193dc84e3f636b789cc3a | [
"MIT"
] | 2 | 2021-05-16T09:46:14.000Z | 2021-11-17T11:23:15.000Z | kivy/core/clipboard/clipboard_xsel.py | CharaD7/kivy | 85065fe6633f5ac831c193dc84e3f636b789cc3a | [
"MIT"
] | 1 | 2016-11-11T13:45:42.000Z | 2016-11-11T13:45:42.000Z | kivy/core/clipboard/clipboard_xsel.py | CharaD7/kivy | 85065fe6633f5ac831c193dc84e3f636b789cc3a | [
"MIT"
] | 2 | 2017-03-09T14:27:03.000Z | 2019-05-03T08:36:02.000Z | '''
Clipboard xsel: an implementation of the Clipboard using xsel command line tool.
'''
__all__ = ('ClipboardXsel', )
from kivy.utils import platform
from kivy.core.clipboard._clipboard_ext import ClipboardExternalBase
if platform != 'linux':
    raise SystemError('unsupported platform for xsel clipboard')
try:
    import subprocess
    # Probe for the xsel binary at import time by running it once; a missing
    # executable makes Popen raise, so this backend fails to load.
    p = subprocess.Popen(['xsel'], stdout=subprocess.PIPE)
    p.communicate()
except:
    # Propagate the failure so the clipboard machinery can fall back to a
    # different provider instead of using a broken one.
    raise
| 24.933333 | 80 | 0.67246 |
f7ea40e807af6204059adeba1056db95e63b5bcf | 492 | py | Python | plugins/hashsum_download/girder_hashsum_download/settings.py | JKitok/girder | 317962d155fc9811d25e5f33bd3e849c4ac96645 | [
"Apache-2.0"
] | 395 | 2015-01-12T19:20:13.000Z | 2022-03-30T05:40:40.000Z | plugins/hashsum_download/girder_hashsum_download/settings.py | JKitok/girder | 317962d155fc9811d25e5f33bd3e849c4ac96645 | [
"Apache-2.0"
] | 2,388 | 2015-01-01T20:09:19.000Z | 2022-03-29T16:49:14.000Z | plugins/hashsum_download/girder_hashsum_download/settings.py | JKitok/girder | 317962d155fc9811d25e5f33bd3e849c4ac96645 | [
"Apache-2.0"
] | 177 | 2015-01-04T14:47:00.000Z | 2022-03-25T09:01:51.000Z | from girder.exceptions import ValidationException
from girder.utility import setting_utilities
| 27.333333 | 85 | 0.802846 |
f7ea6e1ab40e2fa5eea55fc79f11b658b6c35f7e | 44,837 | py | Python | forager_server/forager_server_api/views.py | jeremyephron/forager | 6db1590686e0e34b2e42ff5deb70f62fcee73d7d | [
"MIT"
] | 1 | 2020-12-01T23:25:58.000Z | 2020-12-01T23:25:58.000Z | forager_server/forager_server_api/views.py | jeremyephron/forager | 6db1590686e0e34b2e42ff5deb70f62fcee73d7d | [
"MIT"
] | 2 | 2020-10-07T01:03:06.000Z | 2020-10-12T19:08:55.000Z | forager_server/forager_server_api/views.py | jeremyephron/forager | 6db1590686e0e34b2e42ff5deb70f62fcee73d7d | [
"MIT"
] | null | null | null | from collections import defaultdict, namedtuple
from dataclasses import dataclass
import distutils.util
import functools
import itertools
import json
import math
import operator
import os
import random
import uuid
import shutil
import logging
import time
from typing import List, Dict, NamedTuple, Optional
from django.db.models import Q
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404, get_list_or_404
from django.conf import settings
from google.cloud import storage
from rest_framework.decorators import api_view
import requests
from expiringdict import ExpiringDict
from .models import (
Dataset,
DatasetItem,
Category,
Mode,
User,
Annotation,
DNNModel,
CategoryCount,
)
BUILTIN_MODES = ["POSITIVE", "NEGATIVE", "HARD_NEGATIVE", "UNSURE"]
logger = logging.getLogger(__name__)
#
# V2 ENDPOINTS
# TODO(mihirg): Make these faster
#
# A (category, value) pair describing a whole-image tag.
Tag = namedtuple("Tag", "category value")  # type: NamedTuple[str, str]
# A tag plus box corner coordinates (x1, y1)-(x2, y2).
Box = namedtuple(
    "Box", "category value x1 y1 x2 y2"
)  # type: NamedTuple[str, str, float, float, float, float]
# Alias for the primary-key type used throughout this module.
PkType = int

# TODO(fpoms): this needs to be wrapped in a lock so that
# updates are atomic across concurrent requests
# Cache of recent query result sets; entries expire after 30 minutes and at
# most 50 sets are held at once.
current_result_sets = ExpiringDict(
    max_age_seconds=30 * 60,
    max_len=50,
)  # type: Dict[str, ResultSet]

#
# ACTIVE VALIDATION
#

# Marker string for negatives produced during model validation -- presumably
# used as an annotation type/mode elsewhere; confirm against consumers.
VAL_NEGATIVE_TYPE = "model_val_negative"
# DATASET INFO
def model_info(model):
    """Serialize a DNNModel row into the dict shape the client expects.

    Returns None when no model is given.
    """
    if model is None:
        return None

    spec = model.category_spec
    pos_tags = parse_tag_set_from_query_v2(spec.get("pos_tags", []))
    neg_tags = parse_tag_set_from_query_v2(spec.get("neg_tags", []))
    extra_negs = parse_tag_set_from_query_v2(
        spec.get("augment_negs_include", [])
    )

    return {
        "model_id": model.model_id,
        "timestamp": model.last_updated,
        "has_checkpoint": model.checkpoint_path is not None,
        "has_output": model.output_directory is not None,
        "pos_tags": serialize_tag_set_for_client_v2(pos_tags),
        # The explicitly-included augmentation negatives are presented to the
        # client merged into the negative tag set.
        "neg_tags": serialize_tag_set_for_client_v2(neg_tags | extra_negs),
        "augment_negs": spec.get("augment_negs", False),
        "epoch": model.epoch,
    }
def bulk_add_single_tag_annotations_v2(payload, images):
    '''Adds annotations for a single tag to many dataset items.

    Args:
        payload: request dict with "user", "category", "mode", and an
            optional "created_by" key.
        images: DatasetItem instances to tag; may be empty.

    Returns:
        The number of annotations created (0 when images is empty).
    '''
    if not images:
        return 0

    user_email = payload["user"]
    category_name = payload["category"]
    mode_name = payload["mode"]
    created_by = payload.get("created_by",
                             "tag" if len(images) == 1 else "tag-bulk")

    # The early return above guarantees at least one image, so the dataset
    # can be read from the first item directly (the old `dataset = None`
    # fallback was unreachable).
    dataset = images[0].dataset

    user, _ = User.objects.get_or_create(email=user_email)
    category, _ = Category.objects.get_or_create(name=category_name)
    mode, _ = Mode.objects.get_or_create(name=mode_name)

    # Replace any existing per-frame (non-box) annotations for this category.
    Annotation.objects.filter(
        dataset_item__in=images, category=category, is_box=False).delete()
    # TODO: Add an actual endpoint to delete annotations (probably by pk); don't rely
    # on this hacky "TOMBSTONE" string

    annotations = [
        Annotation(
            dataset_item=di,
            user=user,
            category=category,
            mode=mode,
            is_box=False,
            misc_data={"created_by": created_by},
        )
        for di in images
    ]
    bulk_add_annotations_v2(dataset, annotations)

    return len(annotations)
def bulk_add_multi_annotations_v2(payload : Dict):
    '''Adds multiple annotations for the same dataset and user to the database
    at once.

    Expects payload keys "dataset", "user", and "annotations" (a list of
    dicts, each with "category", "mode", either "identifier" or "pk", and
    box coordinates when "is_box" is true), plus an optional "created_by".
    Returns the number of annotations created.
    '''
    dataset_name = payload["dataset"]
    dataset = get_object_or_404(Dataset, name=dataset_name)
    user_email = payload["user"]
    user, _ = User.objects.get_or_create(email=user_email)
    created_by = payload.get("created_by",
                             "tag" if len(payload["annotations"]) == 1 else
                             "tag-bulk")

    # Get pks: resolve string identifiers to DatasetItem primary keys with a
    # single query instead of one lookup per annotation.
    idents = [ann['identifier'] for ann in payload["annotations"]
              if 'identifier' in ann]
    di_pks = list(DatasetItem.objects.filter(
        dataset=dataset, identifier__in=idents
    ).values_list("pk", "identifier"))
    ident_to_pk = {ident: pk for pk, ident in di_pks}

    cats = {}   # category name -> Category row (memoized get_or_create)
    modes = {}  # mode name -> Mode row (memoized get_or_create)
    to_delete = defaultdict(set)  # Category -> item pks whose old per-frame annotations are replaced
    annotations = []
    for ann in payload["annotations"]:
        db_ann = Annotation()
        category_name = ann["category"]
        mode_name = ann["mode"]
        if category_name not in cats:
            cats[category_name] = Category.objects.get_or_create(
                name=category_name)[0]
        if mode_name not in modes:
            modes[mode_name] = Mode.objects.get_or_create(
                name=mode_name)[0]
        if "identifier" in ann:
            # NOTE(review): raises KeyError if the identifier is not in this
            # dataset -- presumably callers guarantee it exists; confirm.
            pk = ident_to_pk[ann["identifier"]]
        else:
            pk = ann["pk"]
        db_ann.dataset_item_id = pk
        db_ann.user = user
        db_ann.category = cats[category_name]
        db_ann.mode = modes[mode_name]
        db_ann.is_box = ann.get("is_box", False)
        if db_ann.is_box:
            db_ann.bbox_x1 = ann["x1"]
            db_ann.bbox_y1 = ann["y1"]
            db_ann.bbox_x2 = ann["x2"]
            db_ann.bbox_y2 = ann["y2"]
        else:
            to_delete[db_ann.category].add(pk)
        db_ann.misc_data={"created_by": created_by}
        annotations.append(db_ann)

    for cat, pks in to_delete.items():
        # Delete per-frame annotations for the category if they exist since
        # we should only have on mode per image
        Annotation.objects.filter(
            category=cat, dataset_item_id__in=pks, is_box=False).delete()
    # TODO: Add an actual endpoint to delete annotations (probably by pk); don't rely
    # on this hacky "TOMBSTONE" string

    bulk_add_annotations_v2(dataset, annotations)

    return len(annotations)
def bulk_add_annotations_v2(dataset, annotations):
    '''Handles book keeping for adding many annotations at once.

    Inserts the annotations in a single bulk_create and then bumps the
    per-(category, mode) CategoryCount rows for the dataset.
    '''
    Annotation.objects.bulk_create(annotations)

    # Tally how many new annotations land in each (category, mode) bucket.
    tally = defaultdict(int)
    for annotation in annotations:
        tally[(annotation.category, annotation.mode)] += 1

    for (category, mode), added in tally.items():
        row, _ = CategoryCount.objects.get_or_create(
            dataset=dataset,
            category=category,
            mode=mode
        )
        row.count += added
        row.save()
f7eab2118d85cfe10c666d128c82a3c415e87f34 | 2,632 | py | Python | ccmlib/cluster_factory.py | justinchuch/ccm | 808b6ca13526785b0fddfe1ead2383c060c4b8b6 | [
"Apache-2.0"
] | 626 | 2015-01-01T18:11:03.000Z | 2017-12-19T00:06:49.000Z | ccmlib/cluster_factory.py | justinchuch/ccm | 808b6ca13526785b0fddfe1ead2383c060c4b8b6 | [
"Apache-2.0"
] | 358 | 2015-01-21T17:06:45.000Z | 2017-12-20T16:03:01.000Z | ccmlib/cluster_factory.py | justinchuch/ccm | 808b6ca13526785b0fddfe1ead2383c060c4b8b6 | [
"Apache-2.0"
] | 172 | 2015-01-02T21:40:45.000Z | 2017-12-19T20:17:49.000Z |
from __future__ import absolute_import
import os
import yaml
from ccmlib import common, extension, repository
from ccmlib.cluster import Cluster
from ccmlib.dse_cluster import DseCluster
from ccmlib.node import Node
from distutils.version import LooseVersion #pylint: disable=import-error, no-name-in-module
| 39.878788 | 150 | 0.628799 |
f7eb16ad3bcd19920bd13a45530065dd321f93c0 | 9,872 | py | Python | causalnex/structure/pytorch/dist_type/_base.py | Rishab26/causalnex | 127d9324a3d68c1795299c7522f22cdea880f344 | [
"Apache-2.0"
] | 1,523 | 2020-01-28T12:37:48.000Z | 2022-03-31T09:27:58.000Z | causalnex/structure/pytorch/dist_type/_base.py | Rishab26/causalnex | 127d9324a3d68c1795299c7522f22cdea880f344 | [
"Apache-2.0"
] | 124 | 2020-01-28T15:12:07.000Z | 2022-03-31T18:59:16.000Z | causalnex/structure/pytorch/dist_type/_base.py | Rishab26/causalnex | 127d9324a3d68c1795299c7522f22cdea880f344 | [
"Apache-2.0"
] | 169 | 2020-01-28T15:13:53.000Z | 2022-03-30T21:04:02.000Z | # Copyright 2019-2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
``causalnex.pytorch.dist_type._base`` defines the distribution type class interface and default behavior.
"""
import itertools
from abc import ABCMeta, abstractmethod
from copy import deepcopy
from typing import Dict, List, Tuple
import numpy as np
import torch
from causalnex.structure.structuremodel import StructureModel
| 31.742765 | 105 | 0.630976 |
f7ec17b78bb1ba2ad0135e9a1b1bf5b7c8916ff3 | 4,225 | py | Python | src/cmdsh/utils.py | kotfu/cmdsh | c9083793de9117e4c5c4dfcccdeee1b83a0be7ab | [
"MIT"
] | null | null | null | src/cmdsh/utils.py | kotfu/cmdsh | c9083793de9117e4c5c4dfcccdeee1b83a0be7ab | [
"MIT"
] | null | null | null | src/cmdsh/utils.py | kotfu/cmdsh | c9083793de9117e4c5c4dfcccdeee1b83a0be7ab | [
"MIT"
] | null | null | null | #
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Jared Crapo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
Utility functions (not classes)
"""
import inspect
import types
from typing import Callable
def validate_callable_param_count(func: Callable, count: int) -> None:
"""Ensure a function has the given number of parameters."""
signature = inspect.signature(func)
# validate that the callable has the right number of parameters
nparam = len(signature.parameters)
if nparam != count:
raise TypeError('{} has {} positional arguments, expected {}'.format(
func.__name__,
nparam,
count,
))
def validate_callable_argument(func, argnum, typ) -> None:
"""Validate that a certain argument of func is annotated for a specific type"""
signature = inspect.signature(func)
paramname = list(signature.parameters.keys())[argnum-1]
param = signature.parameters[paramname]
if param.annotation != typ:
raise TypeError('argument {} of {} has incompatible type {}, expected {}'.format(
argnum,
func.__name__,
param.annotation,
typ.__name__,
))
def validate_callable_return(func, typ) -> None:
"""Validate that func is annotated to return a specific type"""
signature = inspect.signature(func)
if typ:
typname = typ.__name__
else:
typname = 'None'
if signature.return_annotation != typ:
raise TypeError("{} must declare return a return type of '{}'".format(
func.__name__,
typname,
))
def rebind_method(method, obj) -> None:
"""Rebind method from one object to another
Call it something like this:
rebind_method(obj1, obj2.do_command)
This rebinds the ``do_command`` method from obj2 to obj1. Meaning
after this function call you can:
obj1.do_command()
This works only on instantiated objects, not on classes.
"""
#
# this is dark python magic
#
# if we were doing this in a hardcoded way, we might do:
#
# obj.method_name = types.MethodType(self.method_name.__func__, obj)
#
# TODO add force keyword parameter which defaults to false. If false, raise an
# exception if the method already exists on obj
method_name = method.__name__
setattr(obj, method_name, types.MethodType(method.__func__, obj))
def bind_function(func, obj) -> None:
"""Bind a function to an object
You must define func with a ``self`` parameter, which is gonna look wierd:
def myfunc(self, param):
return param
shell = cmdsh.Shell()
utils.bind_function(myfunc, shell)
You can use this function to bind a function to a class, so that all future
objects of that class have the method:
cmdsh.utils.bind_function(cmdsh.parsers.SimpleParser.parse, cmdsh.Shell)
"""
#
# this is dark python magic
#
# if we were doing this in a hardcoded way, we would:
#
# obj.method_name = types.Methodtype(func, obj)
#
func_name = func.__name__
setattr(obj, func_name, types.MethodType(func, obj))
# TODO write bind_attribute()
| 32.5 | 89 | 0.680947 |
f7ecb294c442659591e90f954f3dc3437349ef17 | 4,992 | py | Python | tensorflow/python/tpu/tpu_outside_compilation_test.py | Arushacked/tensorflow | 9abd61ae0b2d239d3060cdd3d46b54a105159828 | [
"Apache-2.0"
] | 78 | 2020-08-04T12:36:25.000Z | 2022-03-25T04:23:40.000Z | tensorflow/python/tpu/tpu_outside_compilation_test.py | Arushacked/tensorflow | 9abd61ae0b2d239d3060cdd3d46b54a105159828 | [
"Apache-2.0"
] | 2 | 2021-11-10T20:08:14.000Z | 2022-02-10T02:44:26.000Z | tensorflow/python/tpu/tpu_outside_compilation_test.py | Arushacked/tensorflow | 9abd61ae0b2d239d3060cdd3d46b54a105159828 | [
"Apache-2.0"
] | 25 | 2020-08-31T12:21:19.000Z | 2022-03-20T05:16:32.000Z | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPU outside compilation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import tpu_strategy as tpu_lib
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_strategy_util
FLAGS = flags.FLAGS
flags.DEFINE_string("tpu", "", "Name of TPU to connect to.")
flags.DEFINE_string("project", None, "Name of GCP project with TPU.")
flags.DEFINE_string("zone", None, "Name of GCP zone with TPU.")
if __name__ == "__main__":
test.main()
| 29.192982 | 80 | 0.698317 |
f7eccedc6580e295788f95c53fa5d25556b9e059 | 1,338 | py | Python | Source/Oyooni/Text Recognition/server.py | Oyooni5245/Oyooni | a00b845ac97eaee74d40cab563b9532fdeca97c8 | [
"MIT"
] | null | null | null | Source/Oyooni/Text Recognition/server.py | Oyooni5245/Oyooni | a00b845ac97eaee74d40cab563b9532fdeca97c8 | [
"MIT"
] | null | null | null | Source/Oyooni/Text Recognition/server.py | Oyooni5245/Oyooni | a00b845ac97eaee74d40cab563b9532fdeca97c8 | [
"MIT"
] | null | null | null | from flask import Flask, request
from flask_restful import Resource, Api
from test import get_models, getTextFromImage
from testDocument import getText
from time import time
app = Flask(__name__)
api = Api(app)
net, refine_net = get_models()
api.add_resource(TextRecognizerService, "/recognize-text")
if __name__ == "__main__":
port = 5006
app.run(debug=True, port=port)
| 26.76 | 73 | 0.523916 |
f7ee1b4e15755381cc1c76d8d915f30011f727a3 | 17,132 | py | Python | varats/varats/plots/blame_interaction_graph_plots.py | Kaufi-Jonas/VaRA-Tool-Suite | 31563896ad7dd1c1a147202b0c5c9fffe772b803 | [
"BSD-2-Clause"
] | null | null | null | varats/varats/plots/blame_interaction_graph_plots.py | Kaufi-Jonas/VaRA-Tool-Suite | 31563896ad7dd1c1a147202b0c5c9fffe772b803 | [
"BSD-2-Clause"
] | null | null | null | varats/varats/plots/blame_interaction_graph_plots.py | Kaufi-Jonas/VaRA-Tool-Suite | 31563896ad7dd1c1a147202b0c5c9fffe772b803 | [
"BSD-2-Clause"
] | null | null | null | """Module for BlameInteractionGraph plots."""
import typing as tp
from datetime import datetime
from pathlib import Path
import click
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
import plotly.offline as offply
from matplotlib import style
from varats.data.reports.blame_interaction_graph import (
create_blame_interaction_graph,
CIGNodeAttrs,
CIGEdgeAttrs,
AIGNodeAttrs,
CAIGNodeAttrs,
)
from varats.data.reports.blame_report import BlameReport
from varats.mapping.commit_map import get_commit_map
from varats.paper_mgmt.case_study import (
newest_processed_revision_for_case_study,
)
from varats.plot.plot import Plot, PlotDataEmpty
from varats.plot.plots import (
PlotGenerator,
REQUIRE_CASE_STUDY,
REQUIRE_REVISION,
)
from varats.plots.chord_plot_utils import (
make_chord_plot,
make_arc_plot,
NodeTy,
ChordPlotNodeInfo,
ChordPlotEdgeInfo,
ArcPlotEdgeInfo,
ArcPlotNodeInfo,
)
from varats.ts_utils.cli_util import CLIOptionTy, make_cli_option
from varats.utils.git_util import (
CommitRepoPair,
create_commit_lookup_helper,
UNCOMMITTED_COMMIT_HASH,
FullCommitHash,
ShortCommitHash,
)
NodeInfoTy = tp.TypeVar("NodeInfoTy", ChordPlotNodeInfo, ArcPlotNodeInfo)
EdgeInfoTy = tp.TypeVar("EdgeInfoTy", ChordPlotEdgeInfo, ArcPlotEdgeInfo)
OPTIONAL_SORT_METHOD: CLIOptionTy = make_cli_option(
"--sort-by",
type=click.Choice(["degree", "time"]),
default="degree",
required=False,
help="Sort method for commit interaction graph nodes."
)
| 33.330739 | 80 | 0.627714 |
f7ef21c429f9bf83356bf40d0aaa0462acb403b0 | 2,632 | py | Python | Day 7/Day 7.py | Dullstar/Advent-Of-Code-2020 | 7d3a64906ced2ac98bcfe67a9f3294c8756dc493 | [
"MIT"
] | null | null | null | Day 7/Day 7.py | Dullstar/Advent-Of-Code-2020 | 7d3a64906ced2ac98bcfe67a9f3294c8756dc493 | [
"MIT"
] | null | null | null | Day 7/Day 7.py | Dullstar/Advent-Of-Code-2020 | 7d3a64906ced2ac98bcfe67a9f3294c8756dc493 | [
"MIT"
] | null | null | null | import re
if __name__ == "__main__":
main()
| 33.74359 | 106 | 0.56383 |
f7efbdb4f4f2e1681183c05075e6b958502a3563 | 83,010 | py | Python | sdk/python/pulumi_aws_native/apigateway/outputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 29 | 2021-09-30T19:32:07.000Z | 2022-03-22T21:06:08.000Z | sdk/python/pulumi_aws_native/apigateway/outputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 232 | 2021-09-30T19:26:26.000Z | 2022-03-31T23:22:06.000Z | sdk/python/pulumi_aws_native/apigateway/outputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 4 | 2021-11-10T19:42:01.000Z | 2022-02-05T10:15:49.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'ApiKeyStageKey',
'ApiKeyTag',
'ClientCertificateTag',
'DeploymentAccessLogSetting',
'DeploymentCanarySetting',
'DeploymentCanarySettings',
'DeploymentMethodSetting',
'DeploymentStageDescription',
'DeploymentTag',
'DocumentationPartLocation',
'DomainNameEndpointConfiguration',
'DomainNameMutualTlsAuthentication',
'DomainNameTag',
'MethodIntegration',
'MethodIntegrationResponse',
'MethodResponse',
'RestApiEndpointConfiguration',
'RestApiS3Location',
'RestApiTag',
'StageAccessLogSetting',
'StageCanarySetting',
'StageMethodSetting',
'StageTag',
'UsagePlanApiStage',
'UsagePlanQuotaSettings',
'UsagePlanTag',
'UsagePlanThrottleSettings',
'VpcLinkTag',
]
| 42.85493 | 389 | 0.655427 |
f7f110d1e3f278e009edf38f3492952620bab08d | 619 | py | Python | bin/training_data/redmagic_ds_training_data.py | mclaughlin6464/pearce | 746f2bf4bf45e904d66996e003043661a01423ba | [
"MIT"
] | null | null | null | bin/training_data/redmagic_ds_training_data.py | mclaughlin6464/pearce | 746f2bf4bf45e904d66996e003043661a01423ba | [
"MIT"
] | 16 | 2016-11-04T22:24:32.000Z | 2018-05-01T22:53:39.000Z | bin/training_data/redmagic_ds_training_data.py | mclaughlin6464/pearce | 746f2bf4bf45e904d66996e003043661a01423ba | [
"MIT"
] | 3 | 2016-10-04T08:07:52.000Z | 2019-05-03T23:50:01.000Z | #!/.conda/envs/hodemulator/bin/python
from pearce.emulator import make_training_data
from pearce.emulator import DEFAULT_PARAMS as ordered_params
ordered_params['f_c'] = (0.05, .5)
ordered_params['logMmin'] = (11.5, 13.0)#(13.0, 14.5)
ordered_params['sigma_logM'] = (0.05, 1.0)
ordered_params['logM1'] = (12.0, 15.0)
ordered_params['alpha'] = (0.8, 1.5)
ordered_params.update({'mean_occupation_centrals_assembias_param1':( -1.0, 1.0),
'mean_occupation_satellites_assembias_param1':( -1.0, 1.0)})
make_training_data('/u/ki/swmclau2/Git/pearce/bin/training_data/ds_redmagic.cfg',ordered_params)
| 38.6875 | 96 | 0.726979 |
f7f1a1740efc36292fbb917d24b84a88544cbd25 | 40,478 | py | Python | src/legohdl/workspace.py | c-rus/legoHDL | d7d77c05514c8d6dc1070c4efe589f392307daac | [
"MIT"
] | 6 | 2021-12-16T05:40:37.000Z | 2022-02-07T15:04:39.000Z | src/legohdl/workspace.py | c-rus/legoHDL | d7d77c05514c8d6dc1070c4efe589f392307daac | [
"MIT"
] | 61 | 2021-09-28T03:05:13.000Z | 2022-01-16T00:03:14.000Z | src/legohdl/workspace.py | c-rus/legoHDL | d7d77c05514c8d6dc1070c4efe589f392307daac | [
"MIT"
] | 1 | 2021-12-16T07:03:18.000Z | 2021-12-16T07:03:18.000Z | # ------------------------------------------------------------------------------
# Project: legohdl
# Script: workspace.py
# Author: Chase Ruskin
# Description:
# The Workspace class. A Workspace object has a path and a list of available
# vendors. This is what the user keeps their work's scope within for a given
# "organization".
# ------------------------------------------------------------------------------
import os, shutil, glob
import logging as log
from datetime import datetime
from .vendor import Vendor
from .apparatus import Apparatus as apt
from .cfg import Cfg, Section, Key
from .map import Map
from .git import Git
from .block import Block
def getPath(self):
'''Returns the local path where downloaded blocks are located (str).'''
return self._path
def getDir(self):
'''Returns the base hidden directory where the workspace data is kept (str).'''
return self._ws_dir
def getCachePath(self):
'''Returns the hidden directory where workspace installations are kept. (str).'''
return self.getDir()+"cache/"
def getName(self):
'''Returns the workspace's identifier (str).'''
return self._name
def isActive(self):
'''Returns is this workspace is the active workspace (bool).'''
return self == self.getActive()
def getVendors(self, returnnames=False, lowercase=True):
'''
Return the vendor objects associated with the given workspace.
Parameters:
returnnames (bool): true will return vendor names
lowercase (bool): true will return lower-case names if returnnames is enabled
Returns:
([Vendor]) or ([str]): list of available vendors
'''
if(returnnames):
vndr_names = []
for vndr in self._vendors:
name = vndr.getName()
if(lowercase):
name = name.lower()
vndr_names += [name]
return vndr_names
else:
return self._vendors
# uncomment to use for debugging
# def __str__(self):
# return f'''
# ID: {hex(id(self))}
# Name: {self.getName()}
# Path: {self.getPath()}
# Active: {self.isActive()}
# Hidden directory: {self.getDir()}
# Linked to: {self.isLinked()}
# Vendors: {self.getVendors(returnnames=True)}
# '''
pass | 38.079022 | 186 | 0.531424 |
f7f1c343e2c46298649ddf9fe556e96b2bec9514 | 3,871 | py | Python | ev_de.py | avinashmnit30/Electric-Vehicle-Optimal-Charging | 7f09bdbb9904285ddbbfeaa28cf402f7ef6f4cb4 | [
"BSD-3-Clause"
] | 7 | 2018-03-09T11:19:39.000Z | 2022-01-19T13:45:20.000Z | ev_de.py | avinashmnit30/Electric-Vehicle-Optimal-Charging | 7f09bdbb9904285ddbbfeaa28cf402f7ef6f4cb4 | [
"BSD-3-Clause"
] | null | null | null | ev_de.py | avinashmnit30/Electric-Vehicle-Optimal-Charging | 7f09bdbb9904285ddbbfeaa28cf402f7ef6f4cb4 | [
"BSD-3-Clause"
] | 1 | 2022-03-03T12:08:52.000Z | 2022-03-03T12:08:52.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 16 18:01:24 2015
@author: Avinash
"""
import numpy as np
from numpy import *
import numpy
from math import *
import ev_charge_schedule_modification1 as ev
#import ev_charge_schedule.static as func1
#import ev_charge_schedule.dynamic as func2
import time
#from numba import double
from numba.decorators import autojit
func1=ev.static
func=autojit(func1)
mode=1
runs=1
maxiter=2000
F=0.5 # Mutation Factor between 0 to 2
CR=0.2 # Probability 1. Put 0.9 if parameters are dependent while 0.2 if parameters are independent(seperable)
N=40
D=100*24 # Number of particles
ev.global_var(var_set=0,N_veh=int(D/float(24)))
# boundary constraints
ub=numpy.random.random(size=(1,D))[0]
lb=numpy.random.random(size=(1,D))[0]
i=0
while i<D:
ub[i]=8.8
lb[i]=2.2
i+=1
fitness_val=numpy.zeros(shape=(runs,maxiter))
best_pos=numpy.zeros(shape=(runs,D))
for run_no in range(runs):
# target vector initializtion
x=numpy.random.uniform(size=(N,D))
i=0
while i<N:
j=0
while j<D:
x[i][j]=lb[j]+x[i][j]*(ub[j]-lb[j])
j+=1
i+=1
v=np.zeros_like(x) # donar vectors
u=np.zeros_like(x) # trail vector
g=numpy.zeros(shape=(1,D))[0] # best vector found so far
# target vector initial fitness evaluation
x_fit=numpy.random.uniform(size=(1,N))[0]
i=0
while i<N:
x_fit[i]=func(x[i],mode=mode)
i+=1
u_fit=np.zeros_like(x_fit)
j=0
i=1
while i<N:
if x_fit[j]>x_fit[i]:
j=i
i+=1
g_fit=x_fit[j]
g=x[j].copy()
time1=time.time()
it=0
while it<maxiter:
# Mutation stage
for i in range(N):
r1=i
while r1==i:
r1=np.random.randint(low=0,high=N)
r2=i
while r2==i or r2==r1:
r2=np.random.randint(low=0,high=N)
r3=i
while r3==i or r3==r1 or r3==r2:
r3=np.random.randint(low=0,high=N)
v[i]=x[r1]+(x[r2]-x[r3])*F
for j in range(D):
# if v[i][j]>ub[j]:
# v[i][j]=v[i][j]-(1+numpy.random.rand())*(v[i][j]-ub[j])
# if v[i][j]<lb[j]:
# v[i][j]=v[i][j]-(1+numpy.random.rand())*(v[i][j]-lb[j])
# if v[i][j]>ub[j]:
# v[i][j]=ub[j]
# if v[i][j]<lb[j]:
# v[i][j]=lb[j]
if v[i][j]>ub[j]:
#v[i][j]=v[i][j]-1.1*(v[i][j]-ub[j])
v[i][j]=lb[j]+numpy.random.random()*(ub[j]-lb[j])
if v[i][j]<lb[j]:
v[i][j]=lb[j]+numpy.random.random()*(ub[j]-lb[j])
#v[i][j]=v[i][j]-1.1*(v[i][j]-lb[j])
# Recombination stage
for i in range(N):
for j in range(D):
if np.random.random()<=CR or j==numpy.random.randint(0,D):
u[i][j]=v[i][j]
else:
u[i][j]=x[i][j]
# Selection stage
for i in range(N):
u_fit[i]=func(u[i],mode=mode)
if u_fit[i]<x_fit[i]:
x[i]=u[i].copy()
x_fit[i]=u_fit[i]
if u_fit[i]<g_fit:
g=u[i].copy()
g_fit=u_fit[i]
fitness_val[run_no][it]=g_fit
print it,g_fit
it+=1
best_pos[run_no]=g.copy()
time2=time.time()
print time2-time1
run_no+=1
numpy.savetxt("DE_fitness_d1_m2"+str(mode)+str(D)+".csv",fitness_val,delimiter=",")
numpy.savetxt("DE_bestpos_d1_m2"+str(mode)+str(D)+".csv",best_pos,delimiter=",")
| 29.105263 | 112 | 0.482046 |
f7f1da41a1909260bbd83fee7efec53538a5f960 | 775 | py | Python | var/spack/repos/builtin/packages/memaxes/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 9 | 2018-04-18T07:51:40.000Z | 2021-09-10T03:56:57.000Z | var/spack/repos/builtin/packages/memaxes/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 907 | 2018-04-18T11:17:57.000Z | 2022-03-31T13:20:25.000Z | var/spack/repos/builtin/packages/memaxes/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29 | 2018-11-05T16:14:23.000Z | 2022-02-03T16:07:09.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
| 31 | 93 | 0.672258 |
f7f61f99b14ff05744c7eb403d860339bcd27eae | 3,970 | py | Python | auth/decorators.py | dongboyan77/quay | 8018e5bd80f17e6d855b58b7d5f2792d92675905 | [
"Apache-2.0"
] | null | null | null | auth/decorators.py | dongboyan77/quay | 8018e5bd80f17e6d855b58b7d5f2792d92675905 | [
"Apache-2.0"
] | null | null | null | auth/decorators.py | dongboyan77/quay | 8018e5bd80f17e6d855b58b7d5f2792d92675905 | [
"Apache-2.0"
] | null | null | null | import logging
from functools import wraps
from flask import request, session
from prometheus_client import Counter
from auth.basic import validate_basic_auth
from auth.oauth import validate_bearer_auth
from auth.cookie import validate_session_cookie
from auth.signedgrant import validate_signed_grant
from util.http import abort
logger = logging.getLogger(__name__)
authentication_count = Counter(
"quay_authentication_attempts_total",
"number of authentication attempts accross the registry and API",
labelnames=["auth_kind", "success"],
)
def _auth_decorator(pass_result=False, handlers=None):
""" Builds an auth decorator that runs the given handlers and, if any return successfully,
sets up the auth context. The wrapped function will be invoked *regardless of success or
failure of the auth handler(s)*
"""
return processor
process_oauth = _auth_decorator(handlers=[validate_bearer_auth, validate_session_cookie])
process_auth = _auth_decorator(handlers=[validate_signed_grant, validate_basic_auth])
process_auth_or_cookie = _auth_decorator(handlers=[validate_basic_auth, validate_session_cookie])
process_basic_auth = _auth_decorator(handlers=[validate_basic_auth], pass_result=True)
process_basic_auth_no_pass = _auth_decorator(handlers=[validate_basic_auth])
def require_session_login(func):
""" Decorates a function and ensures that a valid session cookie exists or a 401 is raised. If
a valid session cookie does exist, the authenticated user and identity are also set.
"""
return wrapper
def extract_namespace_repo_from_session(func):
""" Extracts the namespace and repository name from the current session (which must exist)
and passes them into the decorated function as the first and second arguments. If the
session doesn't exist or does not contain these arugments, a 400 error is raised.
"""
return wrapper
| 35.446429 | 101 | 0.668766 |
f7f6435a685ce7599500c328cd1e055481aa5830 | 5,353 | py | Python | ddpm_proteins/utils.py | lucidrains/ddpm-proteins | 88bfacbd3cbdc4e38585fab420106f56e890c5f7 | [
"MIT"
] | 61 | 2021-06-14T16:41:54.000Z | 2022-03-23T14:09:46.000Z | ddpm_proteins/utils.py | lucidrains/ddpm-proteins | 88bfacbd3cbdc4e38585fab420106f56e890c5f7 | [
"MIT"
] | null | null | null | ddpm_proteins/utils.py | lucidrains/ddpm-proteins | 88bfacbd3cbdc4e38585fab420106f56e890c5f7 | [
"MIT"
] | 5 | 2021-06-15T11:51:47.000Z | 2022-03-18T08:01:48.000Z | import os
from PIL import Image
import seaborn as sn
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from sidechainnet.utils.sequence import ProteinVocabulary
from einops import rearrange
# general functions
# singleton msa transformer
msa_instances = None
# MSA embedding related functions
VOCAB = ProteinVocabulary()
# getting a single MSA attention embedding, with caching
CACHE_PATH = default(os.getenv('CACHE_PATH'), os.path.expanduser('~/.cache.ddpm-proteins'))
FETCH_FROM_CACHE = not exists(os.getenv('CLEAR_CACHE'))
os.makedirs(CACHE_PATH, exist_ok = True)
# training utils
| 29.092391 | 134 | 0.655707 |
f7f93aac7b9d793ef23c38a97b1f3ca8216eaa8d | 24,348 | py | Python | samples/python/efficientdet/create_onnx.py | L-Net-1992/TensorRT | 34b664d404001bd724cb56b52a6e0e05e1fd97f2 | [
"Apache-2.0"
] | null | null | null | samples/python/efficientdet/create_onnx.py | L-Net-1992/TensorRT | 34b664d404001bd724cb56b52a6e0e05e1fd97f2 | [
"Apache-2.0"
] | null | null | null | samples/python/efficientdet/create_onnx.py | L-Net-1992/TensorRT | 34b664d404001bd724cb56b52a6e0e05e1fd97f2 | [
"Apache-2.0"
] | null | null | null | #
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import argparse
import logging
import tensorflow as tf
import onnx_graphsurgeon as gs
import numpy as np
import onnx
from onnx import shape_inference
from tf2onnx import tfonnx, optimizer, tf_loader
import onnx_utils
logging.basicConfig(level=logging.INFO)
logging.getLogger("EfficientDetGraphSurgeon").setLevel(logging.INFO)
log = logging.getLogger("EfficientDetGraphSurgeon")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--saved_model", required=True,
help="The TensorFlow saved model directory to load")
parser.add_argument("-o", "--onnx", required=True,
help="The output ONNX model file to write")
parser.add_argument("-f", "--input_format", default="NHWC", choices=["NHWC", "NCHW"],
help="Set the input data format of the graph, either NCHW or NHWC, default: NHWC")
parser.add_argument("-i", "--input_size", default="512,512",
help="Set the input shape of the graph, as a comma-separated dimensions in H,W format, "
"default: 512,512")
parser.add_argument("-p", "--preprocessor", default="imagenet", choices=["imagenet", "scale_range"],
help="Set the preprocessor to apply on the graph, either 'imagenet' for standard mean "
"subtraction and stdev normalization, or 'scale_range' for uniform [-1,+1] "
"normalization as is used in the AdvProp models, default: imagenet")
parser.add_argument("-t", "--nms_threshold", type=float,
help="Override the NMS score threshold, default: use the original value in the model")
parser.add_argument("-d", "--nms_detections", type=int,
help="Override the NMS max detections, default: use the original value in the model")
parser.add_argument("--tf2onnx",
help="The path where to save the intermediate ONNX graph generated by tf2onnx, useful"
"for graph debugging purposes, default: not saved")
args = parser.parse_args()
main(args)
| 53.986696 | 122 | 0.619065 |
f7f9d815fd74248ee87d991bd107aab15b47f8cc | 618 | py | Python | easy/867-transpose-matrix.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | 2 | 2021-03-14T11:38:26.000Z | 2021-03-14T11:38:30.000Z | easy/867-transpose-matrix.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | null | null | null | easy/867-transpose-matrix.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | 1 | 2022-01-17T19:33:23.000Z | 2022-01-17T19:33:23.000Z | '''
matrix matrix
'''
from typing import List
'''
m*nn*m
'''
s = Solution()
print(s.transpose([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
print(s.transpose([[1, 2, 3], [4, 5, 6]]))
| 20.6 | 68 | 0.548544 |
f7fa229686aa6986aa8b8f8a1dc2ccded74af095 | 5,940 | py | Python | adam_visual_perception/head_gaze_estimator.py | isi-vista/adam-visual-perception | 8ad6ed883b184b5407a1bf793617b226c78b3a13 | [
"MIT"
] | 1 | 2020-07-21T10:52:26.000Z | 2020-07-21T10:52:26.000Z | adam_visual_perception/head_gaze_estimator.py | isi-vista/adam-visual-perception | 8ad6ed883b184b5407a1bf793617b226c78b3a13 | [
"MIT"
] | null | null | null | adam_visual_perception/head_gaze_estimator.py | isi-vista/adam-visual-perception | 8ad6ed883b184b5407a1bf793617b226c78b3a13 | [
"MIT"
] | 2 | 2020-07-21T15:30:42.000Z | 2021-01-20T21:54:09.000Z | from adam_visual_perception import LandmarkDetector
from adam_visual_perception.utility import *
import numpy as np
import math
import cv2
import os
import sys
| 35.783133 | 87 | 0.458754 |
f7fa5e91400000b4953ab8022408df2a80e3be82 | 3,388 | py | Python | pypoca/cogs/general.py | leandcesar/PyPoca | 416f690faad0b511ca9d04b012af35256ee95089 | [
"MIT"
] | 1 | 2021-11-22T04:22:08.000Z | 2021-11-22T04:22:08.000Z | pypoca/cogs/general.py | leandcesar/PyPoca | 416f690faad0b511ca9d04b012af35256ee95089 | [
"MIT"
] | null | null | null | pypoca/cogs/general.py | leandcesar/PyPoca | 416f690faad0b511ca9d04b012af35256ee95089 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import disnake
from disnake.ext import commands
from pypoca.config import COLOR, URLS
from pypoca.database import Server
from pypoca.ext import ALL, DEFAULT, Choice, Option
| 47.055556 | 113 | 0.626328 |
f7fab2882ba44013b1ca7273273e6b041c1e46c3 | 1,301 | py | Python | costor_server/storage/api/views/authcheck.py | rphi/costor | 081de65778d404cf7a22c5524bf89a146fa8326b | [
"CNRI-Python"
] | 2 | 2019-12-31T16:49:36.000Z | 2021-02-17T09:47:41.000Z | costor_server/storage/api/views/authcheck.py | rphi/costor | 081de65778d404cf7a22c5524bf89a146fa8326b | [
"CNRI-Python"
] | null | null | null | costor_server/storage/api/views/authcheck.py | rphi/costor | 081de65778d404cf7a22c5524bf89a146fa8326b | [
"CNRI-Python"
] | null | null | null | from rest_framework.decorators import api_view, permission_classes
from rest_framework.parsers import MultiPartParser
from rest_framework.response import Response
from rest_framework import permissions
from rest_framework.exceptions import APIException
from rest_framework.decorators import parser_classes
from django.shortcuts import get_object_or_404
from manager.models import Agent
| 30.97619 | 117 | 0.704074 |
f7facb852a3db388a7c69659114114ea83276164 | 12,295 | py | Python | tensorflow_probability/python/experimental/mcmc/sample_fold.py | rupei/probability | 4aa1ee652853a19c4e80d39216c3fa535ed3e589 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/experimental/mcmc/sample_fold.py | rupei/probability | 4aa1ee652853a19c4e80d39216c3fa535ed3e589 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/experimental/mcmc/sample_fold.py | rupei/probability | 4aa1ee652853a19c4e80d39216c3fa535ed3e589 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Drivers for streaming reductions framework."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
# Dependency imports
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.experimental.mcmc import sample as exp_sample_lib
from tensorflow_probability.python.experimental.mcmc import sample_discarding_kernel
from tensorflow_probability.python.experimental.mcmc import tracing_reducer
from tensorflow_probability.python.experimental.mcmc import with_reductions
from tensorflow_probability.python.mcmc import sample
from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import
__all__ = [
'sample_chain',
'sample_fold',
]
def sample_fold(
num_steps,
current_state,
previous_kernel_results=None,
kernel=None,
reducer=None,
num_burnin_steps=0,
num_steps_between_results=0,
parallel_iterations=10,
seed=None,
name=None,
):
"""Computes the requested reductions over the `kernel`'s samples.
To wit, runs the given `kernel` for `num_steps` steps, and consumes
the stream of samples with the given `Reducer`s' `one_step` method(s).
This runs in constant memory (unless a given `Reducer` builds a
large structure).
The driver internally composes the correct onion of `WithReductions`
and `SampleDiscardingKernel` to implement the requested optionally
thinned reduction; however, the kernel results of those applied
Transition Kernels will not be returned. Hence, if warm-restarting
reductions is desired, one should manually build the Transition Kernel
onion and use `tfp.experimental.mcmc.step_kernel`.
An arbitrary collection of `reducer` can be provided, and the resulting
finalized statistic(s) will be returned in an identical structure.
Args:
num_steps: Integer or scalar `Tensor` representing the number of `Reducer`
steps.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s).
previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s.
Warm-start for the auxiliary state needed by the given `kernel`.
If not supplied, `sample_fold` will cold-start with
`kernel.bootstrap_results`.
kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step
of the Markov chain.
reducer: A (possibly nested) structure of `Reducer`s to be evaluated
on the `kernel`'s samples. If no reducers are given (`reducer=None`),
then `None` will be returned in place of streaming calculations.
num_burnin_steps: Integer or scalar `Tensor` representing the number
of chain steps to take before starting to collect results.
Defaults to 0 (i.e., no burn-in).
num_steps_between_results: Integer or scalar `Tensor` representing
the number of chain steps between collecting a result. Only one out
of every `num_steps_between_samples + 1` steps is included in the
returned results. Defaults to 0 (i.e., no thinning).
parallel_iterations: The number of iterations allowed to run in parallel. It
must be a positive integer. See `tf.while_loop` for more details.
seed: Optional seed for reproducible sampling.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'mcmc_sample_fold').
Returns:
reduction_results: A (possibly nested) structure of finalized reducer
statistics. The structure identically mimics that of `reducer`.
end_state: The final state of the Markov chain(s).
final_kernel_results: `collections.namedtuple` of internal calculations
used to advance the supplied `kernel`. These results do not include
the kernel results of `WithReductions` or `SampleDiscardingKernel`.
"""
with tf.name_scope(name or 'mcmc_sample_fold'):
num_steps = tf.convert_to_tensor(
num_steps, dtype=tf.int32, name='num_steps')
current_state = tf.nest.map_structure(
lambda x: tf.convert_to_tensor(x, name='current_state'),
current_state)
reducer_was_none = False
if reducer is None:
reducer = []
reducer_was_none = True
reduction_kernel = with_reductions.WithReductions(
inner_kernel=sample_discarding_kernel.SampleDiscardingKernel(
inner_kernel=kernel,
num_burnin_steps=num_burnin_steps,
num_steps_between_results=num_steps_between_results),
reducer=reducer,
)
end_state, final_kernel_results = exp_sample_lib.step_kernel(
num_steps=num_steps,
current_state=current_state,
previous_kernel_results=previous_kernel_results,
kernel=reduction_kernel,
return_final_kernel_results=True,
parallel_iterations=parallel_iterations,
seed=seed,
name=name,
)
reduction_results = nest.map_structure_up_to(
reducer,
lambda r, s: r.finalize(s),
reducer,
final_kernel_results.streaming_calculations,
check_types=False)
if reducer_was_none:
reduction_results = None
return (reduction_results,
end_state,
final_kernel_results.inner_results.inner_results)
def sample_chain(
num_results,
current_state,
previous_kernel_results=None,
kernel=None,
num_burnin_steps=0,
num_steps_between_results=0,
trace_fn=_trace_kernel_results,
return_final_kernel_results=False,
parallel_iterations=10,
seed=None,
name=None,
):
"""Implements Markov chain Monte Carlo via repeated `TransitionKernel` steps.
This function samples from a Markov chain at `current_state` whose
stationary distribution is governed by the supplied `TransitionKernel`
instance (`kernel`).
This function can sample from multiple chains, in parallel. (Whether or not
there are multiple chains is dictated by the `kernel`.)
The `current_state` can be represented as a single `Tensor` or a `list` of
`Tensors` which collectively represent the current state.
Since MCMC states are correlated, it is sometimes desirable to produce
additional intermediate states, and then discard them, ending up with a set of
states with decreased autocorrelation. See [Owen (2017)][1]. Such 'thinning'
is made possible by setting `num_steps_between_results > 0`. The chain then
takes `num_steps_between_results` extra steps between the steps that make it
into the results. The extra steps are never materialized, and thus do not
increase memory requirements.
In addition to returning the chain state, this function supports tracing of
auxiliary variables used by the kernel. The traced values are selected by
specifying `trace_fn`. By default, all kernel results are traced but in the
future the default will be changed to no results being traced, so plan
accordingly. See below for some examples of this feature.
Args:
num_results: Integer number of Markov chain draws.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s).
previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s
representing internal calculations made within the previous call to this
function (or as returned by `bootstrap_results`).
kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step
of the Markov chain.
num_burnin_steps: Integer number of chain steps to take before starting to
collect results.
Default value: 0 (i.e., no burn-in).
num_steps_between_results: Integer number of chain steps between collecting
a result. Only one out of every `num_steps_between_samples + 1` steps is
included in the returned results. The number of returned chain states is
still equal to `num_results`. Default value: 0 (i.e., no thinning).
trace_fn: A callable that takes in the current chain state and the previous
kernel results and return a `Tensor` or a nested collection of `Tensor`s
that is then traced along with the chain state.
return_final_kernel_results: If `True`, then the final kernel results are
returned alongside the chain state and the trace specified by the
`trace_fn`.
parallel_iterations: The number of iterations allowed to run in parallel. It
must be a positive integer. See `tf.while_loop` for more details.
seed: Optional, a seed for reproducible sampling.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'experimental_mcmc_sample_chain').
Returns:
checkpointable_states_and_trace: if `return_final_kernel_results` is
`True`. The return value is an instance of
`CheckpointableStatesAndTrace`.
all_states: if `return_final_kernel_results` is `False` and `trace_fn` is
`None`. The return value is a `Tensor` or Python list of `Tensor`s
representing the state(s) of the Markov chain(s) at each result step. Has
same shape as input `current_state` but with a prepended
`num_results`-size dimension.
states_and_trace: if `return_final_kernel_results` is `False` and
`trace_fn` is not `None`. The return value is an instance of
`StatesAndTrace`.
#### References
[1]: Art B. Owen. Statistically efficient thinning of a Markov chain sampler.
_Technical Report_, 2017.
http://statweb.stanford.edu/~owen/reports/bestthinning.pdf
"""
with tf.name_scope(name or 'experimental_mcmc_sample_chain'):
if not kernel.is_calibrated:
warnings.warn('supplied `TransitionKernel` is not calibrated. Markov '
'chain may not converge to intended target distribution.')
if trace_fn is None:
trace_fn = lambda *args: ()
no_trace = True
else:
no_trace = False
if trace_fn is sample_chain.__defaults__[4]:
warnings.warn('Tracing all kernel results by default is deprecated. Set '
'the `trace_fn` argument to None (the future default '
'value) or an explicit callback that traces the values '
'you are interested in.')
# `WithReductions` assumes all its reducers want to reduce over the
# immediate inner results of its kernel results. However,
# We don't care about the kernel results of `SampleDiscardingKernel`; hence,
# we evaluate the `trace_fn` on a deeper level of inner results.
trace_reducer = tracing_reducer.TracingReducer(
trace_fn=real_trace_fn,
size=num_results
)
trace_results, _, final_kernel_results = sample_fold(
num_steps=num_results,
current_state=current_state,
previous_kernel_results=previous_kernel_results,
kernel=kernel,
reducer=trace_reducer,
num_burnin_steps=num_burnin_steps,
num_steps_between_results=num_steps_between_results,
parallel_iterations=parallel_iterations,
seed=seed,
name=name,
)
all_states, trace = trace_results
if return_final_kernel_results:
return sample.CheckpointableStatesAndTrace(
all_states=all_states,
trace=trace,
final_kernel_results=final_kernel_results)
else:
if no_trace:
return all_states
else:
return sample.StatesAndTrace(all_states=all_states, trace=trace)
| 43.140351 | 85 | 0.727938 |
f7facc8714f2358ff5e4f5bf725d3516243bec69 | 10,025 | py | Python | algos/custom_ppo2.py | Ottawa-Autonomous-Vehicle-Group/learning-to-drive-in-5-minutes | fb82bc77593605711289e03f95dcfb6d3ea9e6c3 | [
"MIT"
] | 1 | 2020-08-02T20:47:44.000Z | 2020-08-02T20:47:44.000Z | algos/custom_ppo2.py | vijpandaturtle/learning-to-drive-in-5-minutes | fb82bc77593605711289e03f95dcfb6d3ea9e6c3 | [
"MIT"
] | null | null | null | algos/custom_ppo2.py | vijpandaturtle/learning-to-drive-in-5-minutes | fb82bc77593605711289e03f95dcfb6d3ea9e6c3 | [
"MIT"
] | null | null | null | import time
from collections import deque
import gym
import numpy as np
from stable_baselines import logger, PPO2
from stable_baselines.a2c.utils import total_episode_reward_logger
from stable_baselines.common import explained_variance, TensorboardWriter
from stable_baselines.common.runners import AbstractEnvRunner
from stable_baselines.ppo2.ppo2 import get_schedule_fn, safe_mean, swap_and_flatten
| 52.213542 | 121 | 0.572569 |
f7fafc3eca2a0d5f684ce78dbf8d565f8e0da8a0 | 787 | py | Python | craw/modules/trail/trails/feeds/urlvir.py | xuluhang/DomainBlockList | e9e69138ffdba6a73741fe204306f1f0b66eff19 | [
"MIT"
] | 19 | 2019-11-25T09:02:15.000Z | 2021-07-24T12:05:28.000Z | craw/modules/trail/trails/feeds/urlvir.py | xuluhang/DomainBlockList | e9e69138ffdba6a73741fe204306f1f0b66eff19 | [
"MIT"
] | 1 | 2019-11-25T09:06:08.000Z | 2019-11-25T09:06:08.000Z | craw/modules/trail/trails/feeds/urlvir.py | xuluhang/DomainBlockList | e9e69138ffdba6a73741fe204306f1f0b66eff19 | [
"MIT"
] | 10 | 2019-11-26T02:42:02.000Z | 2021-08-28T07:16:08.000Z | #!/usr/bin/env python2
"""
Copyright (c) 2014-2019 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
from craw.modules.trail.plugins.util import wget_content
__url__ = "http://www.urlvir.com/export-hosts/"
__check__ = "Updated on"
__info__ = "malware"
__reference__ = "urlvir.com"
maintainer_url = __reference__
maintainer = "urlvir"
list_source_url = __url__
category = __info__
| 23.848485 | 83 | 0.66709 |
f7fb1109bf89db5bf87c82699fc7b9493c2500d3 | 1,035 | py | Python | tests/continuous_integration.py | kfaRabi/online-judge-tools | 79de8d37e1aa78a7c4c82c6a666f1f1602caf545 | [
"MIT"
] | null | null | null | tests/continuous_integration.py | kfaRabi/online-judge-tools | 79de8d37e1aa78a7c4c82c6a666f1f1602caf545 | [
"MIT"
] | null | null | null | tests/continuous_integration.py | kfaRabi/online-judge-tools | 79de8d37e1aa78a7c4c82c6a666f1f1602caf545 | [
"MIT"
] | null | null | null | import os
import subprocess
import sys
import unittest
# TODO: these command should be written at once, at only .travis.yml or at only here
paths = ['oj', 'onlinejudge', 'setup.py', 'tests']
| 39.807692 | 127 | 0.68599 |
f7fbd980831ccec066261d37e528035e5f2d7c7a | 12,278 | py | Python | open-hackathon-client/src/client/config_sample.py | overbest/open-hackathon | 62e085fbe603bcb00ca56d2b96cfc43bf44c710b | [
"MIT"
] | null | null | null | open-hackathon-client/src/client/config_sample.py | overbest/open-hackathon | 62e085fbe603bcb00ca56d2b96cfc43bf44c710b | [
"MIT"
] | null | null | null | open-hackathon-client/src/client/config_sample.py | overbest/open-hackathon | 62e085fbe603bcb00ca56d2b96cfc43bf44c710b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# -----------------------------------------------------------------------------------
# Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------------
# "javascript" section for javascript. see @app.route('/config.js') in app/views.py
# NOTE: all following key/secrets for test purpose.
HOSTNAME = "http://localhost" # host name of the UI site
# hacking.kaiyuanshe.cn is used for wechat oauth login
# HOSTNAME = "http://hacking.kaiyuanshe.cn"
# HOSTNAME = "http://open-hackathon-dev.chinacloudapp.cn" # host name of the UI site
# HOSTNAME = "http://hacking.kaiyuanshe.cn"
QQ_OAUTH_STATE = "openhackathon" # todo state should be constant. Actually it should be unguessable to prevent CSFA
HACKATHON_API_ENDPOINT = "http://localhost:15000"
# HACKATHON_API_ENDPOINT = "http://open-hackathon-dev.chinacloudapp.cn:15000"
# HACKATHON_API_ENDPOINT = "http://hacking.kaiyuanshe.cn:15000"
# github key for `localhost`
GITHUB_CLIENT_ID = "b44f3d47bdeb26b9c4e6"
GITHUB_CLIENT_SECRET = "98de14161c4b2ed3ea7a19787d62cda73b8e292c"
# github oauth key for `open-hackathon-dev.chinacloudapp.cn`
# GITHUB_CLIENT_ID = "b8e407813350f26bf537"
# GITHUB_CLIENT_SECRET = "daa78ae27e13c9f5b4a884bd774cadf2f75a199f"
QQ_CLIENT_ID = "101200890"
QQ_CLIENT_SECRET = "88ad67bd4521c4cc47136854781cb9b5"
QQ_META_CONTENT = "274307566465013314076545663016134754100636"
WECHAT_APP_ID = "wxe75b8aef71c2059f"
WECHAT_SECRET = "4532b90750f4c7bc70fcfbc42d881622"
WECHAT_OAUTH_STATE = "openhackathon" # NOTE: may be should be same as QQ_OAUTH_STATE?
WEIBO_CLIENT_ID = "479757037"
WEIBO_CLIENT_SECRET = "efc5e75ff8891be37d90b4eaec5c02de"
WEIBO_META_CONTENT = "ae884e09bc02b700"
LIVE_CLIENT_ID = "000000004414E0A6"
LIVE_CLIENT_SECRET = "b4mkfVqjtwHY2wJh0T4tj74lxM5LgAT2"
ALAUDA_CLIENT_ID = "4VR9kzNZVyWcnk9OnAwMuSus7xOOcozJIpic6W6y"
ALAUDA_CLIENT_SECRET = "E5PUL5h9feLlEirec5HQhjIzYecv7vVbEBjWLBkRMoCoFXdvS1PzNmd4AAeNgu4M2AJ87uGnnJaoDLCcDuVxkBoHRWCn6LmfB4SKK1Dty1SkGukkTcZPEk9wpHLSiRQ3"
Config = {
"environment": "local",
"app": {
"secret_key": "secret_key"
},
"login": {
"github": {
"client_id": GITHUB_CLIENT_ID,
"access_token_url": 'https://github.com/login/oauth/access_token?client_id=%s&client_secret=%s&redirect_uri=%s/github&code=' % (
GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, HOSTNAME),
"user_info_url": 'https://api.github.com/user?access_token=',
"emails_info_url": 'https://api.github.com/user/emails?access_token='
},
"qq": {
"client_id": QQ_CLIENT_ID,
"meta_content": QQ_META_CONTENT,
"access_token_url": 'https://graph.qq.com/oauth2.0/token?grant_type=authorization_code&client_id=%s&client_secret=%s&redirect_uri=%s/qq&code=' % (
QQ_CLIENT_ID, QQ_CLIENT_SECRET, HOSTNAME),
"openid_url": 'https://graph.qq.com/oauth2.0/me?access_token=',
"user_info_url": 'https://graph.qq.com/user/get_user_info?access_token=%s&oauth_consumer_key=%s&openid=%s'
},
"wechat": {
"client_id": WECHAT_APP_ID,
"access_token_url": "https://api.weixin.qq.com/sns/oauth2/access_token?appid=%s&secret=%s&code=%%s&grant_type=authorization_code" % (
WECHAT_APP_ID, WECHAT_SECRET),
"user_info_url": "https://api.weixin.qq.com/sns/userinfo?access_token=%s&openid=%s"
},
"weibo": {
"client_id": WEIBO_CLIENT_ID,
"meta_content": WEIBO_META_CONTENT,
"user_info_url": 'https://api.weibo.com/2/users/show.json?access_token=',
"email_info_url": 'https://api.weibo.com/2/account/profile/email.json?access_token=',
"access_token_url": 'https://api.weibo.com/oauth2/access_token?client_id=%s&client_secret=%s&grant_type=authorization_code&redirect_uri=%s/weibo&code=' % (
WEIBO_CLIENT_ID, WEIBO_CLIENT_SECRET, HOSTNAME)
},
"live": {
"client_id": LIVE_CLIENT_ID,
"client_secret": LIVE_CLIENT_SECRET,
"redirect_uri": '%s/live' % HOSTNAME,
"access_token_url": 'https://login.live.com/oauth20_token.srf',
"user_info_url": 'https://apis.live.net/v5.0/me?access_token='
},
"alauda": {
"client_id": ALAUDA_CLIENT_ID,
"client_secret": ALAUDA_CLIENT_SECRET,
"redirect_uri": '%s/alauda' % HOSTNAME,
"access_token_url": 'http://console.int.alauda.io/oauth/token'
},
"provider_enabled": ["github", "wechat"],
"session_valid_time_minutes": 60
},
"hackathon-api": {
"endpoint": HACKATHON_API_ENDPOINT
},
"javascript": {
"github": {
"authorize_url": "https://github.com/login/oauth/authorize?client_id=%s&redirect_uri=%s/github&scope=user" % (
GITHUB_CLIENT_ID, HOSTNAME)
},
"weibo": {
"authorize_url": "https://api.weibo.com/oauth2/authorize?client_id=%s&redirect_uri=%s/weibo&scope=all" % (
WEIBO_CLIENT_ID, HOSTNAME)
},
"qq": {
"authorize_url": "https://graph.qq.com/oauth2.0/authorize?client_id=%s&redirect_uri=%s/qq&scope=get_user_info&state=%s&response_type=code" % (
QQ_CLIENT_ID, HOSTNAME, QQ_OAUTH_STATE)
},
"wechat": {
"authorize_url": "https://open.weixin.qq.com/connect/qrconnect?appid=%s&redirect_uri=%s/wechat&response_type=code&scope=snsapi_login&state=%s#wechat_redirect" % (
WECHAT_APP_ID, HOSTNAME, WECHAT_OAUTH_STATE)
},
"live": {
"authorize_url": "https://login.live.com/oauth20_authorize.srf?client_id=%s&scope=wl.basic+,wl.emails&response_type=code&redirect_uri=%s/live" % (
LIVE_CLIENT_ID, HOSTNAME)
},
"alauda": {
"authorize_url": "http://console.int.alauda.io/oauth/authorize?response_type=code&client_id=%s&state=state&redirect_uri=%s/alauda" % (
ALAUDA_CLIENT_ID, HOSTNAME)
},
"hackathon": {
"endpoint": HACKATHON_API_ENDPOINT
},
"apiconfig": {
"proxy": HACKATHON_API_ENDPOINT,
"api": {
"admin": {
"hackathon": {
"": ["get", "post", "put", "delete"],
"checkname": ["get"],
"list": ["get"],
"online": ["post"],
"applyonline": ["post"],
"offline": ["post"],
"tags": ["get", "post", "put", "delete"],
"config": ["get", "post", "put", "delete"],
"administrator": {
"": ["put", "post", "delete"],
"list": ["get"]
},
"template": {
"": ["post", "delete"],
"list": ["get"],
"check": ["get"]
},
"organizer": {
"": ["get", "post", "put", "delete"]
},
"award": {
"": ["get", "post", "put", "delete"],
"list": ["get"]
},
"notice": {
"": ["get", "post", "put", "delete"]
}
},
"registration": {
"": ["get", "post", "delete", "put"],
"list": ["get"]
},
"azure": {
"": ["get", "post", "delete", "put"],
"checksubid": ["post"]
},
"experiment": {
"list": ["get"],
"": ["post", "put"]
},
"team": {
"list": ["get"],
"score": {
"list": ["get"]
},
"award": ["get", "post", "delete"]
},
"user": {
"list": ["get"]
},
"hostserver": {
"": ["get", "post", "delete", "put"],
"list": ["get"]
}
},
"template": {
"": ["get", "post", "delete", "put"],
"file": ["post"],
"list": ["get"],
"check": ["get"]
},
"user": {
"": ["get"],
"login": ["post", "delete"],
"experiment": {
"": ["get", "post", "delete", "put"]
},
"registration": {
"": ["put", "post", "get"],
"checkemail": ["get"],
"list": ["get"]
},
"profile": {
"": ["post", "put"]
},
"picture": {
"": ["put"]
},
"team": {
"member": ["get"]
},
"hackathon": {
"like": ["get", "post", "delete"]
},
"notice": {
"read": ["put"]
},
"show": {
"list": ["get"]
},
"file": {
"": ["post"]
}
},
"hackathon": {
"": ["get"],
"list": ["get"],
"stat": ["get"],
"template": ["get"],
"team": {
"list": ["get"]
},
"registration": {
"list": ["get"]
},
"show": {
"list": ["get"]
},
"grantedawards": ["get"],
"notice": {
"list": ["get"]
}
},
"team": {
"": ["get", "post", "put", "delete"],
"score": ["get", "post", "put"],
"member": {
"": ["post", "put", "delete"],
"list": ["get"]
},
"show": ["get", "post", "delete"],
"template": ["post", "delete"]
},
"talent": {
"list": ["get"]
},
"grantedawards": ["get"]
}
}
}
}
| 42.93007 | 174 | 0.476136 |
f7fbf451f7ab0b316753c8ad61a542b73cbff82d | 14,904 | py | Python | processing_provider/Rast_fillRasterwithPatches.py | geodourados/lftools | 4b9d703513bd3d49ac7952014575bf95492a2d90 | [
"MIT"
] | 1 | 2022-03-28T22:18:09.000Z | 2022-03-28T22:18:09.000Z | processing_provider/Rast_fillRasterwithPatches.py | geodourados/lftools | 4b9d703513bd3d49ac7952014575bf95492a2d90 | [
"MIT"
] | null | null | null | processing_provider/Rast_fillRasterwithPatches.py | geodourados/lftools | 4b9d703513bd3d49ac7952014575bf95492a2d90 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
fillRasterwithPatches.py
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Leandro Frana'
__date__ = '2020-09-01'
__copyright__ = '(C) 2020, Leandro Frana'
from PyQt5.QtCore import QCoreApplication, QVariant
from qgis.core import (QgsProcessing,
QgsFeatureSink,
QgsWkbTypes,
QgsFields,
QgsField,
QgsFeature,
QgsPointXY,
QgsGeometry,
QgsProcessingException,
QgsProcessingAlgorithm,
QgsProcessingParameterString,
QgsProcessingParameterField,
QgsProcessingParameterBoolean,
QgsProcessingParameterCrs,
QgsProcessingParameterEnum,
QgsFeatureRequest,
QgsExpression,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterFileDestination,
QgsProcessingParameterMultipleLayers,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterRasterDestination,
QgsApplication,
QgsProject,
QgsRasterLayer,
QgsCoordinateTransform,
QgsCoordinateReferenceSystem)
from osgeo import osr, gdal_array, gdal #https://gdal.org/python/
from math import floor, ceil
import numpy as np
from lftools.geocapt.dip import Interpolar
from lftools.geocapt.imgs import Imgs
import os
from qgis.PyQt.QtGui import QIcon
| 41.51532 | 135 | 0.541063 |
f7fc84f573aa97d3b828afe66e29e4f49f7bb79c | 1,393 | py | Python | quantlab/COCO/utils/inference.py | lukasc-ch/QuantLab | 7ddcc51ec1131a58269768cd898ce04e8b49beb6 | [
"Apache-2.0"
] | 6 | 2019-05-24T17:39:07.000Z | 2021-11-06T22:19:55.000Z | quantlab/COCO/utils/inference.py | lukasc-ch/QuantLab | 7ddcc51ec1131a58269768cd898ce04e8b49beb6 | [
"Apache-2.0"
] | null | null | null | quantlab/COCO/utils/inference.py | lukasc-ch/QuantLab | 7ddcc51ec1131a58269768cd898ce04e8b49beb6 | [
"Apache-2.0"
] | 4 | 2019-05-24T17:39:15.000Z | 2021-04-02T07:13:11.000Z | import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
| 42.212121 | 128 | 0.580761 |
f7fcc0247bffa7d5ad90651380c319258f099e35 | 633 | py | Python | dockwidhistory.py | kimoamer/Clinic-Manager | 53184a4e8f369bf083109d065b2042fc7cf5bfbd | [
"MIT"
] | 3 | 2021-05-12T01:05:12.000Z | 2022-02-11T15:43:00.000Z | dockwidhistory.py | kimoamer/Clinic-Manager | 53184a4e8f369bf083109d065b2042fc7cf5bfbd | [
"MIT"
] | null | null | null | dockwidhistory.py | kimoamer/Clinic-Manager | 53184a4e8f369bf083109d065b2042fc7cf5bfbd | [
"MIT"
] | null | null | null | from PyQt5.QtWidgets import QDialog
from PyQt5.QtGui import QFont
from PyQt5.QtCore import Qt
from dockwina import Ui_Form as docka
| 33.315789 | 51 | 0.665087 |
f7fcf7559948b6752dd0ee377be44bd42c092522 | 351 | py | Python | forest_lite/server/lib/palette.py | uk-gov-mirror/MetOffice.forest-lite | 9406b53f7e6a9651eb675e0ac2e5945421b25557 | [
"BSD-3-Clause"
] | 6 | 2020-08-05T16:12:57.000Z | 2022-01-06T01:34:19.000Z | forest_lite/server/lib/palette.py | uk-gov-mirror/MetOffice.forest-lite | 9406b53f7e6a9651eb675e0ac2e5945421b25557 | [
"BSD-3-Clause"
] | 49 | 2020-08-14T13:58:32.000Z | 2021-06-29T11:42:32.000Z | forest_lite/server/lib/palette.py | uk-gov-mirror/MetOffice.forest-lite | 9406b53f7e6a9651eb675e0ac2e5945421b25557 | [
"BSD-3-Clause"
] | 2 | 2020-12-03T09:24:13.000Z | 2021-04-11T06:10:36.000Z | import bokeh.palettes
| 27 | 68 | 0.566952 |
f7fdd8880ea99f126ba61a61e3b34ab49ba52b93 | 1,549 | py | Python | runtests.py | ombu/django-sortedm2m | 2691cf00174577bc667d5d8c1d42071604ee2095 | [
"BSD-3-Clause"
] | null | null | null | runtests.py | ombu/django-sortedm2m | 2691cf00174577bc667d5d8c1d42071604ee2095 | [
"BSD-3-Clause"
] | null | null | null | runtests.py | ombu/django-sortedm2m | 2691cf00174577bc667d5d8c1d42071604ee2095 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os, sys, warnings
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_project.settings")
import django
from django.core.management import execute_from_command_line
if django.VERSION < (1, 6):
default_test_apps = [
'sortedm2m_tests',
'test_south_support',
]
else:
default_test_apps = [
'sortedm2m_tests',
]
# Only test south support for Django 1.6 and lower.
if django.VERSION < (1, 7):
default_test_apps += [
'test_south_support',
]
if __name__ == '__main__':
runtests(*sys.argv[1:])
| 28.163636 | 88 | 0.654616 |
f7fe2e12189f5c7bd5c301d8cd6a29b000ff6951 | 4,352 | py | Python | origin_check.py | mikispag/OriginCheck | b3bda26c382cdbfd78bddc11d99d6e8723255599 | [
"MIT"
] | 1 | 2020-08-19T06:53:24.000Z | 2020-08-19T06:53:24.000Z | origin_check.py | mikispag/OriginCheck | b3bda26c382cdbfd78bddc11d99d6e8723255599 | [
"MIT"
] | null | null | null | origin_check.py | mikispag/OriginCheck | b3bda26c382cdbfd78bddc11d99d6e8723255599 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import concurrent.futures
import logging
import requests
from sys import argv, exit
from urllib.parse import urlparse
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
HEADERS = {
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.30 Safari/537.36'
}
MIN_RESPONSE_LENGTH = 100
NUM_WORKERS = 50
urls = []
if len(argv) < 2:
exit("Please specify a URLs file.")
with open(argv[1]) as f:
urls = [line.rstrip() for line in f]
with open('results.csv', 'w') as w:
print('url,SAMEORIGIN_OK,CROSSORIGIN_OK,SAMEORIGIN_KO_STATUS,SAMEORIGIN_KO_RESPONSE,CROSSORIGIN_KO_STATUS,CROSSORIGIN_KO_RESPONSE', file=w)
with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
future_to_result = {executor.submit(check, url): url for url in urls}
for future in concurrent.futures.as_completed(future_to_result):
try:
result = future.result()
except:
continue
else:
if result:
print('{},{},{},{},{},{},{}'.format(result['url'],
int(result['SAMEORIGIN_OK']),
int(result['CROSSORIGIN_OK']),
int(result['SAMEORIGIN_KO_STATUS']),
int(result['SAMEORIGIN_KO_RESPONSE']),
int(result['CROSSORIGIN_KO_STATUS']),
int(result['CROSSORIGIN_KO_RESPONSE'])
), file=w)
| 39.563636 | 143 | 0.584789 |
f7ff07662b3e96ced8491b8279428f96107213e1 | 743 | py | Python | orange3/Orange/preprocess/setup.py | rgschmitz1/BioDepot-workflow-builder | f74d904eeaf91ec52ec9b703d9fb38e9064e5a66 | [
"MIT"
] | 54 | 2017-01-08T17:21:49.000Z | 2021-11-02T08:46:07.000Z | orange3/Orange/preprocess/setup.py | Synthia-3/BioDepot-workflow-builder | 4ee93abe2d79465755e82a145af3b6a6e1e79fd4 | [
"MIT"
] | 22 | 2017-03-28T06:03:14.000Z | 2021-07-28T05:43:55.000Z | orange3/Orange/preprocess/setup.py | Synthia-3/BioDepot-workflow-builder | 4ee93abe2d79465755e82a145af3b6a6e1e79fd4 | [
"MIT"
] | 21 | 2017-01-26T21:12:09.000Z | 2022-01-31T21:34:59.000Z | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD Style.
import os
import numpy
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration(top_path="").todict())
| 24.766667 | 66 | 0.644684 |
f7ff646590489831f35fa9fe7ca9c0fe9f2f76be | 592 | py | Python | ProjectEuler_plus/euler_042.py | byung-u/HackerRank | 4c02fefff7002b3af774b99ebf8d40f149f9d163 | [
"MIT"
] | null | null | null | ProjectEuler_plus/euler_042.py | byung-u/HackerRank | 4c02fefff7002b3af774b99ebf8d40f149f9d163 | [
"MIT"
] | null | null | null | ProjectEuler_plus/euler_042.py | byung-u/HackerRank | 4c02fefff7002b3af774b99ebf8d40f149f9d163 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
from math import sqrt
# (n * (n + 1)) / 2 -> n ** 2 + n - (2 * x)
# Solved with quadratic equation
# https://en.wikipedia.org/wiki/Quadratic_equation
for _ in range(int(input().strip())):
t = int(input().strip())
d = (sqrt(4 * 2 * t + 1) - 1)
if d.is_integer():
print(int(d) // 2)
else:
print(-1)
| 21.925926 | 52 | 0.489865 |
7900515320c3b3319c03f61841dc3f24a082e7f3 | 12,476 | py | Python | src/lpb.py | RobbinBouwmeester/LIT | 0516a69fbf1b8e9976524e0c243f82de041df544 | [
"Apache-2.0"
] | null | null | null | src/lpb.py | RobbinBouwmeester/LIT | 0516a69fbf1b8e9976524e0c243f82de041df544 | [
"Apache-2.0"
] | null | null | null | src/lpb.py | RobbinBouwmeester/LIT | 0516a69fbf1b8e9976524e0c243f82de041df544 | [
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2017 Robbin Bouwmeester
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE."""
__author__ = "Robbin Bouwmeester"
__copyright__ = "Copyright 2017"
__credits__ = ["Robbin Bouwmeester"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Robbin Bouwmeester"
__email__ = "Robbin.bouwmeester@ugent.be"
__status__ = "nightly funzies"
import pandas as pd
from itertools import groupby
import logging
if __name__ == "__main__":
    # Log to a file so long batch runs leave an inspectable trace.
    logging.basicConfig(filename="prec_filter.log",
                        level=logging.DEBUG,
                        filemode="w",
                        format="%(levelname)s:%(created)f:%(asctime)s:%(message)s")

    logging.info("Reading the LPB database ...")
    # NOTE(review): LipidBLAST and Precursor_filter are defined elsewhere in
    # this module (outside this excerpt) -- confirm their interfaces there.
    lpb = LipidBLAST()
    logging.info("Done reading the LPB database ...")
    logging.info(lpb)

    # Input precursor masses produced by an earlier pipeline step.
    step_three_df = pd.read_csv("stepone_new.csv")

    precf = Precursor_filter(lpb)

    prec_filt_result = []
    for index,row in step_three_df.iterrows():
        if (index % 10000==0):
            # Progress heartbeat every 10k rows.
            logging.info("Analyzing row number and m/z: %s - %s" % (index,row["mz"]))
        # Look up database entries matching this precursor m/z.
        prec_hits = precf.retrieve_entry_pre_c_mass(row["mz"])
        for hit in prec_hits:
            # Hit layout inferred from the indexing below: hit[0] is an
            # "abbreviation|ion" string, hit[1] a mass delta, hit[2] an entry
            # object with .mw and .chem_form -- TODO confirm against
            # Precursor_filter.
            prec_filt_result.append([row["mz"],hit[2].mw,hit[1],hit[0].split("|")[0],hit[2].chem_form,hit[0].split("|")[1]])

    # Collect all matches into a table and export for manual review.
    prec_filt_result = pd.DataFrame(prec_filt_result)
    prec_filt_result.columns = ["Input Mass","Matched Mass","Delta","Abbreviation","Formula","Ion"]
    prec_filt_result.to_excel("batch_results.xlsx",index=False)
| 36.162319 | 303 | 0.655579 |
79016946767147d0fbaeddece8c5f2511d1e6b1d | 178 | py | Python | floris/tools/optimization/scipy/__init__.py | eirikur16/flrs | c98604593753def05086b54ce82f5551f01d2529 | [
"Apache-2.0"
] | 91 | 2019-06-04T08:56:29.000Z | 2022-03-13T17:39:22.000Z | floris/tools/optimization/scipy/__init__.py | eirikur16/flrs | c98604593753def05086b54ce82f5551f01d2529 | [
"Apache-2.0"
] | 224 | 2019-04-08T22:03:45.000Z | 2022-03-31T17:56:09.000Z | floris/tools/optimization/scipy/__init__.py | eirikur16/flrs | c98604593753def05086b54ce82f5551f01d2529 | [
"Apache-2.0"
] | 97 | 2019-04-23T20:48:20.000Z | 2022-03-29T08:17:02.000Z | from . import (
yaw,
layout,
base_COE,
optimization,
layout_height,
power_density,
yaw_wind_rose,
power_density_1D,
yaw_wind_rose_parallel,
)
| 14.833333 | 27 | 0.651685 |
790266e9a7bcf554bd70851b9a13216ab9f797e3 | 11,530 | py | Python | src/gdata/spreadsheets/data.py | Cloudlock/gdata-python3 | a6481a13590bfa225f91a97b2185cca9aacd1403 | [
"Apache-2.0"
] | 19 | 2017-06-09T13:38:03.000Z | 2020-12-12T07:45:48.000Z | src/gdata/spreadsheets/data.py | AlexxIT/gdata-python3 | 5cc5a83a469d87f804d1fda8760ec76bcb6050c9 | [
"Apache-1.1"
] | 11 | 2017-07-22T07:09:54.000Z | 2020-12-02T15:08:48.000Z | src/gdata/spreadsheets/data.py | AlexxIT/gdata-python3 | 5cc5a83a469d87f804d1fda8760ec76bcb6050c9 | [
"Apache-1.1"
] | 25 | 2017-07-03T11:30:39.000Z | 2020-10-01T02:21:13.000Z | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License 2.0;
# This module is used for version 2 of the Google Data APIs.
"""Provides classes and constants for the XML in the Google Spreadsheets API.
Documentation for the raw XML which these classes represent can be found here:
http://code.google.com/apis/spreadsheets/docs/3.0/reference.html#Elements
"""
# __author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
import gdata.data
# Template for fully-qualified element names in the core Spreadsheets XML
# namespace (fill in the local element name with %).
GS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s'
# Namespace of the "extended" elements (custom columns of list feeds).
GSX_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006/extended'
# Values for a list feed's insertion mode.
INSERT_MODE = 'insert'
OVERWRITE_MODE = 'overwrite'
# Link rel identifying a spreadsheet's worksheets feed.
WORKSHEETS_REL = 'http://schemas.google.com/spreadsheets/2006#worksheetsfeed'
# URL/id templates used when assembling batch cell-update requests:
# feed id takes (spreadsheet_key, worksheet_id); entry id appends row/column.
BATCH_POST_ID_TEMPLATE = ('https://spreadsheets.google.com/feeds/cells'
                          '/%s/%s/private/full')
BATCH_ENTRY_ID_TEMPLATE = '%s/R%sC%s'
BATCH_EDIT_LINK_TEMPLATE = '%s/batch'
def build_batch_cells_update(spreadsheet_key, worksheet_id):
    """Create an empty cells feed for accumulating batch cell updates.

    Call batch_set_cell on the resulting CellsFeed instance, then send the
    feed as a batch request.

    Args:
      spreadsheet_key: The ID of the spreadsheet.
      worksheet_id: The ID of the worksheet whose cells will be updated.

    Returns:
      A CellsFeed whose id and edit link point at the worksheet's batch
      cells endpoint.
    """
    feed_id = BATCH_POST_ID_TEMPLATE % (spreadsheet_key, worksheet_id)
    edit_link = atom.data.Link(rel='edit',
                               href=BATCH_EDIT_LINK_TEMPLATE % (feed_id,))
    return CellsFeed(id=atom.data.Id(text=feed_id), link=[edit_link])


BuildBatchCellsUpdate = build_batch_cells_update
| 31.162162 | 82 | 0.674761 |
79028a174225260b671df8c8ac4560369e16c2c8 | 710 | py | Python | tests/test_issues/test_member_example.py | hsolbrig/pyjsg | 5ef46d9af6a94a0cd0e91ebf8b22f61c17e78429 | [
"CC0-1.0"
] | 3 | 2017-07-23T11:11:23.000Z | 2020-11-30T15:36:51.000Z | tests/test_issues/test_member_example.py | hsolbrig/pyjsg | 5ef46d9af6a94a0cd0e91ebf8b22f61c17e78429 | [
"CC0-1.0"
] | 15 | 2018-01-05T17:18:34.000Z | 2021-12-13T17:40:25.000Z | tests/test_issues/test_member_example.py | hsolbrig/pyjsg | 5ef46d9af6a94a0cd0e91ebf8b22f61c17e78429 | [
"CC0-1.0"
] | null | null | null | import unittest
from pyjsg.validate_json import JSGPython
if __name__ == '__main__':
unittest.main()
| 28.4 | 77 | 0.533803 |
7902cca06e3a841cee96255c053ca834cc5022f5 | 7,223 | py | Python | src/pte/filetools/filefinder_abc.py | richardkoehler/pynm-decode | 3120a410d79d3fce45d0f59025d68ba2d5e80d9e | [
"MIT"
] | 1 | 2022-01-08T09:33:09.000Z | 2022-01-08T09:33:09.000Z | src/pte/filetools/filefinder_abc.py | richardkoehler/pynm-decode | 3120a410d79d3fce45d0f59025d68ba2d5e80d9e | [
"MIT"
] | null | null | null | src/pte/filetools/filefinder_abc.py | richardkoehler/pynm-decode | 3120a410d79d3fce45d0f59025d68ba2d5e80d9e | [
"MIT"
] | null | null | null | """Define abstract base classes to construct FileFinder classes."""
import os
import shutil
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Sequence, Union
import mne_bids
| 33.439815 | 79 | 0.56417 |
790323f724e852cdcf7d4d9d3e4d89703473f768 | 3,725 | py | Python | panel/routes/server.py | emilio2hd/pz-panel | 6b53f465b2c041e963e2b75e48b1612549ad6fea | [
"MIT"
] | null | null | null | panel/routes/server.py | emilio2hd/pz-panel | 6b53f465b2c041e963e2b75e48b1612549ad6fea | [
"MIT"
] | null | null | null | panel/routes/server.py | emilio2hd/pz-panel | 6b53f465b2c041e963e2b75e48b1612549ad6fea | [
"MIT"
] | null | null | null | import glob
import time
from os import path
from flask import Blueprint, jsonify, current_app, request, Response, json
from flask_login import login_required
from .. import pz_server_state
from ..services.power_actions_service import is_valid_power_action, execute_action
from ..services.server_options_service import read_config, save_config, prepared_config_to_view, formatted_config_lines
from ..services.server_status_service import get_server_status
from ..utils.resources_functions import server_resources
server_blueprint = Blueprint('server', __name__, url_prefix='/server')
def get_config(pz_server_config):
    """Return only the workshop/mod entries of the server configuration.

    Reads the full server config via ``read_config`` and projects it down
    to the two keys the panel exposes.
    """
    full_config = read_config(pz_server_config)
    return {key: full_config[key] for key in ("WorkshopItems", "Mods")}
| 29.8 | 119 | 0.68698 |
7903777a50ff41a94bed60837d113e3a3fca6cc0 | 23,095 | py | Python | sub_models.py | tmartin2/EnsembleSplice-Inactive | a161ff007b47ceadd3a21376f2eac2971bb81d90 | [
"MIT"
] | null | null | null | sub_models.py | tmartin2/EnsembleSplice-Inactive | a161ff007b47ceadd3a21376f2eac2971bb81d90 | [
"MIT"
] | null | null | null | sub_models.py | tmartin2/EnsembleSplice-Inactive | a161ff007b47ceadd3a21376f2eac2971bb81d90 | [
"MIT"
] | null | null | null | # -----------------------------------------------------------------------------
# Copyright (c) 2021 Trevor P. Martin. All rights reserved.
# Distributed under the MIT License.
# -----------------------------------------------------------------------------
from Data import encode_data
# from utils import cross_validation
from Models import utils
from Models import build_models
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import Perceptron
from sklearn.svm import LinearSVC
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import numpy as np
import pandas as pd
import tensorflow as tf
import copy
def run(datasets,
        splice_sites,
        sub_models,
        save,
        vis,
        iter,
        metrics,
        summary,
        config,
        num_folds,
        bal,
        imbal,
        imbal_t,
        imbal_f,
        batch_size,
        epochs
        ):
    """Encode the requested datasets and cross-validate each sub-model.

    Parameters
    ----------
    datasets : iterable of str
        Dataset identifiers from {nn269, ce, hs3d, hs2, ce2, dm, ar, or}.
    splice_sites
        Forwarded to utils.cross_validation; presumably selects
        acceptor/donor training -- TODO confirm in Models.utils.
    sub_models : iterable of str
        Names of the sub-model architectures to train.
    save : bool
        Forwarded to utils.cross_validation (model saving).
    vis : bool
        Intended to toggle plotting; the plotting call below is currently
        commented out, so this flag has no effect.
    iter, metrics, imbal, imbal_t, imbal_f
        Unused in this function body (``iter`` also shadows the builtin).
    summary, config : bool
        Forwarded to utils.cross_validation (architecture summary /
        configuration printing).
    num_folds : int
        Number of folds for k-fold cross-validation.
    bal : bool
        Whether to balance each dataset (forwarded to the encoder).
    batch_size : int
        Training batch size.
    epochs : int
        Number of training epochs.

    Returns
    -------
    dict
        results[sub_model][dataset] -> output of utils.cross_validation,
        or the '' placeholder for combinations never populated.
    """
    # (acceptor row len, donor row len) by dataset: the fixed input-row
    # widths each network expects, per splice-site type.
    network_rows = {
        'acceptor':{
            'nn269':90, 'ce':141,
            'hs3d':140, 'hs2':602,
            'ce2':602, 'dm':602,
            'ar':602, 'or':602,
        },
        'donor':{
            'nn269':15, 'ce':141,
            'hs3d':140, 'hs2':602,
            'ce2':602, 'dm':602,
            'ar':602, 'or':602,
        },
    }
    # initialize selected sub models: '' is the "not populated" sentinel
    # checked before running cross-validation below.
    to_run = dict(
        [
            (sub_model,{
                'nn269':'', 'ce':'',
                'hs3d':'', 'hs2':'',
                'ce2':'', 'dm':'',
                'ar':'', 'or':''
            }) for sub_model in sub_models
        ]
    )
    # results dictionary: deep copy so results can be filled in without
    # mutating the to_run sentinels.
    results = copy.deepcopy(to_run)
    # populate sub models with encoded data
    for sub_model in sub_models:
        for dataset in datasets:
            # encode datasets -> return (acc_x, acc_y, don_x, don_y)
            to_run[sub_model][dataset] = encode_data.encode(dataset, sub_model, bal)
    # get a metrics dictionary: per-sub-model evaluation slots filled in
    # by utils.cross_validation.
    evals = dict(
        [
            (sub_model, {
                'f1':'', 'precision':'',
                'sensitivity':'', 'specificity':'',
                'recall':'', 'mcc':'',
                'err_rate':''
            }) for sub_model in sub_models
        ]
    )
    # accumulate results from running cross validation; skip any
    # (sub_model, dataset) pair that was never given encoded data.
    for sub_model in sub_models:
        for dataset in datasets:
            if to_run[sub_model][dataset] == '':
                pass
            else:
                results[sub_model][dataset] = utils.cross_validation(
                    num_folds,
                    sub_model,
                    splice_sites,
                    dataset,
                    to_run[sub_model][dataset],# encoded data for dataset (ds)
                    network_rows, # donor, acceptor rows for ds
                    evals,
                    summary,
                    config,
                    batch_size,
                    epochs,
                    save,
                )
    # if vis:
    # NOTE(review): the vis guard above is commented out, so results are
    # always printed regardless of the flag.
    print(results)
    return results
# plot results
# loss_acc_sub_models(
# results,
# datasets,
# sub_models,
# epochs,
# num_folds,
# bal
# )
# # different by splice site type
# if splice_site_type == 'acceptor':
# cnn_X_train, cnn_y_train = cnn_acc_x, acc_y
# # same name to preserve for loop structure
# X_train, y_train = rd_acc_x, acc_y
# dataset_row_num = network_rows[dataset][0]
# if splice_site_type == 'donor':
# cnn_X_train, cnn_y_train = cnn_don_x, don_y
# X_train, y_train = rd_don_x, don_y
# dataset_row_num = network_rows[dataset][1]
#
#
# # if tune_rnn:
# # tune_rnn()
#
# # perform cross validation
# # general
# trn_fold_accs, trn_fold_losses = [], []
# val_fold_accs, val_fold_losses = [], []
# # esplice
# rnn_va, rnn_vl, cnn_vl, cnn_va, dnn_vl, dnn_va = [],[],[],[],[],[]
# rnn_ta, rnn_tl, cnn_tl, cnn_ta, dnn_tl, dnn_ta = [],[],[],[],[],[]
#
# # this loop inspired by https://www.machinecurve.com/
# #index.php/2020/02/18/how-to-use-k-fold-cross-validation-with-keras/
# k_fold = KFold(n_splits=num_folds, shuffle=False)
# fold = 1
# for train, test in k_fold.split(X_train, y_train):
# if model_architecture != 'esplice':
# X_trn, y_trn = X_train[train], y_train[train]
# X_val, y_val = X_train[test], y_train[test]
# if model_architecture=='cnn':
# history, model = build_cnn(
# dataset_row_num,
# summary,
# X_trn,
# y_trn,
# batch_size,
# epochs,
# X_val,#becomes X_val
# y_val,#becomes y_val
# fold,
# num_folds
# )
# if model_architecture=='dnn':
# history, model = build_dnn(
# dataset_row_num,
# summary,
# X_trn,
# y_trn,
# batch_size,
# epochs,
# X_val,#becomes X_val
# y_val,#becomes y_val
# fold,
# num_folds
# )
# if model_architecture=='rnn':
# history, model = build_rnn(
# dataset_row_num,
# summary,
# X_trn,
# y_trn,
# batch_size,
# epochs,
# X_val,#becomes X_val
# y_val,#becomes y_val
# fold,
# num_folds
# )
# # model.predict(X_trn)
# val_fold_accs.append(history.history['val_accuracy'])
# val_fold_losses.append(history.history['val_loss'])
# trn_fold_accs.append(history.history['accuracy'])
# trn_fold_losses.append(history.history['loss'])
# fold += 1
# else:
# # set up submodel datasets
# cnn_X_trn, cnn_y_trn = cnn_X_train[train], cnn_y_train[train]
# cnn_X_val, cnn_y_val = cnn_X_train[test], cnn_y_train[test]
# rd_X_trn, rd_y_trn = X_train[train], y_train[train]
# rd_X_val, rd_y_val = X_train[test], y_train[test]
# # build each submodel
# hist01, submodel_01 = build_cnn(
# dataset_row_num,
# summary,
# cnn_X_trn,
# cnn_y_trn,
# batch_size,
# epochs,
# cnn_X_val,
# cnn_y_val,
# fold,
# num_folds
# )
# hist02, submodel_02 = build_dnn(
# dataset_row_num,
# summary,
# rd_X_trn,
# rd_y_trn,
# batch_size,
# epochs,
# rd_X_val,
# rd_y_val,
# fold,
# num_folds
# )
# # hist03, submodel_03 = build_rnn(
# # dataset_row_num,
# # summary,
# # rd_X_trn,
# # rd_y_trn,
# # batch_size,
# # epochs,
# # rd_X_val,
# # rd_y_val,
# # fold,
# # num_folds
# # )
# models = [submodel_01, submodel_02]#, submodel_03]
# trn_scores, val_scores = EnsembleSplice.build(
# models,
# batch_size,
# cnn_X_trn,
# cnn_y_trn,
# cnn_X_val,
# cnn_y_val,
# rd_X_trn,
# rd_y_trn,
# rd_X_val,
# rd_y_val,
# )
# # get final epoch accuracy
# trn_fold_accs.append(trn_scores)
# val_fold_accs.append(val_scores)
# # rnn_va.append(hist03.history['val_accuracy'])
# # rnn_vl.append(hist03.history['val_loss'])
# # rnn_ta.append(hist03.history['accuracy'])
# # rnn_tl.append(hist03.history['loss'])
# # cnn_vl.append(hist01.history['val_loss'])
# # cnn_va.append(hist01.history['val_accuracy'])
# # cnn_tl.append(hist01.history['loss'])
# # cnn_ta.append(hist01.history['accuracy'])
# # dnn_vl.append(hist02.history['val_loss'])
# # dnn_va.append(hist02.history['val_accuracy'])
# # dnn_tl.append(hist02.history['loss'])
# # dnn_ta.append(hist02.history['accuracy'])
#
# # rnn_va.append(hist03.history['val_accuracy'][-1])
# # rnn_vl.append(hist03.history['val_loss'][-1])
# # rnn_ta.append(hist03.history['accuracy'][-1])
# # rnn_tl.append(hist03.history['loss'][-1])
# cnn_vl.append(hist01.history['val_loss'][-1])
# cnn_va.append(hist01.history['val_accuracy'][-1])
# cnn_tl.append(hist01.history['loss'][-1])
# cnn_ta.append(hist01.history['accuracy'][-1])
# dnn_vl.append(hist02.history['val_loss'][-1])
# dnn_va.append(hist02.history['val_accuracy'][-1])
# dnn_tl.append(hist02.history['loss'][-1])
# dnn_ta.append(hist02.history['accuracy'][-1])
#
# fold += 1
#
# # do something with predicted values and real values to get AUC-ROC scores
# # sklearn.metrics.roc_auc_score
# # also get f-score and other scores here
# # maybe connect tune_rnn and build_rnn -> get tuned parameters and plug them
# # in automatically to RNN
#
# if model_architecture != 'esplice':
#
# val_acc_by_epoch = np.apply_along_axis(lambda row: np.mean(row), 1, np.asarray(val_fold_accs).T)
# val_loss_by_epoch = np.apply_along_axis(lambda row: np.mean(row), 1, np.asarray(val_fold_losses).T)
# trn_acc_by_epoch = np.apply_along_axis(lambda row: np.mean(row), 1, np.asarray(trn_fold_accs).T)
# trn_loss_by_epoch = np.apply_along_axis(lambda row: np.mean(row), 1, np.asarray(trn_fold_losses).T)
#
# std_val_acc = np.apply_along_axis(lambda row: np.std(row), 1, np.asarray(val_fold_accs).T)
# std_val_loss = np.apply_along_axis(lambda row: np.std(row), 1, np.asarray(val_fold_losses).T)
# std_trn_acc = np.apply_along_axis(lambda row: np.std(row), 1, np.asarray(trn_fold_accs).T)
# std_trn_loss = np.apply_along_axis(lambda row: np.std(row), 1, np.asarray(trn_fold_losses).T)
#
# values = [
# val_acc_by_epoch,
# std_val_acc,
# trn_acc_by_epoch,
# std_trn_acc,
# val_loss_by_epoch,
# std_val_loss,
# trn_loss_by_epoch,
# std_trn_loss
# ]
#
# if model_architecture == 'esplice':
#
# # make a DICTIONARY AREY
# # ES_Val_ACc: (vacc, std_va)
# mean_good = lambda seq: np.apply_along_axis(lambda row: np.mean(row), 1, np.asarray(seq).T)
# std_good = lambda seq: np.apply_along_axis(lambda row: np.std(row), 1, np.asarray(seq).T)
# vacc = val_fold_accs
# tacc = trn_fold_accs
# # std_va = val_fold_accs
# # std_ta = trn_fold_accs
#
# values = [
# val_fold_accs,
# trn_fold_accs,
# #rnn_va,
# # rnn_vl,
# #rnn_ta,
# # rnn_tl,
# # cnn_vl,
# cnn_va,
# # cnn_tl,
# cnn_ta,
# # dnn_vl,
# dnn_va,
# # dnn_tl,
# dnn_ta
# ]
#
# # cnn_mva = mean_good(cnn_va)
# # cnn_mvl = mean_good(cnn_vl)
# # cnn_mta = mean_good(cnn_ta)
# # cnn_mtl = mean_good(cnn_tl)
# # cnn_sva = std_good(cnn_va)
# # cnn_svl = std_good(cnn_vl)
# # cnn_sta = std_good(cnn_ta)
# # cnn_stl = std_good(cnn_tl)
# #
# # dnn_mva = mean_good(dnn_va)
# # dnn_mvl = mean_good(dnn_vl)
# # dnn_mta = mean_good(dnn_ta)
# # dnn_mtl = mean_good(dnn_tl)
# # dnn_sva = std_good(dnn_va)
# # dnn_svl = std_good(dnn_vl)
# # dnn_sta = std_good(dnn_ta)
# # dnn_stl = std_good(dnn_tl)
# #
# # rnn_mva = mean_good(rnn_va)
# # rnn_mvl = mean_good(rnn_vl)
# # rnn_mta = mean_good(rnn_ta)
# # rnn_mtl = mean_good(rnn_tl)
# # rnn_sva = std_good(rnn_va)
# # rnn_svl = std_good(rnn_vl)
# # rnn_sta = std_good(rnn_ta)
# # rnn_stl = std_good(rnn_tl)
#
# # values = [
# # vacc,
# # # std_va,
# # tacc,
# # # std_ta,
# # cnn_mva,
# # cnn_sva,
# # cnn_mvl,
# # cnn_svl,
# # cnn_mta,
# # cnn_sta,
# # cnn_mtl,
# # cnn_stl,
# # dnn_mva,
# # dnn_sva,
# # dnn_mvl,
# # dnn_svl,
# # dnn_mta,
# # dnn_sta,
# # dnn_mtl,
# # dnn_stl,
# # rnn_mva,
# # rnn_sva,
# # rnn_mvl,
# # rnn_svl,
# # rnn_mta,
# # rnn_sta,
# # rnn_mtl,
# # rnn_stl,
# # ]
# if config:
# print(model.get_config())
# if save_model:
# name = input('What would you like to name this model?: ')
# model.save(f'{name}')
# tf.keras.utils.plot_model(model, f'{name}.png', show_shapes=True)
# if visualize:
# loss_acc_esplice(
# values,
# model_architecture,
# dataset,
# splice_site_type,
# num_folds,
# epochs,
# bal,
# )
| 34.781627 | 126 | 0.525352 |
7903ec9c043049b9e677a2917e22d25071fe1f34 | 3,227 | py | Python | tracportalopt/project/notification.py | isabella232/TracPortalPlugin | 985581b16aad360cfc78d6b901c93fb922f7bc30 | [
"MIT"
] | 2 | 2015-01-19T05:53:30.000Z | 2016-01-08T10:30:02.000Z | tracportalopt/project/notification.py | iij/TracPortalPlugin | 985581b16aad360cfc78d6b901c93fb922f7bc30 | [
"MIT"
] | 1 | 2022-01-20T12:47:18.000Z | 2022-01-20T12:47:18.000Z | tracportalopt/project/notification.py | isabella232/TracPortalPlugin | 985581b16aad360cfc78d6b901c93fb922f7bc30 | [
"MIT"
] | 3 | 2016-12-08T02:25:36.000Z | 2022-01-20T12:10:58.000Z | #! -*- coding: utf-8 -*-
#
# (C) 2013 Internet Initiative Japan Inc.
# All rights reserved.
#
# Created on 2013/05/15
# @author: yosinobu@iij.ad.jp
"""Notify project owner with email when the project created successfully."""
from pkg_resources import resource_filename
from trac.config import Option, ListOption
from trac.core import Component, implements
from trac.notification import Notify, NotifyEmail
from trac.web.chrome import ITemplateProvider
from tracportal.i18n import _
from tracportal.project.api import IProjectCreationInterceptor
| 37.523256 | 115 | 0.654478 |
790488091f13f4b2ff427e7b9bda7aa18b0d732c | 1,391 | py | Python | misc/style/check-include-guard-convention.py | nitinkaveriappa/downward | 5c9a1b5111d667bb96f94da61ca2a45b1b70bb83 | [
"MIT"
] | 4 | 2019-04-23T10:41:35.000Z | 2019-10-27T05:14:42.000Z | misc/style/check-include-guard-convention.py | nitinkaveriappa/downward | 5c9a1b5111d667bb96f94da61ca2a45b1b70bb83 | [
"MIT"
] | null | null | null | misc/style/check-include-guard-convention.py | nitinkaveriappa/downward | 5c9a1b5111d667bb96f94da61ca2a45b1b70bb83 | [
"MIT"
] | 4 | 2018-01-16T00:00:22.000Z | 2019-11-01T23:35:01.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import glob
import os.path
import sys
# Locate the repository's src/ directory relative to this script:
# the script lives two levels below the repo root (misc/style/).
DIR = os.path.dirname(os.path.abspath(__file__))
REPO = os.path.dirname(os.path.dirname(DIR))
SRC_DIR = os.path.join(REPO, "src")

if __name__ == "__main__":
    # main() is defined elsewhere in this file (not visible in this
    # excerpt) -- TODO confirm.
    main()
| 28.979167 | 90 | 0.591661 |
7905a7207409a36e542edd41a689eb3240d45b7e | 432 | py | Python | kyu_7/fun_with_lists_length/length.py | pedrocodacyorg2/codewars | ba3ea81125b6082d867f0ae34c6c9be15e153966 | [
"Unlicense"
] | 1 | 2022-02-12T05:56:04.000Z | 2022-02-12T05:56:04.000Z | kyu_7/fun_with_lists_length/length.py | pedrocodacyorg2/codewars | ba3ea81125b6082d867f0ae34c6c9be15e153966 | [
"Unlicense"
] | 182 | 2020-04-30T00:51:36.000Z | 2021-09-07T04:15:05.000Z | kyu_7/fun_with_lists_length/length.py | pedrocodacyorg2/codewars | ba3ea81125b6082d867f0ae34c6c9be15e153966 | [
"Unlicense"
] | 4 | 2020-04-29T22:04:20.000Z | 2021-07-13T20:04:14.000Z | # Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
def length(head) -> int:
    """Return the number of nodes in the linked list starting at *head*.

    ``head`` may be ``None`` (the empty list), in which case 0 is returned.
    Each non-empty node is expected to expose a ``next`` attribute whose
    final value is ``None``.

    :param head: first node of the list, or None
    :return: node count as an int
    """
    count = 0
    node = head
    while node is not None:
        count += 1
        node = node.next
    return count
| 18.782609 | 53 | 0.581019 |