hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1b50b3ddf987ed769b7797b25c5046700b7a5755 | 4,172 | py | Python | data/scons/icl12.py | mmanzi/gradientdomain-mitsuba | c7c94e66e17bc41cca137717971164de06971bc7 | [
"Unlicense"
] | 92 | 2015-09-30T20:41:19.000Z | 2022-02-08T03:28:06.000Z | data/scons/icl12.py | mmanzi/gradientdomain-mitsuba | c7c94e66e17bc41cca137717971164de06971bc7 | [
"Unlicense"
] | null | null | null | data/scons/icl12.py | mmanzi/gradientdomain-mitsuba | c7c94e66e17bc41cca137717971164de06971bc7 | [
"Unlicense"
] | 21 | 2015-09-29T20:20:04.000Z | 2021-01-13T12:20:40.000Z | import os, sys, subprocess, copy, re
def get_output(script, args = None, shellenv = None):
    """Run an environment-setup script and return its environment dump.

    On Windows the batch file is executed and followed by ``set``; on Linux
    the script is sourced in a shell and the shell's ``set`` output is
    captured. Returns the raw text of the dump; raises IOError (with the
    script's stderr) on a non-zero exit status.
    """
    arg_str = args if args else ''
    if sys.platform == 'win32':
        # cmd.exe: run the batch file, then dump the environment with "set".
        cmd_line = '"%s" %s & set' % (script, arg_str)
        use_shell = False
    elif sys.platform.startswith('linux'):
        # POSIX shell: source the script so its exports land in this shell.
        cmd_line = 'source "%s" %s ; set' % (script, arg_str)
        use_shell = True
    else:
        raise Exception("Unsuported OS type: " + sys.platform)
    proc = subprocess.Popen(cmd_line, shell=use_shell,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            env=shellenv)
    # Read .stdout directly instead of .communicate(): the latter relies on
    # the threading module on Windows and fails on Pythons built without it.
    captured = proc.stdout.read()
    if proc.wait() != 0:
        raise IOError(proc.stderr.read())
    return captured
def parse_output(output, keep = None):
    """Parse "set"-style output (NAME=value lines) into a dict.

    Parameters
    ----------
    output : str
        Raw text as produced by dumping an environment with ``set``.
    keep : container or None
        If given, only variable names contained in *keep* are returned;
        otherwise every parsed variable is kept.

    Returns
    -------
    dict
        Mapping of variable name -> value string. Lines that do not look
        like assignments are silently skipped.
    """
    ret = {}  # this is the data we will return
    # NAME=value; the name is word characters, the value is the rest of line.
    reg = re.compile('(\\w*)=(.*)', re.I)
    for line in output.splitlines():
        m = reg.match(line)
        if m:
            name, value = m.group(1), m.group(2)
            # When a filter is supplied, drop variables outside it.
            # (The original duplicated this extraction in two branches and
            # ended with a dead "if keep is not None: pass" block.)
            if keep is None or name in keep:
                ret[name] = value
    return ret
def normalize_env(shellenv, keys):
    """Given a dictionary representing a shell environment, add the variables
    from os.environ needed for the processing of .bat files; the keys are
    controlled by the keys argument.
    It also makes sure the environment values are correctly encoded.
    Note: the environment is copied"""
    normenv = {}
    if shellenv:
        if sys.platform == 'win32':
            # NOTE(review): .encode('mbcs') yields bytes on Python 3; this
            # branch looks Python-2 specific -- confirm before porting.
            for k in shellenv.keys():
                normenv[k] = copy.deepcopy(shellenv[k]).encode('mbcs')
    # 'k in os.environ' replaces the Python-2-only dict.has_key() so this
    # helper also runs under Python 3.
    for k in keys:
        if k in os.environ:
            normenv[k] = os.environ[k]
    return normenv
def get_script_env(env,script,args=None,vars=None):
    '''
    Run *script* and collect the environment variables it defines.

    Returns a dictionary of all the data we want to merge or process in
    some other way, optionally restricted to the names listed in *vars*.
    '''
    # On Windows, COMSPEC must survive into the subprocess so the shell
    # can be located; elsewhere nothing extra is needed.
    keep_keys = ['COMSPEC'] if sys.platform == 'win32' else []
    nenv = normalize_env(env['ENV'], keep_keys)
    dump = get_output(script, args, nenv)
    return parse_output(dump, vars)
def merge_script_vars(env,script,args=None,vars=None):
    '''
    This merges the data retrieved from the script into the Environment
    by prepending it.
    script is the name of the script, args is optional arguments to pass
    vars are vars we want to retrieve; if None it will retrieve everything found
    '''
    shell_env = get_script_env(env, script, args, vars)
    # .items() instead of the Python-2-only .iteritems() so the tool also
    # works when SCons runs under Python 3.
    for k, v in shell_env.items():
        env.PrependENVPath(k, v, delete_existing=1)
def generate(env):
    """Configure *env* for the Intel C++ compiler (icl) on Windows.

    Locates an installed Intel Composer/Compiler via one of its ICPP_*
    environment variables, merges the compiler's iclvars.bat environment
    into *env*, and records the matching redistributable directory in
    env['REDIST_PATH']. No-op unless env['INTEL_COMPILER'] is True.
    Raises Exception for unknown architectures or Visual Studio versions.
    """
    if 'INTEL_COMPILER' not in env or env['INTEL_COMPILER'] != True:
        return
    if env['TARGET_ARCH'] == 'x86':
        arch = 'ia32'
        arch_redist = 'ia32'
    elif env['TARGET_ARCH'] == 'x86_64' or env['TARGET_ARCH'] == 'amd64':
        arch = 'ia32_intel64'
        arch_redist = 'intel64'
    else:
        raise Exception('Unknown architecture ' + env['TARGET_ARCH'])
    if env['MSVC_VERSION'] == '9.0':
        vsrelease = 'vs2008'
    elif env['MSVC_VERSION'] == '10.0':
        vsrelease = 'vs2010'
    else:
        raise Exception('Unknown version of visual studio!')
    # Newest releases first; the first installed one wins (same order as
    # the original elif chain).
    candidates = ('ICPP_COMPOSER2014', 'ICPP_COMPILER14',
                  'ICPP_COMPOSER2013', 'ICPP_COMPILER13',
                  'ICPP_COMPOSER2011', 'ICPP_COMPILER12')
    for var in candidates:
        if var in os.environ:
            icpp_path = os.environ[var]
            break
    else:
        # Error message fixed: the variables are named ICPP_*, not ICCPP_*.
        raise Exception('Could not find any of the ICPP_* environment variables!')
    merge_script_vars(env, os.path.join(icpp_path, 'bin/iclvars.bat'), arch + ' ' + vsrelease)
    env['REDIST_PATH'] = os.path.join(icpp_path, 'redist', arch_redist, 'compiler')
def exists(env):
    """Report whether this tool is usable: the build requested the Intel
    compiler and an ICPP_COMPOSER2011 installation is advertised."""
    intel_requested = ('INTEL_COMPILER' in env) and (env['INTEL_COMPILER'] == True)
    if not intel_requested:
        return False
    return 'ICPP_COMPOSER2011' in os.environ
| 30.452555 | 109 | 0.704938 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,739 | 0.416826 |
1b52292b45e0add22d3b789bcff47a696f671cfa | 2,740 | py | Python | src/backend/expungeservice/test/test_expunger.py | htharker42/recordExpung | c4cc21918f9974dc58bc6265407edbaa28d8b674 | [
"CNRI-Python",
"Linux-OpenIB",
"CECILL-B"
] | null | null | null | src/backend/expungeservice/test/test_expunger.py | htharker42/recordExpung | c4cc21918f9974dc58bc6265407edbaa28d8b674 | [
"CNRI-Python",
"Linux-OpenIB",
"CECILL-B"
] | null | null | null | src/backend/expungeservice/test/test_expunger.py | htharker42/recordExpung | c4cc21918f9974dc58bc6265407edbaa28d8b674 | [
"CNRI-Python",
"Linux-OpenIB",
"CECILL-B"
] | null | null | null | import datetime
import copy
import unittest
from expungeservice.expunger import *
def test_statute():
    """Statute equality and string rendering for 2-, 3- and 4-part statutes."""
    tests = [
        [[113, 45], '113.045'],
        [[113, 45, 5], '113.045(5)'],
        [[113, 45, 5, 'd'], '113.045(5)(d)'],
    ]
    for args, expected in tests:
        # Argument unpacking replaces the old copy-pasted len() branches
        # (which fell through to assert(0) for unexpected lengths).
        assert 2 <= len(args) <= 4
        s1 = Statute(*args)
        s2 = copy.deepcopy(s1)
        # A deep copy must compare equal and render identically.
        assert s1 == s2
        assert str(s1) == expected
def get_convicted_disp():
    # Fixture helper: a CONVICTED disposition dated 1996-01-01.
    return Disposition(DispositionType.CONVICTED, datetime.date(1996, 1, 1))
def get_dummy_statute():
    # Fixture helper: an arbitrary 4-part statute (113.045(5)(d)) for charges.
    return Statute(113, 45, 5, 'd')
def get_charge_crime_level(type_, class_):
    """Build a Charge of the given crime type/class, charged 1995-01-01,
    with the shared convicted-1996 disposition fixture."""
    name = '%s %s charge' % (type_, class_)
    return Charge(name,
                  get_dummy_statute(),
                  CrimeLevel(type_, class_),
                  datetime.date(1995, 1, 1),
                  get_convicted_disp())
"""
This mainly tests if we're able to construct the objects.
"""
def test_expunger_classes():
    """Smoke test: the record-model classes construct, and charges are
    counted across every case (2 charges x 2 cases = 4).

    The unused local fixtures (disp, statute) from the original were removed.
    """
    charges = [
        get_charge_crime_level('Felony', 'A'),
        get_charge_crime_level('Felony', 'A'),
    ]
    cases = [
        Case(charges, CaseState.OPEN, 100.50),
        Case(charges, CaseState.CLOSED, 0),
    ]
    client = Client('John Doe', datetime.date(1970, 1, 1), cases)
    assert(client.num_charges() == 4)
class TestExpunger(unittest.TestCase):
    """Eligibility analysis on a minimal client record."""
    def setUp(self):
        # add charge(s) when using this in a test
        self.open_case = Case(None, CaseState.OPEN, 0)
        self.closed_case = Case(None, CaseState.CLOSED, 0)
        # add case(s) when using this in a test
        self.client = Client('John Doe', datetime.date(1970, 1, 1), None)
        # Statute 137.225(5): the citation expected for ineligible results.
        self.statute_137_225_5 = Statute(137, 225, 5)
    def test_type_elig_felony(self):
        # A Class A felony conviction must come back INELIGIBLE, citing
        # statute 137.225(5).
        record_analyzer = RecordAnalyzer(self.client)
        result = record_analyzer.type_eligibility(
            get_charge_crime_level('Felony', 'A'))
        assert(result.code == ResultCode.INELIGIBLE)
        assert(result.statute == self.statute_137_225_5)
    def test_time_elig_open_case(self):
        # A record containing any open case must report OPEN_CASE for
        # time eligibility.
        self.client.cases = [self.closed_case, self.open_case]
        record_analyzer = RecordAnalyzer(self.client)
        result = record_analyzer.time_eligibility()
        assert(result.code == ResultCode.OPEN_CASE)
| 33.012048 | 76 | 0.584672 | 973 | 0.355109 | 0 | 0 | 0 | 0 | 0 | 0 | 254 | 0.092701 |
1b52e20e625677fa6203cae0d82c1a6e58aa1a2a | 254 | py | Python | django_tutorial/views/error_views.py | twtrubiks/django-tutorial | 9cb92ca03ba3de574b124446ab49c94f9900dcc8 | [
"MIT"
] | 431 | 2017-04-09T11:44:30.000Z | 2022-03-09T09:22:00.000Z | django_tutorial/views/error_views.py | zshen00/django-tutorial | 9cb92ca03ba3de574b124446ab49c94f9900dcc8 | [
"MIT"
] | 1 | 2017-10-26T06:17:58.000Z | 2018-04-27T06:52:01.000Z | django_tutorial/views/error_views.py | zshen00/django-tutorial | 9cb92ca03ba3de574b124446ab49c94f9900dcc8 | [
"MIT"
] | 138 | 2017-04-10T13:36:03.000Z | 2022-03-16T13:16:09.000Z | from django.shortcuts import render
def view_404(request):
    # Custom 404 handler: render the project's "page not found" template
    # while keeping the proper 404 status code on the response.
    return render(request, 'django_tutorial/error_pages/page_404.html', status=404)
def view_500(request):
    # Custom 500 handler: render the project's "server error" template
    # while keeping the proper 500 status code on the response.
    return render(request, 'django_tutorial/error_pages/page_500.html', status=500)
| 25.4 | 83 | 0.783465 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.338583 |
1b535c50608ad5d4557f8f150480d6750d1307a3 | 4,092 | py | Python | IMDB Sentiment Analysis using LSTMs, CNNs and BERT/main.py | lucalaringe/pytorch-examples | 5d5682bff0490748b7358ad71c6afc22bfdafab5 | [
"MIT"
] | 1 | 2018-11-21T07:01:16.000Z | 2018-11-21T07:01:16.000Z | IMDB Sentiment Analysis using LSTMs, CNNs and BERT/main.py | lucalaringe/pytorch-examples | 5d5682bff0490748b7358ad71c6afc22bfdafab5 | [
"MIT"
] | null | null | null | IMDB Sentiment Analysis using LSTMs, CNNs and BERT/main.py | lucalaringe/pytorch-examples | 5d5682bff0490748b7358ad71c6afc22bfdafab5 | [
"MIT"
] | 1 | 2018-11-29T06:51:06.000Z | 2018-11-29T06:51:06.000Z | # importing the libraries I need
import numpy as np
import matplotlib.pyplot as plt
import pickle
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torch.utils.data import Dataset, DataLoader
import nltk # natural language toolkit
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import string
from utils import *
from train_utils import *
from lstm import LSTMSentimentClassifier
from cnn import CNNSentimentClassifier
def main():
    """Train and evaluate a sentiment classifier on the IMDB reviews data.

    Pipeline: raw CSV -> vocabulary -> padded batches (each stage cached on
    disk), then build or reload the model, train it, and report test
    loss/accuracy.
    """
    # loading train, dev and test toy data
    # train_ls = read_file('senti_binary.train.txt')
    # dev_ls = read_file('senti_binary.dev.txt')
    # test_ls = read_file('senti_binary.test.txt')

    # Load the raw IMDB data only when the batch caches are not on disk.
    if not (os.path.exists('train_batches.json') and
            os.path.exists('dev_batches.json') and
            os.path.exists('test_batches.json')):
        print('Loading the data in memory...')
        ls = read_IMDB('IMDB Dataset.csv')
        train_ls, dev_ls, test_ls = train_test_split(ls)
        print('Done.\n')

    # Load the vocabulary if cached, otherwise build it from the train split
    # (note: building requires the raw data loaded above).
    if os.path.exists('my_voc.pkl'):
        print('Loading the vocabulary in memory...')
        with open('my_voc.pkl', 'rb') as fin:  # renamed: 'input' shadowed a builtin
            my_voc = pickle.load(fin)
        print('Done.\n')
    else:
        print('Creating and saving the vocabulary...')
        my_voc = WordVocabulary()
        my_voc.add_corpus_from_list(train_ls)
        # Save my_voc
        with open('my_voc.pkl', 'wb') as fout:  # renamed: 'output' shadowed a builtin
            pickle.dump(my_voc, fout, pickle.HIGHEST_PROTOCOL)
        print('Done.\n')

    # Creating Datasets and Batchify data
    batch_size = 32
    if os.path.exists('train_batches.json') and os.path.exists('dev_batches.json') and os.path.exists('test_batches.json'):
        print('Loading batches in memory...')
        train_batches = read_batches_from_disk('train_batches.json')
        dev_batches = read_batches_from_disk('dev_batches.json')
        test_batches = read_batches_from_disk('test_batches.json')
        print('Done\n')
    else:
        print('Instantiating, batchifying and saving the datasets...')
        # Instantiating Datasets
        train = MovieReviewsDataset(train_ls, my_voc)
        dev = MovieReviewsDataset(dev_ls, my_voc)
        test = MovieReviewsDataset(test_ls, my_voc)
        # Batchifying
        train_batches = batchify_data(train, batch_size)
        dev_batches = batchify_data(dev, batch_size)
        test_batches = batchify_data(test, batch_size)
        # Saving with json
        store_batches_to_disk(train_batches, 'train_batches.json')
        store_batches_to_disk(dev_batches, 'dev_batches.json')
        store_batches_to_disk(test_batches, 'test_batches.json')
        print('Done.\n')

    # Padded sequence length the CNN is built for; computed up front so it is
    # available in BOTH branches below (previously the "load model" branch
    # referenced max_len before it was defined -> NameError).
    max_len = train_batches[0]['x'].shape[1]

    # Load model if already in memory, otherwise random initialization of the weights
    if os.path.exists('my_model.pt'):
        print('Loading the model...')
        # my_model = LSTMSentimentClassifier(my_voc, 50, 40, 20, 10)
        # NOTE(review): these hyper-parameters (100 channels) differ from the
        # fresh-init branch below (50); they must match whatever the saved
        # checkpoint was trained with -- confirm.
        my_model = CNNSentimentClassifier(my_voc, max_len, 100, 6, 4, 3, 3)
        my_model.load_state_dict(torch.load('my_model.pt'))
        my_model.eval()
        print('Done.\n')
    else:
        print('Initializing the model...')
        # my_model = LSTMSentimentClassifier(my_voc, 50, 40, 20, 10)
        print(max_len)
        my_model = CNNSentimentClassifier(my_voc, max_len, 50, 6, 4, 3, 3)
        print('Done.\n')

    print('Starting training...\n')
    # Train the model
    train_model(train_batches, dev_batches, my_model, nesterov=True)

    # Evaluate the model on test data
    loss, accuracy = run_epoch(test_batches, my_model.eval(), None)
    print("Loss on test set:" + str(loss) + " Accuracy on test set: " + str(accuracy))
if __name__ == '__main__':
    # Entry point: seed both RNGs before any data shuffling or weight init.
    # Specify seed for deterministic behavior, then shuffle.
    np.random.seed(314) # for reproducibility
    torch.manual_seed(314)
    main()
1b553f907737e4266f1cd586e890b52f138a78d7 | 6,708 | py | Python | pooch/tests/test_hashes.py | rabernat/pooch | bc32d4eecec115e1fdf9bd4e306df5a6c22661fd | [
"BSD-3-Clause"
] | null | null | null | pooch/tests/test_hashes.py | rabernat/pooch | bc32d4eecec115e1fdf9bd4e306df5a6c22661fd | [
"BSD-3-Clause"
] | null | null | null | pooch/tests/test_hashes.py | rabernat/pooch | bc32d4eecec115e1fdf9bd4e306df5a6c22661fd | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2018 The Pooch Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
# pylint: disable=redefined-outer-name
"""
Test the hash calculation and checking functions.
"""
import os
from pathlib import Path
from tempfile import NamedTemporaryFile
import pytest
from ..core import Pooch
from ..hashes import (
make_registry,
file_hash,
hash_matches,
)
from .utils import check_tiny_data, mirror_directory
# Location of the sample data files used by all tests below.
DATA_DIR = str(Path(__file__).parent / "data" / "store")
# Expected registry text for a non-recursive scan of DATA_DIR.
REGISTRY = (
    "tiny-data.txt baee0894dba14b12085eacb204284b97e362f4f3e5a5807693cc90ef415c1b2d\n"
)
# Expected registry text when scanning DATA_DIR recursively (subdir included).
REGISTRY_RECURSIVE = (
    "subdir/tiny-data.txt baee0894dba14b12085eacb204284b97e362f4f3e5a5807693cc90ef415c1b2d\n"
    "tiny-data.txt baee0894dba14b12085eacb204284b97e362f4f3e5a5807693cc90ef415c1b2d\n"
)
# Known digests of tiny-data.txt for algorithms shipped with hashlib.
TINY_DATA_HASHES_HASHLIB = {
    "sha1": "c03148994acd89317915ea2f2d080d6dd127aa09",
    "sha256": "baee0894dba14b12085eacb204284b97e362f4f3e5a5807693cc90ef415c1b2d",
    "md5": "70e2afd3fd7e336ae478b1e740a5f08e",
}
# Known digests for algorithms provided by the optional xxhash package.
TINY_DATA_HASHES_XXH = {
    "xxh128": "0267d220db258fffb0c567c0ecd1b689",
    "xxh3_128": "0267d220db258fffb0c567c0ecd1b689",
    "xxh64": "f843815fe57948fa",
    "xxh3_64": "811e3f2a12aec53f",
    "xxh32": "98d6f1a2",
}
# Union of both digest tables (hashlib + xxhash).
TINY_DATA_HASHES = TINY_DATA_HASHES_HASHLIB.copy()
TINY_DATA_HASHES.update(TINY_DATA_HASHES_XXH)
@pytest.fixture
def data_dir_mirror(tmp_path):
    """
    Mirror the test data folder on a temporary directory. Needed to avoid
    permission errors when pooch is installed on a non-writable path.
    *tmp_path* is pytest's per-test temporary directory.
    """
    return mirror_directory(DATA_DIR, tmp_path)
def test_make_registry(data_dir_mirror):
    "Check that the registry builder creates the right file names and hashes"
    # delete=False so the closed file can be reopened by name later.
    outfile = NamedTemporaryFile(delete=False)
    # Need to close the file before writing to it.
    outfile.close()
    try:
        make_registry(data_dir_mirror, outfile.name, recursive=False)
        with open(outfile.name) as fout:
            registry = fout.read()
        # Non-recursive scan must list only the top-level file.
        assert registry == REGISTRY
        # Check that the registry can be used.
        pup = Pooch(path=data_dir_mirror, base_url="some bogus URL", registry={})
        pup.load_registry(outfile.name)
        true = str(data_dir_mirror / "tiny-data.txt")
        fname = pup.fetch("tiny-data.txt")
        assert true == fname
        check_tiny_data(fname)
    finally:
        # Always remove the temp registry file, even on assertion failure.
        os.remove(outfile.name)
def test_make_registry_recursive(data_dir_mirror):
    "Check that the registry builder works in recursive mode"
    # delete=False so the closed file can be reopened by name later.
    outfile = NamedTemporaryFile(delete=False)
    # Need to close the file before writing to it.
    outfile.close()
    try:
        make_registry(data_dir_mirror, outfile.name, recursive=True)
        with open(outfile.name) as fout:
            registry = fout.read()
        # Recursive scan must also list subdir/tiny-data.txt.
        assert registry == REGISTRY_RECURSIVE
        # Check that the registry can be used.
        pup = Pooch(path=data_dir_mirror, base_url="some bogus URL", registry={})
        pup.load_registry(outfile.name)
        assert str(data_dir_mirror / "tiny-data.txt") == pup.fetch("tiny-data.txt")
        check_tiny_data(pup.fetch("tiny-data.txt"))
        true = str(data_dir_mirror / "subdir" / "tiny-data.txt")
        assert true == pup.fetch("subdir/tiny-data.txt")
        check_tiny_data(pup.fetch("subdir/tiny-data.txt"))
    finally:
        # Always remove the temp registry file, even on assertion failure.
        os.remove(outfile.name)
def test_file_hash_invalid_algorithm():
    "An unknown hashing algorithm name must raise ValueError naming it"
    with pytest.raises(ValueError) as error:
        file_hash(fname="something", alg="blah")
    # The offending algorithm name should be quoted in the message.
    assert "'blah'" in str(error.value)
@pytest.mark.parametrize(
    "alg,expected_hash",
    list(TINY_DATA_HASHES.items()),
    ids=list(TINY_DATA_HASHES.keys()),
)
def test_file_hash(alg, expected_hash):
    "Test the hash calculation using hashlib and xxhash"
    # xxhash is an optional dependency; skip those cases if not installed.
    if alg.startswith("xxh"):
        pytest.importorskip("xxhash")
    fname = os.path.join(DATA_DIR, "tiny-data.txt")
    check_tiny_data(fname)
    returned_hash = file_hash(fname, alg)
    assert returned_hash == expected_hash
@pytest.mark.parametrize(
    "alg,expected_hash",
    list(TINY_DATA_HASHES.items()),
    ids=list(TINY_DATA_HASHES.keys()),
)
def test_hash_matches(alg, expected_hash):
    "Make sure the hash checking function works"
    # xxhash is an optional dependency; skip those cases if not installed.
    if alg.startswith("xxh"):
        pytest.importorskip("xxhash")
    fname = os.path.join(DATA_DIR, "tiny-data.txt")
    check_tiny_data(fname)
    # Check if the check passes (hash strings are "algorithm:digest").
    known_hash = f"{alg}:{expected_hash}"
    assert hash_matches(fname, known_hash)
    # And also if it fails
    known_hash = f"{alg}:blablablabla"
    assert not hash_matches(fname, known_hash)
@pytest.mark.parametrize(
    "alg,expected_hash",
    list(TINY_DATA_HASHES_HASHLIB.items()),
    ids=list(TINY_DATA_HASHES_HASHLIB.keys()),
)
def test_hash_matches_strict(alg, expected_hash):
    "Make sure the hash checking function raises an exception if strict"
    fname = os.path.join(DATA_DIR, "tiny-data.txt")
    check_tiny_data(fname)
    # Check if the check passes
    known_hash = f"{alg}:{expected_hash}"
    assert hash_matches(fname, known_hash, strict=True)
    # And also if it fails: with strict=True a mismatch must raise, and the
    # error message must name the offending source (or the file name when
    # no source is given).
    bad_hash = f"{alg}:blablablabla"
    with pytest.raises(ValueError) as error:
        hash_matches(fname, bad_hash, strict=True, source="Neverland")
    assert "Neverland" in str(error.value)
    with pytest.raises(ValueError) as error:
        hash_matches(fname, bad_hash, strict=True, source=None)
    assert fname in str(error.value)
def test_hash_matches_none():
    "hash_matches must always accept the file when known_hash is None"
    path = os.path.join(DATA_DIR, "tiny-data.txt")
    # A None hash disables checking entirely...
    assert hash_matches(path, known_hash=None)
    # ...even for a file name that is not valid...
    assert hash_matches(fname="", known_hash=None)
    # ...and even in strict mode, which would otherwise raise on mismatch.
    assert hash_matches(path, known_hash=None, strict=True)
@pytest.mark.parametrize(
    "alg,expected_hash",
    list(TINY_DATA_HASHES_HASHLIB.items()),
    ids=list(TINY_DATA_HASHES_HASHLIB.keys()),
)
def test_hash_matches_uppercase(alg, expected_hash):
    "Hash matching should be independent of upper or lower case"
    fname = os.path.join(DATA_DIR, "tiny-data.txt")
    check_tiny_data(fname)
    # Check if the check passes with an upper-cased digest.
    known_hash = f"{alg}:{expected_hash.upper()}"
    assert hash_matches(fname, known_hash, strict=True)
    # And also if it fails (digest truncated so it cannot match).
    with pytest.raises(ValueError) as error:
        hash_matches(fname, known_hash[:-5], strict=True, source="Neverland")
    assert "Neverland" in str(error.value)
1b5728fb13835525abdbb3be08ce7804a39a37e6 | 134 | py | Python | TemplateEngineForRESP-F3T/RoofFunctions/trigonometric_function.py | riku-sakamoto/RESP-ProgramTips | 45be5afb90283e56ddf5d32681c58dec17986ba8 | [
"MIT"
] | 1 | 2021-10-13T02:07:17.000Z | 2021-10-13T02:07:17.000Z | TemplateEngineForRESP-F3T/RoofFunctions/trigonometric_function.py | riku-sakamoto/RESP-ProgramTips | 45be5afb90283e56ddf5d32681c58dec17986ba8 | [
"MIT"
] | null | null | null | TemplateEngineForRESP-F3T/RoofFunctions/trigonometric_function.py | riku-sakamoto/RESP-ProgramTips | 45be5afb90283e56ddf5d32681c58dec17986ba8 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
import numpy as np
def sin_sin(x, y):
    """Roof-height surface: 1000*|sin(pi*x/2000) + sin(pi*y/2000)| + 100.

    x, y : coordinates (scalars or numpy arrays). Division is forced to
    floating point (x/2000.0, matching the y term) so integer inputs do not
    truncate to zero under Python 2 or with integer numpy arrays -- the
    original mixed x/2000 with y/2000.0.
    """
    return 1000*abs(np.sin(x/2000.0*np.pi) + np.sin(y/2000.0*np.pi))+100
| 13.4 | 68 | 0.61194 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.164179 |
1b579f594f5cf06701a36a04347c2a0975c62ec7 | 2,534 | py | Python | tidal_precession.py | ddeb32/APR_Testing | 08cf346e6047f75ad57a7c9b497ea2dd9cb13f59 | [
"MIT"
] | null | null | null | tidal_precession.py | ddeb32/APR_Testing | 08cf346e6047f75ad57a7c9b497ea2dd9cb13f59 | [
"MIT"
] | null | null | null | tidal_precession.py | ddeb32/APR_Testing | 08cf346e6047f75ad57a7c9b497ea2dd9cb13f59 | [
"MIT"
] | null | null | null |
#####################################################################
### Evaluating the tidal component to the precession rate ###
#####################################################################
import numpy as np
import pandas as pd
from numpy import pi
from scipy import integrate
import scipy.constants as const
from scipy import interpolate
import math
import matplotlib.pyplot as plt
from mpmath import *
from matplotlib.ticker import AutoMinorLocator
import pdfkit as pdf
M_sun = 1.98847*10**30  # solar mass in kg
R_sun = 6.957*10**8  # solar radius in m
G = 6.67430*10**(-11)  # Newtonian gravitational constant, SI
c = 299792458 #--values taken from physics.nist.gov (speed of light, m/s)
k = G/c**2.0
fact = 1.476981739 #--to get G/c^2 in M_sun and Kms.
# Short aliases for the math functions used below.
exp = math.exp
sin = math.sin
log = math.log #--takes arguments ( , ) where the second one is the base, by default e.
#--------------------------------------------------------------------#
def precessionEQ (Pb, Rns, aNS, Mbh, Mns, e, k2):
    """Tidally induced precession rate; in SI inputs the result is angle/s.

    Pb: orbital period; Rns: neutron-star radius; aNS: NS semi-major axis;
    Mbh, Mns: companion and NS masses; e: eccentricity; k2: tidal (apsidal)
    constant.
    """
    # (30*pi/Pb) * (Rns/aNS)^5 * (Mbh/Mns) * f(e) * k2, where f(e) is the
    # eccentricity enhancement factor.
    eccentricity_term = (1 + 3/2*(e**2) + 1/8*(e**4)) / ((1-e**2)**5)
    rate = 30*pi/Pb
    rate = rate * (Rns/aNS)**5
    rate = rate * (Mbh/Mns)
    rate = rate * eccentricity_term
    return rate * k2
def aNSeq (Mbh, Mns, Pb):
    """Kepler's third law: return (aR, aNS) for masses Mbh, Mns and period Pb.

    aR is the relative (total) semi-major axis; aNS is the neutron star's
    own semi-major axis about the barycentre.
    """
    total_mass = Mbh + Mns
    # a^3 = G M P^2 / (4 pi^2)
    aR = ((G * total_mass * (Pb**2)) / (4*(pi**2)))**(1/3)
    aNS = (Mbh / total_mass) * aR
    return (aR, aNS)
#--Main Program--#
def main ():
    """Interactively read the binary's parameters, convert them to SI and
    print the tidal contribution to the periastron advance in deg/yr."""
    print('\n Give the parameter values - ')
    Pb = float(input('\tPb (Hr):\t'))
    Rns = float(input('\tRns (km):\t'))
    Mbh = float(input('\tMbh (M_sun):\t'))
    Mns = float(input('\tMns (M_sun):\t'))
    e = float(input('\te:\t'))
    k2 = float(input('\tk2:\t'))
    #--Converting to SI--#
    Mbh, Mns = Mbh*M_sun, Mns*M_sun #--masses in Kg.
    Rns = Rns*1000.0 #--distances in meter.
    Pb = Pb*3600.0 #--times in second.
    aR, aNS = aNSeq(Mbh, Mns, Pb)
    precession = precessionEQ(Pb, Rns, aNS, Mbh, Mns, e, k2)
    # 1 rad/s ~= (180/pi)*86400*365.25 deg/yr ~= 1.807e9 (approximation).
    precession = precession*1.807e+9 #--rad/sec to deg/year.
    print('We get - ')
    # Disabled debug output kept for reference:
    """print('Pb:\t', Pb, ' hours')
    print('Rns:\t', Rns/1000, ' km')
    print('Mbh:\t', Mbh/M_sun, ' M_sun')
    print('Mns:\t', Mns/M_sun, ' M_sun')
    print('e:\t', e)
    print('k2:\t', k2)
    print('aNS:\t', aNS/1000, ' km')"""
    print(' omegadot_tidal:\t', precession, ' deg/yr')
if __name__ == '__main__':
    # Guard the interactive entry point so importing this module for its
    # helper functions (precessionEQ, aNSeq) no longer triggers input()
    # prompts; running the file directly behaves exactly as before.
    main()
############################--End of Program--##########################
########################################################################
| 28.155556 | 98 | 0.480268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,117 | 0.440805 |
1b588099d4b30e26fb0a274ed99ed4ea86b7a285 | 1,038 | py | Python | appcred.py | tamalsaha/keystone-demo | dd13b3d283565f72f38d696b235be4e2ad2c2845 | [
"Apache-2.0"
] | 1 | 2018-07-31T09:25:53.000Z | 2018-07-31T09:25:53.000Z | appcred.py | tamalsaha/keystone-demo | dd13b3d283565f72f38d696b235be4e2ad2c2845 | [
"Apache-2.0"
] | null | null | null | appcred.py | tamalsaha/keystone-demo | dd13b3d283565f72f38d696b235be4e2ad2c2845 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
from keystoneauth1.identity import v3
from keystoneauth1 import session
from keystoneclient.v3 import client
from pprint import pprint
import os
def main():
    """Authenticate against Keystone v3 using the OS_* environment variables
    and create an application credential named 'kubernetes', printing it."""
    AUTH_URL = os.getenv('OS_AUTH_URL')
    USERNAME = os.getenv('OS_USERNAME')
    USER_DOMAIN_NAME = os.getenv('OS_USER_DOMAIN_NAME')
    PASSWD = os.getenv('OS_PASSWORD')
    PROJECT_ID = os.getenv('OS_PROJECT_ID')
    PROJECT_NAME = os.getenv('OS_PROJECT_NAME')
    # Echo the collected settings for debugging.
    # NOTE(review): this prints OS_PASSWORD to stdout -- fine for a demo,
    # but avoid in shared logs.
    pprint('AUTH_URL = ' + AUTH_URL)
    pprint('USERNAME = ' + USERNAME)
    pprint('USER_DOMAIN_NAME = ' + USER_DOMAIN_NAME)
    pprint('PASSWD = ' + PASSWD)
    pprint('PROJECT_ID = ' + PROJECT_ID)
    pprint('PROJECT_NAME = ' + PROJECT_NAME)
    # Password-based v3 auth scoped to the given project.
    auth = v3.Password(auth_url=AUTH_URL,
                       username=USERNAME,
                       user_domain_name=USER_DOMAIN_NAME,
                       password=PASSWD,
                       project_id=PROJECT_ID,
                       project_name=PROJECT_NAME)
    sess = session.Session(auth=auth)
    keystone = client.Client(session=sess)
    # Create and display the application credential.
    app_cred = keystone.application_credentials.create(
        name='kubernetes')
    pprint(app_cred.to_dict())
if __name__ == "__main__":
    # Run only when executed directly, not on import.
    main()
| 27.315789 | 52 | 0.754335 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 221 | 0.212909 |
1b5abd620bf6c3fd9ffce5839f82ac6ace822f1a | 664 | py | Python | data/admin.py | FSavoy/visuo-server | d9c93ec7ae9dd033f3f0290381ddbac413bb6f9a | [
"BSD-3-Clause"
] | 2 | 2017-11-16T08:32:46.000Z | 2018-04-02T13:36:42.000Z | data/admin.py | FSavoy/visuo-server | d9c93ec7ae9dd033f3f0290381ddbac413bb6f9a | [
"BSD-3-Clause"
] | null | null | null | data/admin.py | FSavoy/visuo-server | d9c93ec7ae9dd033f3f0290381ddbac413bb6f9a | [
"BSD-3-Clause"
] | 2 | 2017-11-16T08:33:52.000Z | 2021-05-12T06:31:54.000Z | from django.contrib.gis import admin
# Register your models here.
from models import SkyPicture, MeasuringDevice, WeatherMeasurement, RadiosondeMeasurement
from django.contrib.gis import forms
# Custom interface for selecting the location of devices
class MeasuringDeviceAdminForm(forms.ModelForm):
    """Admin form rendering the device location on an OpenStreetMap widget,
    with the raw coordinates displayed for manual editing."""
    location = forms.PointField(widget=forms.OSMWidget(attrs={
        'display_raw': True}))
class MeasuringDeviceAdmin(admin.GeoModelAdmin):
    """Geo-aware admin for MeasuringDevice using the map-enabled form above."""
    form = MeasuringDeviceAdminForm
# Hook the models into the admin site; MeasuringDevice gets its custom
# map-enabled ModelAdmin, the others use the default admin.
admin.site.register(SkyPicture)
admin.site.register(WeatherMeasurement)
admin.site.register(MeasuringDevice, MeasuringDeviceAdmin)
admin.site.register(RadiosondeMeasurement)
1b5ae15c5f93d6a5c129b200254b544ee0bfa0ee | 1,577 | py | Python | setup.py | ilkka/nap | 3ea7b41ef6b24b7e127bc87bb010d8a8bb18a4bd | [
"MIT"
] | 31 | 2015-02-11T22:36:26.000Z | 2019-03-26T17:00:36.000Z | setup.py | ilkka/nap | 3ea7b41ef6b24b7e127bc87bb010d8a8bb18a4bd | [
"MIT"
] | 6 | 2015-11-16T13:29:34.000Z | 2019-10-28T13:37:57.000Z | setup.py | ilkka/nap | 3ea7b41ef6b24b7e127bc87bb010d8a8bb18a4bd | [
"MIT"
] | 11 | 2015-04-15T00:09:08.000Z | 2020-08-25T13:50:21.000Z | #!/usr/bin/env python
from pip.req import parse_requirements
# NOTE(review): pip.req is a private API that was removed in pip 10+, and
# newer pip versions required a session= argument here -- confirm the
# pinned pip version before reusing this setup script.
# parse_requirements() returns generator of pip.req.InstallRequirement objects
install_reqs = parse_requirements('requirements.txt')
# reqs is a list of requirement
# e.g. ['django==1.5.1', 'mezzanine==1.4.6']
reqs = [str(ir.req) for ir in install_reqs]
# Prefer setuptools; fall back to the stdlib distutils installer.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# Long description shown on PyPI; points readers at the GitHub docs.
readme = """Read docs from GitHub_
.. _GitHub: https://github.com/kimmobrunfeldt/nap
"""
# Package metadata for the 'nap' distribution.
# NOTE(review): '2.0.0-dev' is not a canonical PEP 440 version
# ('2.0.0.dev0' would be the normalized form) -- confirm before release.
setup(
    name='nap',
    version='2.0.0-dev',
    description='Convenient way to request HTTP APIs',
    long_description=readme,
    author='Kimmo Brunfeldt',
    author_email='kimmobrunfeldt@gmail.com',
    url='https://github.com/kimmobrunfeldt/nap',
    packages=[
        'nap',
    ],
    package_dir={'nap': 'nap'},
    include_package_data=True,
    install_requires=reqs,
    license='MIT',
    zip_safe=False,
    keywords='nap rest requests http',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
)
| 28.160714 | 78 | 0.641725 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 912 | 0.578313 |
1b5b58c46eceee94ff2083f63bbd9128f0ea25c7 | 555 | py | Python | ivy/functional/backends/jax/statistical.py | odehDanOps/ivy | 996cabd0901ed331d5e8761e2cdee429d0ca8d43 | [
"Apache-2.0"
] | null | null | null | ivy/functional/backends/jax/statistical.py | odehDanOps/ivy | 996cabd0901ed331d5e8761e2cdee429d0ca8d43 | [
"Apache-2.0"
] | null | null | null | ivy/functional/backends/jax/statistical.py | odehDanOps/ivy | 996cabd0901ed331d5e8761e2cdee429d0ca8d43 | [
"Apache-2.0"
] | null | null | null | # global
import jax.numpy as jnp
from typing import Tuple, Union
# Array API Standard #
# -------------------#
def min(x: jnp.ndarray,
        axis: Union[int, Tuple[int]] = None,
        keepdims = False, device = None) \
        -> jnp.ndarray:
    """Array API `min`: minimum of x along `axis` (all axes when None).

    `device` is accepted for interface compatibility and ignored here.
    """
    data = jnp.asarray(x)
    return jnp.min(a=data, axis=axis, keepdims=keepdims)
def max(x: jnp.ndarray,
        axis: Union[int, Tuple[int]] = None,
        keepdims = False, device = None) \
        -> jnp.ndarray:
    """Array API `max`: maximum of x along `axis` (all axes when None).

    `device` is accepted for interface compatibility and ignored here.
    """
    data = jnp.asarray(x)
    return jnp.max(a=data, axis=axis, keepdims=keepdims)
# Extra #
# ------#
| 23.125 | 72 | 0.569369 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.126126 |
1b5dfc54272c0e15842823c60d77faea8d0bda76 | 2,955 | py | Python | tests/generate_new_site/utilities/test_tables.py | aychen99/Excavating-Occaneechi-Town | 6e864ca69ff1881554eb4c88aebed236bafbeaf4 | [
"MIT"
] | 1 | 2020-10-01T01:07:11.000Z | 2020-10-01T01:07:11.000Z | tests/generate_new_site/utilities/test_tables.py | aychen99/Excavating-Occaneechi-Town | 6e864ca69ff1881554eb4c88aebed236bafbeaf4 | [
"MIT"
] | null | null | null | tests/generate_new_site/utilities/test_tables.py | aychen99/Excavating-Occaneechi-Town | 6e864ca69ff1881554eb4c88aebed236bafbeaf4 | [
"MIT"
] | null | null | null | from unittest import mock
from unittest.mock import patch
from src.generate_new_site.utilities import tables
from pathlib import Path
###############################
# PathTable integration tests #
###############################
def test_pathtable_register_and_gets():
    """Registered (old_path, new_path, entity) triples must be retrievable
    by their old path, including entries whose entity is None."""
    fixtures = [
        {
            'entity': "{}".format(i) if i < 5 else None,
            'old_path': Path("{}old".format(i)),
            'new_path': Path("{}old".format(i)),
        }
        for i in range(10)
    ]
    table = tables.PathTable()
    for fx in fixtures:
        table.register(
            old_path=fx['old_path'],
            new_path=fx['new_path'],
            entity=fx['entity'],
        )
    for fx in fixtures:
        # Entities past index 4 were registered as None and must come back so.
        assert table.get_entity(fx['old_path']) == fx['entity']
        assert table.get_path(fx['old_path']) == fx['new_path']
###############################
# PageTable integration tests #
###############################
def test_pagetable_register_and_gets():
    """PageTable lookups plus prev/next navigation within each numbering
    sequence (Arabic and Roman), with gaps treated as sequence breaks."""
    def mock_page_num_to_arabic(page_num):
        # Stand-in for the real Roman->Arabic converter, covering only the
        # numerals used in this test; Arabic input passes through.
        if page_num.isdigit():
            return page_num
        elif page_num == "i":
            return "1"
        elif page_num == "ii":
            return "2"
        elif page_num == "iii":
            return "3"
        elif page_num == "vi":
            return "6"
        return page_num
    pagetable = tables.PageTable()
    page_nums = ["1", "2", "3", "6", "i", "ii", "iii", "vi"]
    test_pages = {num: Path("page{}".format(num)) for num in page_nums}
    # Patch must cover register(), which converts page numbers internally.
    with patch('src.generate_new_site.utilities.tables.page_num_to_arabic', mock_page_num_to_arabic):
        for num, path in test_pages.items():
            pagetable.register(
                page_num=num,
                path=path
            )
    # Test get_path_path
    for num, path in test_pages.items():
        assert pagetable.get_page_path(num) == path
    # Test get_prev/next_page_path: neighbors exist only for consecutive
    # page numbers; "6"/"vi" are isolated, so both directions are None.
    # 1
    assert pagetable.get_prev_page_path("1") is None
    assert pagetable.get_next_page_path("1") == test_pages["2"]
    # 2
    assert pagetable.get_prev_page_path("2") == test_pages["1"]
    assert pagetable.get_next_page_path("2") == test_pages["3"]
    # 3
    assert pagetable.get_prev_page_path("3") == test_pages["2"]
    assert pagetable.get_next_page_path("3") is None
    # 6
    assert pagetable.get_prev_page_path("6") is None
    assert pagetable.get_next_page_path("6") is None
    # i
    assert pagetable.get_prev_page_path("i") is None
    assert pagetable.get_next_page_path("i") == test_pages["ii"]
    # ii
    assert pagetable.get_prev_page_path("ii") == test_pages["i"]
    assert pagetable.get_next_page_path("ii") == test_pages["iii"]
    # iii
    assert pagetable.get_prev_page_path("iii") == test_pages["ii"]
    assert pagetable.get_next_page_path("iii") is None
    # vi
    assert pagetable.get_prev_page_path("vi") is None
    assert pagetable.get_next_page_path("vi") is None
| 33.579545 | 101 | 0.609475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 583 | 0.197293 |
1b5e5af181654c4b7294055065638f813e904dc8 | 910 | py | Python | tools/dnase/wellingtonAnalysis.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | 1 | 2021-02-05T13:19:58.000Z | 2021-02-05T13:19:58.000Z | tools/dnase/wellingtonAnalysis.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | null | null | null | tools/dnase/wellingtonAnalysis.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | null | null | null | import sys
import pyDNase
import pyDNase.footprinting as fp
import subprocess
import os
if __name__ == "__main__":
    # Command-line Wellington footprinting driver.
    # Usage: wellingtonAnalysis.py <regions.bed> <reads.bam> <reads.bam.bai> <output> <p-value cutoff>
    #There should be two input files:
    #1. original .bam file
    #2. fseq output .bed file
    #3. folder needs to contain .bam.bai file for reference
    input_bed_name = sys.argv[1]
    input_bam_name = sys.argv[2]
    input_bam_index = sys.argv[3]
    output_file_name = sys.argv[4]
    pvalue_cutoff = sys.argv[5]  # NOTE(review): kept as str; pyDNase presumably coerces — confirm

    # create links for input bam and indexes
    # Symlinks give pyDNase the expected "<name>.bam" / "<name>.bam.bai" pairing
    # in the working directory without copying the (large) BAM file.
    os.symlink(input_bam_name, "input.bam")
    os.symlink(input_bam_index, "input.bam.bai")

    #wellington
    # Footprint only the first interval of the BED file (regions[0]).
    regions = pyDNase.GenomicIntervalSet(input_bed_name)
    reads = pyDNase.BAMHandler("input.bam")
    footprinter = fp.wellington(regions[0], reads)
    footprints = footprinter.footprints(withCutoff=pvalue_cutoff)
    with open(output_file_name, "w") as resultout:
        resultout.write(str(footprints))
| 27.575758 | 65 | 0.708791 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.26044 |
1b5ef5d971968c8909b478034323bf06ff019c92 | 3,432 | py | Python | idiotic/util/blocks/http.py | idiotic/idiotic | 283b2919356c0735e43d1b42526c54fc7babf7d6 | [
"MIT"
] | 7 | 2016-03-27T04:26:05.000Z | 2021-02-24T17:16:10.000Z | idiotic/util/blocks/http.py | idiotic/idiotic | 283b2919356c0735e43d1b42526c54fc7babf7d6 | [
"MIT"
] | 38 | 2016-03-18T05:53:04.000Z | 2017-07-01T05:06:36.000Z | idiotic/util/blocks/http.py | idiotic/idiotic | 283b2919356c0735e43d1b42526c54fc7babf7d6 | [
"MIT"
] | null | null | null | import logging
from urllib.parse import urlparse, urlunparse
from idiotic import block
from idiotic.util.resources import http
import aiohttp
import asyncio
import json
import types
log = logging.getLogger(__name__)
class HTTP(block.Block):
def __init__(self, name, url, method="GET", parameters=None, defaults=None, skip_repeats=False, format_data=True,
output=True, data=None, json=False, **options):
super().__init__(name, **options)
self.url = url
self.parameters = parameters or []
self.method = method
self.data = data or {}
self.headers = {}
self.json = json
self.defaults = defaults or {}
self.skip_repeats = skip_repeats
self.format_data = format_data
if output:
if output is True:
self.outputter = lambda d: d
elif output == "int":
self.outputter = int
elif output == "float":
self.outputter = float
elif output == "bool":
self.outputter = bool
elif output == "str":
self.outputter = str
elif output == "json":
self.outputter = json.loads
else:
raise ValueError("Invalid output type: {}".format(output))
else:
self.outputter = None
parsed_url = urlparse(url, scheme='http')
url_root = urlunparse((parsed_url[0], parsed_url[1], '', '', '', ''))
#: Options
self.options = options
self._param_dict = {n: self.defaults.get(n, None) for n in self.parameters}
for name in self.parameters:
async def setparam(self, val):
await self._setparam(name, val)
setattr(self, name, types.MethodType(setparam, self))
self.inputs = {}
self.resources = [http.URLReachable(url_root)]
async def _setparam(self, name, value):
if not self.skip_repeats or value != self._param_dict.get(name):
self._param_dict[name] = value
await self.perform()
def formatted_data(self):
if self.format_data:
return {
k: v.format(**self.data) for k, v in self._param_dict.items()
}
else:
return self.data
async def perform(self, *_):
while True:
try:
async with aiohttp.ClientSession() as client:
headers = dict(self.headers)
data = self.formatted_data()
if self.json:
data = json.dumps(data)
if 'content-type' not in headers:
headers['content-type'] = 'application/json'
async with client.request(
self.method,
self.url.format(**self._param_dict),
data=data,
headers=headers,
) as request:
res = await request.text()
if self.outputter:
output_val = self.outputter(res)
await self.output(output_val)
break
except IOError:
log.error("%s: Unable to retrieve %s", self.name, self.url)
await asyncio.sleep(5)
| 32.074766 | 117 | 0.516026 | 3,209 | 0.935023 | 0 | 0 | 0 | 0 | 1,364 | 0.397436 | 156 | 0.045455 |
1b5f5814e99811388d8791f4808e71fca930b211 | 651 | py | Python | chapter9/msfrpc/msfrpc_connect.py | abbbhucho/Mastering-Python-for-Networking-and-Security | f4fb1131253e9daad8da501c297758fdcedfbac3 | [
"MIT"
] | 98 | 2018-05-13T20:41:43.000Z | 2022-03-31T00:24:01.000Z | chapter9/msfrpc/msfrpc_connect.py | Cyb3rid10ts/Mastering-Python-for-Networking-and-Security | 4cf04d1758f17ae378b5e3422404e5b7a174a243 | [
"MIT"
] | null | null | null | chapter9/msfrpc/msfrpc_connect.py | Cyb3rid10ts/Mastering-Python-for-Networking-and-Security | 4cf04d1758f17ae378b5e3422404e5b7a174a243 | [
"MIT"
] | 62 | 2018-06-19T13:46:34.000Z | 2022-02-11T05:47:24.000Z | # -*- encoding: utf-8 -*-
import msfrpc
# NOTE(review): Python 2 syntax (print statements) — this script will not run
# under Python 3 without conversion.
# Connect to a local Metasploit RPC daemon over SSL and dump its inventory.
client = msfrpc.Msfrpc({'uri':'/msfrpc', 'port':'5553', 'host':'127.0.0.1', 'ssl': True})
auth = client.login('msf','password')
if auth:
	# Core daemon info and running jobs, then every module category msf exposes.
	print str(client.call('core.version'))+'\n'
	print str(client.call('core.thread_list', []))+'\n'
	print str(client.call('job.list', []))+'\n'
	print str(client.call('module.exploits', []))+'\n'
	print str(client.call('module.auxiliary', []))+'\n'
	print str(client.call('module.post', []))+'\n'
	print str(client.call('module.payloads', []))+'\n'
	print str(client.call('module.encoders', []))+'\n'
	print str(client.call('module.nops', []))+'\n'
1b6202aaa27a1de6b25a387e3e874186d7ae1c19 | 351 | py | Python | examples/anagrams_demo.py | aathi2002/open-tamil | 84161b622b7cb5d369b4ab4e8025dcf432e867e8 | [
"MIT"
] | 218 | 2016-03-19T20:59:17.000Z | 2022-03-31T03:35:32.000Z | examples/anagrams_demo.py | aathi2002/open-tamil | 84161b622b7cb5d369b4ab4e8025dcf432e867e8 | [
"MIT"
] | 132 | 2016-02-23T06:16:32.000Z | 2022-02-17T05:16:43.000Z | examples/anagrams_demo.py | aathi2002/open-tamil | 84161b622b7cb5d369b4ab4e8025dcf432e867e8 | [
"MIT"
] | 62 | 2016-03-19T20:59:26.000Z | 2022-03-01T11:34:48.000Z | import codecs
from solthiruthi.dictionary import *
from tamil import wordutils
# Build the Tamil Virtual University dictionary and list every anagram group
# found in it, one numbered line per group, written as UTF-8.
TVU, TVU_size = DictionaryBuilder.create(TamilVU)
ag, ag2 = DictionaryBuilder and wordutils.anagrams_in_dictionary(TVU)
with codecs.open("demo.txt", "w", "utf-8") as fp:
    itr = 1
    for k, c in ag:
        # ag yields (key, count); ag2 maps the key to the list of words
        # sharing that letter multiset.
        v = ag2[k]
        fp.write("%03d) %s\n" % (itr, " | ".join(v)))
        itr += 1
| 25.071429 | 53 | 0.623932 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.105413 |
1b62a0d4ec377c212288a73dec2ea0b7124e85ec | 659 | py | Python | examples/notiltangle.py | jckw/Adafruit_LSM9DS0 | 98ff135fbf1702160a9277df1fd637022f91e234 | [
"MIT"
] | 6 | 2017-11-14T07:21:58.000Z | 2018-08-24T03:47:58.000Z | examples/notiltangle.py | jckw/Adafruit_LSM9DS0 | 98ff135fbf1702160a9277df1fd637022f91e234 | [
"MIT"
] | null | null | null | examples/notiltangle.py | jckw/Adafruit_LSM9DS0 | 98ff135fbf1702160a9277df1fd637022f91e234 | [
"MIT"
] | 2 | 2017-09-26T16:57:16.000Z | 2018-12-06T12:33:11.000Z | # Simple example whereby the angle is calculated, assuming the magnetometer is flat,
# i.e. there is no tilt-compensation.
# Author: Jack Weatherilt
# License: Public Domain
import math
import time
# Import the LSM9DS0 module
import Adafruit_LSM9DS0
# Create new LSM9DS0 instance
# Create new LSM9DS0 instance
imu = Adafruit_LSM9DS0.LSM9DS0()

while True:
    # Unpack (x, y, z) readings from magnetometer
    (mag_x, mag_y, mag_z) = imu.readMag()
    # Calculate the heading using trigonometry; atan2 keeps the correct
    # quadrant for all sign combinations of (mag_x, mag_y).
    angle_deg = math.degrees(math.atan2(mag_y, mag_x))
    # NOTE: this method does not account for tilt!
    print("Non-tilt: deg:", angle_deg)
    # Wait 50 ms before repeating
    time.sleep(0.05)
| 21.258065 | 84 | 0.743551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 412 | 0.62519 |
1b6463efb1da06009a0314631e1b650f5443584c | 1,702 | py | Python | proj/python/opencvtest/Test/FaceCountingCCTV-master/CCTV-Code.py | joalvis777/c-through | 7e8bf43b88436edb4ffb779f75e641941216ea11 | [
"MIT"
] | null | null | null | proj/python/opencvtest/Test/FaceCountingCCTV-master/CCTV-Code.py | joalvis777/c-through | 7e8bf43b88436edb4ffb779f75e641941216ea11 | [
"MIT"
] | null | null | null | proj/python/opencvtest/Test/FaceCountingCCTV-master/CCTV-Code.py | joalvis777/c-through | 7e8bf43b88436edb4ffb779f75e641941216ea11 | [
"MIT"
] | null | null | null | import numpy as np
import datetime
import smtplib
import logging
import time
import csv
import cv2
print ("======================START======================")
all_count = 0 #Checking finding count (frames processed)
true_count = 0 #Checking detection count (faces seen, cumulative)
#open result CSV file
file = open('./result/res_Insert_name.csv', 'w')
#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)  # default webcam
one_m_timer_start = time.time()
while 1:
    # NOTE(review): time.clock() was removed in Python 3.8 — this script
    # requires Python <= 3.7 (or a port to time.perf_counter()).
    s = time.clock() #Start time
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    all_count = all_count + 1 #Plus finding count
    for (x,y,w,h) in faces:
        # Draw a blue box around each detected face and log one CSV row
        # per detection: start time, end time, elapsed, running face count.
        cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        true_count = true_count + 1
        e = time.clock() #Finish time
        msg = str(s) + ',' + str(e) + ',' + str(e-s) + ',' + str(true_count) +'\n'
        file.write(msg) #writing about start time, end time, spend time, face detection count
        print ("Detection Face Number : ", true_count)
    cv2.imshow('img',img)
    k = cv2.waitKey(30) & 0xff #If you press "ESC" button on your keyboard program is end
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
file.close()
print ("All count :" , all_count) #show all_count
print ("Detection count :" , true_count) #show detection count
print ("======================END======================")
# :: Last Edit ::
# :: 2018-04-03 ::
# :: Poberlater ::
1b64d2f0be129dd3396f04b7059ff171be4e05bd | 3,039 | py | Python | Scraping/Code/scraping_tools.py | Jhagrut/Sports-Bot | 4c1cf06cf97317644ed924455abcf9a9cc2eb149 | [
"MIT"
] | null | null | null | Scraping/Code/scraping_tools.py | Jhagrut/Sports-Bot | 4c1cf06cf97317644ed924455abcf9a9cc2eb149 | [
"MIT"
] | null | null | null | Scraping/Code/scraping_tools.py | Jhagrut/Sports-Bot | 4c1cf06cf97317644ed924455abcf9a9cc2eb149 | [
"MIT"
] | 1 | 2021-09-06T12:16:34.000Z | 2021-09-06T12:16:34.000Z | """
Scraping Tools
Provides all tools for getting data onto/off the pi and submitting back to google drive.
"""
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
import requests
import time
# for twint
import twint
import nest_asyncio
def download_file_from_google_drive(id, destination):
    """Download a (possibly large) Google Drive file to *destination*.

    Large files trigger Drive's virus-scan warning page; when that happens
    the confirmation token from the cookies is replayed to fetch the real
    payload.
    """
    url = "https://docs.google.com/uc?export=download"
    session = requests.Session()

    response = session.get(url, params={'id': id}, stream=True)
    token = get_confirm_token(response)
    if token:
        response = session.get(url, params={'id': id, 'confirm': token}, stream=True)

    save_response_content(response, destination)
def get_confirm_token(response):
    """Return Google Drive's download-warning cookie value, or None if absent."""
    return next(
        (value for key, value in response.cookies.items()
         if key.startswith('download_warning')),
        None,
    )
def save_response_content(response, destination):
    """Stream the HTTP response body to *destination* in 32 KiB chunks."""
    chunk_size = 32768
    with open(destination, "wb") as out:
        for chunk in response.iter_content(chunk_size):
            if not chunk:  # skip keep-alive chunks
                continue
            out.write(chunk)
def download_files():
    """Download every (file_id, destination) pair listed in the download CSV."""
    with open('download_ids_and_locations.csv') as csv_file:
        rows = [line.rstrip('\n').split(',') for line in csv_file.readlines()]

    for row in rows:
        download_file_from_google_drive(row[0], row[1])
def upload_files():
    """Upload each local file listed in the upload CSV back to Google Drive.

    CSV columns: parent folder id, file id, source path.  A 5 s pause between
    uploads keeps us under Drive's request rate limits.
    """
    gauth = GoogleAuth()
    drive = GoogleDrive(gauth)

    with open('upload_ids_and_locations.csv') as csv_file:
        rows = [line.rstrip('\n').split(',') for line in csv_file.readlines()]

    for row in rows:
        gfile = drive.CreateFile({'parents': [{'id': row[0]}], 'id': row[1]})
        # The CSV stores a path; SetContentFile wants the bare file name.
        filename = row[2].split('/')[-1]
        gfile.SetContentFile(filename)
        gfile.Upload()
        time.sleep(5)
def scrape_twitter():
    """Scrape up to 100 recent tweets for every account in accountList.txt.

    Each account's tweets are appended to ``TweetData/<username>.csv`` via
    twint's CSV store.  Accounts that make twint raise ``ValueError`` are
    collected in ``broken_ids`` and skipped.  A 15 s pause between accounts
    avoids rate limiting.
    """
    nest_asyncio.apply()  # twint drives its own asyncio loop; allow nesting

    # BUG FIX: use a context manager instead of an unclosed file handle.
    with open('accountList.txt') as account_file:
        userids = [userid.strip('\n') for userid in account_file.readlines()]

    broken_ids = list()
    count = 0
    # BUG FIX: the original condition (count < len(userids) - 1) silently
    # skipped the last account in the list.
    while count < len(userids):
        if count % 250 == 0:
            print(count, 'usernames reached.')
        try:
            c = twint.Config()
            c.Username = userids[count]
            c.Limit = 100
            c.Store_csv = True
            c.Output = 'TweetData/' + userids[count] + ".csv"
            c.Hide_output = True
            twint.run.Search(c)
            del c
            time.sleep(15)
            count += 1
        except ValueError:
            broken_ids.append(userids[count])
            count += 1
1b653b94676163dc7e361fd0adb14baf9eddd8ca | 6,522 | py | Python | tests/api/test_rest.py | oarepo/oarepo-communities | eb05dbdd5caf29e8741df5213456220d8f359cfa | [
"MIT"
] | null | null | null | tests/api/test_rest.py | oarepo/oarepo-communities | eb05dbdd5caf29e8741df5213456220d8f359cfa | [
"MIT"
] | 24 | 2021-02-01T17:30:33.000Z | 2022-02-08T09:54:22.000Z | tests/api/test_rest.py | oarepo/oarepo-communities | eb05dbdd5caf29e8741df5213456220d8f359cfa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CESNET.
#
# OARepo-Communities is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""OArepo module that adds support for communities"""
from flask import url_for
from invenio_access import ActionRoles
from invenio_accounts.models import Role, User
from invenio_accounts.proxies import current_datastore
from oarepo_communities.constants import COMMUNITY_READ, COMMUNITY_CREATE, STATE_PUBLISHED, COMMUNITY_UPDATE
from oarepo_communities.models import OARepoCommunityModel
from oarepo_communities.permissions import community_record_owner
def test_links_from_search(app, client, es, sample_records):
    """Search over community C exposes the record's primary-community link."""
    response = client.get('https://localhost/C/')
    assert response.status_code == 200
    hits = response.json['hits']
    # 1 published record having secondary community C assigned
    assert hits['total'] == 1
    assert hits['hits'][0]['links']['self'] == 'https://localhost/B/6'
def test_records_get(db, app, community, client, users, es, sample_records, test_blueprint):
    """COMMUNITY_READ gates record access in primary and secondary collections."""
    # Non-community members cannot read on primary neither secondary community
    for url in ('https://localhost/B/7', 'https://localhost/comtest/7'):
        assert client.get(url).status_code == 401

    role = Role.query.all()[0]
    user = User.query.all()[0]
    community[1].allow_action(role, COMMUNITY_READ)
    db.session.add(ActionRoles(action=COMMUNITY_READ, argument='B', role=role))
    db.session.commit()

    current_datastore.add_role_to_user(user, role)
    with app.test_client() as client:
        login_url = url_for('_tests.test_login_{}'.format(user.id),
                            _scheme='https', _external=True)
        assert client.get(login_url).status_code == 200

        # Record should be accessible in the primary community collection
        resp = client.get('https://localhost/comtest/7')
        assert resp.status_code == 200
        assert resp.json['links']['self'] == 'https://localhost/comtest/7'

        # Record should also be readable in the secondary community
        # collection, with the self link pointing at the primary one.
        resp = client.get('https://localhost/B/7')
        assert resp.status_code == 200
        assert resp.json['links']['self'] == 'https://localhost/comtest/7'

        # Record get should return 404 on any other community
        assert client.get('https://localhost/C/7').status_code == 404
def test_record_create(db, app, community, client, users, es, test_blueprint):
    """Only members granted COMMUNITY_CREATE may create records in a community."""
    recdata = {
        'title': 'Test record',
        'oarepo:primaryCommunity': community[0],
        'state': '',
        'oarepo:secondaryCommunities': ['B'],
        'access': {
            'owned_by': [1]
        }
    }
    # Non-community members cannot create records in a community.
    assert client.post('https://localhost/comtest/', json=recdata).status_code == 401

    role = Role.query.all()[0]
    user = User.query.all()[0]
    for action in (COMMUNITY_READ, COMMUNITY_CREATE):
        community[1].allow_action(role, action)
    current_datastore.add_role_to_user(user, role)

    with app.test_client() as client:
        login = client.get(url_for('_tests.test_login_{}'.format(user.id), _external=True))
        assert login.status_code == 200
        # Create with correct primary community data succeeds
        assert client.post('https://localhost/comtest/', json=recdata).status_code == 201
def test_anonymous_permissions(sample_records, community, client):
    """Test anonymous rest permissions."""
    for state, record in sample_records['comtest'][1].items():
        record_url = 'https://localhost/comtest/{}'.format(record.pid.pid_value)

        # Reads: only published records are visible anonymously.
        expected_read = 200 if state == STATE_PUBLISHED else 401
        assert client.get(record_url).status_code == expected_read

        # Writes of every kind are rejected for anonymous clients.
        post_resp = client.post('https://localhost/comtest/', json=record.record.dumps())
        assert post_resp.status_code == 401

        put_resp = client.put(record_url,
                              json={'op': 'replace', 'path': '/title', 'value': 'qux'})
        assert put_resp.status_code == 401

        assert client.delete(record_url).status_code == 401
def test_community_list(app, db, client, community):
    """The community listing endpoint returns the single registered community."""
    resp = client.get(url_for('oarepo_communities.community_list'))
    assert resp.status_code == 200

    expected = [{
        'id': 'comtest',
        'metadata': {'description': 'Community description'},
        'title': 'Title',
        'type': 'Other',
        'links': {
            'self': 'https://localhost/communities/comtest'
        }
    }]
    assert len(resp.json) == 1
    assert resp.json == expected
def test_community_detail(app, db, client, community, test_blueprint, community_member):
    """Community detail endpoint: 404 for unknown ids, and the 'actions' key
    appears only after actions have been granted on the community."""
    # Anonymous request succeeds but exposes no 'actions'.
    resp = client.get(
        url_for('oarepo_communities.community_detail', community_id=community[0]))
    assert resp.status_code == 200
    assert 'id' in resp.json.keys()
    assert 'actions' not in resp.json.keys()

    # Unknown community id yields 404.
    resp = client.get(
        url_for('oarepo_communities.community_detail', community_id='blah'))
    assert resp.status_code == 404

    user = User.query.filter_by(id=community_member.id).one()
    with app.test_client() as client:
        resp = client.get(url_for(
            '_tests.test_login_{}'.format(user.id), _external=True))
        assert resp.status_code == 200

        resp = client.get(
            url_for('oarepo_communities.community_detail', community_id=community[0]))
        assert resp.status_code == 200

        # Grant read to the curator role and read/update to record owners,
        # then the detail response must list those actions per role.
        c: OARepoCommunityModel = community[1]
        c.allow_action(c.roles[1], COMMUNITY_READ)
        c.allow_action(community_record_owner, COMMUNITY_READ, system=True)
        c.allow_action(community_record_owner, COMMUNITY_UPDATE, system=True)
        db.session.commit()

        resp = client.get(
            url_for('oarepo_communities.community_detail', community_id=community[0]))
        assert resp.status_code == 200
        assert 'actions' in resp.json.keys()
        assert resp.json['actions'] == {'community-read': ['community:comtest:curator', 'community-record-owner'],
                                        'community-update': ['community-record-owner']}
| 39.053892 | 114 | 0.657927 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,041 | 0.312941 |
1b66af2900d9a50cf311b38e534f344bfbeea028 | 5,073 | py | Python | code/datasets/adult.py | eth-sri/lcifer | 4a21f9c330d3cc458153689442d2fca702ed837b | [
"MIT"
] | 23 | 2020-02-25T12:49:59.000Z | 2021-11-01T13:59:39.000Z | code/datasets/adult.py | eth-sri/lcifer | 4a21f9c330d3cc458153689442d2fca702ed837b | [
"MIT"
] | 2 | 2020-04-15T08:30:29.000Z | 2021-05-15T16:11:30.000Z | code/datasets/adult.py | eth-sri/lcifer | 4a21f9c330d3cc458153689442d2fca702ed837b | [
"MIT"
] | 7 | 2020-02-24T17:24:44.000Z | 2022-01-16T03:29:43.000Z | from os import path
from urllib import request
import numpy as np
import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from datasets import AbstractDataset
class AdultDataset(AbstractDataset):
    """UCI Adult (census income) dataset with a configurable protected attribute.

    Downloads the raw train/test splits on first use, drops rows with missing
    values, one-hot encodes categorical columns, and exposes train/val/test
    feature/label tensors plus boolean protected-group masks.
    """

    column_names = [
        'age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship',
        'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income'
    ]
    # The test split's labels carry a trailing period in the raw files.
    train_labels_map = {'<=50K': 0, '>50K': 1}
    test_labels_map = {'<=50K.': 0, '>50K.': 1}

    def __init__(self, split, args, normalize=True):
        """Load (downloading if needed) and preprocess the Adult dataset.

        Args:
            split: which split this instance represents (passed to the base class).
            args: namespace providing ``protected_att`` (defaults to 'sex' when None).
            normalize: whether to normalize the continuous feature columns.
        """
        super().__init__('adult', split)

        train_data_file = path.join(self.data_dir, 'adult.data')
        test_data_file = path.join(self.data_dir, 'adult.test')

        if not path.exists(train_data_file):
            request.urlretrieve(
                'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data', train_data_file
            )
        if not path.exists(test_data_file):
            request.urlretrieve(
                'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test', test_data_file
            )

        train_dataset = pd.read_csv(train_data_file, sep=',', header=None, names=AdultDataset.column_names)
        test_dataset = pd.read_csv(test_data_file, sep=',', header=0, names=AdultDataset.column_names)

        # preprocess strings (raw fields are padded with spaces)
        train_dataset = train_dataset.applymap(lambda x: x.strip() if isinstance(x, str) else x)
        test_dataset = test_dataset.applymap(lambda x: x.strip() if isinstance(x, str) else x)

        # drop missing values ('?' marks missing in the raw data)
        train_dataset.replace(to_replace='?', value=np.nan, inplace=True)
        test_dataset.replace(to_replace='?', value=np.nan, inplace=True)
        train_dataset.dropna(axis=0, inplace=True)
        test_dataset.dropna(axis=0, inplace=True)

        # encode labels
        train_dataset.replace(AdultDataset.train_labels_map, inplace=True)
        test_dataset.replace(AdultDataset.test_labels_map, inplace=True)

        # split features and labels
        train_features, train_labels = train_dataset.drop('income', axis=1), train_dataset['income']
        test_features, test_labels = test_dataset.drop('income', axis=1), test_dataset['income']

        continuous_vars = []
        self.categorical_columns = []
        for col in train_features.columns:
            if train_features[col].isnull().sum() > 0:
                train_features.drop(col, axis=1, inplace=True)
            else:
                # BUG FIX: np.object was removed in NumPy 1.24; the builtin
                # ``object`` is the equivalent dtype sentinel.
                if train_features[col].dtype == object:
                    self.categorical_columns += [col]
                else:
                    continuous_vars += [col]

        protected_att = args.protected_att if args.protected_att is not None else 'sex'
        self.protected_unique = train_features[protected_att].nunique()
        # Boolean mask: True for the category encoded as 0 by pd.Categorical.
        protected_train = np.logical_not(pd.Categorical(train_features[protected_att]).codes)
        protected_test = np.logical_not(pd.Categorical(test_features[protected_att]).codes)

        # one-hot encode categorical data
        train_features = pd.get_dummies(train_features, columns=self.categorical_columns, prefix_sep='=')
        test_features = pd.get_dummies(test_features, columns=self.categorical_columns, prefix_sep='=')
        self.continuous_columns = [train_features.columns.get_loc(var) for var in continuous_vars]

        # add missing column to test dataset (this country value never occurs
        # in the test split, so get_dummies omits it there)
        test_features.insert(
            loc=train_features.columns.get_loc('native_country=Holand-Netherlands'),
            column='native_country=Holand-Netherlands', value=0
        )

        # Map each categorical column to the contiguous range of its one-hot
        # column indices; the assert checks that the range really is contiguous.
        self.one_hot_columns = {}
        for column_name in self.categorical_columns:
            ids = [i for i, col in enumerate(train_features.columns) if col.startswith('{}='.format(column_name))]
            if len(ids) > 0:
                assert len(ids) == ids[-1] - ids[0] + 1
            self.one_hot_columns[column_name] = ids
        print('categorical features: ', self.one_hot_columns.keys())

        self.column_ids = {col: idx for idx, col in enumerate(train_features.columns)}

        train_features = torch.tensor(train_features.values.astype(np.float32), device=self.device)
        train_labels = torch.tensor(train_labels.values.astype(np.int64), device=self.device)
        # BUG FIX: np.bool was removed in NumPy 1.24; use the builtin bool.
        train_protected = torch.tensor(protected_train.astype(bool), device=self.device)

        self.X_train, self.X_val, self.y_train, self.y_val, self.protected_train, self.protected_val = train_test_split(
            train_features, train_labels, train_protected, test_size=0.2, random_state=0
        )
        self.X_test = torch.tensor(test_features.values.astype(np.float32), device=self.device)
        self.y_test = torch.tensor(test_labels.values.astype(np.int64), device=self.device)
        self.protected_test = torch.tensor(protected_test.astype(bool), device=self.device)

        if normalize:
            self._normalize(self.continuous_columns)

        self._assign_split()
| 46.541284 | 120 | 0.673566 | 4,879 | 0.961758 | 0 | 0 | 0 | 0 | 0 | 0 | 688 | 0.13562 |
1b67df276aa186215a09aa4ae47cdc677ec9f777 | 7,854 | py | Python | data/kitti_raw_loader.py | infinityofspace/SfmLearner-Pytorch | 531fc2d1928d7d7575c5d486ce957995639ebdc9 | [
"MIT"
] | 908 | 2017-10-20T14:24:38.000Z | 2022-03-29T09:54:56.000Z | data/kitti_raw_loader.py | infinityofspace/SfmLearner-Pytorch | 531fc2d1928d7d7575c5d486ce957995639ebdc9 | [
"MIT"
] | 133 | 2017-10-31T09:25:44.000Z | 2022-03-25T20:27:18.000Z | data/kitti_raw_loader.py | infinityofspace/SfmLearner-Pytorch | 531fc2d1928d7d7575c5d486ce957995639ebdc9 | [
"MIT"
] | 236 | 2017-10-21T06:34:33.000Z | 2022-02-23T08:15:31.000Z | from __future__ import division
import numpy as np
from path import Path
from imageio import imread
from skimage.transform import resize as imresize
from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans
from datetime import datetime
class KittiRawLoader(object):
def __init__(self,
dataset_dir,
static_frames_file=None,
img_height=128,
img_width=416,
min_disp=0.2,
get_depth=False,
get_pose=False,
depth_size_ratio=1):
dir_path = Path(__file__).realpath().dirname()
test_scene_file = dir_path/'test_scenes.txt'
self.from_speed = static_frames_file is None
if static_frames_file is not None:
self.collect_static_frames(static_frames_file)
with open(test_scene_file, 'r') as f:
test_scenes = f.readlines()
self.test_scenes = [t[:-1] for t in test_scenes]
self.dataset_dir = dataset_dir
self.img_height = img_height
self.img_width = img_width
self.cam_ids = ['02', '03']
self.date_list = ['2011_09_26', '2011_09_28', '2011_09_29', '2011_09_30', '2011_10_03']
self.min_disp = min_disp
self.get_depth = get_depth
self.get_pose = get_pose
self.depth_size_ratio = depth_size_ratio
self.collect_train_folders()
def collect_static_frames(self, static_frames_file):
with open(static_frames_file, 'r') as f:
frames = f.readlines()
self.static_frames = {}
for fr in frames:
if fr == '\n':
continue
date, drive, frame_id = fr.split(' ')
curr_fid = '%.10d' % (np.int(frame_id[:-1]))
if drive not in self.static_frames.keys():
self.static_frames[drive] = []
self.static_frames[drive].append(curr_fid)
def collect_train_folders(self):
self.scenes = []
for date in self.date_list:
drive_set = (self.dataset_dir/date).dirs()
for dr in drive_set:
if dr.name[:-5] not in self.test_scenes:
self.scenes.append(dr)
def collect_scenes(self, drive):
train_scenes = []
for c in self.cam_ids:
oxts = sorted((drive/'oxts'/'data').files('*.txt'))
with open(drive/'oxts'/'timestamps.txt', 'r') as f:
times = [datetime.strptime(time_string[:-4], "%Y-%m-%d %H:%M:%S.%f") for time_string in f.readlines()]
scene_data = {'cid': c,
'dir': drive,
'speed': [],
'time': [t.timestamp() for t in times],
'frame_id': [],
'pose': [],
'rel_path': drive.name + '_' + c}
scale = None
origin = None
imu2velo = read_calib_file(drive.parent/'calib_imu_to_velo.txt')
velo2cam = read_calib_file(drive.parent/'calib_velo_to_cam.txt')
cam2cam = read_calib_file(drive.parent/'calib_cam_to_cam.txt')
velo2cam_mat = transform_from_rot_trans(velo2cam['R'], velo2cam['T'])
imu2velo_mat = transform_from_rot_trans(imu2velo['R'], imu2velo['T'])
cam_2rect_mat = transform_from_rot_trans(cam2cam['R_rect_00'], np.zeros(3))
imu2cam = cam_2rect_mat @ velo2cam_mat @ imu2velo_mat
for n, f in enumerate(oxts):
metadata = np.genfromtxt(f)
speed = metadata[8:11]
scene_data['speed'].append(speed)
scene_data['frame_id'].append('{:010d}'.format(n))
lat = metadata[0]
if scale is None:
scale = np.cos(lat * np.pi / 180.)
pose_matrix = pose_from_oxts_packet(metadata[:6], scale)
if origin is None:
origin = pose_matrix
odo_pose = imu2cam @ np.linalg.inv(origin) @ pose_matrix @ np.linalg.inv(imu2cam)
scene_data['pose'].append(odo_pose[:3])
sample = self.load_image(scene_data, 0)
if sample is None:
return []
scene_data['P_rect'] = self.get_P_rect(scene_data, sample[1], sample[2])
scene_data['intrinsics'] = scene_data['P_rect'][:, :3]
train_scenes.append(scene_data)
return train_scenes
def get_scene_imgs(self, scene_data):
def construct_sample(scene_data, i, frame_id):
sample = {"img": self.load_image(scene_data, i)[0], "id": frame_id}
if self.get_depth:
sample['depth'] = self.get_depth_map(scene_data, i)
if self.get_pose:
sample['pose'] = scene_data['pose'][i]
return sample
if self.from_speed:
cum_displacement = np.zeros(3)
for i, (speed1, speed2, t1, t2) in enumerate(zip(scene_data['speed'][1:],
scene_data['speed'][:-1],
scene_data['time'][1:],
scene_data['time'][:-1])):
print(speed1, speed2, t1, t2)
cum_displacement += 0.5*(speed1 + speed2) / (t2-t1)
disp_mag = np.linalg.norm(cum_displacement)
if disp_mag > self.min_disp:
frame_id = scene_data['frame_id'][i]
yield construct_sample(scene_data, i, frame_id)
cum_displacement *= 0
else: # from static frame file
drive = str(scene_data['dir'].name)
for (i, frame_id) in enumerate(scene_data['frame_id']):
if (drive not in self.static_frames.keys()) or (frame_id not in self.static_frames[drive]):
yield construct_sample(scene_data, i, frame_id)
def get_P_rect(self, scene_data, zoom_x, zoom_y):
calib_file = scene_data['dir'].parent/'calib_cam_to_cam.txt'
filedata = read_calib_file(calib_file)
P_rect = np.reshape(filedata['P_rect_' + scene_data['cid']], (3, 4))
P_rect[0] *= zoom_x
P_rect[1] *= zoom_y
return P_rect
def load_image(self, scene_data, tgt_idx):
img_file = scene_data['dir']/'image_{}'.format(scene_data['cid'])/'data'/scene_data['frame_id'][tgt_idx]+'.png'
if not img_file.isfile():
return None
img = imread(img_file)
zoom_y = self.img_height/img.shape[0]
zoom_x = self.img_width/img.shape[1]
img = imresize(img, (self.img_height, self.img_width))
# workaround for skimage (float [0 .. 1]) and imageio (uint8 [0 .. 255]) interoperability
img = (img * 255).astype(np.uint8)
return img, zoom_x, zoom_y
    def get_depth_map(self, scene_data, tgt_idx):
        """Project the velodyne point cloud of frame tgt_idx into the image
        plane and return the resulting depth map (via generate_depth_map)."""
        # compute projection matrix velodyne->image plane
        R_cam2rect = np.eye(4)
        calib_dir = scene_data['dir'].parent
        cam2cam = read_calib_file(calib_dir/'calib_cam_to_cam.txt')
        velo2cam = read_calib_file(calib_dir/'calib_velo_to_cam.txt')
        # Assemble the 4x4 homogeneous velodyne->camera transform from R|T.
        velo2cam = np.hstack((velo2cam['R'].reshape(3, 3), velo2cam['T'][..., np.newaxis]))
        velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0])))
        # Apply the rectifying rotation of camera 0 before projection.
        R_cam2rect[:3, :3] = cam2cam['R_rect_00'].reshape(3, 3)
        velo2cam = np.dot(R_cam2rect, velo2cam)
        velo_file_name = scene_data['dir']/'velodyne_points'/'data'/'{}.bin'.format(scene_data['frame_id'][tgt_idx])
        return generate_depth_map(velo_file_name, scene_data['P_rect'], velo2cam,
                                  self.img_width, self.img_height, self.depth_size_ratio)
| 42.918033 | 119 | 0.564298 | 7,564 | 0.963076 | 1,519 | 0.193405 | 0 | 0 | 0 | 0 | 819 | 0.104278 |
1b686f4d493c18d208dadc42777a3824c6ba09a2 | 4,622 | py | Python | process_patient_data/draw_general_samples.py | ChrisSheng97/hackauton | 7c53579974604e8bc97363e038aaafb0aad6e7b4 | [
"MIT"
] | 1 | 2020-05-03T11:24:16.000Z | 2020-05-03T11:24:16.000Z | process_patient_data/draw_general_samples.py | sumrania/hackauton | 7c53579974604e8bc97363e038aaafb0aad6e7b4 | [
"MIT"
] | null | null | null | process_patient_data/draw_general_samples.py | sumrania/hackauton | 7c53579974604e8bc97363e038aaafb0aad6e7b4 | [
"MIT"
] | 1 | 2019-07-26T20:13:58.000Z | 2019-07-26T20:13:58.000Z | import pickle
import numpy as np
import feature_extraction as fe
""" source : https://www.census.gov/quickfacts/fact/table/alleghenycountypennsylvania/PST045216 """
CURR_YEAR = 2015
# gender
FEMALE_PERCENT = 0.517 # 4327
# MALE = 0.483 # 3134
# age
# BELOW_18 = 0.189 # 0
OVER_65_PERCENT = 0.18 # 4353
# OTHER = 0.631 # 3108
OTHER = 0.82
# race
WHITE = 0.805 # 3184
BLACK = 0.134 # 2294
ASIAN = 0.037 # 1244
# OTHER = 0.024 # 739
def draw_general_sample(num_samples, modified_patient_data, feature='gender', percent=[FEMALE_PERCENT]):
    """Draw a demographically weighted sample of patient records.

    Args:
        num_samples: total number of records to draw.
        modified_patient_data: dict of patient id -> record dict; each record
            must have 'dob' plus the attribute named by ``feature``.
        feature: 'gender', 'age' or 'race' -- the attribute to balance on.
        percent: target fractions; one value for gender (female share) or age
            (over-65 share), three values for race (white, black, asian).

    Returns:
        dict of chosen patient id -> record, or None on invalid input.

    Note:
        np.random.choice samples WITH replacement by default, so fewer than
        num_samples distinct ids may be returned (behavior preserved).
    """
    # check if num_samples is reasonable
    if num_samples > len(modified_patient_data):
        print('data points collected fewer than required!')
        return None
    # check if the feature categories and given number of percentages is correct
    if not ((feature.lower() == 'gender' and len(percent) == 1) \
        or (feature.lower() == 'age' and len(percent) == 1) \
        or (feature.lower() == 'race' and len(percent) == 3)):
        print('unmatched percentage!')
        return None
    # derive the 'age' field for every record before grouping
    _add_age(modified_patient_data)
    # draw samples
    # (.iteritems() was Python-2-only; .items() works on both 2 and 3)
    if feature.lower() == 'gender':
        female_fraction = percent[0]
        female_need = int(num_samples * female_fraction)
        male_need = int(num_samples * (1 - female_fraction))
        female_group, male_group = _split_gender(modified_patient_data)
        # get id
        fp_id = np.random.choice(list(female_group.keys()), female_need)
        # bug fix: the male draw previously used female_need
        mp_id = np.random.choice(list(male_group.keys()), male_need)
        # get sample
        sample_chosen = {k: v for k, v in modified_patient_data.items()
                         if k in fp_id or k in mp_id}
    elif feature.lower() == 'age':
        over_65_fraction = percent[0]
        elder_need = int(num_samples * over_65_fraction)
        adult_need = int(num_samples * (1 - over_65_fraction))
        adult, elder = _split_age(modified_patient_data)
        # get id -- bug fix: adult/elder counts were swapped in the original
        ap_id = np.random.choice(list(adult.keys()), adult_need)
        ep_id = np.random.choice(list(elder.keys()), elder_need)
        # get sample
        sample_chosen = {k: v for k, v in modified_patient_data.items()
                         if k in ap_id or k in ep_id}
    elif feature.lower() == 'race':
        white_frac, black_frac, asian_frac = percent
        other_frac = 1 - white_frac - black_frac - asian_frac
        # group patient data
        white_need = int(num_samples * white_frac)
        black_need = int(num_samples * black_frac)
        asian_need = int(num_samples * asian_frac)
        other_need = int(num_samples * other_frac)
        white, black, asian, other = _split_race(modified_patient_data)
        # get id
        w_id = np.random.choice(list(white.keys()), white_need)
        b_id = np.random.choice(list(black.keys()), black_need)
        a_id = np.random.choice(list(asian.keys()), asian_need)
        o_id = np.random.choice(list(other.keys()), other_need)
        # get sample
        sample_chosen = {k: v for k, v in modified_patient_data.items()
                         if k in w_id or k in b_id or k in a_id or k in o_id}
    return sample_chosen
def _add_age(modified_patient_data):
    """Annotate every patient record in place with an integer 'age',
    derived from the birth year in 'dob' (ISO 'YYYY-MM-DD') and CURR_YEAR."""
    for record in modified_patient_data.values():
        birth_year = int(record['dob'].split('-')[0])
        record['age'] = int(CURR_YEAR - birth_year)
def _split_gender(modified_patient_data):
female_group = {}
male_group = {}
for pid in modified_patient_data:
data = modified_patient_data[pid]
if data['gender'].lower() == 'female':
female_group[pid] = data
elif data['gender'].lower() == 'male':
male_group[pid] = data
elif np.random.randint(2): # Unknown case
female_group[pid] = data
else:
male_group[pid] = data
return female_group, male_group
def _split_age(single_group):
adult = {}
elder = {}
for pid in single_group:
data = single_group[pid]
if data['age'] > 65:
elder[pid] = data
else:
adult[pid] = data
return adult, elder
def _split_race(single_group):
white = {}
black = {}
asian = {}
other = {}
for pid in single_group:
data = single_group[pid]
if data['race'].lower() == 'white':
white[pid] = data
elif data['race'].lower() == 'black':
black[pid] = data
elif data['race'].lower() == 'asian':
asian[pid] = data
else:
other[pid] = data
return white, black, asian, other
if __name__ == "__main__":
draw_general_sample(2000)
| 35.829457 | 129 | 0.618563 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 713 | 0.154262 |
1b6956f7c730ca88993d0e21d27dfbe960d89092 | 480 | py | Python | 1 Semestre/Analise e Projeto de Algoritmos (Mauricio)/Aula 5 Aplicacoes com Selecao/Exercicio 8.py | LukasHeidern/Univem-Aulas | 3e7f24eb90a0bfe2af8d49ead958cf4d7072257c | [
"MIT"
] | null | null | null | 1 Semestre/Analise e Projeto de Algoritmos (Mauricio)/Aula 5 Aplicacoes com Selecao/Exercicio 8.py | LukasHeidern/Univem-Aulas | 3e7f24eb90a0bfe2af8d49ead958cf4d7072257c | [
"MIT"
] | null | null | null | 1 Semestre/Analise e Projeto de Algoritmos (Mauricio)/Aula 5 Aplicacoes com Selecao/Exercicio 8.py | LukasHeidern/Univem-Aulas | 3e7f24eb90a0bfe2af8d49ead958cf4d7072257c | [
"MIT"
] | null | null | null | '''8) Elabore um algoritmo que leia 3 valores inteiros (a,b e c) e os coloque em
ordem crescente, de modo que em a fique o menor valor, em b o valor intermediário e
em c o maior valor. '''
a = int(input("Digite o valor do primeiro valor: "))
b = int(input("Digite o valor do segundo valor: "))
c = int(input("Digite o valor do terceiro valor: "))
print(f"Antes: {a} - {b} - {c}")
if a > b: a,b = b,a
if a > c: a,c = c,a
if b > c: b,c = c,b
print(f"Depois: {a} - {b} - {c}") | 30 | 84 | 0.61875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 349 | 0.725572 |
1b6b9bb3b81f1790e19b1a9ea45c871f7adbdef4 | 796 | py | Python | python_developer_tools/machinelearning/knn.py | carlsummer/python_developer_tools | a8c4365b7cc601cda55648cdfd8c0cb1faae132f | [
"Apache-2.0"
] | 32 | 2021-06-21T04:49:48.000Z | 2022-03-29T05:46:59.000Z | python_developer_tools/machinelearning/knn.py | HonestyBrave/python_developer_tools | fc0dcf5c4ef088e2e535206dc82f09bbfd01f280 | [
"Apache-2.0"
] | 1 | 2021-11-12T03:45:55.000Z | 2021-11-12T03:45:55.000Z | python_developer_tools/machinelearning/knn.py | HonestyBrave/python_developer_tools | fc0dcf5c4ef088e2e535206dc82f09bbfd01f280 | [
"Apache-2.0"
] | 10 | 2021-06-03T08:05:05.000Z | 2021-12-13T03:10:42.000Z | # !/usr/bin/env python
# -- coding: utf-8 --
# @Author zengxiaohui
# Datatime:8/20/2021 8:39 AM
# @File:knn
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn import datasets
# Load the iris dataset and hold out 40% of it for evaluation.
X, y = datasets.load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
# Fit a single k=6 classifier and run a throwaway prediction (result unused).
kNN_classifier = KNeighborsClassifier(n_neighbors=6)
kNN_classifier.fit(X_train,y_train)
kNN_classifier.predict(X_test)
# Grid-search k in [1, 10] by test-set accuracy and report the best.
# NOTE(review): selecting k on the test set leaks information; a validation
# split or cross-validation would be the sound procedure.
best_score = 0.0
best_k = -1
for k in range(1,11):
    knn_clf = KNeighborsClassifier(n_neighbors=k)
    knn_clf.fit(X_train,y_train)
    score = knn_clf.score(X_test,y_test)
    if score > best_score:
        best_k = k
        best_score=score
print("best_k=",best_k)
print("best_score=",best_score)
| 30.615385 | 88 | 0.752513 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.157035 |
1b6d044699c6c19b255bab4c846b102b0a2b1aae | 7,604 | py | Python | test/basemodule_test.py | nktankta/PytorchCNNModules | bc1469ceb37477d3f60062f14a750f272e7ceeb0 | [
"MIT"
] | null | null | null | test/basemodule_test.py | nktankta/PytorchCNNModules | bc1469ceb37477d3f60062f14a750f272e7ceeb0 | [
"MIT"
] | null | null | null | test/basemodule_test.py | nktankta/PytorchCNNModules | bc1469ceb37477d3f60062f14a750f272e7ceeb0 | [
"MIT"
] | null | null | null | import pytest
import torch
import torch.nn as nn
from module_list import get_test_module
from PytorchCNNModules.modules.base_module import BaseModule,SEmodule
class CNN(BaseModule):
    """Minimal concrete BaseModule used as a test fixture: a single 3x3
    convolution with padding 1 (spatial size preserved when stride == 1).
    NOTE: 'out_featue' typo is kept -- it is part of the __init__ signature.
    """
    def __init__(self,in_feature,out_featue,stride=1):
        super(CNN,self).__init__(in_feature,out_featue,stride)
        self.cnn = nn.Conv2d(in_feature,out_featue,3,stride,1)
    def _forward(self,x):
        # BaseModule hook: the raw (pre-residual/dense) computation.
        return self.cnn(x)
class Identity(BaseModule):
    """Pass-through BaseModule fixture: returns its input unchanged, except
    that a stride > 1 is emulated with a 1x1 average pool so output spatial
    sizes match what a strided conv would produce. Normalization is disabled
    via norm_layer=nn.Identity so values flow through untouched.
    """
    def __init__(self,in_feature,out_featue,stride=1,**kwargs):
        super(Identity,self).__init__(in_feature,out_featue,stride,norm_layer = nn.Identity,**kwargs)
    def _forward(self,x,*args):
        # extra positional args (e.g. a second input) are deliberately ignored
        if self.stride!=1:
            x = nn.AvgPool2d(1,stride=self.stride)(x)
        return x
test_modules = get_test_module()
# Each tuple: (input_shape, expected_output_shape, channels_in, channels_out, stride)
normal_test = [
    ((2, 3, 5, 5), (2, 10, 5, 5), 3, 10, 1),
    ((1, 10, 10, 10), (1, 20, 10, 10), 10, 20, 1),
    ((5, 8, 20, 20), (5, 16, 20, 20), 8, 16, 1),
    ((2, 3, 10, 10), (2, 10, 5, 5), 3, 10, 2),
    ((2, 3, 5, 5), (2, 10, 3, 3), 3, 10, 2)
]
# Residual (additive) cases require channels_in == channels_out.
residual_test = [
    ((2, 10, 5, 5), (2, 10, 5, 5), 10, 10, 1),
    ((1, 17, 10, 10), (1, 17, 10, 10), 17, 17, 1),
    ((2, 10, 5, 5), (2, 10, 3, 3), 10, 10, 2),
    ((1, 17, 10, 10), (1, 17, 5, 5), 17, 17, 2),
]
# Dense cases: output channels = channels_in + channels_out (concatenation).
dense_test = [
    ((2, 3, 5, 5), (2, 13, 5, 5), 3, 10, 1),
    ((1, 10, 10, 10), (1, 30, 10, 10), 10, 20, 1),
    ((5, 8, 20, 20), (5, 24, 20, 20), 8, 16, 1),
    ((2, 3, 10, 10), (2, 13, 5, 5), 3, 10, 2),
    ((2, 3, 5, 5), (2, 13, 3, 3), 3, 10, 2)
]
def test_residual_featuresize_exception():
    # to_residual with mismatched in/out channels must assert.
    # NOTE(review): pytest.raises `match` is a regex; the square brackets form
    # a character class, so this matches any single listed character rather
    # than the literal phrase.
    with pytest.raises(AssertionError,match="[residual feature size error]"):
        CNN(10,5,1).to_residual()
def test_residual_aggregation_error():
    # Unknown aggregation mode must raise NotImplementedError.
    with pytest.raises(NotImplementedError):
        CNN(10,10,2).to_residual(aggregation="test")
def test_dense_downsample_error():
    # Unknown dense downsample mode must raise NotImplementedError.
    with pytest.raises(NotImplementedError):
        CNN(10, 10, 2).to_dense(downsample="test")
def test_residual_activation_bool():
    # activation=True should apply a default ReLU-like clamp at 0.
    inp = torch.randn((2,10,20,20))
    module = Identity(10,10,1).to_residual(activation=True)
    out = module(inp)
    assert torch.min(out).item()>=0
def test_residual_activation():
    # An explicit activation class (ReLU6) caps the output at 6.
    inp = torch.ones((2,10,20,20))*10
    module = Identity(10,10,1).to_residual(activation=nn.ReLU6)
    out = module(inp)
    assert torch.max(out).item()<=6
def test_residual_preactivation_bool():
    # Pre-activation clamps the branch input but the skip path still adds
    # the raw (negative) input, hence the -1 lower bound.
    inp = -torch.ones((2,10,20,20))
    module = Identity(10,10,1).to_residual(pre_activation=True)
    out = module(inp)
    assert torch.min(out).item()>=-1
def test_residual_preactivation():
    # ReLU6 pre-activation: branch <= 6, skip adds 10, so output <= 16.
    inp = torch.ones((2,10,20,20))*10
    module = Identity(10,10,1).to_residual(pre_activation=nn.ReLU6)
    out = module(inp)
    assert torch.max(out).item()<=16
def test_residual_random_drop():
    # drop_rate=1 always drops the residual branch; only the skip remains.
    inp = torch.ones((2,10,20,20))*1
    module = Identity(10,10,1).to_residual(drop_rate=1)
    out = module(inp)
    assert torch.max(out).item()<=1
def test_semodule():
    # Squeeze-and-excitation must preserve the input shape.
    inp = torch.randn((5, 31, 11, 11))
    module = SEmodule(31)
    out = module(inp)
    assert out.shape == inp.shape
def test_semodule_enable():
    # use_SEmodule must not change output shapes in any of the three modes;
    # dense mode concatenates input channels (10 + 10 = 20).
    inp = torch.ones((2,10,20,20))
    out_normal = torch.empty((2,10,20,20))
    out_dense = torch.empty((2,20,20,20))
    module = Identity(10,10,1,use_SEmodule=True)
    out = module(inp)
    assert out.shape == out_normal.shape
    module.to_residual()
    out = module(inp)
    assert out.shape == out_normal.shape
    module.to_dense()
    out = module(inp)
    assert out.shape == out_dense.shape
def test_multi_input():
    # Extra positional inputs are accepted (and ignored by Identity) in all
    # three module modes.
    inp1 = torch.randn((2,10,20,20))
    inp2 = torch.randn((2,10,20,20))
    dense_out = torch.empty((2,20,20,20))
    module = Identity(10,10,1)
    out = module(inp1,inp2)
    assert out.shape == inp1.shape
    module.to_residual()
    out = module(inp1,inp2)
    assert out.shape == inp1.shape
    module.to_dense()
    out = module(inp1,inp2)
    assert out.shape == dense_out.shape
@pytest.mark.parametrize("input_shape", [(2,10,20,20),(2,10,5,5)])
@pytest.mark.parametrize("output_feature", [10,20])
@pytest.mark.parametrize("downsample", ["conv","max","avg"])
def test_residual_downsample_add(input_shape,output_feature,downsample):
    # Additive residual with stride 2: spatial dims follow ceil((w-1)/2)+... ,
    # i.e. (w-1)//2 + 1, for every downsample implementation.
    n,c,w,h = input_shape
    inp = torch.randn(input_shape)
    downsample_out = (n,output_feature,(w-1)//2+1,(h-1)//2+1)
    module = CNN(10,output_feature,2).to_residual(aggregation="add",downsample=downsample)
    out = module(inp)
    assert out.shape == torch.empty(downsample_out).shape
@pytest.mark.parametrize("input_shape,output_shape", [((2,10,20,20),(2,20,10,10)),((2,20,5,5),(2,40,3,3))])
@pytest.mark.parametrize("downsample", ["conv","max","avg"])
def test_residual_downsample_conc(input_shape,output_shape,downsample):
    # Concatenating residual doubles the channel count while downsampling.
    inp = torch.randn(input_shape)
    module = CNN(input_shape[1],input_shape[1],2).to_residual(aggregation="concatenate",downsample=downsample)
    out = module(inp)
    assert out.shape == torch.empty(output_shape).shape
@pytest.mark.parametrize("input_shape,output_shape", [((2,10,20,20),(2,20,10,10)),((2,20,5,5),(2,40,3,3))])
@pytest.mark.parametrize("downsample", ["conv","max","avg"])
def test_dense_downsample(input_shape,output_shape,downsample):
    # Dense mode with stride 2: channels double, spatial dims shrink.
    inp = torch.randn(input_shape)
    module = CNN(input_shape[1],input_shape[1],2).to_dense(downsample)
    out = module(inp)
    assert out.shape == torch.empty(output_shape).shape
@pytest.mark.parametrize("input_shape,output_shape,channel_in,channel_out,stride", normal_test)
def test_module(input_shape,output_shape,channel_in,channel_out,stride):
    # Plain mode: output shape matches the expected conv output.
    input = torch.randn(input_shape)
    module = CNN(channel_in,channel_out,stride)
    output = module(input)
    assert output.shape == torch.empty(output_shape).shape
@pytest.mark.parametrize("input_shape,output_shape,channel_in,channel_out,stride", normal_test)
def test_cuda_module(input_shape,output_shape,channel_in,channel_out,stride):
    # Same as test_module but on GPU. NOTE(review): unconditionally calls
    # .cuda(); fails on CPU-only hosts -- consider a skipif guard.
    input = torch.randn(input_shape).cuda()
    module = CNN(channel_in,channel_out,stride).cuda()
    output = module(input)
    assert output.shape == torch.empty(output_shape).shape
@pytest.mark.parametrize("input_shape,output_shape,channel_in,channel_out,stride", residual_test)
def test_residual_add(input_shape,output_shape,channel_in,channel_out,stride):
    # Additive residual preserves channel count (residual_test guarantees
    # channel_in == channel_out).
    input = torch.randn(input_shape)
    module = CNN(channel_in, channel_out, stride).to_residual(aggregation="add")
    output = module(input)
    assert output.shape == torch.empty(output_shape).shape
@pytest.mark.parametrize("input_shape,output_shape,channel_in,channel_out,stride", dense_test)
def test_residual_concat(input_shape,output_shape,channel_in,channel_out,stride):
    # Concatenating residual: output channels = channel_in + channel_out.
    input = torch.randn(input_shape)
    module = CNN(channel_in, channel_out, stride).to_residual(aggregation="concatenate")
    output = module(input)
    assert output.shape == torch.empty(output_shape).shape
@pytest.mark.parametrize("input_shape,output_shape,channel_in,channel_out,stride", dense_test)
@pytest.mark.parametrize("downsample", ["conv","avg","max"])
def test_dense(input_shape,output_shape,channel_in,channel_out,stride,downsample):
    """Dense mode shape check across all downsample implementations.

    Bug fix: the parametrized ``downsample`` argument was never forwarded to
    to_dense(), so all three parametrizations tested the same default path
    (cf. test_dense_downsample, which does pass it through).
    """
    input = torch.randn(input_shape)
    module = CNN(channel_in, channel_out, stride).to_dense(downsample)
    output = module(input)
    assert output.shape == torch.empty(output_shape).shape
@pytest.mark.parametrize("input_shape,output_shape,channel_in,channel_out,stride", normal_test)
def test_backward(input_shape,output_shape,channel_in,channel_out,stride):
    # Gradients must flow back to the input and have matching shape.
    input = torch.randn(input_shape,requires_grad=True)
    module = CNN(channel_in,channel_out,stride)
    output = module(input)
    torch.sum(output).backward()
    assert input.grad.shape == input.shape
1b72096dfb377d3bbc56672d41f1e99b14c18550 | 10,981 | py | Python | experiments/rex_example.py | aws-samples/aws-lex-retrieval-extraction-lm-pt | b9de2e9844593e0c0bbd380ed3ea126c1b33fa32 | [
"Apache-2.0"
] | 1 | 2021-05-31T14:55:09.000Z | 2021-05-31T14:55:09.000Z | experiments/rex_example.py | aws-samples/aws-lex-retrieval-extraction-lm-pt | b9de2e9844593e0c0bbd380ed3ea126c1b33fa32 | [
"Apache-2.0"
] | null | null | null | experiments/rex_example.py | aws-samples/aws-lex-retrieval-extraction-lm-pt | b9de2e9844593e0c0bbd380ed3ea126c1b33fa32 | [
"Apache-2.0"
] | 1 | 2021-06-10T18:53:03.000Z | 2021-06-10T18:53:03.000Z | import os
import gc
import sys
import copy
import psutil
import torch
import gzip
import json
import glob
import random
from collections import Counter
from functools import partial
from multiprocessing import Pool
from os import listdir
from os.path import isfile, join
from tqdm import tqdm, trange
from transformers import *
from torch.utils.data import Dataset, Sampler
def get_position(entity, text, tokenizer, max_length):
    """Tokenize *text* and locate the token span covering *entity*.

    Returns a dict with:
        'position': (start, end) token indices of the answer, inclusive,
                    or (0, 0) when the answer is absent/not found;
        'text_tok': the encoded token ids of *text* (truncated to max_length);
        'has_ans':  True iff the answer span was located in the token ids.
    """
    token_ids = tokenizer.encode(text, max_length=max_length)

    def locate_span():
        # Fast string-level rejection before the token-level scan.
        if entity not in text:
            return (0, 0), False
        # Encode the answer and strip the special tokens at both ends.
        needle = tokenizer.encode(entity)[1:-1]
        width = len(needle)
        for start in range(len(token_ids)):
            if token_ids[start] == needle[0] and token_ids[start:start + width] == needle:
                return (start, start + width - 1), True
        # Present in the string but not in the (possibly truncated) tokens.
        return (0, 0), False

    span, found = locate_span()
    return {
        'position': span,
        'text_tok': token_ids,
        'has_ans': found,
    }
class ContextSampler(object):
    '''
    for each query, randomly sample 1 positive and n negative samples
    for retrieval-extraction pretraining
    '''
    def __init__(self, entity, contexts, tokenizer, max_length):
        # Pre-tokenize every context once and bucket it as positive
        # (contains the answer span) or negative.
        self.entity = entity
        self.contexts = contexts
        self.pos_ctx = []
        self.neg_ctx = []
        self.num_pos_ctx = 0
        self.num_neg_ctx = 0
        for context in contexts:
            index = get_position(entity, context, tokenizer, max_length)
            if index['has_ans']:
                self.pos_ctx.append(index)
                self.num_pos_ctx += 1
            if not index['has_ans']:
                self.neg_ctx.append(index)
                self.num_neg_ctx += 1
    def sample(self, num_samples):
        """Return (contexts, label): num_samples-1 negatives plus one
        positive inserted at random index `label`.
        Raises ValueError (via random.sample) if there are fewer than
        num_samples-1 negatives or no positives.
        """
        neg_num = num_samples - 1
        # randint is inclusive on both ends, so label may equal neg_num
        # (positive appended at the very end).
        label = random.randint(0, neg_num)
        ctx_list = random.sample(self.neg_ctx, neg_num)
        correct_sample = random.sample(self.pos_ctx, 1)
        ctx_list = ctx_list[:label] + correct_sample + ctx_list[label:]
        return ctx_list, label
class RexExample(object):
    '''
    A Rex example for pretraining, containing a query and a context sampler
    Samples data for retrieval and pretraining
    '''
    def __init__(self, q_id, question, answer, contexts, tokenizer,
                 ques_max_length=64, ctx_max_length=320, all_max_length=384):
        self.q_id = q_id
        self.question_text = question
        self.answer_text = answer
        self.ques_max_length = ques_max_length
        self.ctx_max_length = ctx_max_length
        self.all_max_length = all_max_length
        self.pad_token_id = tokenizer.pad_token_id
        self.context_sampler = ContextSampler(answer, contexts, tokenizer, ctx_max_length)
        self.ques_ids = tokenizer.encode(question, max_length=ques_max_length)
        self.ques_length = len(self.ques_ids)
        self.num_ques_pads = ques_max_length - self.ques_length
        # NOTE(review): padding positions in the question mask are filled with
        # pad_token_id rather than 0 -- equivalent only when pad_token_id == 0;
        # confirm against the tokenizer in use.
        self.ques_attention_mask = [1] * self.ques_length + [tokenizer.pad_token_id] * self.num_ques_pads
        # Segment ids: 0 for question tokens, 1 for everything after.
        self.token_type_id = [0] * self.ques_length + [1] * (all_max_length - self.ques_length)
        # Contexts sampled by the most recent get_ret_example call.
        self.cur_ret_examples = []
    def reset(self):
        # Drop the cached retrieval contexts.
        self.cur_ret_examples = []
    def gen_attention_mask(self, input_ids, max_length=None, paded=False):
        """Build a 0/1 attention mask; paded=True infers padding from ids.
        NOTE(review): with paded=True a genuine pad_token_id occurring inside
        the text would be masked out as padding."""
        if paded:
            attention_mask = [int(x != self.pad_token_id) for x in input_ids]
        else:
            attention_mask = [1] * len(input_ids) + [0] * (max_length - len(input_ids))
        return attention_mask
    def pad(self, ids, max_length):
        # Right-pad a token-id list to max_length with the pad token.
        return ids + [self.pad_token_id] * (max_length - len(ids))
    def get_ret_example(self, num_ctx):
        '''
        Sample training targets for retrieval
        '''
        ctx_list, label = self.context_sampler.sample(num_ctx)
        # Cache the sampled contexts so get_ext_example(ctx_id=...) can refer
        # back to them after the retrieval step.
        self.cur_ret_examples = ctx_list
        paded_ctx_ids = [self.pad(x['text_tok'], self.ctx_max_length) for x in ctx_list]
        ctx_attention_mask = [self.gen_attention_mask(x, paded=True) for x in paded_ctx_ids]
        return {
            'ques_ids': self.pad(self.ques_ids, self.ques_max_length),
            'ques_attention_mask': self.ques_attention_mask,
            'ctx_ids': paded_ctx_ids,
            'ctx_attention_mask': ctx_attention_mask,
            'label': label
        }
    def get_ext_example(self, ctx_id=None, ctx_obj=None):
        '''
        Sample training targets for extraction based on retrieval results.
        '''
        # NOTE(review): a ctx_id of 0 is falsy, so `if ctx_id:` falls through
        # to the ctx_obj branch; calling with ctx_id=0 and no ctx_obj leaves
        # `ctx` unbound -- verify callers never pass index 0 this way.
        if ctx_id:
            ctx = self.cur_ret_examples[ctx_id]
        elif ctx_obj:
            ctx = ctx_obj
        # Concatenate question + context (dropping the context's leading
        # special token) and pad to the joint max length.
        token_ids = self.pad(self.ques_ids + ctx['text_tok'][1:], self.all_max_length)
        attention_mask = self.gen_attention_mask(token_ids, paded=True)
        start_position = ctx['position'][0]
        end_position = ctx['position'][1]
        # Shift answer positions past the question prefix (minus the dropped
        # special token); (0, 0) means "no answer" and is left untouched.
        if start_position:
            start_position += self.ques_length - 1
            end_position += self.ques_length - 1
        self.reset()
        return {
            'token_ids': token_ids,
            'attention_mask': attention_mask,
            'start_position': start_position,
            'end_position': end_position,
            'token_type_id': self.token_type_id
        }
    def get_rex_example(self, num_ctx):
        '''
        Sampling training data for combined retrieval-extraction training.
        '''
        ctx_list, label = self.context_sampler.sample(num_ctx)
        examples = [self.get_ext_example(ctx_obj=x) for x in ctx_list]
        return {
            'token_ids': torch.Tensor([x['token_ids'] for x in examples]).long(),
            'attention_mask': torch.Tensor([x['attention_mask'] for x in examples]).long(),
            'start_position': torch.Tensor([x['start_position'] for x in examples]).long(),
            'end_position': torch.Tensor([x['end_position'] for x in examples]).long(),
            'token_type_id': torch.Tensor([x['token_type_id'] for x in examples]).long(),
            'label': label
        }
class RexBatch(object):
    '''
    Combining RexExamples as a batch
    '''
    def __init__(self, rex_examples: list, num_ctx: int, device):
        """Sample num_ctx contexts from every example and stack the results
        into flat (batch_size * num_ctx, ...) tensors on `device`.

        'idx_base' holds the flat-tensor offset of each example's first
        context, i.e. [0, num_ctx, 2*num_ctx, ...].
        """
        self.rex_examples = rex_examples
        self.batch_size = len(rex_examples)
        self.device = device
        self.num_ctx = num_ctx
        examples = [rex.get_rex_example(num_ctx) for rex in rex_examples]
        self.data = {
            'input_ids': torch.cat([x['token_ids'] for x in examples], dim=0).to(device),
            'attention_mask': torch.cat([x['attention_mask'] for x in examples], dim=0).to(device),
            'start_positions': torch.cat([x['start_position'] for x in examples], dim=0).to(device),
            'end_positions': torch.cat([x['end_position'] for x in examples], dim=0).to(device),
            'token_type_ids': torch.cat([x['token_type_id'] for x in examples], dim=0).to(device),
            'label': torch.Tensor([x['label'] for x in examples]).long().to(device),
            'num_ctx': num_ctx,
            'batch_size': self.batch_size,
            # fix: torch.range is deprecated (inclusive endpoint, float dtype);
            # torch.arange(n) yields the identical [0, n-1] long sequence.
            'idx_base' : torch.arange(self.batch_size, dtype=torch.long).to(device) * num_ctx
        }
    def concat_ques_ctx(self, ctx_pred):
        """Build extraction inputs for the contexts the retriever picked.

        ctx_pred: per-example predicted context index (tensor of len
        batch_size), forwarded to RexExample.get_ext_example.
        NOTE(review): a predicted index of 0 hits get_ext_example's falsy
        ctx_id branch -- see the note on that method.
        """
        device = self.device
        ext_examples = [x.get_ext_example(y) for x, y in zip(self.rex_examples, ctx_pred.tolist())]
        output = {
            'input_ids': torch.Tensor([x['token_ids'] for x in ext_examples]).long().to(device),
            # NOTE(review): this mask stays float while the others are cast
            # with .long() -- preserved as-is; confirm the model's dtype needs.
            'attention_mask': torch.Tensor([x['attention_mask'] for x in ext_examples]).to(device),
            'start_positions': torch.Tensor([x['start_position'] for x in ext_examples]).long().to(device),
            'end_positions': torch.Tensor([x['end_position'] for x in ext_examples]).long().to(device),
            'token_type_ids': torch.Tensor([x['token_type_id'] for x in ext_examples]).long().to(device),
        }
        return output
class RexDataset(Dataset):
    '''
    The dataset for rex examples. Reads raw input files when all data
    in the previous file are used.
    Supports indexing and sequential sampler, does not support random sampler.
    '''
    def __init__(self, data_dir, threads, tokenizer):
        self.tokenizer = tokenizer
        self.threads = threads
        # All gzipped jsonlines shards under data_dir/ssptGen/*/.
        self.fn_list = glob.glob('{}/ssptGen/*/*.gz'.format(data_dir), recursive=True)
        print(len(self.fn_list))
        self.fn_idx = 0
        # Index (exclusive) of the last example available from files read so far.
        self.cur_rex_limit = 0
        self.cur_rex_list = []
        self.update_rex_list()
        print('Finished init')
    def epoch_init(self, load=False):
        """Reset bookkeeping for a new epoch; load=True also reloads the
        first (reshuffled) shard."""
        random.shuffle(self.fn_list)
        self.fn_idx = 0
        if load:
            self.update_rex_list()
        self.rel_idx_base = 0
        # NOTE(review): the 0 assignment is immediately overwritten; kept as-is.
        self.cur_rex_limit = 0
        self.cur_rex_limit = len(self.cur_rex_list)
    def skip_file(self):
        # Advance to the next shard; indices before cur_rex_limit now map
        # into the previous (discarded) shard.
        # NOTE(review): no wrap-around -- fn_idx can run past the end of
        # fn_list and raise IndexError in update_rex_list.
        self.fn_idx += 1
        self.rel_idx_base = self.cur_rex_limit
        self.update_rex_list()
    def update_rex_list(self):
        # Free the previous shard before parsing the next one.
        del self.cur_rex_list
        gc.collect()
        self.cur_rex_list = proc_file(self.fn_list[self.fn_idx], self.threads, self.tokenizer)
        self.cur_rex_limit += len(self.cur_rex_list)
    def __len__(self):
        # NOTE(review): an estimate -- current shard size times the number of
        # remaining shards; shards differ in size, so this is approximate.
        return len(self.cur_rex_list) * (len(self.fn_list) - 1)
    def __getitem__(self, idx):
        # Sequential access only: indices past the loaded shard trigger a
        # lazy load of the next shard.
        if idx > self.cur_rex_limit - 1:
            print(idx)
            self.skip_file()
        rel_idx = idx - self.rel_idx_base
        return self.cur_rex_list[rel_idx]
def proc_line(rex_line: bytes):
    '''
    Process lines in the input .jsonlines files

    rex_line: one raw JSON line as read from the gzip file (bytes; json.loads
    also accepts str). Returns a RexExample, or None when the record has
    fewer than 5 non-empty passages or lacks both positive and negative
    contexts. Relies on the module-global `tokenizer` installed by
    proc_line_init in each worker process.
    '''
    line = json.loads(rex_line)
    # Drop empty passages before counting.
    passages = [p for p in line['passage'] if p]
    if len(passages) < 5:
        return None
    q_id = line['qid']
    question = line['question']
    answer_text = line['answers'][0]
    rex_example = RexExample(
        q_id,
        # Convert the cloze-style blank into the model's mask token.
        question.replace('[BLANK]', '[MASK]'),
        answer_text,
        passages,
        tokenizer
    )
    # Keep only examples usable for sampling: at least one positive and one
    # negative context.
    if rex_example.context_sampler.num_pos_ctx > 0 and\
        rex_example.context_sampler.num_neg_ctx > 0:
        return rex_example
    else:
        del rex_example
        return None
def proc_line_init(tokenizer_for_wiki):
    # Pool initializer: install the tokenizer as a module-global in each
    # worker so proc_line can use it without pickling it per task.
    global tokenizer
    tokenizer = tokenizer_for_wiki
def proc_file(fn, threads, tokenizer):
    '''
    Process input .jsonlines files

    Reads every line of the gzipped jsonlines shard `fn`, converts each to a
    RexExample in a worker pool of `threads` processes, and returns the list
    of non-None examples.
    '''
    in_file = gzip.open(fn)
    jsonls = in_file.readlines()
    with Pool(threads, initializer=proc_line_init, initargs=(tokenizer,)) as p:
        new_rex_list = list(tqdm(p.imap(proc_line, jsonls), total=len(jsonls)))
    # proc_line returns None for unusable records; filter them out.
    rex_list = [rex for rex in new_rex_list if rex]
    print('File {} containes {} Rex examples'.format(fn, len(rex_list)))
    del new_rex_list
    del jsonls
    in_file.close()
    gc.collect()
    return rex_list
| 33.58104 | 107 | 0.626719 | 8,428 | 0.767508 | 0 | 0 | 0 | 0 | 0 | 0 | 1,935 | 0.176213 |
1b72f634399ec5091eab1e68a51eec4a9a4c1881 | 720 | py | Python | LeetcodeAlgorithms/491. Increasing Subsequences/increasing-subsequences.py | Fenghuapiao/PyLeetcode | d804a62643fe935eb61808196a2c093ea9583654 | [
"MIT"
] | 3 | 2019-08-20T06:54:38.000Z | 2022-01-07T12:56:46.000Z | LeetcodeAlgorithms/491. Increasing Subsequences/increasing-subsequences.py | Fenghuapiao/PyLeetcode | d804a62643fe935eb61808196a2c093ea9583654 | [
"MIT"
] | null | null | null | LeetcodeAlgorithms/491. Increasing Subsequences/increasing-subsequences.py | Fenghuapiao/PyLeetcode | d804a62643fe935eb61808196a2c093ea9583654 | [
"MIT"
] | 2 | 2018-06-07T02:56:39.000Z | 2018-08-01T15:27:55.000Z | class Solution(object):
def findSubsequences(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
ans = []
def dfs(nums, start, path, ans):
if len(path) >= 2:
ans.append(tuple(path + []))
for i in range(start, len(nums)):
if i != start and nums[i] == nums[i - 1]:
continue
if path and nums[i] < path[-1]:
continue
path.append(nums[i])
dfs(nums, i + 1, path, ans)
path.pop()
dfs(nums, 0, [], ans)
return list(set(ans))
| 31.304348 | 58 | 0.381944 | 706 | 0.980556 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.111111 |
1b730a1ad9ad1fa78cb7e47435bcdc5ea1daee8d | 1,279 | py | Python | lab01/authserver/lab01_authserver/client_test.py | Boris-Barboris/rsoi | 30b03f50549f7977d5ecb7788b8e22b789f8859f | [
"MIT"
] | null | null | null | lab01/authserver/lab01_authserver/client_test.py | Boris-Barboris/rsoi | 30b03f50549f7977d5ecb7788b8e22b789f8859f | [
"MIT"
] | null | null | null | lab01/authserver/lab01_authserver/client_test.py | Boris-Barboris/rsoi | 30b03f50549f7977d5ecb7788b8e22b789f8859f | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from lab01_authserver_app.oauthclient import *
import json
import requests
import time
import requests
import logging
import http.client
http.client.HTTPConnection.debuglevel = 1
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
pp = PasswordPlugin('admin', 'admin')
client = OAuthClient('http://127.0.0.1:39000', pp, 'debug_client', 'mysecret', 'localhost')
print('\nVerification using password plugin\n')
print('\n\n' + repr(client.verify()) + '\n')
print('\nissuing tokens...\n')
tokens = client.issue_tokens()
print('\n\ntokens:\n')
print('\n' + repr(tokens) + '\n')
tp = TokenPlugin(atoken = tokens['access_token'], rtoken = tokens['refresh_token'])
client.auth_plugin = tp
print('\nVerification using token plugin...\n')
print('\n\n' + repr(client.verify()) + '\n')
#time.sleep(1)
#print('\nVerification using token plugin again...\n')
#print('\n\n' + repr(client.verify()) + '\n')
print('\nrefreshing tokens...\n')
tokens = client.issue_tokens()
print('\n\ntokens:\n')
print('\n' + repr(tokens) + '\n')
print('\nme information...\n')
me = client.me()
print('\n\nme:\n')
print('\n' + repr(me) + '\n')
| 27.804348 | 91 | 0.702893 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 497 | 0.388585 |
1b73f658c2495b3c08ab7ff7d8488599e76db525 | 5,332 | py | Python | vmware_nsx_tempest/tests/nsxv/api/test_flat_network.py | mail2nsrajesh/vmware-nsx | 63154b510b9fd95c10fffae86bfc49073cafeb40 | [
"Apache-2.0"
] | null | null | null | vmware_nsx_tempest/tests/nsxv/api/test_flat_network.py | mail2nsrajesh/vmware-nsx | 63154b510b9fd95c10fffae86bfc49073cafeb40 | [
"Apache-2.0"
] | null | null | null | vmware_nsx_tempest/tests/nsxv/api/test_flat_network.py | mail2nsrajesh/vmware-nsx | 63154b510b9fd95c10fffae86bfc49073cafeb40 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
import test_subnets as SNET
LOG = logging.getLogger(__name__)
class FlatNetworksTestJSON(SNET.SubnetTestJSON):
    """Tempest API tests for Neutron FLAT provider networks on NSX-v.

    Creates networks as the admin user with provider:network_type=flat and
    verifies create/update/list/show behavior, including visibility of
    provider attributes to admin vs. normal users.
    """
    _interface = 'json'
    _provider_network_body = {
        'name': data_utils.rand_name('FLAT-network'),
        'provider:network_type': 'flat'}
    @classmethod
    def resource_setup(cls):
        super(FlatNetworksTestJSON, cls).resource_setup()
    def _create_network(self, _auto_clean_up=True, network_name=None,
                        **kwargs):
        """Create a FLAT provider network as admin; optionally register
        cleanup. Extra kwargs are merged into the create request body."""
        network_name = network_name or data_utils.rand_name('flat-netwk')
        # self.create_network expect network_name
        # self.admin_client.create_network()
        # and self.client.create_network() expect name
        post_body = {'name': network_name,
                     'provider:network_type': 'flat'}
        post_body.update(kwargs)
        LOG.debug("create FLAT network: %s", str(post_body))
        body = self.admin_networks_client.create_network(**post_body)
        network = body['network']
        if _auto_clean_up:
            self.addCleanup(self._try_delete_network, network['id'])
        return network
    @decorators.idempotent_id('dc2f2f46-0577-4e2a-b35d-3c8c8bbce5bf')
    def test_create_network(self):
        # Create a network as an admin user specifying the
        # flat network type attribute
        network = self._create_network()
        # Verifies router:network_type parameter
        self.assertIsNotNone(network['id'])
        self.assertEqual(network.get('provider:network_type'), 'flat')
    @decorators.idempotent_id('777fc335-b26c-42ea-9759-c71dff2ce1c6')
    def test_update_network(self):
        # Update flat network as an admin user specifying the
        # flat network attribute
        network = self._create_network(shared=True, _auto_clean_up=False)
        self.assertEqual(network.get('shared'), True)
        new_name = network['name'] + "-updated"
        update_body = {'shared': False, 'name': new_name}
        body = self.update_network(network['id'], **update_body)
        updated_network = body['network']
        # Verify that name and shared parameters were updated
        self.assertEqual(updated_network['shared'], False)
        self.assertEqual(updated_network['name'], new_name)
        # get flat network attributes and verify them
        body = self.show_network(network['id'])
        updated_network = body['network']
        # Verify that name and shared parameters were updated
        self.assertEqual(updated_network['shared'], False)
        self.assertEqual(updated_network['name'], new_name)
        self.assertEqual(updated_network['status'], network['status'])
        self.assertEqual(updated_network['subnets'], network['subnets'])
        # cleanup was disabled above, so delete explicitly
        self._delete_network(network['id'])
    @decorators.idempotent_id('1dfc1c11-e838-464c-85b2-ed5e4c477c64')
    def test_list_networks(self):
        # Create flat network
        network = self._create_network(shared=True)
        # List networks as a normal user and confirm it is available
        body = self.list_networks(client=self.networks_client)
        network_list = [net['id'] for net in body['networks']]
        self.assertIn(network['id'], network_list)
        update_body = {'shared': False}
        body = self.update_network(network['id'], **update_body)
        # List networks as a normal user and confirm it is not available
        body = self.list_networks(client=self.networks_client)
        network_list = [net['id'] for net in body['networks']]
        self.assertNotIn(network['id'], network_list)
    @decorators.idempotent_id('b5649fe2-a214-4105-8053-1825a877c45b')
    def test_show_network_attributes(self):
        # Create flat network
        network = self._create_network(shared=True)
        # Show a flat network as a normal user and confirm the
        # flat network attribute is returned.
        body = self.show_network(network['id'], client=self.networks_client)
        show_net = body['network']
        self.assertEqual(network['name'], show_net['name'])
        self.assertEqual(network['id'], show_net['id'])
        # provider attributes are for admin only
        body = self.show_network(network['id'])
        show_net = body['network']
        net_attr_list = show_net.keys()
        for attr in ('admin_state_up', 'port_security_enabled', 'shared',
                     'status', 'subnets', 'tenant_id', 'router:external',
                     'provider:network_type', 'provider:physical_network',
                     'provider:segmentation_id'):
            self.assertIn(attr, net_attr_list)
1b76f5bb1aa93e1e73ce925999a4fb1d6faa34a6 | 651 | py | Python | tests/checkstyle.py | MattCJones/engutils | 0115f1896f72f475d9073073469d3495ce511f2f | [
"MIT"
] | 1 | 2020-10-31T16:42:03.000Z | 2020-10-31T16:42:03.000Z | tests/checkstyle.py | MattCJones/engutils | 0115f1896f72f475d9073073469d3495ce511f2f | [
"MIT"
] | 1 | 2021-04-07T09:51:15.000Z | 2021-04-07T09:51:15.000Z | tests/checkstyle.py | MattCJones/engutils | 0115f1896f72f475d9073073469d3495ce511f2f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Check that PEP8 format is followed
Author: Matthew C. Jones
Email: matt.c.jones.aoe@gmail.com
:copyright: 2020 by Optionset authors, see AUTHORS for more details.
:license: GPLv3, see LICENSE for more details.
"""
import subprocess
def check_format(py_file_path):
    """Run pycodestyle (the PEP 8 checker) on *py_file_path* and print its report.

    The report is printed between '=' separator rules.  Both the checker's
    stdout and stderr are shown, so a failure to run pycodestyle at all is
    no longer silently swallowed.
    """
    import shlex  # local import: only needed by this helper

    print("="*60)
    # Quote the path so spaces/shell metacharacters cannot break (or inject
    # into) the shell command line.
    run_str = f"pycodestyle -v {shlex.quote(str(py_file_path))}"
    subproc = subprocess.run(run_str, shell=True, capture_output=True,
                             check=False)
    print(subproc.stdout.decode('UTF-8'), end='')
    # BUG FIX: stderr was captured but never printed, hiding errors such as
    # pycodestyle not being installed.
    if subproc.stderr:
        print(subproc.stderr.decode('UTF-8'), end='')
    print("="*60)
# Run the style check on the package module and on the test-runner script.
check_format("../engutils/engutils.py")
check_format("runtests.py")
| 25.038462 | 70 | 0.675883 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 361 | 0.554531 |
1b772cab2ebc2949130c95b8d651bc778a308880 | 5,233 | py | Python | test/test_version.py | Tismas/bigflow | 6a4a14616d66beeaf45700ea340c97d797a1f9e5 | [
"Apache-2.0"
] | null | null | null | test/test_version.py | Tismas/bigflow | 6a4a14616d66beeaf45700ea340c97d797a1f9e5 | [
"Apache-2.0"
] | null | null | null | test/test_version.py | Tismas/bigflow | 6a4a14616d66beeaf45700ea340c97d797a1f9e5 | [
"Apache-2.0"
] | null | null | null | import re
import os
from shutil import rmtree
from uuid import uuid1
import subprocess
from pathlib import Path
import tempfile
from unittest import TestCase, mock
from bigflow.version import VERSION_PATTERN, bump_minor, release, STARTING_VERSION
from bigflow.commons import resolve
NO_REPOSITORY_VERSION_PATTERN = re.compile(r'^0.1.0SNAPSHOT\w+$')
NO_COMMITS_VERSION_PATTERN = NO_REPOSITORY_VERSION_PATTERN
NO_TAG_VERSION_PATTERN = re.compile(r'^0.1.0SNAPSHOT\w+$')
NO_TAG_DIRTY_VERSION_PATTERN = re.compile(r'^0.1.0SNAPSHOT\w+$')
TAG_ON_HEAD_VERSION_PATTERN = re.compile(r'^\d+\.\d+\.\d+$')
TAG_ON_HEAD_DIRTY_VERSION_PATTERN = re.compile(r'^\d+\.\d+\.\d+SNAPSHOT\w+$')
TAG_NOT_ON_HEAD_VERSION_PATTERN = re.compile(r'^\d+\.\d+\.\d+SHA\w+$')
TAG_NOT_ON_HEAD_DIRTY_VERSION_PATTERN = re.compile(r'^\d+\.\d+\.\d+SHA\w+SNAPSHOT\w+$')
here = str(Path(__file__).absolute()).split(os.sep)
bf_path_index = here.index('bigflow')
bf_path_parts = here[:bf_path_index + 1]
BIGFLOW_PATH = os.path.join(os.sep, *bf_path_parts)
class Project:
def __init__(self):
self.tmp_dir = Path(tempfile.gettempdir())
self.project_dir = resolve(self.tmp_dir / f'bigflow_test_version_{str(uuid1()).replace("-", "")}')
os.mkdir(self.project_dir)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
rmtree(self.project_dir)
def __del__(self):
rmtree(self.project_dir, ignore_errors=True)
def run_cmd(self, cmd):
result = subprocess.getoutput(f'cd {self.project_dir};{cmd}')
print(result)
return result
def get_version(self):
get_version_cmd = f"""python -c 'import sys;sys.path.insert(0, "{BIGFLOW_PATH}");from bigflow.version import get_version;print(get_version())'"""
result = subprocess.getoutput(f'cd {self.project_dir};{get_version_cmd}')
print(result)
return result
class GetVersionE2E(TestCase):
def test_should_version_based_on_git_tags(self):
with Project() as project:
# expect
self.assertTrue(NO_REPOSITORY_VERSION_PATTERN.match(project.get_version()))
# when
project.run_cmd('git init')
# then
self.assertTrue(NO_COMMITS_VERSION_PATTERN.match(project.get_version()))
# when
project.run_cmd("touch file1;git add file1;git commit -m 'file1'")
# then
self.assertTrue(NO_TAG_VERSION_PATTERN.match(project.get_version()))
# when
project.run_cmd('touch file2')
# then
self.assertTrue(NO_TAG_DIRTY_VERSION_PATTERN.match(project.get_version()))
# when
project.run_cmd("git add file2;git commit -m 'file2';git tag 0.2.0")
# then
self.assertTrue(TAG_ON_HEAD_VERSION_PATTERN.match(project.get_version()))
# when
project.run_cmd('touch file3')
# then
self.assertTrue(TAG_ON_HEAD_DIRTY_VERSION_PATTERN.match(project.get_version()))
# when
project.run_cmd("git add file3;git commit -m 'file3'")
# then
self.assertTrue(TAG_NOT_ON_HEAD_VERSION_PATTERN.match(project.get_version()))
# when
project.run_cmd('touch file4')
# then
self.assertTrue(TAG_NOT_ON_HEAD_DIRTY_VERSION_PATTERN.match(project.get_version()))
class ReleaseTestCase(TestCase):
@mock.patch('bigflow.version.push_tag')
@mock.patch('bigflow.version.get_tag')
def test_should_push_bumped_tag(self, get_tag_mock, push_tag_mock):
# given
get_tag_mock.return_value = None
# when
release('fake_pem_path')
# then
push_tag_mock.assert_called_with(STARTING_VERSION, 'fake_pem_path')
# given
get_tag_mock.return_value = '0.2.0'
# when
release('fake_pem_path')
# then
push_tag_mock.assert_called_with('0.3.0', 'fake_pem_path')
class VersionPatternTestCase(TestCase):
def test_version_patter(self):
self.assertTrue(VERSION_PATTERN.match('1.0.0'))
self.assertTrue(VERSION_PATTERN.match('1.0.1'))
self.assertTrue(VERSION_PATTERN.match('1.11.1'))
self.assertTrue(VERSION_PATTERN.match('0.0.1123123'))
self.assertTrue(VERSION_PATTERN.match('0.0.112dev'))
self.assertTrue(VERSION_PATTERN.match('0.0.dev'))
self.assertFalse(VERSION_PATTERN.match('x.0.1123123'))
self.assertFalse(VERSION_PATTERN.match('x.x.1123123'))
self.assertFalse(VERSION_PATTERN.match('0.x.1123123'))
class BumpMinorTestCase(TestCase):
def test_should_bump_minor_(self):
self.assertEqual(bump_minor('1.0.0'), '1.1.0')
self.assertEqual(bump_minor('0.1.0'), '0.2.0')
self.assertEqual(bump_minor('0.1.1'), '0.2.0')
self.assertEqual(bump_minor('0.0.1'), '0.1.0')
self.assertEqual(bump_minor('0.1.dev1'), '0.2.0')
def test_should_raise_value_error_for_invalid_version_schema(self):
# given
invalid_version = 'dev.0.1'
# then
with self.assertRaises(ValueError):
# when
bump_minor(invalid_version) | 33.120253 | 153 | 0.65966 | 4,200 | 0.802599 | 0 | 0 | 545 | 0.104147 | 0 | 0 | 1,069 | 0.204281 |
1b78db61df8513cdfba1daf0489e66f494f59626 | 2,182 | py | Python | misc/eulerhelper.py | Hashi4/vmdgadgets | 07ff21c2ce20ab6dbae410d365b6c2189635944a | [
"Apache-2.0"
] | null | null | null | misc/eulerhelper.py | Hashi4/vmdgadgets | 07ff21c2ce20ab6dbae410d365b6c2189635944a | [
"Apache-2.0"
] | null | null | null | misc/eulerhelper.py | Hashi4/vmdgadgets | 07ff21c2ce20ab6dbae410d365b6c2189635944a | [
"Apache-2.0"
] | null | null | null | import sys
sys.path.append('../vmdgadgets')
import vmdutil
import functools
def alt_dot_v(v1, v2):
    """Dot product built only from '+' and '*' of the operands themselves.

    reduce() is used deliberately instead of sum(): sum() starts from the
    integer 0, which would not combine with symbolic operands such as the
    strexp instances defined below.
    """
    products = [a * b for a, b in zip(v1, v2)]
    return functools.reduce(lambda acc, term: acc + term, products)
# Replace vmdutil's dot_v with the pure '+'/'*' implementation above, so its
# linear-algebra helpers can be driven with symbolic operands.
# NOTE(review): both the package attribute and the inner-module attribute are
# patched -- presumably vmdutil re-exports vmdutil.vmdutil.dot_v; confirm.
vmdutil.vmdutil.dot_v = alt_dot_v
vmdutil.dot_v = alt_dot_v
class strexp():
    """A tiny symbolic scalar for deriving formulas as printable strings.

    The result of +, - and * is a new expression string; the constants
    '0' and '1' are simplified away where algebra allows it.
    """

    def __init__(self, val):
        self.val = val

    def __mul__(self, other):
        # Annihilator first, then the multiplicative identities; otherwise
        # parenthesize both factors.
        if '0' in (self.val, other.val):
            return strexp('0')
        if other.val == '1':
            return self
        if self.val == '1':
            return other
        return strexp('(' + self.val + ')*(' + other.val + ')')

    def __add__(self, other):
        if other.val == '0':
            return self
        if self.val == '0':
            return other
        return strexp(self.val + '+' + other.val)

    def __sub__(self, other):
        if other.val == '0':
            return self
        if self.val == '0':
            return strexp('-' + other.val)
        return strexp(self.val + '-' + other.val)

    def __neg__(self):
        return strexp('-' + self.val)

    def __repr__(self):
        return self.val
if __name__ == '__main__':
    # Derive the quaternion and matrix forms of the z-x-y (global) Euler
    # rotation symbolically, by feeding strexp operands through vmdutil's
    # (monkey-patched) math helpers and printing the resulting expressions.
    zero = strexp('0')
    one = strexp('1')
    # Quaternions as (x, y, z, w); presumably s?/c? stand for the sin/cos of
    # the half-angles of each axis rotation -- confirm against vmdutil.
    p = [strexp('sp'), zero, zero, strexp('cp')] # q(1, 0, 0, wx)
    y = [zero, strexp('sy'), zero, strexp('cy')]
    r = [zero, zero, strexp('sr'), strexp('cr')]
    o = vmdutil.multiply_quaternion(
        vmdutil.multiply_quaternion(r, p), y)
    print('euler2quaternion of z-x-y(global)')
    print(o)
    print()
    # rotx = [[1, 0, 0], [0, cx, -sx], [0, sx, cx]]
    # roty = [[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]]
    # rotz = [[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]]
    cx = strexp('cx')
    cy = strexp('cy')
    cz = strexp('cz')
    sx = strexp('sx')
    sy = strexp('sy')
    sz = strexp('sz')
    print('euler2matrix of z-x-y(global)')
    # Matrix product rotz * rotx * roty, expanded symbolically row by row.
    o = vmdutil.dot_m(
        vmdutil.dot_m(
            [[cz, -sz, zero], [sz, cz, zero], [zero, zero, one]],
            [[one, zero, zero], [zero, cx, -sx], [zero, sx, cx]]),
        [[cy, zero, sy], [zero, one, zero], [-sy, zero, cy]])
    for r in o:
        print(r)
| 28.337662 | 70 | 0.493126 | 910 | 0.417049 | 0 | 0 | 0 | 0 | 0 | 0 | 352 | 0.16132 |
1b790769f9ec806dc08494f05272f0127dd9fe0f | 2,528 | py | Python | app/api/data/service.py | nccr-itmo/FedotWeb | 763fb1f39ad2b69104b6568e6f941c4c67762e34 | [
"BSD-3-Clause"
] | 1 | 2020-12-24T10:49:02.000Z | 2020-12-24T10:49:02.000Z | app/api/data/service.py | nccr-itmo/FedotWeb | 763fb1f39ad2b69104b6568e6f941c4c67762e34 | [
"BSD-3-Clause"
] | null | null | null | app/api/data/service.py | nccr-itmo/FedotWeb | 763fb1f39ad2b69104b6568e6f941c4c67762e34 | [
"BSD-3-Clause"
] | null | null | null | from pathlib import Path
from typing import List, Optional, Tuple
from app.api.meta.service import task_type_from_id
from fedot.core.data.data import DataTypesEnum, InputData
from fedot.core.repository.tasks import Task, TaskParams, TsForecastingParams
from flask import current_app
from utils import project_root
# Bundled demo datasets: maps a dataset name to its train/test CSV paths
# (relative to <project_root>/data) and the fedot data type of its features.
default_datasets = {
    'scoring': {
        'train': 'scoring/scoring_train.csv',
        'test': 'scoring/scoring_test.csv',
        'data_type': DataTypesEnum.table
    },
    'metocean': {
        'train': 'metocean/metocean_train.csv',
        'test': 'metocean/metocean_test.csv',
        'data_type': DataTypesEnum.ts
    },
    'oil': {
        'train': 'oil/oil_train.csv',
        'test': 'oil/oil_test.csv',
        'data_type': DataTypesEnum.table
    }
}
# Maps the string identifiers used by the API to fedot's DataTypesEnum.
data_types = {
    'ts': DataTypesEnum.ts,
    'table': DataTypesEnum.table,
    'image': DataTypesEnum.image,
    'text': DataTypesEnum.text,
}
def get_datasets_names() -> List[str]:
    """Return the names of all bundled demo datasets."""
    return [name for name in default_datasets]
def get_dataset_metadata(dataset_name: str, sample_type: str) -> Tuple[int, int]:
    """Return ``(n_features, n_rows)`` for the given dataset sample.

    Raises ValueError when the dataset/sample combination cannot be loaded.
    """
    data = get_input_data(dataset_name, sample_type)
    if data is None:
        raise ValueError(f'Data for dataset_name={dataset_name} with sample_type={sample_type} must exists')
    shape = data.features.shape
    if len(shape) > 1:
        return shape[1], shape[0]
    # One-dimensional feature array: treat it as a single column.
    return 1, len(data.features)
def get_input_data(dataset_name: str, sample_type: str,
                   task_type: Optional[str] = None,
                   task_params: Optional[TaskParams] = None) -> Optional[InputData]:
    """Load a bundled dataset sample as a fedot ``InputData``.

    :param dataset_name: key into ``default_datasets`` (e.g. 'scoring').
    :param sample_type: 'train' or 'test'.
    :param task_type: optional task identifier; for 'ts_forecasting' a
        default forecast length of 30 is used when no params are given.
    :param task_params: optional explicit task parameters.
    :return: the loaded data, or None when the dataset/sample is unknown.
    """
    # BUG FIX: the KeyError handler is now scoped to the dictionary lookups
    # only.  Previously the whole body was wrapped, so a KeyError raised
    # deep inside fedot's CSV loaders was silently misreported as a missing
    # dataset and swallowed.
    try:
        dataset = default_datasets[dataset_name]
        data_path = dataset[sample_type]
    except KeyError as ex:
        print(f'Dataset {dataset_name} has no data for {sample_type}: {ex}')
        return None
    if task_params is None and task_type == 'ts_forecasting':
        # forecast_length should be defined
        task_params = TsForecastingParams(forecast_length=30)
    task = Task(task_type_from_id(task_type), task_params) if task_type is not None else None
    file_path = Path(project_root(), 'data', data_path)
    if dataset['data_type'] == DataTypesEnum.ts:
        data = InputData.from_csv_time_series(file_path=file_path, task=task, target_column='target')
    else:
        data = InputData.from_csv(file_path=file_path, task=task, data_type=dataset['data_type'])
    return data
| 34.162162 | 108 | 0.673259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 497 | 0.196598 |
1b7abe7f4afad6c95d3ce7822b662af263bb8812 | 1,172 | py | Python | data/strategies/bases.py | jamesrharwood/journal-guidelines | fe6c0a6d3c0443df6fc816b9503fad24459ddb4a | [
"MIT"
] | null | null | null | data/strategies/bases.py | jamesrharwood/journal-guidelines | fe6c0a6d3c0443df6fc816b9503fad24459ddb4a | [
"MIT"
] | null | null | null | data/strategies/bases.py | jamesrharwood/journal-guidelines | fe6c0a6d3c0443df6fc816b9503fad24459ddb4a | [
"MIT"
] | null | null | null | import re
from data.scrape.link_extractors.create_extractor import create_extractor
from data.scrape.utils import clean_url
from .constants import ID
class Strategy:
    """Matches journal URLs against a pattern and derives guideline URLs.

    ``url_pattern`` may contain an ``{ID}`` placeholder which is substituted
    with the shared ID regex from ``constants``; literal dots in the pattern
    are escaped before compilation.
    """

    def __init__(self, url_pattern, template=None, **extractor_args):
        self.url_pattern = url_pattern.format(ID=ID)
        self.url_regex = re.compile(
            self.url_pattern.replace(".", r"\."), flags=re.IGNORECASE
        )
        self.extractor_args = extractor_args
        self.guideline_url_template = template

    def match_url(self, url):
        """Return the regex match for the cleaned *url*, or None."""
        url = clean_url(url)
        return self.url_regex.search(url)

    def matches_url(self, url):
        return bool(self.match_url(url))

    def create_link_extractor(self, url):
        return create_extractor(url, allow_domains=[], **self.extractor_args)

    def generate_guideline_urls(self, url, row):
        """Build the guideline URL(s) for *url* from the configured template.

        Returns an empty list when no template is configured or the URL
        does not match this strategy.
        """
        if self.guideline_url_template is None:
            return []
        match = self.match_url(url)
        # BUG FIX: a non-matching URL used to crash with AttributeError on
        # match.groupdict(); treat it as "no guideline available" instead.
        if match is None:
            return []
        urls = [self.guideline_url_template.format(**match.groupdict(), **row)]
        urls = [url for url in urls if url]
        return urls

    def __repr__(self):
        return f"<Strategy: {self.url_pattern}>"
| 31.675676 | 79 | 0.668942 | 1,018 | 0.868601 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.034983 |
1b7b00a69a0d287e97e0ec900905293bcb8524cc | 1,973 | py | Python | tests/regressiontests/get_or_create_regress/models.py | huicheese/Django-test3 | ac11d2dce245b48392e52d1f4acfd5e7433b243e | [
"BSD-3-Clause"
] | 23 | 2015-01-26T12:16:59.000Z | 2022-02-10T10:58:40.000Z | tests/regressiontests/get_or_create_regress/models.py | joetyson/django | c3699190186561d5c216b2a77ecbfc487d42a734 | [
"BSD-3-Clause"
] | 1 | 2018-01-03T15:26:49.000Z | 2018-01-03T15:26:49.000Z | tests/regressiontests/get_or_create_regress/models.py | joetyson/django | c3699190186561d5c216b2a77ecbfc487d42a734 | [
"BSD-3-Clause"
] | 30 | 2015-03-25T19:40:07.000Z | 2021-05-28T22:59:26.000Z | from django.db import models
class Publisher(models.Model):
    # Display name of the publisher.
    name = models.CharField(max_length=100)
class Author(models.Model):
    # Display name of the author.
    name = models.CharField(max_length=100)
class Book(models.Model):
    # Title of the book.
    name = models.CharField(max_length=100)
    # A book can have several authors and an author several books.
    authors = models.ManyToManyField(Author, related_name='books')
    # Each book belongs to exactly one publisher.
    publisher = models.ForeignKey(Publisher, related_name='books')
# Doctests exercising get_or_create() through (many-)related managers; they
# are collected by Django's test runner via the module-level __test__ mapping.
__test__ = {'one':"""
#
# RelatedManager
#
# First create a Publisher.
>>> p = Publisher.objects.create(name='Acme Publishing')
# Create a book through the publisher.
>>> book, created = p.books.get_or_create(name='The Book of Ed & Fred')
>>> created
True
# The publisher should have one book.
>>> p.books.count()
1
# Try get_or_create again, this time nothing should be created.
>>> book, created = p.books.get_or_create(name='The Book of Ed & Fred')
>>> created
False
# And the publisher should still have one book.
>>> p.books.count()
1
#
# ManyRelatedManager
#
# Add an author to the book.
>>> ed, created = book.authors.get_or_create(name='Ed')
>>> created
True
# Book should have one author.
>>> book.authors.count()
1
# Try get_or_create again, this time nothing should be created.
>>> ed, created = book.authors.get_or_create(name='Ed')
>>> created
False
# And the book should still have one author.
>>> book.authors.count()
1
# Add a second author to the book.
>>> fred, created = book.authors.get_or_create(name='Fred')
>>> created
True
# The book should have two authors now.
>>> book.authors.count()
2
# Create an Author not tied to any books.
>>> Author.objects.create(name='Ted')
<Author: Author object>
# There should be three Authors in total. The book object should have two.
>>> Author.objects.count()
3
>>> book.authors.count()
2
# Try creating a book through an author.
>>> ed.books.get_or_create(name="Ed's Recipies", publisher=p)
(<Book: Book object>, True)
# Now Ed has two Books, Fred just one.
>>> ed.books.count()
2
>>> fred.books.count()
1
"""}
| 21.445652 | 74 | 0.706031 | 348 | 0.176381 | 0 | 0 | 0 | 0 | 0 | 0 | 1,587 | 0.804359 |
1b7cb10575222f004342449819d9940438677034 | 3,613 | py | Python | python_files/generate_figures/7.2_Perturbations.py | TiKeil/Masterthesis-LOD | 03c1c6748a0464165a666bd9f4f933bc8e4f233b | [
"Apache-2.0"
] | null | null | null | python_files/generate_figures/7.2_Perturbations.py | TiKeil/Masterthesis-LOD | 03c1c6748a0464165a666bd9f4f933bc8e4f233b | [
"Apache-2.0"
] | null | null | null | python_files/generate_figures/7.2_Perturbations.py | TiKeil/Masterthesis-LOD | 03c1c6748a0464165a666bd9f4f933bc8e4f233b | [
"Apache-2.0"
] | 1 | 2020-03-30T08:49:13.000Z | 2020-03-30T08:49:13.000Z | # This file is part of the master thesis "Variational crimes in the Localized orthogonal decomposition method":
# https://github.com/TiKeil/Masterthesis-LOD.git
# Copyright holder: Tim Keil
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
import buildcoef2d
import matplotlib.pyplot as plt
from visualize import drawCoefficient, ExtradrawCoefficient
# Coefficient parameters: background value and value of the inclusions.
background_value = 0.05
inclusion_value = 1
fine_world = np.array([42, 42])

# Generator for a 2d coefficient consisting of small 2x2 horizontal blocks,
# separated by 2 cells and kept away from the boundary.
coefficient_generator = buildcoef2d.Coefficient2d(
    fine_world,
    bg=background_value,          # background
    val=inclusion_value,          # values
    length=2,                     # length
    thick=2,                      # thickness
    space=2,                      # space between values
    probfactor=1,                 # probability of an value
    right=1,                      # shape 1
    down=0,                       # shape 2
    diagr1=0,                     # shape 3
    diagr2=0,                     # shape 4
    diagl1=0,                     # shape 5
    diagl2=0,                     # shape 6
    LenSwitch=None,               # various length
    thickSwitch=None,             # various thickness
    ChannelHorizontal=None,       # horizontal channels
    ChannelVertical=None,         # vertical channels
    BoundarySpace=True,           # additional space on the boundary
)

# Reference coefficient as a numpy array.
reference_coefficient = coefficient_generator.BuildCoefficient()

# Indices of the shapes that get perturbed below.
perturbed_entries = [13, 20, 27, 44, 73]

# Perturbation 1: change the value of the selected shapes.
value_changed = coefficient_generator.SpecificValueChange(
    Number=perturbed_entries,
    ratio=-0.4,
    randomvalue=None,
    negative=None,
    ShapeRestriction=True,
    ShapeWave=None,
    probfactor=1,
    Original=True,
)

# Perturbation 2: make the selected shapes disappear entirely.
vanished = coefficient_generator.SpecificVanish(
    Number=perturbed_entries,
    PartlyVanish=None,
    probfactor=1,
    Original=True,
)

# Perturbation 3: shift the selected shapes by one step in every direction.
moved = coefficient_generator.SpecificMove(
    Number=perturbed_entries,
    steps=1,
    randomstep=None,
    randomDirection=None,
    Right=1,
    BottomRight=1,
    Bottom=1,
    BottomLeft=1,
    Left=1,
    TopLeft=1,
    Top=1,
    TopRight=1,
    Original=True,
)

# The drawing helpers expect flat arrays.
reference_coefficient = reference_coefficient.flatten()
value_changed = value_changed.flatten()
vanished = vanished.flatten()
moved = moved.flatten()

# One figure per coefficient, then a combined overview figure.
for figure_name, coefficient in [("original", reference_coefficient),
                                 ("1", value_changed),
                                 ("2", vanished),
                                 ("3", moved)]:
    plt.figure(figure_name)
    drawCoefficient(fine_world, coefficient)

plt.figure('all')
ExtradrawCoefficient(fine_world, reference_coefficient, value_changed,
                     vanished, moved)
plt.show()
1b7cb45d76ebe48d79805caba5c36070a7bbe517 | 550 | py | Python | lib.py | TakuyaNoguchi/cb_lib | d0254b65126a544ee7564930fe0c6f9441ab8cba | [
"MIT"
] | null | null | null | lib.py | TakuyaNoguchi/cb_lib | d0254b65126a544ee7564930fe0c6f9441ab8cba | [
"MIT"
] | null | null | null | lib.py | TakuyaNoguchi/cb_lib | d0254b65126a544ee7564930fe0c6f9441ab8cba | [
"MIT"
] | null | null | null | import math
# Convert a base-10 integer to its representation in the given base.
def base10to(x: int, base: int) -> str:
    """Return the digits of non-negative *x* written in *base*.

    NOTE: digits are produced with str(), so this is only correct for
    bases up to 10 (base 16 would render 13 as '13', not 'd').
    """
    if x == 0:
        # BUG FIX: the loop never runs for 0, which used to return ''.
        return '0'
    s = ''
    while x > 0:
        s = str(x % base) + s
        x = x // base
    return s
# Least common multiple.
def lcm(a: int, b: int) -> int:
    """Return the least common multiple of *a* and *b*.

    BUG FIX: uses floor division instead of int(a * b / gcd); true division
    goes through a float and silently loses precision once a * b exceeds
    2**53.
    """
    return a * b // math.gcd(a, b)
# List of all primes up to and including n.
def make_primes(n: int) -> list:
    """Return every prime p with p <= n, ascending (sieve of Eratosthenes)."""
    is_prime = [False, False] + ([True] * (n + 1))
    for candidate in range(2, int(n**0.5) + 1):
        if is_prime[candidate]:
            # Cross out every multiple of the surviving candidate.
            for multiple in range(candidate * 2, n + 1, candidate):
                is_prime[multiple] = False
    return [value for value in range(n + 1) if is_prime[value]]
| 19.642857 | 51 | 0.505455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.133898 |
1b7d3e825fb41006762ca249a60864707434a95c | 516 | py | Python | entertainment_tonight/migrations/0003_event_upload_photo.py | ashleyf1996/OOP_Web_Application_Assignment3 | 5c398bf282e8decefaac2ff54a5ec50aff3ab32f | [
"MIT"
] | null | null | null | entertainment_tonight/migrations/0003_event_upload_photo.py | ashleyf1996/OOP_Web_Application_Assignment3 | 5c398bf282e8decefaac2ff54a5ec50aff3ab32f | [
"MIT"
] | null | null | null | entertainment_tonight/migrations/0003_event_upload_photo.py | ashleyf1996/OOP_Web_Application_Assignment3 | 5c398bf282e8decefaac2ff54a5ec50aff3ab32f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-06 20:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds the 'upload_photo' column to the 'event' model.
    dependencies = [
        ('entertainment_tonight', '0002_auto_20170321_1517'),
    ]
    operations = [
        migrations.AddField(
            model_name='event',
            name='upload_photo',
            # NOTE(review): default=1 (an int) on a CharField is unusual; it
            # is only used to back-fill existing rows -- confirm intent.
            field=models.CharField(default=1, max_length=200),
            preserve_default=False,
        ),
    ]
| 23.454545 | 62 | 0.629845 | 358 | 0.693798 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.271318 |
1b7db79014deaf78f87cbb4258acd5febac0be17 | 2,029 | py | Python | tests/test_models.py | epandurski/swp_accounts | 6fefe98b7fde945e2d6ea4f536acc576a59d9915 | [
"MIT"
] | null | null | null | tests/test_models.py | epandurski/swp_accounts | 6fefe98b7fde945e2d6ea4f536acc576a59d9915 | [
"MIT"
] | 2 | 2022-03-08T20:49:24.000Z | 2022-03-08T20:49:24.000Z | tests/test_models.py | epandurski/swpt_accounts | 9bcb5349d700bcf3d17928ab7bd63440a4a6f938 | [
"MIT"
] | null | null | null | from datetime import datetime, date, timezone, timedelta
from swpt_accounts.models import Account
# Debtor and creditor ids used by the tests below.
D_ID = -1
C_ID = 1
def test_sibnalbus_burst_count(app):
    """Every signal model must expose an integer signalbus_burst_count."""
    from swpt_accounts import models as m
    signal_models = [
        m.RejectedTransferSignal,
        m.PreparedTransferSignal,
        m.FinalizedTransferSignal,
        m.AccountTransferSignal,
        m.AccountUpdateSignal,
        m.AccountPurgeSignal,
        m.RejectedConfigSignal,
        m.PendingBalanceChangeSignal,
    ]
    for signal_model in signal_models:
        assert isinstance(signal_model.signalbus_burst_count, int)
def test_configure_account():
    """Exercise Account.calc_due_interest() over various time intervals."""
    one_year = timedelta(days=365.25)
    now = datetime.now(tz=timezone.utc)
    two_years_ago = now - 2 * one_year
    account = Account(
        debtor_id=D_ID,
        creditor_id=C_ID,
        creation_date=date(1970, 1, 1),
        principal=1000,
        total_locked_amount=0,
        pending_transfers_count=0,
        last_transfer_id=0,
        status_flags=0,
        last_change_ts=now,
        previous_interest_rate=0.0,
        last_interest_rate_change_ts=now - one_year,
        interest_rate=10.0,
    )
    # Over the two-year interval, 1000 units accrue ~100 (and -1000 ~ -100).
    assert abs(account.calc_due_interest(1000, two_years_ago, now) - 100) < 1e-12
    assert abs(account.calc_due_interest(-1000, two_years_ago, now) + 100) < 1e-12
    # Degenerate or reversed intervals accrue nothing.
    assert account.calc_due_interest(1000, two_years_ago, two_years_ago) == 0
    assert account.calc_due_interest(1000, now, now) == 0
    assert account.calc_due_interest(1000, now, two_years_ago) == 0
    # A single most-recent day accrues ~0.26098 on 1000 units.
    one_day_interest = account.calc_due_interest(
        1000, now - timedelta(days=1), now)
    assert abs(one_day_interest - 0.26098) < 1e-3
    # A day at the very start of the interval accrues exactly nothing.
    first_day_interest = account.calc_due_interest(
        1000, two_years_ago, two_years_ago + timedelta(days=1))
    assert abs(first_day_interest) == 0
| 38.283019 | 87 | 0.739773 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1b7ed426c8537d2c5b9aac0eec06e951c8f52d4b | 1,735 | py | Python | examples/socket/asr_socketClient.py | marvin-nj/py-kaldi-asr | 1f16ba0bb1029ec2549d5cd6062e612db562ba32 | [
"Apache-2.0"
] | 1 | 2021-03-29T08:11:58.000Z | 2021-03-29T08:11:58.000Z | examples/socket/asr_socketClient.py | marvin-nj/py-kaldi-asr | 1f16ba0bb1029ec2549d5cd6062e612db562ba32 | [
"Apache-2.0"
] | null | null | null | examples/socket/asr_socketClient.py | marvin-nj/py-kaldi-asr | 1f16ba0bb1029ec2549d5cd6062e612db562ba32 | [
"Apache-2.0"
] | 1 | 2021-03-29T08:11:59.000Z | 2021-03-29T08:11:59.000Z | # -*- coding: UTF-8 -*-
import socket
import pyaudio
import numpy as np
import time
import logging
# Address of the ASR socket server.
address = ('127.0.0.1', 8301)
# Audio capture parameters: 8 kHz, 16-bit, mono, 256 frames per chunk.
RATE = 8000
RECORD_SECONDS = 10 #recording duration, in seconds
FORMAT = pyaudio.paInt16
CHANNELS = 1
CHUNK=256
DEBUG=1
def start_client ():
    """Stream RECORD_SECONDS of microphone audio to the ASR server.

    Raw PCM chunks are sent over TCP; every non-blank partial result
    returned by the server is logged.  After the recording window an
    end-of-stream marker is sent and the final result is logged.
    """
    # socket init
    tcpClient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    tcpClient.connect(address)
    logging.info(" connect to %s:%s OK" % ( address[0],address[1]))
    # pyaudio init: open the default input device for recording
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK)
    logging.info("Please speak.")
    try:
        # Send audio for RECORD_SECONDS, one CHUNK at a time, reading back a
        # partial transcription after every chunk.
        cnt = 0
        for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
            samples = stream.read(CHUNK)
            tcpClient.send(samples)
            msg = tcpClient.recv(1024).decode("utf-8")
            if msg != " ":
                logging.debug("result: %s " % msg)
            cnt = cnt + 1
            logging.debug ("audio length: %d, recv count : %d " % (len(samples),cnt))
        # End-of-stream marker: a length-1 zero-valued array; the server does
        # not support any other terminator yet.
        eos = np.zeros(1)
        tcpClient.send(bytes(eos))
        msg = tcpClient.recv(1024).decode("utf-8")
        logging.info("final result: %s " % msg )
    finally:
        # BUG FIX: always release the audio device and close the socket,
        # even if the network connection drops mid-stream.
        stream.stop_stream()
        stream.close()
        p.terminate()
        tcpClient.close()
if __name__ == '__main__':
    logfile = "log.asr_server"  # NOTE(review): assigned but never used below
    # DEBUG toggles verbosity; logging always goes to the default stream.
    level = logging.DEBUG if DEBUG else logging.INFO
    logging.basicConfig(filename="", level=level)
    time_start = time.time()
    start_client()
    logging.info("** total time : %8.2fs" % (time.time() - time_start))
1b8172a9213dc0fc85ea4cac4402ab2fd75468c4 | 293 | py | Python | Topic09-errors/myfunction.py | mizydorek/pands-problems-2020 | a418dcc58e49dfbcb269e4524f676c1c6a0a6255 | [
"MIT"
] | null | null | null | Topic09-errors/myfunction.py | mizydorek/pands-problems-2020 | a418dcc58e49dfbcb269e4524f676c1c6a0a6255 | [
"MIT"
] | null | null | null | Topic09-errors/myfunction.py | mizydorek/pands-problems-2020 | a418dcc58e49dfbcb269e4524f676c1c6a0a6255 | [
"MIT"
] | null | null | null | # Calculate factorial of a number
def factorial(n):
    ''' Returns the factorial of n.
        e.g. factorial(7) = 7x6x5x4x3x2x1 = 5040.
        (DOC FIX: the example previously read "7x76x5...".)
        For n < 1 the loop body never runs and 1 is returned.
    '''
    answer = 1
    for i in range(1, n + 1):
        answer = answer * i
    return answer
if __name__ == "__main__":
    # Smoke test: 7! must equal 5040.
    assert factorial(7) == 5040
1b81dd0d1830c6f75ae724553d61f0767f0550dc | 1,696 | py | Python | developers_chamber/scripts/version.py | radimsuckr/developers-chamber | b29306a33c59473184206ad9f5354313f032a6c3 | [
"MIT"
] | null | null | null | developers_chamber/scripts/version.py | radimsuckr/developers-chamber | b29306a33c59473184206ad9f5354313f032a6c3 | [
"MIT"
] | null | null | null | developers_chamber/scripts/version.py | radimsuckr/developers-chamber | b29306a33c59473184206ad9f5354313f032a6c3 | [
"MIT"
] | null | null | null | import os
import click
from developers_chamber.scripts import cli
from developers_chamber.version_utils import bump_to_next_version as bump_to_next_version_func
from developers_chamber.version_utils import get_next_version, get_version
from developers_chamber.types import EnumType, ReleaseType
# Comma-separated list of version files, overridable via $VERSION_FILES.
default_version_files = os.environ.get('VERSION_FILES', 'version.json').split(',')
# CLI command: bump the version stored in one or more JSON files in place
# and echo the new version.  --file may be repeated (multiple=True).  The
# function docstring doubles as the click help text and is kept verbatim.
@cli.command()
@click.option('--release_type', help='release type', type=EnumType(ReleaseType), required=True)
@click.option('--build_hash', help='hash of the build', type=str)
@click.option('--file', help='path to the version file', type=str, default=default_version_files, required=True,
              multiple=True)
def version_bump_to_next(release_type, build_hash, file):
    """
    Bump JSON file (or files) version number
    """
    click.echo(bump_to_next_version_func(release_type, build_hash, file))
# CLI command: echo the current version read from a single version file.
# The function docstring doubles as the click help text and is kept verbatim.
@cli.command()
@click.option('--file', help='path to the version file', type=str, default=default_version_files[0], required=True)
def version_print(file):
    """
    Return current project version according to version JSON file
    """
    click.echo(get_version(file))
# CLI command: echo what the version would become after a bump of the given
# release type, without modifying the file.  The function docstring doubles
# as the click help text and is kept verbatim.
@cli.command()
@click.option('--release_type', help='release type', type=EnumType(ReleaseType), required=True)
@click.option('--build_hash', help='hash of the build', type=str)
@click.option('--file', help='path to the version file', type=str, default=default_version_files[0], required=True)
def version_print_next(release_type, build_hash, file):
    """
    Return next version according to input release type, build hash and version JSON file
    """
    click.echo(get_next_version(release_type, build_hash, file))
| 38.545455 | 116 | 0.751179 | 0 | 0 | 0 | 0 | 1,305 | 0.769458 | 0 | 0 | 494 | 0.291274 |
1b82fee94cf72f5f38cff15f6ac47f29bd71f75c | 78 | py | Python | Practice/buttons.py | ashishjayamohan/competitive-programming | 05c5c560c2c2eb36121c52693b8c7d084f435f9e | [
"MIT"
] | null | null | null | Practice/buttons.py | ashishjayamohan/competitive-programming | 05c5c560c2c2eb36121c52693b8c7d084f435f9e | [
"MIT"
] | null | null | null | Practice/buttons.py | ashishjayamohan/competitive-programming | 05c5c560c2c2eb36121c52693b8c7d084f435f9e | [
"MIT"
] | null | null | null | line = input().split()
# Parse the first two whitespace-separated integers of the input line.
n = int(line[0])
m = int(line[1])
# Output the absolute difference of the two numbers.
print(str(abs(n-m)))
| 15.6 | 22 | 0.576923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1b834a2c0a248402fca286b2a1394e08ad414754 | 12,470 | py | Python | PuTTY/putty.py | caltaojihun/Packages | 58811a7fdebbdc60327d3d1cf44eb2335fc2f2b9 | [
"Zlib"
] | null | null | null | PuTTY/putty.py | caltaojihun/Packages | 58811a7fdebbdc60327d3d1cf44eb2335fc2f2b9 | [
"Zlib"
] | null | null | null | PuTTY/putty.py | caltaojihun/Packages | 58811a7fdebbdc60327d3d1cf44eb2335fc2f2b9 | [
"Zlib"
] | null | null | null | # Keypirinha: a fast launcher for Windows (keypirinha.com)
import keypirinha as kp
import keypirinha_util as kpu
import os.path
import winreg
import urllib.parse
class PuTTY(kp.Plugin):
"""
Launch PuTTY sessions.
This plugin automatically detects the installed version of the official
PuTTY distribution and lists its configured sessions so they can be launched
directly without having to pass through the sessions selection dialog. The
portable version of PuTTY can also be registered in package's dedicated
configuration file.
"""
DIST_SECTION_PREFIX = "dist/" # lower case
EXE_NAME_OFFICIAL = "PUTTY.EXE"
EXE_NAME_PAPPS = "PuTTYPortable.exe"
default_icon_handle = None
distros = {}
    def __init__(self):
        # No plugin-specific state to set up; defer to the base class.
        super().__init__()
    def on_start(self):
        # Load/refresh the plugin configuration when the plugin starts.
        self._read_config()
def on_catalog(self):
self._read_config()
catalog = []
for distro_name, distro in self.distros.items():
if not distro['enabled']:
continue
# catalog the executable
catalog.append(self.create_item(
category=kp.ItemCategory.FILE,
label=distro['label'],
short_desc="",
target=distro['exe_file'],
args_hint=kp.ItemArgsHint.ACCEPTED,
hit_hint=kp.ItemHitHint.KEEPALL))
# catalog the configured sessions, if any
for session_name in distro['sessions']:
catalog.append(self.create_item(
category=kp.ItemCategory.REFERENCE,
label="{}: {}".format(distro['label'], session_name),
short_desc='Launch {} "{}" session'.format(
distro['label'], session_name),
target=kpu.kwargs_encode(
dist=distro_name, session=session_name),
args_hint=kp.ItemArgsHint.FORBIDDEN,
hit_hint=kp.ItemHitHint.NOARGS))
self.set_catalog(catalog)
def on_suggest(self, user_input, items_chain):
if items_chain and items_chain[-1].category() == kp.ItemCategory.FILE:
clone = items_chain[-1].clone()
clone.set_args(user_input)
self.set_suggestions([clone])
def on_execute(self, item, action):
if item.category() == kp.ItemCategory.FILE:
kpu.execute_default_action(self, item, action)
return
if item.category() != kp.ItemCategory.REFERENCE:
return
# extract info from item's target property
try:
item_target = kpu.kwargs_decode(item.target())
distro_name = item_target['dist']
session_name = item_target['session']
except Exception as exc:
self.dbg(str(exc))
return
# check if the desired distro is available and enabled
if distro_name not in self.distros:
self.warn('Could not execute item "{}". Distro "{}" not found.'.format(item.label(), distro_name))
return
distro = self.distros[distro_name]
if not distro['enabled']:
self.warn('Could not execute item "{}". Distro "{}" is disabled.'.format(item.label(), distro_name))
return
# check if the desired session still exists
if session_name not in distro['sessions']:
self.warn('Could not execute item "{}". Session "{}" not found in distro "{}".'.format(item.label(), session_name, distro_name))
return
# find the placeholder of the session name in the args list and execute
sidx = distro['cmd_args'].index('%1')
kpu.shell_execute(
distro['exe_file'],
args=distro['cmd_args'][0:sidx] + [session_name] + distro['cmd_args'][sidx+1:])
def on_events(self, flags):
if flags & kp.Events.PACKCONFIG:
self.info("Configuration changed, rebuilding catalog...")
self.on_catalog()
def _read_config(self):
if self.default_icon_handle:
self.default_icon_handle.free()
self.default_icon_handle = None
self.distros = {}
settings = self.load_settings()
for section_name in settings.sections():
if not section_name.lower().startswith(self.DIST_SECTION_PREFIX):
continue
dist_name = section_name[len(self.DIST_SECTION_PREFIX):]
detect_method = getattr(self, "_detect_distro_{}".format(dist_name.lower()), None)
if not detect_method:
self.err("Unknown PuTTY distribution name: ", dist_name)
continue
dist_path = settings.get_stripped("path", section_name)
dist_enable = settings.get_bool("enable", section_name)
dist_props = detect_method(
dist_enable,
settings.get_stripped("label", section_name),
dist_path)
if not dist_props:
if dist_path:
self.warn('PuTTY distribution "{}" not found in: {}'.format(dist_name, dist_path))
elif dist_enable:
self.warn('PuTTY distribution "{}" not found'.format(dist_name))
continue
self.distros[dist_name.lower()] = {
'orig_name': dist_name,
'enabled': dist_props['enabled'],
'label': dist_props['label'],
'exe_file': dist_props['exe_file'],
'cmd_args': dist_props['cmd_args'],
'sessions': dist_props['sessions']}
if dist_props['enabled'] and not self.default_icon_handle:
self.default_icon_handle = self.load_icon(
"@{},0".format(dist_props['exe_file']))
if self.default_icon_handle:
self.set_default_icon(self.default_icon_handle)
def _detect_distro_official(self, given_enabled, given_label, given_path):
dist_props = {
'enabled': given_enabled,
'label': given_label,
'exe_file': None,
'cmd_args': ['-load', '%1'],
'sessions': []}
# label
if not dist_props['label']:
dist_props['label'] = "PuTTY"
# enabled? don't go further if not
if dist_props['enabled'] is None:
dist_props['enabled'] = True
if not dist_props['enabled']:
return dist_props
# find executable
exe_file = None
if given_path:
exe_file = os.path.normpath(os.path.join(given_path, self.EXE_NAME_OFFICIAL))
if not os.path.exists(exe_file):
exe_file = None
if not exe_file:
exe_file = self._autodetect_official_installreg()
if not exe_file:
exe_file = self._autodetect_startmenu(self.EXE_NAME_OFFICIAL, "PuTTY.lnk")
if not exe_file:
exe_file = self._autodetect_official_progfiles()
if not exe_file:
exe_file = self._autodetect_path(self.EXE_NAME_OFFICIAL)
#if not exe_file:
# exe_file = self._autodetect_startmenu(self.EXE_NAME_OFFICIAL, "*putty*.lnk")
if not exe_file:
return None
dist_props['exe_file'] = exe_file
# list configured sessions
try:
hkey = winreg.OpenKey(
winreg.HKEY_CURRENT_USER,
'Software\\SimonTatham\\PuTTY\\Sessions')
index = 0
while True:
try:
dist_props['sessions'].append(urllib.parse.unquote(
winreg.EnumKey(hkey, index), encoding='mbcs'))
index += 1
except OSError:
break
winreg.CloseKey(hkey)
except OSError:
pass
return dist_props
def _detect_distro_portableapps(self, given_enabled, given_label, given_path):
dist_props = {
'enabled': given_enabled,
'label': given_label,
'exe_file': None,
'cmd_args': ['-load', '%1'],
'sessions': []}
# label
if not dist_props['label']:
dist_props['label'] = "PuTTY Portable"
# enabled? don't go further if not
if dist_props['enabled'] is None:
dist_props['enabled'] = False
if not dist_props['enabled']:
return dist_props
# find executable
exe_file = None
if given_path:
exe_file = os.path.normpath(os.path.join(given_path, self.EXE_NAME_PAPPS))
if not os.path.exists(exe_file):
exe_file = None
if not exe_file:
exe_file = self._autodetect_path(self.EXE_NAME_PAPPS)
if not exe_file:
exe_file = self._autodetect_startmenu(self.EXE_NAME_PAPPS, "*putty*.lnk")
if not exe_file:
return None
dist_props['exe_file'] = exe_file
# list configured sessions
reg_file = os.path.join(os.path.split(exe_file)[0], "data", "settings", "putty.reg")
reg_prefix = "[hkey_current_user\\software\\simontatham\\putty\\sessions\\"
try:
reg_content = kpu.chardet_slurp(reg_file)
except Exception:
self.err("Failed to read file:", reg_file)
return None
for reg_line in iter(reg_content.splitlines()):
if reg_line.lower().startswith(reg_prefix) and reg_line.endswith(']'):
dist_props['sessions'].append(urllib.parse.unquote(
reg_line[len(reg_prefix):-1], encoding='mbcs')) # important! putty uses the current code page
reg_content = None
return dist_props
def _autodetect_official_installreg(self):
try:
key = winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
"SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\PUTTY_is1",
access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY)
value = winreg.QueryValueEx(key, "InstallLocation")[0]
winreg.CloseKey(key)
exe_file = os.path.join(value, self.EXE_NAME_OFFICIAL)
if os.path.exists(exe_file):
return exe_file
except:
pass
return None
def _autodetect_official_progfiles(self):
for hive in ('%PROGRAMFILES%', '%PROGRAMFILES(X86)%'):
exe_file = os.path.join(
os.path.expandvars(hive), "PuTTY", self.EXE_NAME_OFFICIAL)
if os.path.exists(exe_file):
return exe_file
def _autodetect_startmenu(self, exe_name, name_pattern):
known_folders = (
"{625b53c3-ab48-4ec1-ba1f-a1ef4146fc19}", # FOLDERID_StartMenu
"{a4115719-d62e-491d-aa7c-e74b8be3b067}") # FOLDERID_CommonStartMenu
found_link_files = []
for kf_guid in known_folders:
try:
known_dir = kpu.shell_known_folder_path(kf_guid)
found_link_files += [
os.path.join(known_dir, f)
for f in kpu.scan_directory(
known_dir, name_pattern, kpu.ScanFlags.FILES, -1)]
except Exception as exc:
self.dbg(str(exc))
pass
for link_file in found_link_files:
try:
link_props = kpu.read_link(link_file)
if (link_props['target'].lower().endswith(exe_name) and
os.path.exists(link_props['target'])):
return link_props['target']
except Exception as exc:
self.dbg(str(exc))
pass
return None
def _autodetect_path(self, exe_name):
path_dirs = [
os.path.expandvars(p.strip())
for p in os.getenv("PATH", "").split(";") if p.strip() ]
for path_dir in path_dirs:
exe_file = os.path.join(path_dir, exe_name)
if os.path.exists(exe_file):
return exe_file
return None
| 37.902736 | 141 | 0.557979 | 12,296 | 0.986047 | 0 | 0 | 0 | 0 | 0 | 0 | 2,502 | 0.200642 |
1b83893f087fa124b92468cb84f5165e0e3c0d9a | 3,737 | py | Python | tests/graphical/one_view.py | vishalbelsare/cgpm | 56a481829448bddc9cdfebd42f65023287d5b7c7 | [
"Apache-2.0"
] | 26 | 2016-12-17T10:39:30.000Z | 2020-10-28T14:16:42.000Z | tests/graphical/one_view.py | vishalbelsare/cgpm | 56a481829448bddc9cdfebd42f65023287d5b7c7 | [
"Apache-2.0"
] | 120 | 2016-09-05T20:53:20.000Z | 2021-11-24T18:32:27.000Z | tests/graphical/one_view.py | vishalbelsare/cgpm | 56a481829448bddc9cdfebd42f65023287d5b7c7 | [
"Apache-2.0"
] | 8 | 2017-08-02T23:12:50.000Z | 2021-11-16T07:20:24.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from cgpm.crosscat.state import State
from cgpm.utils import config as cu
from cgpm.utils import general as gu
from cgpm.utils import general as gu
from cgpm.utils import test as tu
# Set up the data generation
cctypes, distargs = cu.parse_distargs(
['normal',
'poisson',
'bernoulli',
'categorical(k=4)',
'lognormal',
'exponential',
'beta',
'geometric',
'vonmises'])
T, Zv, Zc = tu.gen_data_table(
200, [1], [[.25, .25, .5]], cctypes, distargs,
[.95]*len(cctypes), rng=gu.gen_rng(10))
state = State(T.T, cctypes=cctypes, distargs=distargs, rng=gu.gen_rng(312))
state.transition(N=10, progress=1)
def test_crash_simulate_joint(state):
state.simulate(-1, [0, 1, 2, 3, 4, 5, 6, 7, 8], N=10)
def test_crash_logpdf_joint(state):
state.logpdf(-1, {0:1, 1:2, 2:1, 3:3, 4:1, 5:10, 6:.4, 7:2, 8:1.8})
def test_crash_simulate_conditional(state):
state.simulate(-1, [1, 4, 5, 6, 7, 8], {0:1, 2:1, 3:3}, None, 10)
def test_crash_logpdf_conditional(state):
state.logpdf(
-1, {1:2, 4:1, 5:10, 6:.4, 7:2, 8:1.8}, {0:1, 2:1, 3:3})
def test_crash_simulate_joint_observed(state):
state.simulate(1, [0, 1, 2, 3, 4, 5, 6, 7, 8], None, None, 10)
def test_crash_logpdf_joint_observed(state):
with pytest.raises(ValueError):
state.logpdf(1, {0:1, 1:2, 2:1, 3:3, 4:1, 5:10, 6:.4, 7:2, 8:1.8})
def test_crash_simulate_conditional_observed(state):
with pytest.raises(ValueError):
state.simulate(1, [1, 4, 5, 6, 7, 8], {0:1, 2:1, 3:3}, None, 10)
def test_crash_logpdf_conditional_observed(state):
with pytest.raises(ValueError):
state.logpdf(
1, {1:2, 4:1, 5:10, 6:.4, 7:2, 8:1.8}, {0:1, 2:1, 3:3})
# Plot!
state.plot()
# Run some solid checks on a complex state.
test_crash_simulate_joint(state)
test_crash_logpdf_joint(state)
test_crash_simulate_conditional(state)
test_crash_logpdf_conditional(state)
test_crash_simulate_joint_observed(state)
test_crash_logpdf_joint_observed(state)
test_crash_simulate_conditional_observed(state)
test_crash_logpdf_conditional_observed(state)
# Joint equals chain rule for state 1.
joint = state.logpdf(-1, {0:1, 1:2})
chain = state.logpdf(-1, {0:1}, {1:2}) + state.logpdf(-1, {1:2})
assert np.allclose(joint, chain)
if False:
state2 = State(T.T, cctypes=cctypes, distargs=distargs, rng=gu.gen_rng(12))
state2.transition(N=10, progress=1)
# Joint equals chain rule for state 2.
state2.logpdf(-1, {0:1, 1:2})
state2.logpdf(-1, {0:1}, {1:2}) + state2.logpdf(-1, {1:2})
# Take the Monte Carlo average of the conditional.
mc_conditional = np.log(.5) + gu.logsumexp([
state.logpdf(-1, {0:1}, {1:2}),
state2.logpdf(-1, {0:1}, {1:2})
])
# Take the Monte Carlo average of the joint.
mc_joint = np.log(.5) + gu.logsumexp([
state.logpdf(-1, {0:1, 1:2}),
state2.logpdf(-1, {0:1, 1:2})
])
# Take the Monte Carlo average of the marginal.
mc_marginal = np.log(.5) + gu.logsumexp([
state.logpdf(-1, {1:2}),
state2.logpdf(-1, {1:2})
])
| 31.940171 | 79 | 0.666042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,006 | 0.2692 |
1b841323151f94cecfae15579531c3557720f55a | 3,401 | py | Python | app.py | Tokky-tane/todo_server | 08d4127cf31d9dd387abdfdc7545ead21e5d74ff | [
"Apache-2.0"
] | null | null | null | app.py | Tokky-tane/todo_server | 08d4127cf31d9dd387abdfdc7545ead21e5d74ff | [
"Apache-2.0"
] | null | null | null | app.py | Tokky-tane/todo_server | 08d4127cf31d9dd387abdfdc7545ead21e5d74ff | [
"Apache-2.0"
] | null | null | null | import os
import datetime
import dateutil.parser
from flask import Flask, request, Response
from flask_api import status
from database import close_db
from crud_task import get_all_tasks, delete_all_tasks, create_task, delete_user_tasks, get_user_tasks, delete_task, get_task, update_task, exist_task
import firebase_admin
from firebase_admin import credentials, auth
from dotenv import find_dotenv, load_dotenv
app = Flask(__name__)
app.teardown_appcontext(close_db)
load_dotenv(find_dotenv())
cred = credentials.Certificate({
"type": os.environ['FIREBASE_TYPE'],
"project_id": os.environ['FIREBASE_PROJECT_ID'],
"private_key_id": os.environ['FIREBASE_PRIVATE_KEY_ID'],
"private_key": os.environ['FIREBASE_PRIVATE_KEY'].replace('\\n', '\n'),
"client_email": os.environ['FIREBASE_CLIENT_EMAIL'],
"client_id": os.environ['FIREBASE_CLIENT_ID'],
"auth_uri": os.environ['FIREBASE_AUTH_URI'],
"token_uri": os.environ['FIREBASE_TOKEN_URI'],
"auth_provider_x509_cert_url": os.environ['FIREBASE_AUTH_PROVIDER_CERT_URL'],
"client_x509_cert_url": os.environ['FIREBASE_CLIENT_CERT_URL']
})
firebase_admin.initialize_app(cred)
@app.route('/users/me/tasks', methods=['GET', 'POST', 'DELETE'])
def route_users_tasks():
token = request.headers.get('Authorization')
try:
user_id = auth.verify_id_token(token)['uid']
except ValueError:
return '', status.HTTP_401_UNAUTHORIZED
except auth.AuthError:
return '', status.HTTP_401_UNAUTHORIZED
if request.method == 'GET':
updated_at_min = request.args.get('updated_at_min')
datetime_updated_at_min = None
if(updated_at_min is not None):
datetime_updated_at_min = dateutil.parser.isoparse(updated_at_min)
tasks = get_user_tasks(user_id, datetime_updated_at_min)
response = Response(mimetype='applicaiotn/json')
response.set_data(tasks)
return response
elif request.method == 'POST':
title = request.json['title']
due_date = request.json['due_date']
if(due_date is not None):
due_date = dateutil.parser.isoparse(due_date)
id = create_task(user_id, title, due_date)
location = '/users/me/tasks/{}'.format(id)
response = Response(status=status.HTTP_201_CREATED)
response.headers['location'] = location
return response
else:
delete_user_tasks(user_id)
return Response(status=status.HTTP_204_NO_CONTENT)
@app.route('/users/me/tasks/<int:task_id>', methods=['GET', 'PUT', 'DELETE'])
def route_task(task_id):
token = request.headers.get('Authorization')
try:
auth.verify_id_token(token)
except ValueError:
return '', status.HTTP_401_UNAUTHORIZED
except auth.AuthError:
return '', status.HTTP_401_UNAUTHORIZED
if exist_task(task_id) == False:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
task = get_task(task_id)
response = Response(mimetype='application/json')
response.set_data(task)
return response
elif request.method == 'PUT':
title = request.json['title']
due_date = request.json['due_date']
update_task(task_id, title, due_date)
return '', status.HTTP_200_OK
else:
delete_task(task_id)
return '', status.HTTP_204_NO_CONTENT
| 32.701923 | 149 | 0.69656 | 0 | 0 | 0 | 0 | 2,238 | 0.658042 | 0 | 0 | 646 | 0.189944 |
1b844fc8072efe4493670c7cec4e521cf36cb60d | 927 | py | Python | BestMovies/TMDBClient.py | brunoluizcs/fiap | 1c42eba68dddd81a73c07584b7e52a2801e73fc0 | [
"Apache-2.0"
] | null | null | null | BestMovies/TMDBClient.py | brunoluizcs/fiap | 1c42eba68dddd81a73c07584b7e52a2801e73fc0 | [
"Apache-2.0"
] | null | null | null | BestMovies/TMDBClient.py | brunoluizcs/fiap | 1c42eba68dddd81a73c07584b7e52a2801e73fc0 | [
"Apache-2.0"
] | null | null | null | import requests
class TMDBClient:
BASE_URL = "https://api.themoviedb.org/3/discover/movie"
API_KEY = ""
def request_best_movies_from_year(self, year):
params = {
'language': 'pt-BR',
'primary_release_year': year,
'sort_by': 'vote_average.desc',
'api_key': self.API_KEY,
'vote_count.gte': 100
}
r = requests.get(self.BASE_URL, params)
data = r.json()
movies = []
for d in data["results"]:
movie = {
'id': d["id"],
'title': d["title"],
'original_title': d["original_title"],
'release_date': d["release_date"],
'overview': d["overview"],
'vote_average': d["vote_average"],
'vote_count': d["vote_count"]
}
movies.append(movie)
return movies
| 22.071429 | 60 | 0.482201 | 899 | 0.969795 | 0 | 0 | 0 | 0 | 0 | 0 | 302 | 0.325782 |
1b84fddae5fbb8ef03fd12251b718e4f86816d5a | 746 | py | Python | segme/model/f3_net/tests/test_decoder.py | shkarupa-alex/segme | d5bc0043f9e709c8ccaf8949d662bc6fd6144006 | [
"MIT"
] | 2 | 2021-05-25T18:53:00.000Z | 2021-05-26T12:11:41.000Z | segme/model/f3_net/tests/test_decoder.py | shkarupa-alex/segme | d5bc0043f9e709c8ccaf8949d662bc6fd6144006 | [
"MIT"
] | null | null | null | segme/model/f3_net/tests/test_decoder.py | shkarupa-alex/segme | d5bc0043f9e709c8ccaf8949d662bc6fd6144006 | [
"MIT"
] | 2 | 2021-11-21T02:39:37.000Z | 2021-12-08T07:26:56.000Z | import tensorflow as tf
from keras import keras_parameterized
from ..decoder import Decoder
from ....testing_utils import layer_multi_io_test
@keras_parameterized.run_all_keras_modes
class TestDecoder(keras_parameterized.TestCase):
    """Smoke test for the F3Net ``Decoder`` layer across all Keras modes."""

    def test_layer(self):
        # Four backbone feature maps of decreasing resolution feed the decoder.
        feature_shapes = [(2, 32, 32, 3), (2, 16, 16, 4), (2, 8, 8, 5), (2, 4, 4, 6)]
        out_shapes = [
            (None, 32, 32, 7), (None, 16, 16, 7), (None, 8, 8, 7),
            (None, 4, 4, 6), (None, 32, 32, 7)]
        layer_multi_io_test(
            Decoder,
            kwargs={'refine': False, 'filters': 7},
            input_shapes=feature_shapes,
            input_dtypes=['float32'] * len(feature_shapes),
            expected_output_shapes=out_shapes,
            expected_output_dtypes=['float32'] * len(out_shapes))
if __name__ == '__main__':
    # Run all tests in this module through TensorFlow's test runner.
    tf.test.main()
| 31.083333 | 107 | 0.592493 | 512 | 0.686327 | 0 | 0 | 553 | 0.741287 | 0 | 0 | 45 | 0.060322 |
1b850dbfb0c60c1aeafce298fa429a15013488c3 | 20,047 | py | Python | PassPY0.2Win.py | IvaldiS6/PassPYWin | 25ffc73392166fd85a8590866c5b65fd0aece72c | [
"MIT"
] | null | null | null | PassPY0.2Win.py | IvaldiS6/PassPYWin | 25ffc73392166fd85a8590866c5b65fd0aece72c | [
"MIT"
] | 1 | 2021-09-15T20:02:36.000Z | 2021-09-15T20:02:36.000Z | PassPY0.2Win.py | IvaldiS6/PassPYWin | 25ffc73392166fd85a8590866c5b65fd0aece72c | [
"MIT"
] | 1 | 2021-09-15T21:49:40.000Z | 2021-09-15T21:49:40.000Z | import codecs
import os
import os.path
import string
import random
from random import shuffle
import csv
import time
import hashlib
import struct
import binascii
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
# Resize the Windows console to a fixed 100x20 window via the ``mode con`` command.
DefaultSize = "mode con: cols=100 lines=20"
os.system(DefaultSize)
# Application data directory; created on first run if missing.
pre = "C:\ProgramData\PassPY"
if not os.path.exists(pre):
    os.makedirs(pre)
# Module-level placeholders used by the menu functions below.
account = ""
cypher = ""
username = ""
user_name = ""
m = ""
def clrscr():
    """Clear the console using the platform's native clear command."""
    # POSIX systems (macOS/Linux) provide ``clear``; Windows (os.name == 'nt')
    # provides ``cls`` instead.
    command = 'clear' if os.name == 'posix' else 'cls'
    os.system(command)
def logo():
    # ASCII-art "PassPY" banner; the literal must stay byte-identical to render.
    print("________ __________ __\n___ __ \_____ _________________ __ \ \/ /\n__ /_/ / __ `/_ ___/_ ___/_ /_/ /_ / \n_ ____// /_/ /_(__ )_(__ )_ ____/_ / \n/_/ \__,_/ /____/ /____/ /_/ /_/ \n\n\n\n\n")
def header():
    # Reset the console to a known state: clear the screen, restore the fixed
    # window size (DefaultSize), then redraw the banner. Order matters.
    clrscr()
    os.system(DefaultSize)
    logo()
def PassGen(user_name,acc,uN,pre):
    """Create a password for account *acc* / login *uN* and append it to the vault.

    user_name is the hex-encoded PassPY user name; pre is the data directory.
    Each stored line is: enc(acc) TAB enc(uN) TAB enc(pW) TAB lineHash TAB checksum,
    where lineHash lets the AES key be re-derived when decrypting later.
    """
    header()
    x = ''
    x = input("1: Have PassPY generate a password with a length you choose for " + acc + "\n2: Type your own password for " + acc + "\n")
    if x == '1':
        header()
        length = float(input("How many characters would you like the password to be for " + acc + "? \n"))
        # Split the requested length into thirds: letters, digits, punctuation
        # (the remainder goes to punctuation), then shuffle them together.
        div = int(length/3)
        r = int(length%3)
        seed = string.ascii_letters # Generating letters
        letters = ( ''. join(random.choice(seed) for i in range(div)) )
        seed = string.digits # generating digits
        numbers = ( ''.join(random.choice(seed) for i in range(div)) )
        seed = string.punctuation # generating punctuation
        punctuation = ( ''.join(random.choice(seed) for i in range(div + r)) )
        hold = letters + numbers + punctuation
        pW = ( ''.join(random.sample(hold, len(hold))))
        print("here is the generated password: " + pW)
        # Key = SHA-256( SHA-256(user_name) + SHA-256(acc+uN+pW) ); lineHash is
        # written to the file so the key can be rebuilt for decryption.
        preKey = acc + uN + pW
        lineHash = hashlib.sha256(preKey.encode('utf-8'))
        half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
        lineHashHexidecimal = lineHash.hexdigest()
        smosh = hashlib.sha256(bytes(half + lineHashHexidecimal, 'utf8'))
        key = smosh.digest()
        # NOTE(review): the CTR nonce is all zero bytes and reused for every
        # field -- confirm whether this keystream reuse is acceptable here.
        iv = bytes(int(len(key)/2))
        acc = bytes(acc, 'utf8')
        uN = bytes(uN, 'utf8')
        pW = bytes(pW, 'utf8')
        cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
        # A fresh encryptor per field so each starts at CTR counter zero.
        encryptor = cipher.encryptor()
        uN = encryptor.update(uN) + encryptor.finalize()
        uN = bytes.hex(uN)
        encryptor = cipher.encryptor()
        acc = encryptor.update(acc) + encryptor.finalize()
        acc = bytes.hex(acc)
        encryptor = cipher.encryptor()
        pW = encryptor.update(pW) + encryptor.finalize()
        pW = bytes.hex(pW)
        # Integrity checksum over the concatenated hex ciphertexts.
        lineEncrypted = bytes(acc + uN + pW, 'utf8')
        lineChecksum = hashlib.sha256(lineEncrypted).hexdigest()
        newline = acc + "\t" + uN + "\t" + pW + "\t" + str(lineHashHexidecimal) + "\t" + str(lineChecksum) + "\n"
        post = user_name + "50" + ".passpy"
        location = os.path.join(pre, post)
        with open(location, "a", newline="\n") as filea:
            filea.write(newline + "\n")
        input("press Enter once the password is memorized (dont worry if you forget, it was saved in your password directory.)\n")
        MainMenu(user_name)
    elif x == '2':
        header()
        pW = input("Type the password for " + acc + ", then press Enter: \n")
        # Same key derivation / encryption scheme as the generated-password branch.
        preKey = acc + uN + pW
        lineHash = hashlib.sha256(preKey.encode('utf-8'))
        half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
        lineHashHexidecimal = lineHash.hexdigest()
        smosh = hashlib.sha256(bytes(half + lineHashHexidecimal, 'utf8'))
        key = smosh.digest()
        iv = bytes(int(len(key)/2))
        acc = bytes(acc, 'utf8')
        uN = bytes(uN, 'utf8')
        pW = bytes(pW, 'utf8')
        cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
        # Drop references to the intermediate key material; the cipher object
        # already holds the key internally.
        smosh = ''
        key = ''
        iv = ''
        encryptor = cipher.encryptor()
        uN = encryptor.update(uN) + encryptor.finalize()
        uN = bytes.hex(uN)
        encryptor = cipher.encryptor()
        acc = encryptor.update(acc) + encryptor.finalize()
        acc = bytes.hex(acc)
        encryptor = cipher.encryptor()
        pW = encryptor.update(pW) + encryptor.finalize()
        pW = bytes.hex(pW)
        lineEncrypted = bytes(acc + uN + pW, 'utf8')
        lineChecksum = hashlib.sha256(lineEncrypted).hexdigest()
        newline = acc + "\t" + uN + "\t" + pW + "\t" + str(lineHashHexidecimal) + "\t" + str(lineChecksum) + "\n"
        post = user_name + "50" + ".passpy"
        location = os.path.join(pre, post)
        with open(location, "a", newline="\n") as filea:
            filea.write(newline)
        MainMenu(user_name)
    else:
        # Unrecognized choice: re-prompt.
        PassGen(user_name,acc,uN,pre)
def Signin(pre):
    """Authenticate an existing user and open the main menu on success.

    The typed user name is hex-encoded; the credential file
    "<hex-name>4c.passpy" in *pre* holds the verifier produced by s().
    """
    header()
    user_name = input("Enter Username: ").encode("utf-8").hex()
    if user_name == "":
        input("Press enter to returnt to the Sign In screen and enter a user name\n")
        Signin(pre)
    nametest2 = user_name + "4c" + ".passpy"
    location = os.path.join(pre, nametest2)
    try: #check to see if the account exists
        usersearch = open(location,"r") #search for user's password file
        lst = list(usersearch.readlines())
        confirm = lst[-1]
        print("Hello " + str(codecs.decode(user_name, "hex"), "utf-8"))
        password = input("Enter Password: ").encode("utf-8").hex()
        # s() derives the login verifier into the module-level global ``line``.
        s(user_name,password)
        compare = line
        if compare == confirm:
            print("Access Granted")
            MainMenu(user_name)
        else:
            print("Access Denied")
            Signin(pre)
    except FileNotFoundError:
        header()
        print("Username not found!")
        input("please press enter to continue")
        Login(pre)
def AddEntry(user_name,pre):
    """Prompt for an account and its login name, then hand off to PassGen."""
    header()
    account_name = input("what account is this password for? (e.g. GitHub)\n")
    login_name = input("What is the username for " + account_name + "?\n")
    PassGen(user_name,account_name,login_name,pre)
    print("Done!")
def PasswordSearch(user_name,pre):
    """Interactive search over the user's vault file "<hex-name>50.passpy".

    Option 1 dumps every stored password; options 2-4 search by account,
    username or password by encrypting the query the same way each row was
    encrypted and comparing ciphertexts (the per-row AES key is rebuilt from
    SHA-256(user_name) and the row's stored lineHash, row[3]).
    """
    c = ""
    header()
    post = user_name + "50" + ".passpy"
    location = os.path.join(pre, post)
    half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
    SearchColumn = input("Password Search Menu:\nPress 1 to show all passwords\nPress 2 to search by account\nAll of the following options will NOT work!\nPress 3 to search by username\nPress 4 to search by password\nPress 5 to return to the Main Menu\n ")
    try: #make sure there is a password file to search through
        with open(location) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter="\t")
            # Skip the "count" header row written at registration time.
            next(csv_reader)
            if SearchColumn == '1':
                header()
                print("Here are all of the stored passwords: ")
                for row in csv_reader: # !!!START HERE!!! Decrypt single item line by line
                    smosh = hashlib.sha256(bytes(half + str(row[3]), 'utf8'))
                    key = smosh.digest()
                    iv = bytes(int(len(key)/2))
                    cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
                    decryptor = cipher.decryptor()
                    # row[2] is the hex-encoded encrypted password field.
                    bEntry = bytes.fromhex(str(row[2]).lower())
                    bct = str(decryptor.update(bEntry), "utf8")
                    print(bct)
                input("Press Enter to continue to the Main Menu")
                MainMenu(user_name)
            elif SearchColumn == '2':
                header()
                search = bytes(input("What Account are you looking for? \n"), 'utf8')
                for row in csv_reader:
                    # Rebuild this row's key, encrypt the query and compare
                    # against the stored account ciphertext (row[0]).
                    half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
                    smosh = hashlib.sha256(bytes(half + str(row[3]), 'utf8'))
                    key = smosh.digest()
                    iv = bytes(int(len(key)/2))
                    cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
                    decryptor = cipher.decryptor()
                    encryptor = cipher.encryptor()
                    sup = encryptor.update(search) + encryptor.finalize()
                    sable = bytes.hex(sup)
                    if sable == row[0]:
                        # Match: decrypt account (a), username (u), password (p).
                        decryptor = cipher.decryptor()
                        a = bytes.fromhex(str(row[0]).lower())
                        a = str(decryptor.update(a), "utf8")
                        decryptor = cipher.decryptor()
                        u = bytes.fromhex(str(row[1]).lower())
                        u = str(decryptor.update(u), "utf8")
                        decryptor = cipher.decryptor()
                        p = bytes.fromhex(str(row[2]).lower())
                        p = str(decryptor.update(p), "utf8")
                        header()
                        c = input("The Account, Username and Password information for " + a + " are:\n\nAccount------" + a + "\nUser Name----" + u + "\nPassword-----" + p + "\n\nEnter 1 if you want to copy the password to the clipboard\nEnter 2 if you want to continue searching\n")
                        if c == '1':
                            target = p
                            header()
                            print("The Account, Username and Password information for " + a + " are:\n\nAccount------" + a + "\nUser Name----" + u + "\nPassword-----" + p + "\n")
                            Clipboard(target)
                            MainMenu(user_name)
                        elif c == '2':
                            print("Password NOT copied, continuing to search")
                            time.sleep(2)
                            continue
                        else:
                            print("Returning to the Main Menu")
                            time.sleep(1)
                            MainMenu(user_name)
                MainMenu(user_name)
            elif SearchColumn == '3':
                header()
                search = bytes(input("What Username are you looking for? \n"), 'utf8')
                for row in csv_reader:
                    # Same scheme as option 2, but compared against the
                    # username ciphertext (row[1]).
                    half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
                    smosh = hashlib.sha256(bytes(half + str(row[3]), 'utf8'))
                    key = smosh.digest()
                    iv = bytes(int(len(key)/2))
                    cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
                    decryptor = cipher.decryptor()
                    encryptor = cipher.encryptor()
                    sup = encryptor.update(search) + encryptor.finalize()
                    sable = bytes.hex(sup)
                    if sable == row[1]:
                        decryptor = cipher.decryptor()
                        a = bytes.fromhex(str(row[0]).lower())
                        a = str(decryptor.update(a), "utf8")
                        decryptor = cipher.decryptor()
                        u = bytes.fromhex(str(row[1]).lower())
                        u = str(decryptor.update(u), "utf8")
                        decryptor = cipher.decryptor()
                        p = bytes.fromhex(str(row[2]).lower())
                        p = str(decryptor.update(p), "utf8")
                        header()
                        c = input("The Account, Username and Password information for " + a + " are:\n\nAccount------" + a + "\nUser Name----" + u + "\nPassword-----" + p + "\n\nEnter 1 if you want to copy the password to the clipboard\nEnter 2 if you do not\n")
                        if c == '1':
                            target = p
                            header()
                            print("The Account, Username and Password information for " + a + " are:\n\nAccount------" + a + "\nUser Name----" + u + "\nPassword-----" + p + "\n")
                            Clipboard(target)
                            MainMenu(user_name)
                        elif c == '2':
                            input("Password NOT copied, Press enter to return to continue searching")
                            continue
                        else:
                            input("Password NOT copied, Press enter to return to the Main Menu")
                            MainMenu(user_name)
                    continue
                MainMenu(user_name)
            elif SearchColumn == '4':
                header()
                search = bytes(input("What password are you looking for? \n"), 'utf8')
                for row in csv_reader:
                    # Same scheme again, compared against the password
                    # ciphertext (row[2]).
                    half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
                    smosh = hashlib.sha256(bytes(half + str(row[3]), 'utf8'))
                    key = smosh.digest()
                    iv = bytes(int(len(key)/2))
                    cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
                    decryptor = cipher.decryptor()
                    encryptor = cipher.encryptor()
                    sup = encryptor.update(search) + encryptor.finalize()
                    sable = bytes.hex(sup)
                    if sable == row[2]:
                        decryptor = cipher.decryptor()
                        a = bytes.fromhex(str(row[0]).lower())
                        a = str(decryptor.update(a), "utf8")
                        decryptor = cipher.decryptor()
                        u = bytes.fromhex(str(row[1]).lower())
                        u = str(decryptor.update(u), "utf8")
                        decryptor = cipher.decryptor()
                        p = bytes.fromhex(str(row[2]).lower())
                        p = str(decryptor.update(p), "utf8")
                        header()
                        c = input("The Account, Username and Password information for " + a + " are:\n\nAccount------" + a + "\nUser Name----" + u + "\nPassword-----" + p + "\n\nEnter 1 if you want to copy the password to the clipboard\nEnter 2 if you do not\n")
                        if c == '1':
                            target = p
                            header()
                            print("The Account, Username and Password information for " + a + " are:\n\nAccount------" + a + "\nUser Name----" + u + "\nPassword-----" + p + "\n")
                            Clipboard(target)
                            MainMenu(user_name)
                        elif c == '2':
                            input("Password NOT copied, Press enter to return to continue")
                            continue
                        else:
                            input("Password NOT copied, Press enter to return to the Main Menu")
                            MainMenu(user_name)
                    continue
                MainMenu(user_name)
            elif SearchColumn == '5':
                MainMenu(user_name)
            else:
                # Unrecognized menu choice: re-prompt.
                m = input("enter 1, 2, 3 or 4:\n")
                PasswordSearch(user_name,pre)
            MainMenu(user_name)
    except FileNotFoundError:
        header()
        print("Please register some passwords for me to search through.")
        input("please press enter to continue")
        MainMenu(user_name)
def Clipboard(target):
    """Copy *target* to the Windows clipboard via ``clip``, then clear it after 5s.

    The five hand-unrolled countdown steps are collapsed into a loop; the
    printed messages and timing are unchanged.
    """
    # NOTE(review): building a shell command string from user-derived data is
    # command-injection prone; consider subprocess.run([...], shell=False).
    # Left as-is here to preserve the existing 'echo ...| clip' behavior.
    command = 'echo ' + target.strip() + '| clip'
    os.system(command)
    # Give the user a visible countdown before wiping the clipboard.
    for remaining in range(5, 0, -1):
        time.sleep(1)
        print("The clipboard will be cleared in " + str(remaining) + " seconds")
    time.sleep(1)
    print("The clipboard will be cleared now")
    # 'echo.' emits an empty line on Windows, effectively clearing the clipboard.
    os.system("echo.| clip")
def MainMenu(user_name):
    """Display the main menu and route the user's numeric choice."""
    header()
    print("Menu:\n 1: New password - register new password\n 2: List - show passwords\n 3: Exit")
    selection = input("Enter a number:\n")
    if selection == '1':
        AddEntry(user_name,pre)
    elif selection == '2':
        PasswordSearch(user_name,pre)
    elif selection == '3':
        # Tidy up the console before terminating.
        clrscr()
        exit()
    else:
        # Empty or unrecognized input: show the menu again.
        MainMenu(user_name)
def s(user_name,password):
    """Derive the login verifier for (user_name, password) into the global ``line``.

    Both inputs are hex-encoded strings. The verifier is the SHA-256 hexdigest
    of a repetition pattern of the two inputs' hashes, keyed by the first
    decimal digit found in each hexdigest.
    """
    uhold = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
    phold = hashlib.sha256(password.encode('utf-8')).hexdigest()
    # First decimal digit of each hexdigest selects the repetition counts.
    # (Assumes each digest contains at least one digit -- overwhelmingly
    # likely for SHA-256 hex output, but not strictly guaranteed.)
    for i in uhold:
        if i.isdigit():
            ucount = i
            break
    for i in phold:
        if i.isdigit():
            pcount = i
            break
    if int(pcount) % 2 == 0:
        global line
        line = uhold * int(pcount) + phold * int(ucount)
    else:
        # NOTE(review): this branch repeats uhold by pcount, not ucount,
        # unlike the even branch -- looks unintended, but changing it would
        # invalidate every existing credential file, so it is left as-is.
        line = phold * int(pcount) + uhold * int(pcount)
    line = hashlib.sha256(line.encode('utf-8')).hexdigest()
def Register(pre):
    """Create a new user: a verifier file "<hex-name>4c.passpy" plus a vault
    file "<hex-name>50.passpy" seeded with an encrypted header row."""
    header()
    user_name = input("Enter Username: ").encode("utf-8").hex()
    if user_name == "":
        input("Press enter to return to the Sign In screen and enter a user name\n")
        Register(pre)
    nametest1 = user_name + "4c" + ".passpy"
    location = os.path.join(pre, nametest1)
    try:
        usersearch = open(location) #search for user's password file
        usersearch.close()
        header()
        # File already exists -> the name is taken.
        print("User name not available")
        input("Press Enter to try again: ")
        Register(pre)
    except FileNotFoundError:
        header()
        print("User name is available")
        with open(location,"a") as create: #create user's password file
            password = input("enter desired password:\n").encode("utf-8").hex()
            while password == "":
                header()
                # NOTE(review): the re-prompted password is not hex-encoded
                # like the first one -- confirm whether that is intentional.
                password = input("An empty password is not useful\nPlease enter desired password:\n")
            # s() derives the verifier into the global ``line``.
            s(user_name,password)
            create.write(line)
        second = user_name + "50" + ".passpy"
        location = os.path.join(pre, second)
        with open(location, "a", newline="\n") as create:
            # Write an encrypted "count"/"0"/"empty" header row in the same
            # 5-column format PassGen uses (PasswordSearch skips this row).
            first = "count"
            b = "0"
            third = "empty"
            hold = first + b + third
            fourth = hashlib.sha256(bytes(hold, 'utf8')).hexdigest()
            half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
            smosh = hashlib.sha256(bytes(half + hold, 'utf8'))
            key = smosh.digest()
            iv = bytes(int(len(key)/2))
            cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
            encryptor = cipher.encryptor()
            first = bytes(first, 'utf8')
            first = bytes.hex(encryptor.update(first) + encryptor.finalize())
            encryptor = cipher.encryptor()
            b = bytes(b, 'utf8')
            b = bytes.hex(encryptor.update(b) + encryptor.finalize())
            encryptor = cipher.encryptor()
            third = bytes(third, 'utf8')
            third = bytes.hex(encryptor.update(third) + encryptor.finalize())
            hold = bytes(first + b + third, "utf8")
            fifth = hashlib.sha256(hold).hexdigest()
            firstLine = first + "\t" + b + "\t" + third + "\t" + fourth + "\t" + fifth + "\n"
            create.write(firstLine)
        header()
        input("Done! \nNew account created!\nWelcome!")
        MainMenu(user_name)
def Login(pre):
header()
print("Welcome!\n 1: New users - register your account\n 2: Existing users - log in\n 3: Exit - close the application.")
login = input("Enter a number:\n")
if login == '1':
Register(pre)
elif login == '2':
Signin(pre)
elif login == '3':
clrscr()
exit()
else:
Login(pre)
# Startup Phase
header()
print("Welcome to PassPY, the python based, opensource password storage\nIf you like PassPY, share it with a friend github.com/kayakers6/passpy\nIf you love PassPY, BTC: bc1qsqc3v2jt3lh0kq9addf4gu6e2uq5vxxfk35pl\n SNX: 0x05E8813B7dc3c4e039D898CB13f21A6E4d675bc1")
start = input("Press ENTER to start")
Login(pre)
| 42.562633 | 284 | 0.521874 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,937 | 0.246271 |
1b87d6becc53e7c2b04088a17b003a62df6d1a55 | 291 | py | Python | cloudtunes-server/cloudtunes/async.py | skymemoryGit/cloudtunes | 424bf59e05663b0df20e16ddbd119eb39f6a729b | [
"BSD-3-Clause"
] | 529 | 2015-01-01T04:59:12.000Z | 2022-03-31T16:09:38.000Z | cloudtunes-server/cloudtunes/async.py | skymemoryGit/cloudtunes | 424bf59e05663b0df20e16ddbd119eb39f6a729b | [
"BSD-3-Clause"
] | 21 | 2015-01-13T15:41:15.000Z | 2021-11-06T20:56:40.000Z | cloudtunes-server/cloudtunes/async.py | skymemoryGit/cloudtunes | 424bf59e05663b0df20e16ddbd119eb39f6a729b | [
"BSD-3-Clause"
] | 116 | 2015-01-04T22:12:56.000Z | 2022-02-23T11:43:01.000Z | """Asynchronous MongoDB and Redis connections."""
from functools import partial
import motor
import tornadoredis
from cloudtunes import settings
RedisClient = partial(tornadoredis.Client, **settings.REDIS)
mongo = motor.MotorClient(**settings.MONGODB).cloudtunes
redis = RedisClient()
| 19.4 | 60 | 0.797251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.168385 |
1b884b81a434124c2e9b5b7bec2f3d3b1a796257 | 183 | py | Python | output/models/saxon_data/id/id008_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/saxon_data/id/id008_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/saxon_data/id/id008_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.saxon_data.id.id008_xsd.id008 import (
Doc,
Node,
PseudoId,
PseudoIdref,
)
__all__ = [
"Doc",
"Node",
"PseudoId",
"PseudoIdref",
]
| 13.071429 | 57 | 0.584699 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.185792 |
1b89f8f65882ad931575298d75654e28396d40ac | 1,541 | py | Python | scripts/convert_ts_model_to_onnx.py | SakodaShintaro/Miacis | af3508076660cc6e19186f17fa436499e32164f5 | [
"BSD-3-Clause"
] | 10 | 2019-05-14T12:54:49.000Z | 2022-02-28T12:02:52.000Z | scripts/convert_ts_model_to_onnx.py | SakodaShintaro/Miacis | af3508076660cc6e19186f17fa436499e32164f5 | [
"BSD-3-Clause"
] | null | null | null | scripts/convert_ts_model_to_onnx.py | SakodaShintaro/Miacis | af3508076660cc6e19186f17fa436499e32164f5 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import os
from generate_cnn_model import *
from generate_transformer_model import *
parser = argparse.ArgumentParser()
parser.add_argument("model_path", type=str)
parser.add_argument("--batch_size", type=int, default=128)
args = parser.parse_args()
input_channel_num = 42
board_size = 9
policy_channel_num = 27
input_tensor = torch.randn([args.batch_size, input_channel_num, board_size, board_size]).cuda()
script_model = torch.jit.load(args.model_path)
filename = os.path.splitext(os.path.basename(args.model_path))[0]
parts = filename.split("_")
block_num = None
channel_num = None
for part in parts:
if "bl" in part:
block_num = int(part.replace("bl", ""))
if "ch" in part:
channel_num = int(part.replace("ch", ""))
print(f"block_num = {block_num}, channel_num = {channel_num}")
model = None
if "transformer" in args.model_path:
model = TransformerModel(input_channel_num, block_num=block_num, channel_num=channel_num,
policy_channel_num=policy_channel_num,
board_size=board_size)
else:
model = CategoricalNetwork(input_channel_num, block_num=block_num, channel_num=channel_num,
policy_channel_num=policy_channel_num,
board_size=board_size)
model.load_state_dict(script_model.state_dict())
model.eval()
model.cuda()
save_path = args.model_path.replace(".model", ".onnx")
torch.onnx.export(model, input_tensor, save_path)
print(f"export to {save_path}")
| 31.44898 | 95 | 0.704737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 178 | 0.115509 |
1b8be8576b611a2c50fd6a04efe0790fe0d5e554 | 1,916 | py | Python | ietf/mailtrigger/migrations/0009_review_sent.py | ekr/ietfdb | 8d936836b0b9ff31cda415b0a423e3f5b33ab695 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2021-11-20T03:40:40.000Z | 2021-11-20T03:40:42.000Z | ietf/mailtrigger/migrations/0009_review_sent.py | ekr/ietfdb | 8d936836b0b9ff31cda415b0a423e3f5b33ab695 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | ietf/mailtrigger/migrations/0009_review_sent.py | ekr/ietfdb | 8d936836b0b9ff31cda415b0a423e3f5b33ab695 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def forward(apps, schema_editor):
MailTrigger=apps.get_model('mailtrigger','MailTrigger')
Recipient=apps.get_model('mailtrigger','Recipient')
Recipient.objects.create(
slug='review_team_mail_list',
desc="The review team's email list",
template="{{review_req.team.list_email}}"
)
Recipient.objects.create(
slug='review_doc_group_mail_list',
desc="The working group list for the document being reviewed",
template="{{review_req.doc.group.list_email}}"
)
Recipient.objects.create(
slug='review_doc_all_parties',
desc="The .all alias for the document being reviewed",
template="{% if review_req.doc.type_id == 'draft' %}<{{review_req.doc.name}}.all@ietf.org>{% endif %}"
)
Recipient.objects.create(
slug='ietf_general',
desc="The IETF general discussion list",
template="ietf@ietf.org"
)
annc = MailTrigger.objects.create(
slug='review_completed',
desc='Recipients when an review is completed',
)
annc.to = Recipient.objects.filter(slug__in=['review_team_mail_list',])
annc.cc = Recipient.objects.filter(slug__in=['review_doc_all_parties','review_doc_group_mail_list','ietf_general'])
def reverse(apps, schema_editor):
MailTrigger=apps.get_model('mailtrigger','MailTrigger')
Recipient=apps.get_model('mailtrigger','Recipient')
MailTrigger.objects.filter(slug='review_completed').delete()
Recipient.objects.filter(slug__in=['review_team_mail_list','review_doc_group_mail_list','review_doc_all_parties','ietf_general']).delete()
class Migration(migrations.Migration):
dependencies = [
('mailtrigger', '0008_review_summary_triggers'),
]
operations = [
migrations.RunPython(forward, reverse)
]
| 33.034483 | 142 | 0.692067 | 196 | 0.102296 | 0 | 0 | 0 | 0 | 0 | 0 | 854 | 0.44572 |
1b8cff6c78d91bb987bc3e4d6107a012b36bc8af | 2,092 | py | Python | utils/configs.py | realeu/ImgBB-Bot | 8e71df093e41690f815051636c91815f2db08ec2 | [
"MIT"
] | null | null | null | utils/configs.py | realeu/ImgBB-Bot | 8e71df093e41690f815051636c91815f2db08ec2 | [
"MIT"
] | null | null | null | utils/configs.py | realeu/ImgBB-Bot | 8e71df093e41690f815051636c91815f2db08ec2 | [
"MIT"
] | null | null | null | import os
import time
class Var(object):
# Get a bot token from botfather
BOT_TOKEN = os.environ.get("BOT_TOKEN", "")
# Get from my.telegram.org
API_ID = int(os.environ.get("API_ID", 12345))
# Get from my.telegram.org
API_HASH = os.environ.get("API_HASH", "")
# To record start time of bot
BOT_START_TIME = time.time()
# You Can Get An API Key From https://api.imgbb.com.
API = os.environ.get("API", None)
OWNER_ID = int(os.environ.get("OWNER_ID", "1453690249"))
BOT_NAME = os.environ.get("BOT_NAME", "ImgBB")
START_PIC = "https://telegra.ph/file/e162f5f8554a9bf66e830.jpg"
HELP_PIC = "https://telegra.ph/file/e162f5f8554a9bf66e830.jpg"
class Tr(object):
START_TEXT = """
👋 Hi {},
I’m **[ImgBB](telegram.me/xImgBBbot)**. I can upload images on **ImgBB.com** & generate shareable link for it!
BTW, do press **Help** for more information about the process.
"""
ABOUT_TEXT = """🤖 **My Name:** [ImgBB](telegram.me/xImgBBbot)
📝 **Language:** [Python 3](https://www.python.org)
📚 **Framework:** [Pyrogram](https://github.com/pyrogram/pyrogram)
📡 **Hosted On:** [Railway](https://railway.app)
👨💻 **Developer:** [𖤍 Λℓσηє 𖤍](t.me/xDune)
👥 **Support Group:** [Marine Support](https://t.me/MarineChats)
📢 **Updates Channel:** [Marine Bots](https://t.me/MarineBots)
"""
HELP_TEXT = """You may have already known my function. As you have seen in the start message, I can upload images on **ImgBB.com** & generate shareable link for it, which can be deleted after a specific time or stay there forever ~ according to your selection...🙃
Steps:
• Post/Forward an image...
• Select an option ~ whether to delete it automatically within the given period or keep it permanently...
• BOOM!💥 Your image is uploaded! You will be provided with a link to view the image, as well as, a link to delete it."""
ERR_TEXT = "⚠️ API Not Found"
ERRTOKEN_TEXT = "😶 The Access Token Provided Has Expired, Revoked, Malformed Or Invalid For Other Reasons. Report this at @MarineBots",
WAIT = "💬 Please Wait !!"
| 31.69697 | 267 | 0.672084 | 2,129 | 0.987019 | 0 | 0 | 0 | 0 | 0 | 0 | 1,669 | 0.77376 |
1b8d1a7f7c749a047b0eddbcadab57bb15964022 | 4,517 | py | Python | unsupervised_meta_learning/_nbdev.py | ojss/c3lr | a018c5a793a2c9eedc3f0fefcca0970f0be35ffc | [
"Apache-2.0"
] | 3 | 2022-02-24T07:02:12.000Z | 2022-03-20T18:33:58.000Z | unsupervised_meta_learning/_nbdev.py | ojss/c3lr | a018c5a793a2c9eedc3f0fefcca0970f0be35ffc | [
"Apache-2.0"
] | null | null | null | unsupervised_meta_learning/_nbdev.py | ojss/c3lr | a018c5a793a2c9eedc3f0fefcca0970f0be35ffc | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"c_imshow": "01_nn_utils.ipynb",
"Flatten": "01_nn_utils.ipynb",
"conv3x3": "01_nn_utils.ipynb",
"get_proto_accuracy": "01_nn_utils.ipynb",
"get_accuracy": "02_maml_pl.ipynb",
"collate_task": "01b_data_loaders_pl.ipynb",
"collate_task_batch": "01b_data_loaders_pl.ipynb",
"get_episode_loader": "01b_data_loaders_pl.ipynb",
"UnlabelledDataset": "01b_data_loaders_pl.ipynb",
"get_cub_default_transform": "01b_data_loaders_pl.ipynb",
"get_simCLR_transform": "01b_data_loaders_pl.ipynb",
"get_omniglot_transform": "01b_data_loaders_pl.ipynb",
"get_custom_transform": "01b_data_loaders_pl.ipynb",
"identity_transform": "01b_data_loaders_pl.ipynb",
"UnlabelledDataModule": "01b_data_loaders_pl.ipynb",
"OmniglotDataModule": "01b_data_loaders_pl.ipynb",
"MiniImagenetDataModule": "01b_data_loaders_pl.ipynb",
"cg": "01c_grad_utils.ipynb",
"cat_list_to_tensor": "01c_grad_utils.ipynb",
"reverse_unroll": "01c_grad_utils.ipynb",
"reverse": "01c_grad_utils.ipynb",
"fixed_point": "01c_grad_utils.ipynb",
"CG": "01c_grad_utils.ipynb",
"CG_normaleq": "01c_grad_utils.ipynb",
"neumann": "01c_grad_utils.ipynb",
"exact": "01c_grad_utils.ipynb",
"grd": "01c_grad_utils.ipynb",
"list_dot": "01c_grad_utils.ipynb",
"jvp": "01c_grad_utils.ipynb",
"get_outer_gradients": "01c_grad_utils.ipynb",
"update_tensor_grads": "01c_grad_utils.ipynb",
"grad_unused_zero": "01c_grad_utils.ipynb",
"DifferentiableOptimizer": "01c_grad_utils.ipynb",
"HeavyBall": "01c_grad_utils.ipynb",
"Momentum": "01c_grad_utils.ipynb",
"GradientDescent": "01c_grad_utils.ipynb",
"gd_step": "01c_grad_utils.ipynb",
"heavy_ball_step": "01c_grad_utils.ipynb",
"torch_momentum_step": "01c_grad_utils.ipynb",
"euclidean_distance": "01d_proto_utils.ipynb",
"cosine_similarity": "01d_proto_utils.ipynb",
"get_num_samples": "01d_proto_utils.ipynb",
"get_prototypes": "01d_proto_utils.ipynb",
"prototypical_loss": "01d_proto_utils.ipynb",
"clusterer": "01d_proto_utils.ipynb",
"cluster_diff_loss": "01d_proto_utils.ipynb",
"CNN_4Layer": "01d_proto_utils.ipynb",
"Encoder": "01d_proto_utils.ipynb",
"Decoder": "01d_proto_utils.ipynb",
"CAE": "01d_proto_utils.ipynb",
"Encoder4L": "01d_proto_utils.ipynb",
"Decoder4L": "01d_proto_utils.ipynb",
"Decoder4L4Mini": "01d_proto_utils.ipynb",
"CAE4L": "01d_proto_utils.ipynb",
"get_images_labels_from_dl": "01d_proto_utils.ipynb",
"logger": "02_maml_pl.ipynb",
"ConvolutionalNeuralNetwork": "02_maml_pl.ipynb",
"MAML": "02_maml_pl.ipynb",
"UMTRA": "02_maml_pl.ipynb",
"cg_solve": "02b_iMAML.ipynb",
"iMAML": "02b_iMAML.ipynb",
"PrototypicalNetwork": "03_protonet_pl.ipynb",
"CactusPrototypicalModel": "03_protonet_pl.ipynb",
"ProtoModule": "03_protonet_pl.ipynb",
"Classifier": "03b_ProtoCLR.ipynb",
"get_train_images": "03b_ProtoCLR.ipynb",
"WandbImageCallback": "03b_ProtoCLR.ipynb",
"TensorBoardImageCallback": "03b_ProtoCLR.ipynb",
"ConfidenceIntervalCallback": "03b_ProtoCLR.ipynb",
"UMAPCallback": "03b_ProtoCLR.ipynb",
"UMAPClusteringCallback": "03b_ProtoCLR.ipynb",
"PCACallback": "03b_ProtoCLR.ipynb",
"ProtoCLR": "03b_ProtoCLR.ipynb",
"Partition": "04_cactus.ipynb",
"CactusTaskDataset": "04_cactus.ipynb",
"get_partitions_kmeans": "04_cactus.ipynb",
"DataOpt": "04_cactus.ipynb",
"LoaderOpt": "04_cactus.ipynb",
"load": "04_cactus.ipynb",
"CactusDataModule": "04_cactus.ipynb"}
modules = ["nn_utils.py",
"pl_dataloaders.py",
"hypergrad.py",
"proto_utils.py",
"maml.py",
"imaml.py",
"protonets.py",
"protoclr.py",
"cactus.py"]
doc_url = "https://ojss.github.io/unsupervised_meta_learning/"
git_url = "https://github.com/ojss/unsupervised_meta_learning/tree/main/"
def custom_doc_links(name): return None
| 44.722772 | 73 | 0.636263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,272 | 0.724375 |
1b8eb8a92908acc22a08cbb4b026df99be610d7a | 9,413 | py | Python | code/applications/qs_predict_probablistic.py | ninamiolane/quicksilver | 1baf251360dadea0afa3daaa09942d9d2d7c71fb | [
"Apache-2.0"
] | 126 | 2017-04-06T03:19:03.000Z | 2022-03-08T07:44:09.000Z | code/applications/qs_predict_probablistic.py | ninamiolane/quicksilver | 1baf251360dadea0afa3daaa09942d9d2d7c71fb | [
"Apache-2.0"
] | 7 | 2017-09-22T01:46:04.000Z | 2021-04-29T07:22:44.000Z | code/applications/qs_predict_probablistic.py | ninamiolane/quicksilver | 1baf251360dadea0afa3daaa09942d9d2d7c71fb | [
"Apache-2.0"
] | 37 | 2017-04-03T17:14:44.000Z | 2022-03-09T16:00:03.000Z | # add LDDMM shooting code into path
import sys
sys.path.append('../vectormomentum/Code/Python');
sys.path.append('../library')
from subprocess import call
import argparse
import os.path
#Add deep learning related libraries
from collections import Counter
import torch
import prediction_network
import util
import numpy as np
from skimage import exposure
#Add LDDMM registration related libraries
# pyca modules
import PyCA.Core as ca
import PyCA.Common as common
#import PyCA.Display as display
# vector momentum modules
# others
import logging
import copy
import math
import registration_methods
#parse command line input
parser = argparse.ArgumentParser(description='Deformation prediction given set of moving and target images.')
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument('--moving-image', nargs='+', required=True, metavar=('m1', 'm2, m3...'),
help='List of moving images, seperated by space.')
requiredNamed.add_argument('--target-image', nargs='+', required=True, metavar=('t1', 't2, t3...'),
help='List of target images, seperated by space.')
requiredNamed.add_argument('--output-prefix', nargs='+', required=True, metavar=('o1', 'o2, o3...'),
help='List of registration output prefixes for every moving/target image pair, seperated by space. Preferred to be a directory (e.g. /some_path/output_dir/)')
parser.add_argument('--samples', type=int, default=50, metavar='N',
help='number of times to sample the network (default: 64)')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for prediction network (default: 64)')
parser.add_argument('--n-GPU', type=int, default=1, metavar='N',
help='number of GPUs used for prediction (default: 1). For maximum efficiency please set the batch size divisible by the number of GPUs.')
parser.add_argument('--use-CPU-for-shooting', action='store_true', default=False,
help='Use CPU for geodesic shooting. Slow, but saves GPU memory.')
parser.add_argument('--shoot-steps', type=int, default=0, metavar='N',
help='time steps for geodesic shooting. Ignore this option to use the default step size used by the registration model.')
parser.add_argument('--affine-align', action='store_true', default=False,
help='Perform affine registration to align moving and target images to ICBM152 atlas space. Require niftireg.')
parser.add_argument('--histeq', action='store_true', default=False,
help='Perform histogram equalization to the moving and target images.')
parser.add_argument('--atlas', default="../data/atlas/icbm152.nii",
help="Atlas to use for (affine) pre-registration")
parser.add_argument('--prediction-parameter', default='../../network_configs/OASIS_predict_probabilistic.pth.tar',
help="network parameters for the prediction network")
args = parser.parse_args()
# check validity of input arguments from command line
def check_args(args):
# number of input images/output prefix consistency check
n_moving_images = len(args.moving_image)
n_target_images = len(args.target_image)
n_output_prefix = len(args.output_prefix)
if (n_moving_images != n_target_images):
print('The number of moving images is not consistent with the number of target images!')
sys.exit(1)
elif (n_moving_images != n_output_prefix ):
print('The number of output prefix is not consistent with the number of input images!')
sys.exit(1)
# number of GPU check (positive integers)
if (args.n_GPU <= 0):
print('Number of GPUs must be positive!')
sys.exit(1)
# geodesic shooting step check (positive integers)
if (args.shoot_steps < 0):
print('Shooting steps (--shoot-steps) is negative. Using model default step.')
# geodesic shooting step check (positive integers)
if (args.samples < 1):
print('Number of samples (--samples) is smaller than 1. Using model default step.')
#enddef
def create_net(args, network_config):
net_single = prediction_network.net(network_config['network_feature']).cuda();
net_single.load_state_dict(network_config['state_dict'])
if (args.n_GPU > 1) :
device_ids=range(0, args.n_GPU)
net = torch.nn.DataParallel(net_single, device_ids=device_ids).cuda()
else:
net = net_single
net.train()
return net;
#enddef
def preprocess_image(image_pyca, histeq):
image_np = common.AsNPCopy(image_pyca)
nan_mask = np.isnan(image_np)
image_np[nan_mask] = 0
image_np /= np.amax(image_np)
# perform histogram equalization if needed
if histeq:
image_np[image_np != 0] = exposure.equalize_hist(image_np[image_np != 0])
return image_np
#perform deformation prediction
def predict_image(args):
if (args.use_CPU_for_shooting):
mType = ca.MEM_HOST
else:
mType = ca.MEM_DEVICE
# load the prediction network
predict_network_config = torch.load(args.prediction_parameter)
prediction_net = create_net(args, predict_network_config);
batch_size = args.batch_size
patch_size = predict_network_config['patch_size']
input_batch = torch.zeros(batch_size, 2, patch_size, patch_size, patch_size).cuda()
# start prediction
for i in range(0, len(args.moving_image)):
common.Mkdir_p(os.path.dirname(args.output_prefix[i]))
if (args.affine_align):
# Perform affine registration to both moving and target image to the ICBM152 atlas space.
# Registration is done using Niftireg.
call(["reg_aladin",
"-noSym", "-speeeeed", "-ref", args.atlas ,
"-flo", args.moving_image[i],
"-res", args.output_prefix[i]+"moving_affine.nii",
"-aff", args.output_prefix[i]+'moving_affine_transform.txt'])
call(["reg_aladin",
"-noSym", "-speeeeed" ,"-ref", args.atlas ,
"-flo", args.target_image[i],
"-res", args.output_prefix[i]+"target_affine.nii",
"-aff", args.output_prefix[i]+'target_affine_transform.txt'])
moving_image = common.LoadITKImage(args.output_prefix[i]+"moving_affine.nii", mType)
target_image = common.LoadITKImage(args.output_prefix[i]+"target_affine.nii", mType)
else:
moving_image = common.LoadITKImage(args.moving_image[i], mType)
target_image = common.LoadITKImage(args.target_image[i], mType)
#preprocessing of the image
moving_image_np = preprocess_image(moving_image, args.histeq);
target_image_np = preprocess_image(target_image, args.histeq);
grid = moving_image.grid()
moving_image_processed = common.ImFromNPArr(moving_image_np, mType)
target_image_processed = common.ImFromNPArr(target_image_np, mType)
moving_image.setGrid(grid)
target_image.setGrid(grid)
predict_transform_space = False
if 'matlab_t7' in predict_network_config:
predict_transform_space = True
# run actual prediction
prediction_result = util.predict_momentum(moving_image_np, target_image_np, input_batch, batch_size, patch_size, prediction_net, predict_transform_space);
m0 = prediction_result['image_space']
m0_reg = common.FieldFromNPArr(prediction_result['image_space'], mType);
registration_result = registration_methods.geodesic_shooting(moving_image_processed, target_image_processed, m0_reg, args.shoot_steps, mType, predict_network_config)
phi = common.AsNPCopy(registration_result['phiinv'])
phi_square = np.power(phi,2)
for sample_iter in range(1, args.samples):
print(sample_iter)
prediction_result = util.predict_momentum(moving_image_np, target_image_np, input_batch, batch_size, patch_size, prediction_net, predict_transform_space);
m0 += prediction_result['image_space']
m0_reg = common.FieldFromNPArr(prediction_result['image_space'], mType);
registration_result = registration_methods.geodesic_shooting(moving_image_processed, target_image_processed, m0_reg, args.shoot_steps, mType, predict_network_config)
phi += common.AsNPCopy(registration_result['phiinv'])
phi_square += np.power(common.AsNPCopy(registration_result['phiinv']),2)
m0_mean = np.divide(m0, args.samples);
m0_reg = common.FieldFromNPArr(m0_mean, mType);
registration_result = registration_methods.geodesic_shooting(moving_image_processed, target_image_processed, m0_reg, args.shoot_steps, mType, predict_network_config)
phi_mean = registration_result['phiinv']
phi_var = np.divide(phi_square, args.samples) - np.power(np.divide(phi, args.samples), 2)
#save result
common.SaveITKImage(registration_result['I1'], args.output_prefix[i]+"I1.mhd")
common.SaveITKField(phi_mean, args.output_prefix[i]+"phiinv_mean.mhd")
common.SaveITKField(common.FieldFromNPArr(phi_var, mType), args.output_prefix[i]+"phiinv_var.mhd")
#enddef
if __name__ == '__main__':
check_args(args);
predict_image(args)
| 46.369458 | 185 | 0.691809 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,989 | 0.31754 |
1b8fc5e29f8f56408c4c25c653cdf874f3bd78ff | 1,685 | py | Python | exifGPSimplant.py | Bahrd/AppliedPythonology | 65d7bd665eba823c3319c3efdc5a5047ddfa534d | [
"MIT"
] | 4 | 2019-10-11T07:39:49.000Z | 2022-03-01T23:18:57.000Z | exifGPSimplant.py | Bahrd/AppliedPythonology | 65d7bd665eba823c3319c3efdc5a5047ddfa534d | [
"MIT"
] | null | null | null | exifGPSimplant.py | Bahrd/AppliedPythonology | 65d7bd665eba823c3319c3efdc5a5047ddfa534d | [
"MIT"
] | null | null | null | ## EXIF GPS tags hand-crafted modification ('autografts')...
from exif import Image
from sys import argv as names
from math import floor
def dd_GPS_dms(coordinate):
latlonitude = float(coordinate)
degrees = floor(latlonitude)
residuum = (latlonitude - degrees) * 60
minutes = floor(residuum)
seconds = (residuum - minutes) * 60
return (degrees, minutes, seconds)
if(len(names) < 4):
print('USAGE: exifGPSimplant filename latitude [0-360) longitude [0 - 180)')
exit(-1)
else:
(recipient, latitude, longitude) = names[1:4]
with open(recipient, 'rb') as image_file:
img = Image(image_file)
img.gps_latitude = dd_GPS_dms(latitude)
img.gps_longitude = dd_GPS_dms(longitude)
#img.gps_altitude = 1200 # An orphan...
print(img.gps_latitude, img.gps_longitude)
with open(recipient, 'wb') as image_file:
image_file.write(img.get_file())
## Note the GPS tags format
# 34°56'43.386"N 109°46'32.447"W
## Other locations...
# https://www.gps-coordinates.net/gps-coordinates-converter
# Whitewater, CA: 33.923685, -116.640324
# Yosemite Valley: 37° 43′ 18″ N, 119° 38′ 47″ W
# Mocassin (Tuolumne Count, CA): 37° 48′ 39″ N, 120° 18′ 0″ W
# Hollywood Sign Puzzle View: 34°06'18.3"N 118°19'52.0"W
# Hoover Dam: 36° 0′ 56″ N, 114° 44′ 16″ W
# Rainbow Canyon: 36° 21′ 56.88″ N, 117° 30′ 5.4″ W
# Route 66 (AZ): 35°14'15.5"N 113°12'22.6"W
# Las Vegas' Replica of the Statue of Liberty 36°6'3.58"N 115°10'23.029"W
# The Tepees in Petrified Forest 34°56'43.386"N 109°46'32.447"W
# Golden gate & Alcatraz: 37.7764931, 122.5042172
## Shortcuts...
# Target folder: C:\Users\Przem\OneDrive\Images\OOW\Arizona, California & Nevada\Journey, not a destination
| 33.039216 | 107 | 0.695549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,013 | 0.583189 |
1b90a0ad6a0068ff9c4007e6a35e5372ec92638b | 1,443 | py | Python | RegonAPI/converters.py | damianwasik98/RegonAPI | 0c45363ba7fcfdc7c0938084288575c5b32ec4da | [
"MIT"
] | 10 | 2020-01-17T15:40:16.000Z | 2022-01-29T10:45:21.000Z | RegonAPI/converters.py | damianwasik98/RegonAPI | 0c45363ba7fcfdc7c0938084288575c5b32ec4da | [
"MIT"
] | 4 | 2019-10-25T08:21:26.000Z | 2021-04-19T11:28:30.000Z | RegonAPI/converters.py | damianwasik98/RegonAPI | 0c45363ba7fcfdc7c0938084288575c5b32ec4da | [
"MIT"
] | 3 | 2020-11-23T13:12:26.000Z | 2021-07-23T10:10:46.000Z | """
Converters of codes
"""
from string import digits
from .exceptions import RegonConvertionError
from . import validators
REGON9_WEIGHTS = [8, 9, 2, 3, 4, 5, 6, 7]
REGON14_WEIGHTS = [2, 4, 8, 5, 0, 9, 7, 3, 6, 1, 2, 4, 8]
def regon8_to_9(regon8):
"""Convert REGON8 to REGON9
Parameters
----------
regon8 : str
REGON8
Returns
-------
str
REGON9
Raises
------
RegonConvertionError
If regon8 is not valid
"""
if not validators.is_valid_regon8(regon8):
raise RegonConvertionError(regon8)
a, b = list(regon8), REGON9_WEIGHTS
a = list(map(lambda x: int(x), a))
last_digit = sum(list(map(lambda x: x[0] * x[1], zip(a, b)))) % 11
regon9 = "{regon8}{last_digit}".format(regon8=regon8, last_digit=last_digit)
return regon9
def regon13_to_14(regon13):
"""Convert REGON13 to REGON14
Parameters
----------
regon13 : str
REGON13
Returns
-------
str
REGON14
Raises
------
RegonConvertionError
If regon13 is not valid
"""
if not validators.is_valid_regon13(regon13):
raise RegonConvertionError(regon13)
a, b = list(regon13), REGON14_WEIGHTS
a = list(map(lambda x: int(x), a))
last_digit = sum(list(map(lambda x: x[0] * x[1], zip(a, b)))) % 11
regon14 = "{regon13}{last_digit}".format(regon13=regon13, last_digit=last_digit)
return regon14
| 21.537313 | 84 | 0.600139 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 532 | 0.368676 |
1b9205fa35c003e972162f8ca03dbd40a69ae685 | 1,609 | py | Python | analysis/time_domain_response.py | Joeltronics/audioexperiments | 9f677ce72b0a50c7240ace880603d8e955f270a8 | [
"MIT"
] | 1 | 2021-12-13T03:05:03.000Z | 2021-12-13T03:05:03.000Z | analysis/time_domain_response.py | Joeltronics/audioexperiments | 9f677ce72b0a50c7240ace880603d8e955f270a8 | [
"MIT"
] | null | null | null | analysis/time_domain_response.py | Joeltronics/audioexperiments | 9f677ce72b0a50c7240ace880603d8e955f270a8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import math
from typing import Iterable, Optional, Tuple, Union
import numpy as np
from analysis import linearity
from utils import utils
from unit_test import unit_test
from processor import ProcessorBase
from generation import signal_generation
def generate_impulse(n_samp, amplitude=1.0) -> np.ndarray:
x = np.zeros(n_samp, dtype=np.float64)
x[0] = amplitude
return x
def generate_step(n_samp, amplitude=1.0) -> np.ndarray:
return np.ones(n_samp) * amplitude
def generate_ramp(n_samp, slope=1.0) -> np.ndarray:
y = (np.arange(n_samp) + 1).astype(np.float64) * slope
assert utils.approx_equal(y[0], slope)
assert utils.approx_equal(y[1], 2*slope)
return y
def get_impulse_response(system, n_samp, amplitude=1.0, reset=True, negative=False) -> np.ndarray:
# Assuming system is LTI & causal, and that system.reset() works as it should,
# we can ignore negative half of impulse/step response, as zero-input will have zero-output
x = generate_impulse(n_samp, amplitude)
if negative:
x = -x
if reset:
system.reset()
return system.process_vector(x)
def get_step_response(system, n_samp, amplitude=1.0, reset=True, negative=False) -> np.ndarray:
x = generate_step(n_samp, amplitude)
if negative:
x = -x
if reset:
system.reset()
return system.process_vector(x)
def get_ramp_response(system, n_samp, slope=1.0, reset=True, negative=False) -> np.ndarray:
x = generate_ramp(n_samp, slope)
if negative:
x = -x
if reset:
system.reset()
return system.process_vector(x)
| 22.985714 | 99 | 0.707272 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 194 | 0.120572 |
1b93212182ff315995c2188f791e82900b2d2157 | 28,488 | py | Python | bach/bach/series/series_datetime.py | objectiv/objectiv-analytics | 86ec1508f71c2d61ea7d67479800e4dc417a46e1 | [
"Apache-2.0"
] | 23 | 2021-11-10T21:37:42.000Z | 2022-03-30T11:46:19.000Z | bach/bach/series/series_datetime.py | objectiv/objectiv-analytics | 86ec1508f71c2d61ea7d67479800e4dc417a46e1 | [
"Apache-2.0"
] | 163 | 2021-11-10T10:11:26.000Z | 2022-03-31T16:04:27.000Z | bach/bach/series/series_datetime.py | objectiv/objectiv-analytics | 86ec1508f71c2d61ea7d67479800e4dc417a46e1 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2021 Objectiv B.V.
"""
import datetime
import warnings
from abc import ABC
from enum import Enum
from typing import Union, cast, List, Tuple, Optional, Any
import numpy
import pandas
from sqlalchemy.engine import Dialect
from bach import DataFrame
from bach.series import Series, SeriesString, SeriesBoolean, SeriesFloat64, SeriesInt64
from bach.expression import Expression, join_expressions
from bach.series.series import WrappedPartition, ToPandasInfo
from bach.series.utils.datetime_formats import parse_c_standard_code_to_postgres_code, \
parse_c_code_to_bigquery_code
from bach.types import DtypeOrAlias, StructuredDtype
from sql_models.constants import DBDialect
from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException
class DatePart(str, Enum):
    """Date/time components supported for extraction and interval handling.

    Values are the plural component names as used in interval expressions.
    """
    DAY = 'days'
    HOUR = 'hours'
    MINUTE = 'minutes'
    SECOND = 'seconds'
    MILLISECOND = 'milliseconds'
    MICROSECOND = 'microseconds'


# Number of seconds contained in each supported date part, used when (de)composing
# time intervals. Note that when the database normalizes intervals, 30-day periods
# are represented as months; BigQuery appears to follow the same threshold Postgres
# uses, see:
# https://www.postgresql.org/docs/current/functions-datetime.html#:~:text=justify_days%20(%20interval%20)%20%E2%86%92%20interval,mon%205%20days
# For example, 395 days is equal to 1 year, 1 month and 5 days.
_TOTAL_SECONDS_PER_DATE_PART = {
    DatePart.DAY: 86_400,       # 24 * 60 * 60
    DatePart.HOUR: 3_600,       # 60 * 60
    DatePart.MINUTE: 60,
    DatePart.SECOND: 1,
    DatePart.MILLISECOND: 1e-3,
    DatePart.MICROSECOND: 1e-6,
}
class DateTimeOperation:
    """
    Accessor exposing string-formatting operations for a :class:`SeriesAbstractDateTime`.
    """

    def __init__(self, series: 'SeriesAbstractDateTime'):
        self._series = series

    def sql_format(self, format_str: str) -> SeriesString:
        """
        Format this Series as a string, using database-native format codes.

        :param format_str: the format to apply to the date/time column. Currently, this
            uses Postgres' date format string syntax:
            https://www.postgresql.org/docs/14/functions-formatting.html

        .. warning::
            This method is deprecated; we recommend using
            :meth:`SeriesAbstractDateTime.dt.strftime` instead.

        .. code-block:: python

            df['year'] = df.some_date_series.dt.sql_format('YYYY')  # return year
            df['date'] = df.some_date_series.dt.sql_format('YYYYMMDD')  # return date

        :returns: a SeriesString containing the formatted date.
        """
        warnings.warn(
            'Call to deprecated method, we recommend to use SeriesAbstractDateTime.dt.strftime instead',
            category=DeprecationWarning,
        )
        formatted = Expression.construct(
            'to_char({}, {})', self._series, Expression.string_value(format_str),
        )
        return self._series.copy_override_type(SeriesString).copy_override(expression=formatted)

    def strftime(self, format_str: str) -> SeriesString:
        """
        Format this Series as a string, using 1989 C standard format codes.

        :param format_str: the format to apply to the date/time column, using the codes from:
            https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes

        .. code-block:: python

            df['year'] = df.some_date_series.dt.sql_format('%Y')  # return year
            df['date'] = df.some_date_series.dt.sql_format('%Y%m%d')  # return date

        :returns: a SeriesString containing the formatted date.
        """
        engine = self._series.engine
        if is_postgres(engine):
            # translate the C standard codes into Postgres' to_char() format elements
            pg_format = parse_c_standard_code_to_postgres_code(format_str)
            formatted = Expression.construct(
                'to_char({}, {})', self._series, Expression.string_value(pg_format),
            )
        elif is_bigquery(engine):
            # BigQuery natively understands (most of) the C standard codes, see:
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/format-elements#format_elements_date_time
            # NOTE(review): format_date() is used here regardless of the concrete
            # date/time type — presumably subclasses handle other types; confirm.
            bq_format = parse_c_code_to_bigquery_code(format_str)
            formatted = Expression.construct(
                'format_date({}, {})', Expression.string_value(bq_format), self._series,
            )
        else:
            raise DatabaseNotSupportedException(engine)
        return self._series.copy_override_type(SeriesString).copy_override(expression=formatted)
class TimedeltaOperation(DateTimeOperation):
def _get_conversion_df(self) -> 'DataFrame':
"""
generates a dataframe containing the amounts of seconds a supported date part has.
"""
from bach import DataFrame
conversion_df = pandas.DataFrame(
data=[
{
self._format_converted_series_name(dp): ts
for dp, ts in _TOTAL_SECONDS_PER_DATE_PART.items()
},
]
)
convert_df = DataFrame.from_pandas(df=conversion_df, engine=self._series.engine, convert_objects=True)
return convert_df.reset_index(drop=True)
@staticmethod
def _format_converted_series_name(date_part: DatePart) -> str:
return f'_SECONDS_IN_{date_part.name}'
@property
def components(self) -> DataFrame:
"""
:returns: a DataFrame containing all date parts from the timedelta.
"""
df = self.total_seconds.to_frame()
df = df.merge(self._get_conversion_df(), how='cross')
# justifies total seconds into the units of each date component
# after adjustment, it converts it back into seconds
for date_part in DatePart:
converted_series_name = self._format_converted_series_name(DatePart(date_part))
df[f'ts_{date_part}'] = df['total_seconds'] // df[converted_series_name]
df[f'ts_{date_part}'] *= df[converted_series_name]
# materialize to avoid complex subquery
df = df.materialize(node_name='justified_date_components')
components_series_names = []
prev_ts = ''
# extract actual date component from justified seconds
# by getting the difference between current and previous components
# this helps on normalizing negative time deltas and have only negative values
# in days.
for date_part in DatePart:
converted_series_name = self._format_converted_series_name(DatePart(date_part))
component_name = f'{date_part}'
current_ts = f'ts_{date_part}'
if not prev_ts:
df[component_name] = df[current_ts] / df[converted_series_name]
else:
df[component_name] = (df[current_ts] - df[prev_ts]) / df[converted_series_name]
df[component_name] = cast(SeriesFloat64, df[component_name]).round(decimals=0)
components_series_names.append(component_name)
prev_ts = current_ts
return df[components_series_names].astype('int64')
@property
def days(self) -> SeriesInt64:
"""
converts total seconds into days and returns only the integral part of the result
"""
day_series = self.total_seconds // _TOTAL_SECONDS_PER_DATE_PART[DatePart.DAY]
day_series = day_series.astype('int64')
return (
day_series
.copy_override_type(SeriesInt64)
.copy_override(name='days')
)
@property
def seconds(self) -> SeriesInt64:
"""
removes days from total seconds (self.total_seconds % _SECONDS_IN_DAY)
and returns only the integral part of the result
"""
seconds_series = (self.total_seconds % _TOTAL_SECONDS_PER_DATE_PART[DatePart.DAY]) // 1
seconds_series = seconds_series.astype('int64')
return (
seconds_series
.copy_override_type(SeriesInt64)
.copy_override(name='seconds')
)
@property
def microseconds(self) -> SeriesInt64:
"""
considers only the fractional part of the total seconds and converts it into microseconds
"""
microseconds_series = (
(self.total_seconds % 1) / _TOTAL_SECONDS_PER_DATE_PART[DatePart.MICROSECOND]
)
microseconds_series = microseconds_series.astype('int64')
return (
microseconds_series
.copy_override_type(SeriesInt64)
.copy_override(name='microseconds')
)
@property
def total_seconds(self) -> SeriesFloat64:
"""
returns the total amount of seconds in the interval
"""
if not is_bigquery(self._series.engine):
# extract(epoch from source) returns the total number of seconds in the interval
expression = Expression.construct(f'extract(epoch from {{}})', self._series)
else:
# bq cannot extract epoch from interval
expression = Expression.construct(
(
f"UNIX_MICROS(CAST('1970-01-01' AS TIMESTAMP) + {{}}) "
f"* {_TOTAL_SECONDS_PER_DATE_PART[DatePart.MICROSECOND]}"
),
self._series,
)
return (
self._series
.copy_override_type(SeriesFloat64)
.copy_override(name='total_seconds', expression=expression)
)
class SeriesAbstractDateTime(Series, ABC):
"""
A Series that represents the generic date/time type and its specific operations. Selected arithmetic
operations are accepted using the usual operators.
**Date/Time Operations**
On any of the subtypes, you can access date operations through the `dt` accessor.
"""
@property
def dt(self) -> DateTimeOperation:
"""
Get access to date operations.
.. autoclass:: bach.series.series_datetime.DateTimeOperation
:members:
"""
return DateTimeOperation(self)
def _comparator_operation(self, other, comparator,
other_dtypes=('timestamp', 'date', 'time', 'string')) -> 'SeriesBoolean':
return super()._comparator_operation(other, comparator, other_dtypes)
@classmethod
def _cast_to_date_if_dtype_date(cls, series: 'Series') -> 'Series':
# PG returns timestamp in all cases were we expect date
# Make sure we cast properly, and round similar to python datetime: add 12 hours and cast to date
if series.dtype == 'date':
td_12_hours = datetime.timedelta(seconds=3600 * 12)
series_12_hours = SeriesTimedelta.from_value(base=series, value=td_12_hours, name='tmp')
expr_12_hours = series_12_hours.expression
return series.copy_override(
expression=Expression.construct("cast({} + {} as date)", series, expr_12_hours)
)
else:
return series
def dt_strip_timezone(value: Optional[datetime.datetime]) -> Optional[datetime.datetime]:
if value is None:
return None
return value.replace(tzinfo=None)
class SeriesTimestamp(SeriesAbstractDateTime):
"""
A Series that represents the timestamp/datetime type and its specific operations.
Timestamps are assumed to be in UTC, or without a timezone, both cases are treated the same.
These timestamps have a microsecond precision at best, in contrast to numpy's datetime64 which supports
up to attoseconds precision.
**Database support and types**
* Postgres: utilizes the 'timestamp without time zone' database type.
* BigQuery: utilizes the 'TIMESTAMP' database type.
"""
dtype = 'timestamp'
dtype_aliases = ('datetime64', 'datetime64[ns]', numpy.datetime64)
supported_db_dtype = {
DBDialect.POSTGRES: 'timestamp without time zone',
DBDialect.BIGQUERY: 'TIMESTAMP',
}
supported_value_types = (datetime.datetime, numpy.datetime64, datetime.date, str)
@classmethod
def supported_literal_to_expression(cls, dialect: Dialect, literal: Expression) -> Expression:
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', literal)
@classmethod
def supported_value_to_literal(
cls,
dialect: Dialect,
value: Union[datetime.datetime, numpy.datetime64, datetime.date, str, None],
dtype: StructuredDtype
) -> Expression:
if value is None:
return Expression.raw('NULL')
# if value is not a datetime or date, then convert it to datetime first
dt_value: Union[datetime.datetime, datetime.date, None] = None
if isinstance(value, str):
formats = ['%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d']
for format in formats:
try:
dt_value = datetime.datetime.strptime(value, format)
break
except ValueError:
continue
if dt_value is None:
raise ValueError(f'Not a valid timestamp string literal: {value}.'
f'Supported formats: {formats}')
elif isinstance(value, numpy.datetime64):
if numpy.isnat(value):
return Expression.raw('NULL')
# Weird trick: count number of microseconds in datetime, but only works on timedelta, so convert
# to a timedelta first, by subtracting 0 (epoch = 1970-01-01 00:00:00)
# Rounding can be unpredictable because of limited precision, so always truncate excess precision
microseconds = int((value - numpy.datetime64('1970', 'us')) // numpy.timedelta64(1, 'us'))
dt_value = datetime.datetime.utcfromtimestamp(microseconds / 1_000_000)
elif isinstance(value, (datetime.datetime, datetime.date)):
dt_value = value
if dt_value is None:
raise ValueError(f'Not a valid timestamp literal: {value}')
str_value = dt_value.strftime('%Y-%m-%d %H:%M:%S.%f')
return Expression.string_value(str_value)
@classmethod
def dtype_to_expression(cls, dialect: Dialect, source_dtype: str, expression: Expression) -> Expression:
if source_dtype == 'timestamp':
return expression
else:
if source_dtype not in ['string', 'date']:
raise ValueError(f'cannot convert {source_dtype} to timestamp')
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', expression)
def to_pandas_info(self) -> Optional['ToPandasInfo']:
if is_postgres(self.engine):
return ToPandasInfo('datetime64[ns]', None)
if is_bigquery(self.engine):
return ToPandasInfo('datetime64[ns, UTC]', dt_strip_timezone)
return None
def __add__(self, other) -> 'Series':
return self._arithmetic_operation(other, 'add', '({}) + ({})', other_dtypes=tuple(['timedelta']))
def __sub__(self, other) -> 'Series':
type_mapping = {
'timedelta': 'timestamp',
'timestamp': 'timedelta'
}
return self._arithmetic_operation(other, 'sub', '({}) - ({})',
other_dtypes=tuple(type_mapping.keys()),
dtype=type_mapping)
class SeriesDate(SeriesAbstractDateTime):
"""
A Series that represents the date type and its specific operations
**Database support and types**
* Postgres: utilizes the 'date' database type.
* BigQuery: utilizes the 'DATE' database type.
"""
dtype = 'date'
dtype_aliases: Tuple[DtypeOrAlias, ...] = tuple()
supported_db_dtype = {
DBDialect.POSTGRES: 'date',
DBDialect.BIGQUERY: 'DATE'
}
supported_value_types = (datetime.datetime, datetime.date, str)
@classmethod
def supported_literal_to_expression(cls, dialect: Dialect, literal: Expression) -> Expression:
return Expression.construct(f'cast({{}} as date)', literal)
@classmethod
def supported_value_to_literal(
cls,
dialect: Dialect,
value: Union[str, datetime.date],
dtype: StructuredDtype
) -> Expression:
if isinstance(value, datetime.date):
value = str(value)
# TODO: check here already that the string has the correct format
return Expression.string_value(value)
@classmethod
def dtype_to_expression(cls, dialect: Dialect, source_dtype: str, expression: Expression) -> Expression:
if source_dtype == 'date':
return expression
else:
if source_dtype not in ['string', 'timestamp']:
raise ValueError(f'cannot convert {source_dtype} to date')
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', expression)
def __add__(self, other) -> 'Series':
type_mapping = {
'timedelta': 'date' # PG returns timestamp, needs explicit cast to date
}
return self._cast_to_date_if_dtype_date(
self._arithmetic_operation(other, 'add', '({}) + ({})',
other_dtypes=tuple(type_mapping.keys()),
dtype=type_mapping)
)
def __sub__(self, other) -> 'Series':
type_mapping = {
'date': 'timedelta',
'timedelta': 'date', # PG returns timestamp, needs explicit cast to date
}
if other.dtype == 'date':
# PG does unexpected things when doing date - date. Work around that.
fmt_str = 'cast(cast({} as timestamp) - ({}) as interval)'
else:
fmt_str = '({}) - ({})'
return self._cast_to_date_if_dtype_date(
self._arithmetic_operation(other, 'sub', fmt_str,
other_dtypes=tuple(type_mapping.keys()),
dtype=type_mapping)
)
class SeriesTime(SeriesAbstractDateTime):
"""
A Series that represents the date time and its specific operations
**Database support and types**
* Postgres: utilizes the 'time without time zone' database type.
* BigQuery: utilizes the 'TIME' database type.
"""
dtype = 'time'
dtype_aliases: Tuple[DtypeOrAlias, ...] = tuple()
supported_db_dtype = {
DBDialect.POSTGRES: 'time without time zone',
DBDialect.BIGQUERY: 'TIME',
}
supported_value_types = (datetime.time, str)
@classmethod
def supported_literal_to_expression(cls, dialect: Dialect, literal: Expression) -> Expression:
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', literal)
@classmethod
def supported_value_to_literal(
cls,
dialect: Dialect,
value: Union[str, datetime.time],
dtype: StructuredDtype
) -> Expression:
value = str(value)
# TODO: check here already that the string has the correct format
return Expression.string_value(value)
@classmethod
def dtype_to_expression(cls, dialect: Dialect, source_dtype: str, expression: Expression) -> Expression:
if source_dtype == 'time':
return expression
else:
if source_dtype not in ['string', 'timestamp']:
raise ValueError(f'cannot convert {source_dtype} to time')
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', expression)
# python supports no arithmetic on Time
class SeriesTimedelta(SeriesAbstractDateTime):
"""
A Series that represents the timedelta type and its specific operations
**Database support and types**
* Postgres: utilizes the 'interval' database type.
* BigQuery: support coming soon
"""
dtype = 'timedelta'
dtype_aliases = ('interval',)
supported_db_dtype = {
DBDialect.POSTGRES: 'interval',
DBDialect.BIGQUERY: 'INTERVAL',
}
supported_value_types = (datetime.timedelta, numpy.timedelta64, str)
@classmethod
def supported_literal_to_expression(cls, dialect: Dialect, literal: Expression) -> Expression:
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', literal)
@classmethod
def supported_value_to_literal(
cls,
dialect: Dialect,
value: Union[str, numpy.timedelta64, datetime.timedelta],
dtype: StructuredDtype
) -> Expression:
# pandas.Timedelta checks already that the string has the correct format
# round it up to microseconds precision in order to avoid problems with BigQuery
# pandas by default uses nanoseconds precision
value_td = pandas.Timedelta(value).round(freq='us')
if value_td is pandas.NaT:
return Expression.construct('NULL')
# interval values in iso format are allowed in SQL (both BQ and PG)
# https://www.postgresql.org/docs/8.4/datatype-datetime.html#:~:text=interval%20values%20can%20also%20be%20written%20as%20iso%208601%20time%20intervals%2C
return Expression.string_value(value_td.isoformat())
def to_pandas_info(self) -> Optional[ToPandasInfo]:
if is_bigquery(self.engine):
return ToPandasInfo(dtype='object', function=self._parse_interval_bigquery)
return None
def _parse_interval_bigquery(self, value: Optional[Any]) -> Optional[pandas.Timedelta]:
if value is None:
return None
# BigQuery returns a MonthDayNano object
# we need to normalize months to days (1 month == 30 day period)
return pandas.Timedelta(
days=value.days + value.months * 30,
nanoseconds=value.nanoseconds,
)
@classmethod
def dtype_to_expression(cls, dialect: Dialect, source_dtype: str, expression: Expression) -> Expression:
if source_dtype == 'timedelta':
return expression
else:
if not source_dtype == 'string':
raise ValueError(f'cannot convert {source_dtype} to timedelta')
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', expression)
def _comparator_operation(self, other, comparator,
other_dtypes=('timedelta', 'string')) -> SeriesBoolean:
return super()._comparator_operation(other, comparator, other_dtypes)
def __add__(self, other) -> 'Series':
type_mapping = {
'date': 'date', # PG makes this a timestamp
'timedelta': 'timedelta',
'timestamp': 'timestamp'
}
return self._cast_to_date_if_dtype_date(
self._arithmetic_operation(other, 'add', '({}) + ({})',
other_dtypes=tuple(type_mapping.keys()),
dtype=type_mapping))
def __sub__(self, other) -> 'Series':
type_mapping = {
'timedelta': 'timedelta',
}
return self._arithmetic_operation(other, 'sub', '({}) - ({})',
other_dtypes=tuple(type_mapping.keys()),
dtype=type_mapping)
def __mul__(self, other) -> 'Series':
return self._arithmetic_operation(other, 'mul', '({}) * ({})', other_dtypes=('int64', 'float64'))
def __truediv__(self, other) -> 'Series':
return self._arithmetic_operation(other, 'div', '({}) / ({})', other_dtypes=('int64', 'float64'))
@property
def dt(self) -> TimedeltaOperation:
"""
Get access to date operations.
.. autoclass:: bach.series.series_datetime.TimedeltaOperation
:members:
"""
return TimedeltaOperation(self)
def sum(self, partition: WrappedPartition = None,
skipna: bool = True, min_count: int = None) -> 'SeriesTimedelta':
"""
:meta private:
"""
result = self._derived_agg_func(
partition=partition,
expression='sum',
skipna=skipna,
min_count=min_count
)
return result.copy_override_type(SeriesTimedelta)
def mean(self, partition: WrappedPartition = None, skipna: bool = True) -> 'SeriesTimedelta':
"""
:meta private:
"""
result = self._derived_agg_func(
partition=partition,
expression='avg',
skipna=skipna
)
result = result.copy_override_type(SeriesTimedelta)
if is_bigquery(self.engine):
result = result._remove_nano_precision_bigquery()
return result
def _remove_nano_precision_bigquery(self) -> 'SeriesTimedelta':
"""
Helper function that removes nano-precision from intervals.
"""
series = self.copy()
# https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#interval_type
_BQ_INTERVAL_FORMAT = '%d-%d %d %d:%d:%d.%06.0f'
_BQ_SUPPORTED_INTERVAL_PARTS = [
'YEAR', 'MONTH', 'DAY', 'HOUR', 'MINUTE', 'SECOND'
]
# aggregating intervals by average might generate a result with
# nano-precision, which is not supported by BigQuery TimeStamps
# therefore we need to make sure we always generate values up to
# microseconds precision
# https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp_type
all_extracted_parts_expr = [
Expression.construct(f'EXTRACT({date_part} FROM {{}})', series)
for date_part in _BQ_SUPPORTED_INTERVAL_PARTS
]
# convert nanoseconds to microseconds
all_extracted_parts_expr.append(
Expression.construct(f'EXTRACT(NANOSECOND FROM {{}}) / 1000', series)
)
format_arguments_expr = join_expressions(all_extracted_parts_expr)
# All parts will create a string with following format
# '%d-%d %d %d:%d:%d.%06.0f'
# where the first 6 digits are date parts from YEAR to SECOND
# Format specifier %06.0f will format fractional part of seconds with maximum width of 6 digits
# for example:
# nanoseconds = 1142857, converting them into microseconds is 1142.857
# when applying string formatting, the value will be rounded into 1143 (.0 precision)
# and will be left padded by 2 leading zeros: 001143 (0 flag and 6 minimum width)
# for more information:
# https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#format_string
format_expr = Expression.construct(
f'format({{}}, {{}})',
Expression.string_value(_BQ_INTERVAL_FORMAT),
format_arguments_expr,
)
return series.copy_override(
expression=self.dtype_to_expression(
self.engine, source_dtype='string', expression=format_expr,
)
)
def quantile(
self, partition: WrappedPartition = None, q: Union[float, List[float]] = 0.5,
) -> 'SeriesTimedelta':
"""
When q is a float or len(q) == 1, the resultant series index will remain
In case multiple quantiles are calculated, the resultant series index will have all calculated
quantiles as index values.
"""
from bach.quantile import calculate_quantiles
if not is_bigquery(self.engine):
return (
calculate_quantiles(series=self.copy(), partition=partition, q=q)
.copy_override_type(SeriesTimedelta)
)
# calculate quantiles based on total microseconds
# using total seconds might lose precision,
# since TIMESTAMP_SECONDS accepts only integers, therefore
# microseconds will be lost due to rounding
total_microseconds_series = (
self.dt.total_seconds / _TOTAL_SECONDS_PER_DATE_PART[DatePart.MICROSECOND]
)
total_microseconds_series = total_microseconds_series.copy_override_type(SeriesFloat64)
result = calculate_quantiles(series=total_microseconds_series, partition=partition, q=q)
# result must be a timedelta
result = result.copy_override(
expression=Expression.construct(
f"TIMESTAMP_MICROS({{}}) - CAST('1970-01-01' AS TIMESTAMP)",
result.astype('int64'),
),
name=self.name,
)
return result.copy_override_type(SeriesTimedelta)
| 39.293793 | 162 | 0.631354 | 26,940 | 0.945661 | 0 | 0 | 11,435 | 0.401397 | 0 | 0 | 10,258 | 0.360081 |
1b937dd883ff8de284f496b4a8959a68bb61e4f0 | 5,802 | py | Python | infra_validation_engine/infra_tests/components/docker.py | WLCG-Lightweight-Sites/simple_grid_infra_validation_engine | ad1eca7ffe0337f276f0ef9e0c89c80c1070139b | [
"Apache-2.0"
] | 1 | 2020-03-01T12:25:33.000Z | 2020-03-01T12:25:33.000Z | infra_validation_engine/infra_tests/components/docker.py | simple-framework/simple_grid_infra_validation_engine | ad1eca7ffe0337f276f0ef9e0c89c80c1070139b | [
"Apache-2.0"
] | 14 | 2019-11-07T14:36:16.000Z | 2020-10-01T17:04:33.000Z | infra_validation_engine/infra_tests/components/docker.py | simple-framework/simple_grid_infra_validation_engine | ad1eca7ffe0337f276f0ef9e0c89c80c1070139b | [
"Apache-2.0"
] | 1 | 2019-11-07T15:14:21.000Z | 2019-11-07T15:14:21.000Z | # coding: utf-8
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from infra_validation_engine.core import InfraTest, InfraTestType
from infra_validation_engine.utils.constants import Constants
from infra_validation_engine.core.exceptions import PackageNotFoundError, ServiceNotRunningError, \
CommandExecutionError
class DockerConstants(Constants):
DOCKER_PKG_NAME = "docker-ce"
class DockerImageNotFoundError(Exception):
""" Raised if Docker Image is not found"""
pass
class DockerContainerNotFoundError(Exception):
""" Raised if container is not present on a node """
pass
class DockerContainerNotRunningError(Exception):
""" Raised if container is present but is not running """
pass
class DockerInstallationTest(InfraTest):
"""Test if Docker is installed on the nodes"""
__metaclass__ = InfraTestType
def __init__(self, host, fqdn):
InfraTest.__init__(self,
"Docker Installation Test",
"Check if {pkg} is installed on {fqdn}".format(pkg=DockerConstants.DOCKER_PKG_NAME,
fqdn=fqdn),
host,
fqdn)
def run(self):
cmd = self.host.run("docker --version")
return cmd.rc == 0
def fail(self):
err_msg = "Package {pkg} is not installed on {fqdn}".format(pkg=DockerConstants.DOCKER_PKG_NAME, fqdn=self.fqdn)
raise PackageNotFoundError(err_msg)
class DockerServiceTest(InfraTest):
"""
Test if docker is running on a node
"""
__metaclass__ = InfraTestType
def __init__(self, host, fqdn):
InfraTest.__init__(self,
"Docker Service Test",
"Check if docker is running on {fqdn}".format(fqdn=fqdn),
host,
fqdn)
def run(self):
cmd = self.host.run("docker ps -a")
return cmd.rc == 0
def fail(self):
err_msg = "Docker is not running on {fqdn}".format(fqdn=self.fqdn)
raise ServiceNotRunningError(err_msg)
class DockerImageTest(InfraTest):
"""
Check if a given image is present on the host
"""
def __init__(self, host, fqdn, image):
InfraTest.__init__(self,
"Docker Image Test",
"Check if {image} is present on {fqdn}".format(image=image, fqdn=fqdn),
host,
fqdn)
self.image = image
def run(self):
cmd_str = 'docker image ls -q -f "reference={image}"'.format(image=self.image)
cmd = self.host.run(cmd_str)
print cmd.rc, cmd.stdout == ""
if cmd.stdout == "":
return False
self.out = cmd.stdout.split("\n")
print self.out
# stdout containers one extra line
is_single_image = len(self.out) == 2
if is_single_image:
self.out = self.out
self.message = "The Image ID for {image} on {fqdn} is {id}".format(image=self.image, fqdn=self.fqdn,
id=self.out)
else:
self.message = "Multiple docker images found for {image}".format(image=self.image)
self.warn = True
return True
def fail(self):
err_msg = "Docker Image {image} was not found on {fqdn}".format(image=self.image, fqdn=self.fqdn)
raise DockerImageNotFoundError(err_msg)
class DockerContainerStatusTest(InfraTest):
""" Tests if container is running """
def __init__(self, host, fqdn, container):
InfraTest.__init__(self,
"Docker Container Status Test",
"Check if {container} is running on {fqdn}".format(container=container, fqdn=fqdn),
host,
fqdn)
self.container = container
self.cmd_str = ""
def run(self):
cmd_str = "docker inspect -f '{{.State.Running}}' " + "{container}".format(container=self.container)
cmd = self.host.run(cmd_str)
self.rc = cmd.rc
self.err = cmd.stderr
self.out = cmd.stdout
test_status = False
if self.out.strip() == "true":
test_status = True
return test_status
def fail(self):
if self.rc == 1:
err_msg = "Container {container} could not be found on {fqdn}".format(container=self.container,
fqdn=self.fqdn)
raise DockerContainerNotFoundError(err_msg)
elif self.rc == 127:
err_msg = "Command {cmd_str} could not be executed on {fqdn}".format(cmd_str=self.cmd_str, fqdn=self.fqdn)
raise CommandExecutionError(err_msg)
elif self.rc == 0:
err_msg = "Docker container {container} is present but is not running on {fqdn}".format(
container=self.container,
fqdn=self.fqdn)
raise DockerContainerNotRunningError(err_msg)
# class DockerContainerSanityTest(InfraTest):
# """
# Executes sanity check script for a container
# """
# pass
| 34.535714 | 120 | 0.586867 | 4,834 | 0.833161 | 0 | 0 | 0 | 0 | 0 | 0 | 1,842 | 0.317477 |
1b94105f4a37a772952e690f0af3663ce002ade8 | 1,380 | py | Python | examples/htlc-pyteal-ts/assets/htlc.py | stasgga/algo_builder | eecdc8f564d244ea29c9ca42c37e7695d2c09cd3 | [
"Apache-2.0"
] | 1 | 2021-06-28T02:49:29.000Z | 2021-06-28T02:49:29.000Z | examples/htlc-pyteal-ts/assets/htlc.py | endlessor/algo-builder | bff4cbaa58742b028b40e16214fc56c327dd8eeb | [
"Apache-2.0"
] | null | null | null | examples/htlc-pyteal-ts/assets/htlc.py | endlessor/algo-builder | bff4cbaa58742b028b40e16214fc56c327dd8eeb | [
"Apache-2.0"
] | null | null | null | # Hash Time Lock Contract Example in pyTeal
# Add parent directory to path so that algobpy can be imported
import sys
sys.path.insert(0,'..')
from algobpy.parse import parse_params
from pyteal import *
def htlc(tmpl_bob, tmpl_alice, tmpl_secret, tmpl_timeout):
common_fields = And(
Txn.type_enum() == TxnType.Payment,
Txn.rekey_to() == Global.zero_address(),
Txn.close_remainder_to() == Global.zero_address(),
Txn.fee() <= Int(10000)
)
recv_cond = And(
Txn.receiver() == tmpl_alice,
Sha256(Arg(0)) == Bytes("base64", tmpl_secret)
)
esc_cond = And(
Txn.receiver() == tmpl_bob,
Txn.first_valid() > Int(tmpl_timeout)
)
return And(
common_fields,
Or(recv_cond, esc_cond)
)
if __name__ == "__main__":
params = {
"bob": "2ILRL5YU3FZ4JDQZQVXEZUYKEWF7IEIGRRCPCMI36VKSGDMAS6FHSBXZDQ",
"alice": "EDXG4GGBEHFLNX6A7FGT3F6Z3TQGIU6WVVJNOXGYLVNTLWDOCEJJ35LWJY",
"hash_image": "QzYhq9JlYbn2QdOMrhyxVlNtNjeyvyJc/I8d8VAGfGc=",
"timeout": 3001
}
# Overwrite params if sys.argv[1] is passed
if(len(sys.argv) > 1):
params = parse_params(sys.argv[1], params)
print(compileTeal(htlc(
Addr(params["bob"]),
Addr(params["alice"]),
params["hash_image"],
params["timeout"]), Mode.Signature))
| 26.037736 | 78 | 0.633333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 402 | 0.291304 |
1b941a731326acc3ecffebccafaa639df1c778bf | 66,776 | py | Python | pyboto3/sqs.py | thecraftman/pyboto3 | 653a0db2b00b06708334431da8f169d1f7c7734f | [
"MIT"
] | null | null | null | pyboto3/sqs.py | thecraftman/pyboto3 | 653a0db2b00b06708334431da8f169d1f7c7734f | [
"MIT"
] | null | null | null | pyboto3/sqs.py | thecraftman/pyboto3 | 653a0db2b00b06708334431da8f169d1f7c7734f | [
"MIT"
] | null | null | null | '''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def add_permission(QueueUrl=None, Label=None, AWSAccountIds=None, Actions=None):
    """
    Grant a specific AWS principal access to this queue, enabling queue sharing.

    When a queue is created, only its owner holds full control access rights.
    Only the owner can grant or deny permissions on the queue. See
    "Shared Queues" in the Amazon SQS Developer Guide for details on these
    permissions.

    See also: AWS API Documentation

    Example::

        response = client.add_permission(
            QueueUrl='string',
            Label='string',
            AWSAccountIds=[
                'string',
            ],
            Actions=[
                'string',
            ]
        )

    :type QueueUrl: string
    :param QueueUrl: [REQUIRED]
        The URL of the Amazon SQS queue to which permissions are added.
        Queue URLs are case-sensitive.

    :type Label: string
    :param Label: [REQUIRED]
        The unique identification of the permission you're setting (for
        example, ``AliceSendMessage``). Maximum 80 characters. Allowed
        characters include alphanumeric characters, hyphens (``-``), and
        underscores (``_``).

    :type AWSAccountIds: list
    :param AWSAccountIds: [REQUIRED]
        The AWS account number of the principal who is given permission. The
        principal must have an AWS account, but does not need to be signed up
        for Amazon SQS. For information about locating the AWS account
        identification, see "Your AWS Identifiers" in the Amazon SQS
        Developer Guide.

        (string) --

    :type Actions: list
    :param Actions: [REQUIRED]
        The action the client wants to allow for the specified principal.
        The following values are valid:

        * ``*``
        * ``ChangeMessageVisibility``
        * ``DeleteMessage``
        * ``GetQueueAttributes``
        * ``GetQueueUrl``
        * ``ReceiveMessage``
        * ``SendMessage``

        For more information about these actions, see "Understanding
        Permissions" in the Amazon SQS Developer Guide.

        Specifying ``SendMessage``, ``DeleteMessage``, or
        ``ChangeMessageVisibility`` for ``ActionName.n`` also grants
        permissions for the corresponding batch versions of those actions:
        ``SendMessageBatch``, ``DeleteMessageBatch``, and
        ``ChangeMessageVisibilityBatch``.

        (string) --
    """
    # Documentation-only stub: the real implementation lives in botocore's
    # dynamically generated service client. This module exists so IDEs can
    # offer signatures and docstrings for boto3 SQS client methods.
    pass
def can_paginate(operation_name=None):
    """
    Check whether an operation can be paginated.

    :type operation_name: string
    :param operation_name: The operation name. This is the same name as the
        method name on the client. For example, if the method name is
        ``create_foo``, and you'd normally invoke the operation as
        ``client.create_foo(**kwargs)``, then if the ``create_foo`` operation
        can be paginated, you can use the call
        ``client.get_paginator('create_foo')``.
    """
    # Documentation-only stub; the actual check is performed by the real
    # boto3/botocore client at runtime.
    pass
def change_message_visibility(QueueUrl: str = None, ReceiptHandle: str = None, VisibilityTimeout: int = None):
    """
    Changes the visibility timeout of a specified message in a queue to a new value. The maximum allowed timeout value is 12 hours. Thus, you can't extend the timeout of a message in an existing queue to more than a total visibility timeout of 12 hours. For more information, see Visibility Timeout in the Amazon SQS Developer Guide .

    For example, you have a message and with the default visibility timeout of 5 minutes. After 3 minutes, you call ChangeMessageVisibility with a timeout of 10 minutes. At that time, the timeout for the message is extended by 10 minutes beyond the time of the ChangeMessageVisibility action. This results in a total visibility timeout of 13 minutes. You can continue to call the ChangeMessageVisibility to extend the visibility timeout to a maximum of 12 hours. If you try to extend the visibility timeout beyond 12 hours, your request is rejected.

    A message is considered to be in flight after it's received from a queue by a consumer, but not yet deleted from the queue.

    For standard queues, there can be a maximum of 120,000 inflight messages per queue. If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages.

    For FIFO queues, there can be a maximum of 20,000 inflight messages per queue. If you reach this limit, Amazon SQS returns no error messages.

    See also: AWS API Documentation

    :example: response = client.change_message_visibility(
        QueueUrl='string',
        ReceiptHandle='string',
        VisibilityTimeout=123
        )
    :type QueueUrl: string
    :param QueueUrl: [REQUIRED]
        The URL of the Amazon SQS queue whose message's visibility is changed.
        Queue URLs are case-sensitive.
    :type ReceiptHandle: string
    :param ReceiptHandle: [REQUIRED]
        The receipt handle associated with the message whose visibility timeout is changed. This parameter is returned by the `` ReceiveMessage `` action.
    :type VisibilityTimeout: integer
    :param VisibilityTimeout: [REQUIRED]
        The new value for the message's visibility timeout (in seconds). Valid values: 0 to 43200 . Maximum: 12 hours.
    """
    pass
def change_message_visibility_batch(QueueUrl: str = None, Entries: list = None):
    """
    Changes the visibility timeout of multiple messages. This is a batch version of `` ChangeMessageVisibility .`` The result of the action on each message is reported individually in the response. You can send up to 10 `` ChangeMessageVisibility `` requests with each ChangeMessageVisibilityBatch action.

    See also: AWS API Documentation

    :example: response = client.change_message_visibility_batch(
        QueueUrl='string',
        Entries=[
            {
                'Id': 'string',
                'ReceiptHandle': 'string',
                'VisibilityTimeout': 123
            },
        ]
        )
    :type QueueUrl: string
    :param QueueUrl: [REQUIRED]
        The URL of the Amazon SQS queue whose messages' visibility is changed.
        Queue URLs are case-sensitive.
    :type Entries: list
    :param Entries: [REQUIRED]
        A list of receipt handles of the messages for which the visibility timeout must be changed.
        (dict) --Encloses a receipt handle and an entry id for each message in `` ChangeMessageVisibilityBatch .``
        Warning
        All of the following list parameters must be prefixed with ChangeMessageVisibilityBatchRequestEntry.n , where n is an integer value starting with 1 . For example, a parameter list for this action might look like this:
        amp;ChangeMessageVisibilityBatchRequestEntry.1.Id=change_visibility_msg_2
        amp;ChangeMessageVisibilityBatchRequestEntry.1.ReceiptHandle=replaceableYour_Receipt_Handle/replaceable
        amp;ChangeMessageVisibilityBatchRequestEntry.1.VisibilityTimeout=45
        Id (string) -- [REQUIRED]An identifier for this particular receipt handle used to communicate the result.
        Note
        The Id s of a batch request need to be unique within a request
        ReceiptHandle (string) -- [REQUIRED]A receipt handle.
        VisibilityTimeout (integer) --The new value (in seconds) for the message's visibility timeout.
    :rtype: dict
    :return: {
        'Successful': [
            {
                'Id': 'string'
            },
        ],
        'Failed': [
            {
                'Id': 'string',
                'SenderFault': True|False,
                'Code': 'string',
                'Message': 'string'
            },
        ]
        }
    """
    pass
def create_queue(QueueName: str = None, Attributes: dict = None):
    """
    Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following caveats in mind:

    To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues.

    To get the queue URL, use the `` GetQueueUrl `` action. `` GetQueueUrl `` requires only the QueueName parameter; be aware of existing queue names:

    See also: AWS API Documentation

    Examples
    The following operation creates an SQS queue named MyQueue.
    Expected Output:

    :example: response = client.create_queue(
        QueueName='string',
        Attributes={
            'string': 'string'
        }
        )
    :type QueueName: string
    :param QueueName: [REQUIRED]
        The name of the new queue. The following limits apply to this name:
        A queue name can have up to 80 characters.
        Valid values: alphanumeric characters, hyphens (- ), and underscores (_ ).
        A FIFO queue name must end with the .fifo suffix.
        Queue names are case-sensitive.
    :type Attributes: dict
    :param Attributes: A map of attributes with their corresponding values.
        The following lists the names, descriptions, and values of the special request parameters that the CreateQueue action uses:
        DelaySeconds - The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 seconds (15 minutes). The default is 0 (zero).
        MaximumMessageSize - The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) to 262,144 bytes (256 KiB). The default is 262,144 (256 KiB).
        MessageRetentionPeriod - The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer from 60 seconds (1 minute) to 1,209,600 seconds (14 days). The default is 345,600 (4 days).
        Policy - The queue's policy. A valid AWS policy. For more information about policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide .
        ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for which a `` ReceiveMessage `` action waits for a message to arrive. Valid values: An integer from 0 to 20 (seconds). The default is 0 (zero).
        RedrivePolicy - The parameters for the dead letter queue functionality of the source queue. For more information about the redrive policy and dead letter queues, see Using Amazon SQS Dead Letter Queues in the Amazon SQS Developer Guide .
        Note
        The dead letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead letter queue of a standard queue must also be a standard queue.
        VisibilityTimeout - The visibility timeout for the queue. Valid values: An integer from 0 to 43,200 (12 hours). The default is 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide .
        The following attributes apply only to server-side-encryption :
        KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms . While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs , the alias of a custom CMK can, for example, be alias/MyAlias . For more examples, see KeyId in the AWS Key Management Service API Reference .
        KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). The default is 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work? .
        The following attributes apply only to FIFO (first-in-first-out) queues :
        FifoQueue - Designates a queue as FIFO. Valid values: true , false . You can provide this attribute only during queue creation. You can't change it for an existing queue. When you set this attribute, you must also provide the MessageGroupId for your messages explicitly. For more information, see FIFO Queue Logic in the Amazon SQS Developer Guide .
        ContentBasedDeduplication - Enables content-based deduplication. Valid values: true , false . For more information, see Exactly-Once Processing in the Amazon SQS Developer Guide .
        Every message must have a unique MessageDeduplicationId ,
        You may provide a MessageDeduplicationId explicitly.
        If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).
        If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.
        If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.
        When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.
        If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId , the two messages are treated as duplicates and only one copy of the message is delivered.
        Any other valid special request parameters (such as the following) are ignored:
        ApproximateNumberOfMessages
        ApproximateNumberOfMessagesDelayed
        ApproximateNumberOfMessagesNotVisible
        CreatedTimestamp
        LastModifiedTimestamp
        QueueArn
        (string) --
        (string) --
    :rtype: dict
    :return: {
        'QueueUrl': 'string'
        }
    :returns:
        If you don't provide a value for an attribute, the queue is created with the default value for the attribute.
        If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.
    """
    pass
def delete_message(QueueUrl: str = None, ReceiptHandle: str = None):
    """
    Deletes the specified message from the specified queue. You specify the message by using the message's receipt handle and not the MessageId you receive when you send the message. Even if the message is locked by another reader due to the visibility timeout setting, it is still deleted from the queue. If you leave a message in the queue for longer than the queue's configured retention period, Amazon SQS automatically deletes the message.

    See also: AWS API Documentation

    :example: response = client.delete_message(
        QueueUrl='string',
        ReceiptHandle='string'
        )
    :type QueueUrl: string
    :param QueueUrl: [REQUIRED]
        The URL of the Amazon SQS queue from which messages are deleted.
        Queue URLs are case-sensitive.
    :type ReceiptHandle: string
    :param ReceiptHandle: [REQUIRED]
        The receipt handle associated with the message to delete.
    """
    pass
def delete_message_batch(QueueUrl: str = None, Entries: list = None):
    """
    Deletes up to ten messages from the specified queue. This is a batch version of `` DeleteMessage .`` The result of the action on each message is reported individually in the response.

    See also: AWS API Documentation

    :example: response = client.delete_message_batch(
        QueueUrl='string',
        Entries=[
            {
                'Id': 'string',
                'ReceiptHandle': 'string'
            },
        ]
        )
    :type QueueUrl: string
    :param QueueUrl: [REQUIRED]
        The URL of the Amazon SQS queue from which messages are deleted.
        Queue URLs are case-sensitive.
    :type Entries: list
    :param Entries: [REQUIRED]
        A list of receipt handles for the messages to be deleted.
        (dict) --Encloses a receipt handle and an identifier for it.
        Id (string) -- [REQUIRED]An identifier for this particular receipt handle. This is used to communicate the result.
        Note
        The Id s of a batch request need to be unique within a request
        ReceiptHandle (string) -- [REQUIRED]A receipt handle.
    :rtype: dict
    :return: {
        'Successful': [
            {
                'Id': 'string'
            },
        ],
        'Failed': [
            {
                'Id': 'string',
                'SenderFault': True|False,
                'Code': 'string',
                'Message': 'string'
            },
        ]
        }
    """
    pass
def delete_queue(QueueUrl: str = None):
    """
    Deletes the queue specified by the QueueUrl , even if the queue is empty. If the specified queue doesn't exist, Amazon SQS returns a successful response.

    When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a `` SendMessage `` request might succeed, but after 60 seconds the queue and the message you sent no longer exist.

    When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

    See also: AWS API Documentation

    :example: response = client.delete_queue(
        QueueUrl='string'
        )
    :type QueueUrl: string
    :param QueueUrl: [REQUIRED]
        The URL of the Amazon SQS queue to delete.
        Queue URLs are case-sensitive.
    """
    pass
def generate_presigned_url(ClientMethod: str = None, Params: dict = None, ExpiresIn: int = None, HttpMethod: str = None):
    """
    Generate a presigned url given a client, its method, and arguments

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for
    :type Params: dict
    :param Params: The parameters normally passed to
        ClientMethod.
    :type ExpiresIn: int
    :param ExpiresIn: The number of seconds the presigned url is valid
        for. By default it expires in an hour (3600 seconds)
    :type HttpMethod: string
    :param HttpMethod: The http method to use on the generated url. By
        default, the http method is whatever is used in the method's model.
    :rtype: string
    """
    pass
def get_paginator(operation_name: str = None):
    """
    Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name. This is the same name
        as the method name on the client. For example, if the
        method name is create_foo, and you'd normally invoke the
        operation as client.create_foo(**kwargs), if the
        create_foo operation can be paginated, you can use the
        call client.get_paginator('create_foo').
    :rtype: L{botocore.paginate.Paginator}
    """
    pass
def get_queue_attributes(QueueUrl: str = None, AttributeNames: list = None):
    """
    Gets attributes for the specified queue.

    See also: AWS API Documentation

    :example: response = client.get_queue_attributes(
        QueueUrl='string',
        AttributeNames=[
            'All'|'Policy'|'VisibilityTimeout'|'MaximumMessageSize'|'MessageRetentionPeriod'|'ApproximateNumberOfMessages'|'ApproximateNumberOfMessagesNotVisible'|'CreatedTimestamp'|'LastModifiedTimestamp'|'QueueArn'|'ApproximateNumberOfMessagesDelayed'|'DelaySeconds'|'ReceiveMessageWaitTimeSeconds'|'RedrivePolicy'|'FifoQueue'|'ContentBasedDeduplication'|'KmsMasterKeyId'|'KmsDataKeyReusePeriodSeconds',
        ]
        )
    :type QueueUrl: string
    :param QueueUrl: [REQUIRED]
        The URL of the Amazon SQS queue whose attribute information is retrieved.
        Queue URLs are case-sensitive.
    :type AttributeNames: list
    :param AttributeNames: A list of attributes for which to retrieve information.
        Note
        In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.
        The following attributes are supported:
        All - Returns all values.
        ApproximateNumberOfMessages - Returns the approximate number of visible messages in a queue. For more information, see Resources Required to Process Messages in the Amazon SQS Developer Guide .
        ApproximateNumberOfMessagesDelayed - Returns the approximate number of messages that are waiting to be added to the queue.
        ApproximateNumberOfMessagesNotVisible - Returns the approximate number of messages that have not timed-out and aren't deleted. For more information, see Resources Required to Process Messages in the Amazon SQS Developer Guide .
        CreatedTimestamp - Returns the time when the queue was created in seconds (epoch time ).
        DelaySeconds - Returns the default delay on the queue in seconds.
        LastModifiedTimestamp - Returns the time when the queue was last changed in seconds (epoch time ).
        MaximumMessageSize - Returns the limit of how many bytes a message can contain before Amazon SQS rejects it.
        MessageRetentionPeriod - Returns the length of time, in seconds, for which Amazon SQS retains a message.
        Policy - Returns the policy of the queue.
        QueueArn - Returns the Amazon resource name (ARN) of the queue.
        ReceiveMessageWaitTimeSeconds - Returns the length of time, in seconds, for which the ReceiveMessage action waits for a message to arrive.
        RedrivePolicy - Returns the parameters for dead letter queue functionality of the source queue. For more information about the redrive policy and dead letter queues, see Using Amazon SQS Dead Letter Queues in the Amazon SQS Developer Guide .
        VisibilityTimeout - Returns the visibility timeout for the queue. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide .
        The following attributes apply only to server-side-encryption :
        KmsMasterKeyId - Returns the ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms .
        KmsDataKeyReusePeriodSeconds - Returns the length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again.
        The following attributes apply only to FIFO (first-in-first-out) queues :
        FifoQueue - Returns whether the queue is FIFO. For more information, see FIFO Queue Logic in the Amazon SQS Developer Guide .
        Note
        To determine whether a queue is FIFO , you can check whether QueueName ends with the .fifo suffix.
        ContentBasedDeduplication - Returns whether content-based deduplication is enabled for the queue. For more information, see Exactly-Once Processing in the Amazon SQS Developer Guide .
        (string) --
    :rtype: dict
    :return: {
        'Attributes': {
            'string': 'string'
        }
        }
    :returns:
        (string) --
        (string) --
    """
    pass
def get_queue_url(QueueName: str = None, QueueOwnerAWSAccountId: str = None):
    """
    Returns the URL of an existing queue. This action provides a simple way to retrieve the URL of an Amazon SQS queue.

    To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. The queue's owner must grant you permission to access the queue. For more information about shared queue access, see `` AddPermission `` or see Shared Queues in the Amazon SQS Developer Guide .

    See also: AWS API Documentation

    Examples
    The following example retrieves the queue ARN.
    Expected Output:

    :example: response = client.get_queue_url(
        QueueName='string',
        QueueOwnerAWSAccountId='string'
        )
    :type QueueName: string
    :param QueueName: [REQUIRED]
        The name of the queue whose URL must be fetched. Maximum 80 characters. Valid values: alphanumeric characters, hyphens (- ), and underscores (_ ).
        Queue names are case-sensitive.
    :type QueueOwnerAWSAccountId: string
    :param QueueOwnerAWSAccountId: The AWS account ID of the account that created the queue.
    :rtype: dict
    :return: {
        'QueueUrl': 'string'
        }
    """
    pass
def get_waiter():
    """
    Return a waiter object for this client.

    NOTE(review): this generated stub takes no arguments and had an empty
    docstring; real boto3 clients expose ``get_waiter(waiter_name)`` --
    confirm against the botocore documentation before relying on this
    signature.
    """
    pass
def list_dead_letter_source_queues(QueueUrl: str = None):
    """
    Returns a list of your queues that have the RedrivePolicy queue attribute configured with a dead letter queue.

    For more information about using dead letter queues, see Using Amazon SQS Dead Letter Queues in the Amazon SQS Developer Guide .

    See also: AWS API Documentation

    :example: response = client.list_dead_letter_source_queues(
        QueueUrl='string'
        )
    :type QueueUrl: string
    :param QueueUrl: [REQUIRED]
        The URL of a dead letter queue.
        Queue URLs are case-sensitive.
    :rtype: dict
    :return: {
        'queueUrls': [
            'string',
        ]
        }
    """
    pass
def list_queues(QueueNamePrefix: str = None):
    """
    Returns a list of your queues. The maximum number of queues that can be returned is 1,000. If you specify a value for the optional QueueNamePrefix parameter, only queues with a name that begins with the specified value are returned.

    See also: AWS API Documentation

    :example: response = client.list_queues(
        QueueNamePrefix='string'
        )
    :type QueueNamePrefix: string
    :param QueueNamePrefix: A string to use for filtering the list results. Only those queues whose name begins with the specified string are returned.
        Queue names are case-sensitive.
    :rtype: dict
    :return: {
        'QueueUrls': [
            'string',
        ]
        }
    """
    pass
def purge_queue(QueueUrl: str = None):
    """
    Deletes the messages in a queue specified by the QueueURL parameter.

    When you purge a queue, the message deletion process takes up to 60 seconds. All messages sent to the queue before calling the PurgeQueue action are deleted. Messages sent to the queue while it is being purged might be deleted. While the queue is being purged, messages sent to the queue before PurgeQueue is called might be received, but are deleted within the next minute.

    See also: AWS API Documentation

    :example: response = client.purge_queue(
        QueueUrl='string'
        )
    :type QueueUrl: string
    :param QueueUrl: [REQUIRED]
        The URL of the queue from which the PurgeQueue action deletes messages.
        Queue URLs are case-sensitive.
    """
    pass
def receive_message(QueueUrl: str = None, AttributeNames: list = None, MessageAttributeNames: list = None, MaxNumberOfMessages: int = None, VisibilityTimeout: int = None, WaitTimeSeconds: int = None, ReceiveRequestAttemptId: str = None):
    """
    Retrieves one or more messages (up to 10), from the specified queue. Using the WaitTimeSeconds parameter enables long-poll support. For more information, see Amazon SQS Long Polling in the Amazon SQS Developer Guide .

    Short poll is the default behavior where a weighted random set of machines is sampled on a ReceiveMessage call. Thus, only the messages on the sampled machines are returned. If the number of messages in the queue is small (fewer than 1,000), you most likely get fewer messages than you requested per ReceiveMessage call. If the number of messages in the queue is extremely small, you might not receive any messages in a particular ReceiveMessage response. If this happens, repeat the request.

    For each message returned, the response includes the following:

    The receipt handle is the identifier you must provide when deleting the message. For more information, see Queue and Message Identifiers in the Amazon SQS Developer Guide .

    You can provide the VisibilityTimeout parameter in your request. The parameter is applied to the messages that Amazon SQS returns in the response. If you don't include the parameter, the overall visibility timeout for the queue is used for the returned messages. For more information, see Visibility Timeout in the Amazon SQS Developer Guide .

    A message that isn't deleted or a message whose visibility isn't extended before the visibility timeout expires counts as a failed receive. Depending on the configuration of the queue, the message might be sent to the dead letter queue.

    See also: AWS API Documentation

    :example: response = client.receive_message(
        QueueUrl='string',
        AttributeNames=[
            'All'|'Policy'|'VisibilityTimeout'|'MaximumMessageSize'|'MessageRetentionPeriod'|'ApproximateNumberOfMessages'|'ApproximateNumberOfMessagesNotVisible'|'CreatedTimestamp'|'LastModifiedTimestamp'|'QueueArn'|'ApproximateNumberOfMessagesDelayed'|'DelaySeconds'|'ReceiveMessageWaitTimeSeconds'|'RedrivePolicy'|'FifoQueue'|'ContentBasedDeduplication'|'KmsMasterKeyId'|'KmsDataKeyReusePeriodSeconds',
        ],
        MessageAttributeNames=[
            'string',
        ],
        MaxNumberOfMessages=123,
        VisibilityTimeout=123,
        WaitTimeSeconds=123,
        ReceiveRequestAttemptId='string'
        )
    :type QueueUrl: string
    :param QueueUrl: [REQUIRED]
        The URL of the Amazon SQS queue from which messages are received.
        Queue URLs are case-sensitive.
    :type AttributeNames: list
    :param AttributeNames: A list of attributes that need to be returned along with each message. These attributes include:
        All - Returns all values.
        ApproximateFirstReceiveTimestamp - Returns the time the message was first received from the queue (epoch time in milliseconds).
        ApproximateReceiveCount - Returns the number of times a message has been received from the queue but not deleted.
        SenderId
        For an IAM user, returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R .
        For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456 .
        SentTimestamp - Returns the time the message was sent to the queue (epoch time in milliseconds).
        MessageDeduplicationId - Returns the value provided by the sender that calls the `` SendMessage `` action.
        MessageGroupId - Returns the value provided by the sender that calls the `` SendMessage `` action. Messages with the same MessageGroupId are returned in sequence.
        SequenceNumber - Returns the value provided by Amazon SQS.
        Any other valid special request parameters (such as the following) are ignored:
        ApproximateNumberOfMessages
        ApproximateNumberOfMessagesDelayed
        ApproximateNumberOfMessagesNotVisible
        CreatedTimestamp
        ContentBasedDeduplication
        DelaySeconds
        FifoQueue
        LastModifiedTimestamp
        MaximumMessageSize
        MessageRetentionPeriod
        Policy
        QueueArn ,
        ReceiveMessageWaitTimeSeconds
        RedrivePolicy
        VisibilityTimeout
        (string) --
    :type MessageAttributeNames: list
    :param MessageAttributeNames: The name of the message attribute, where N is the index.
        The name can contain alphanumeric characters and the underscore (_ ), hyphen (- ), and period (. ).
        The name is case-sensitive and must be unique among all attribute names for the message.
        The name must not start with AWS-reserved prefixes such as AWS. or Amazon. (or any casing variants).
        The name must not start or end with a period (. ), and it should not have periods in succession (.. ).
        The name can be up to 256 characters long.
        When using ReceiveMessage , you can send a list of attribute names to receive, or you can return all of the attributes by specifying All or .* in your request. You can also use all message attributes starting with a prefix, for example bar.* .
        (string) --
    :type MaxNumberOfMessages: integer
    :param MaxNumberOfMessages: The maximum number of messages to return. Amazon SQS never returns more messages than this value (however, fewer messages might be returned). Valid values are 1 to 10. Default is 1.
    :type VisibilityTimeout: integer
    :param VisibilityTimeout: The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being retrieved by a ReceiveMessage request.
    :type WaitTimeSeconds: integer
    :param WaitTimeSeconds: The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds .
    :type ReceiveRequestAttemptId: string
    :param ReceiveRequestAttemptId: This parameter applies only to FIFO (first-in-first-out) queues.
        The token used for deduplication of ReceiveMessage calls. If a networking issue occurs after a ReceiveMessage action, and instead of a response you receive a generic error, you can retry the same action with an identical ReceiveRequestAttemptId to retrieve the same set of messages, even if their visibility timeout has not yet expired.
        You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage action.
        When you set FifoQueue , a caller of the ReceiveMessage action can provide a ReceiveRequestAttemptId explicitly.
        If a caller of the ReceiveMessage action doesn't provide a ReceiveRequestAttemptId , Amazon SQS generates a ReceiveRequestAttemptId .
        You can retry the ReceiveMessage action with the same ReceiveRequestAttemptId if none of the messages have been modified (deleted or had their visibility changes).
        During a visibility timeout, subsequent calls with the same ReceiveRequestAttemptId return the same messages and receipt handles. If a retry occurs within the deduplication interval, it resets the visibility timeout. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide .
        Warning
        If a caller of the ReceiveMessage action is still processing messages when the visibility timeout expires and messages become visible, another worker reading from the same queue can receive the same messages and therefore process duplicates. Also, if a reader whose message processing time is longer than the visibility timeout tries to delete the processed messages, the action fails with an error. To mitigate this effect, ensure that your application observes a safe threshold before the visibility timeout expires and extend the visibility timeout as necessary.
        While messages with a particular MessageGroupId are invisible, no more messages belonging to the same MessageGroupId are returned until the visibility timeout expires. You can still receive messages with another MessageGroupId as long as it is also visible.
        If a caller of ReceiveMessage can't track the ReceiveRequestAttemptId , no retries work until the original visibility timeout expires. As a result, delays might occur but the messages in the queue remain in a strict order.
        The length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId can contain alphanumeric characters (a-z , A-Z , 0-9 ) and punctuation (!"#$%'()*+,-./:;=?@[\]^_`{|}~ ).
        For best practices of using ReceiveRequestAttemptId , see Using the ReceiveRequestAttemptId Request Parameter in the Amazon Simple Queue Service Developer Guide .
    :rtype: dict
    :return: {
        'Messages': [
            {
                'MessageId': 'string',
                'ReceiptHandle': 'string',
                'MD5OfBody': 'string',
                'Body': 'string',
                'Attributes': {
                    'string': 'string'
                },
                'MD5OfMessageAttributes': 'string',
                'MessageAttributes': {
                    'string': {
                        'StringValue': 'string',
                        'BinaryValue': b'bytes',
                        'StringListValues': [
                            'string',
                        ],
                        'BinaryListValues': [
                            b'bytes',
                        ],
                        'DataType': 'string'
                    }
                }
            },
        ]
        }
    :returns:
        QueueUrl (string) -- [REQUIRED]
        The URL of the Amazon SQS queue from which messages are received.
        Queue URLs are case-sensitive.
        AttributeNames (list) -- A list of attributes that need to be returned along with each message. These attributes include:
        All - Returns all values.
        ApproximateFirstReceiveTimestamp - Returns the time the message was first received from the queue (epoch time in milliseconds).
        ApproximateReceiveCount - Returns the number of times a message has been received from the queue but not deleted.
        SenderId
        For an IAM user, returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R .
        For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456 .
        SentTimestamp - Returns the time the message was sent to the queue (epoch time in milliseconds).
        MessageDeduplicationId - Returns the value provided by the sender that calls the `` SendMessage `` action.
        MessageGroupId - Returns the value provided by the sender that calls the `` SendMessage `` action. Messages with the same MessageGroupId are returned in sequence.
        SequenceNumber - Returns the value provided by Amazon SQS.
        Any other valid special request parameters (such as the following) are ignored:
        ApproximateNumberOfMessages
        ApproximateNumberOfMessagesDelayed
        ApproximateNumberOfMessagesNotVisible
        CreatedTimestamp
        ContentBasedDeduplication
        DelaySeconds
        FifoQueue
        LastModifiedTimestamp
        MaximumMessageSize
        MessageRetentionPeriod
        Policy
        QueueArn ,
        ReceiveMessageWaitTimeSeconds
        RedrivePolicy
        VisibilityTimeout
        (string) --
        MessageAttributeNames (list) -- The name of the message attribute, where N is the index.
        The name can contain alphanumeric characters and the underscore (_ ), hyphen (- ), and period (. ).
        The name is case-sensitive and must be unique among all attribute names for the message.
        The name must not start with AWS-reserved prefixes such as AWS. or Amazon. (or any casing variants).
        The name must not start or end with a period (. ), and it should not have periods in succession (.. ).
        The name can be up to 256 characters long.
        When using ReceiveMessage , you can send a list of attribute names to receive, or you can return all of the attributes by specifying All or .* in your request. You can also use all message attributes starting with a prefix, for example bar.* .
        (string) --
        MaxNumberOfMessages (integer) -- The maximum number of messages to return. Amazon SQS never returns more messages than this value (however, fewer messages might be returned). Valid values are 1 to 10. Default is 1.
        VisibilityTimeout (integer) -- The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being retrieved by a ReceiveMessage request.
        WaitTimeSeconds (integer) -- The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds .
        ReceiveRequestAttemptId (string) -- This parameter applies only to FIFO (first-in-first-out) queues.
        The token used for deduplication of ReceiveMessage calls. If a networking issue occurs after a ReceiveMessage action, and instead of a response you receive a generic error, you can retry the same action with an identical ReceiveRequestAttemptId to retrieve the same set of messages, even if their visibility timeout has not yet expired.
        You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage action.
        When you set FifoQueue , a caller of the ReceiveMessage action can provide a ReceiveRequestAttemptId explicitly.
        If a caller of the ReceiveMessage action doesn't provide a ReceiveRequestAttemptId , Amazon SQS generates a ReceiveRequestAttemptId .
        You can retry the ReceiveMessage action with the same ReceiveRequestAttemptId if none of the messages have been modified (deleted or had their visibility changes).
        During a visibility timeout, subsequent calls with the same ReceiveRequestAttemptId return the same messages and receipt handles. If a retry occurs within the deduplication interval, it resets the visibility timeout. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide .
        Warning
        If a caller of the ReceiveMessage action is still processing messages when the visibility timeout expires and messages become visible, another worker reading from the same queue can receive the same messages and therefore process duplicates. Also, if a reader whose message processing time is longer than the visibility timeout tries to delete the processed messages, the action fails with an error. To mitigate this effect, ensure that your application observes a safe threshold before the visibility timeout expires and extend the visibility timeout as necessary.
        While messages with a particular MessageGroupId are invisible, no more messages belonging to the same MessageGroupId are returned until the visibility timeout expires. You can still receive messages with another MessageGroupId as long as it is also visible.
        If a caller of ReceiveMessage can't track the ReceiveRequestAttemptId , no retries work until the original visibility timeout expires. As a result, delays might occur but the messages in the queue remain in a strict order.
        The length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId can contain alphanumeric characters (a-z , A-Z , 0-9 ) and punctuation (!"#$%'()*+,-./:;=?@[\]^_`{|}~ ).
        For best practices of using ReceiveRequestAttemptId , see Using the ReceiveRequestAttemptId Request Parameter in the Amazon Simple Queue Service Developer Guide .
    """
    pass
def remove_permission(QueueUrl=None, Label=None):
    """Revoke the permission identified by *Label* from the queue's policy.

    Only the owner of the queue can remove permissions.
    See also: AWS API Documentation.

    :example: response = client.remove_permission(
                  QueueUrl='string',
                  Label='string'
              )
    :type QueueUrl: string
    :param QueueUrl: [REQUIRED] The URL of the Amazon SQS queue from which
        permissions are removed.  Queue URLs are case-sensitive.
    :type Label: string
    :param Label: [REQUIRED] The identification of the permission to remove;
        this is the label that was added using the ``AddPermission`` action.
    """
    # Documentation stub: the body is intentionally empty (`pass`).
    pass
def send_message(QueueUrl=None, MessageBody=None, DelaySeconds=None, MessageAttributes=None, MessageDeduplicationId=None, MessageGroupId=None):
    """Deliver a message to the specified queue.

    See also: AWS API Documentation.

    :example: response = client.send_message(
                  QueueUrl='string',
                  MessageBody='string',
                  DelaySeconds=123,
                  MessageAttributes={
                      'string': {
                          'StringValue': 'string',
                          'BinaryValue': b'bytes',
                          'StringListValues': ['string'],
                          'BinaryListValues': [b'bytes'],
                          'DataType': 'string'
                      }
                  },
                  MessageDeduplicationId='string',
                  MessageGroupId='string'
              )
    :type QueueUrl: string
    :param QueueUrl: [REQUIRED] The URL of the Amazon SQS queue to which a
        message is sent.  Queue URLs are case-sensitive.
    :type MessageBody: string
    :param MessageBody: [REQUIRED] The message to send; the maximum string
        size is 256 KB.  A message can include only XML, JSON, and
        unformatted text, restricted to the Unicode characters allowed by
        the W3C specification:
        #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF.
        Any other characters are rejected.
    :type DelaySeconds: integer
    :param DelaySeconds: Per-message delivery delay in seconds.  Valid
        values: 0 to 900 (15 minutes).  If omitted, the queue's default
        delay applies.  FIFO queues only support a queue-level delay, not a
        per-message one.
    :type MessageAttributes: dict
    :param MessageAttributes: Each attribute consists of a Name, Type, and
        Value.  ``DataType`` is required and must be String, Number, or
        Binary (use StringValue for Number).  StringListValues and
        BinaryListValues are reserved for future use.  All attribute parts
        count toward the 256 KB (262,144-byte) message size limit.
    :type MessageDeduplicationId: string
    :param MessageDeduplicationId: FIFO (first-in-first-out) queues only.
        Token used for deduplication: messages sent with the same id within
        the 5-minute deduplication interval are accepted but not delivered
        again.  If omitted and the queue has ContentBasedDeduplication
        enabled, a SHA-256 hash of the message body (not the attributes) is
        used; if omitted without ContentBasedDeduplication, the action
        fails.  An explicit id overrides the generated one.  Up to 128
        characters: alphanumeric (a-z, A-Z, 0-9) and punctuation
        (!'#$%'()*+,-./:;=?@[\]^_`{|}~).
    :type MessageGroupId: string
    :param MessageGroupId: FIFO queues only, and required for them (it
        can't be used with Standard queues).  Messages in the same group
        are processed in FIFO order; messages in different groups may be
        processed out of order, so multiple groups interleave ordered
        streams within one queue.  Up to 128 characters: alphanumeric and
        punctuation (!'#$%'()*+,-./:;=?@[\]^_`{|}~).
    :rtype: dict
    :return: {
        'MD5OfMessageBody': 'string',
        'MD5OfMessageAttributes': 'string',
        'MessageId': 'string',
        'SequenceNumber': 'string'
    }
    """
    # Documentation stub: the body is intentionally empty (`pass`).
    pass
def send_message_batch(QueueUrl=None, Entries=None):
    """Deliver up to ten messages to the specified queue in one request.

    Batch version of ``SendMessage``.  For a FIFO queue, messages within a
    single batch are enqueued in the order they are sent.  The result of
    each message is reported individually, so check the response for batch
    errors even when the call returns HTTP status 200.  Both the maximum
    individual message size and the maximum total payload (sum of all
    batched message lengths) are 256 KB (262,144 bytes).  Entries without a
    ``DelaySeconds`` value inherit the queue's default.
    See also: AWS API Documentation.

    :example: response = client.send_message_batch(
                  QueueUrl='string',
                  Entries=[
                      {
                          'Id': 'string',
                          'MessageBody': 'string',
                          'DelaySeconds': 123,
                          'MessageAttributes': {
                              'string': {
                                  'StringValue': 'string',
                                  'BinaryValue': b'bytes',
                                  'StringListValues': ['string'],
                                  'BinaryListValues': [b'bytes'],
                                  'DataType': 'string'
                              }
                          },
                          'MessageDeduplicationId': 'string',
                          'MessageGroupId': 'string'
                      },
                  ]
              )
    :type QueueUrl: string
    :param QueueUrl: [REQUIRED] The URL of the Amazon SQS queue to which
        batched messages are sent.  Queue URLs are case-sensitive.
    :type Entries: list
    :param Entries: [REQUIRED] A list of ``SendMessageBatchRequestEntry``
        items.  Each entry requires an ``Id`` (unique within the batch,
        used to correlate results) and a ``MessageBody``.  The optional
        ``DelaySeconds`` (0-900; queue-level only for FIFO queues),
        ``MessageAttributes`` (``DataType`` required: String, Number, or
        Binary), ``MessageDeduplicationId`` and ``MessageGroupId`` (both
        FIFO-only; MessageGroupId is required for FIFO queues and invalid
        for Standard queues; both up to 128 alphanumeric/punctuation
        characters) behave exactly as described for ``send_message``.
    :rtype: dict
    :return: {
        'Successful': [
            {
                'Id': 'string',
                'MessageId': 'string',
                'MD5OfMessageBody': 'string',
                'MD5OfMessageAttributes': 'string',
                'SequenceNumber': 'string'
            },
        ],
        'Failed': [
            {
                'Id': 'string',
                'SenderFault': True|False,
                'Code': 'string',
                'Message': 'string'
            },
        ]
    }
    """
    # Documentation stub: the body is intentionally empty (`pass`).
    pass
def set_queue_attributes(QueueUrl=None, Attributes=None):
    """Set the value of one or more queue attributes.

    Changes can take up to 60 seconds to propagate through Amazon SQS for
    most attributes, and up to 15 minutes for ``MessageRetentionPeriod``.
    See also: AWS API Documentation.

    :example: response = client.set_queue_attributes(
                  QueueUrl='string',
                  Attributes={
                      'string': 'string'
                  }
              )
    :type QueueUrl: string
    :param QueueUrl: [REQUIRED] The URL of the Amazon SQS queue whose
        attributes are set.  Queue URLs are case-sensitive.
    :type Attributes: dict
    :param Attributes: [REQUIRED] A map of attributes to set.  Recognized
        special request parameters:

        * DelaySeconds - delivery delay for all messages, 0-900 seconds
          (default 0).
        * MaximumMessageSize - rejection limit in bytes, 1,024 (1 KiB) to
          262,144 (256 KiB, the default).
        * MessageRetentionPeriod - retention in seconds, 60 (1 minute) to
          1,209,600 (14 days); default 345,600 (4 days).
        * Policy - a valid AWS IAM policy document for the queue.
        * ReceiveMessageWaitTimeSeconds - how long ``ReceiveMessage`` waits
          for a message, 0-20 seconds (default 0).
        * RedrivePolicy - dead letter queue parameters; the dead letter
          queue must be the same queue type (FIFO/standard) as the source.
        * VisibilityTimeout - 0 to 43,200 seconds (12 hours); default 30.

        Server-side encryption only:

        * KmsMasterKeyId - ID or alias of an AWS-managed or custom CMK.
        * KmsDataKeyReusePeriodSeconds - data-key reuse window, 60 seconds
          to 86,400 seconds (24 hours); default 300 (5 minutes).  Shorter
          periods are more secure but incur more KMS calls.

        FIFO (first-in-first-out) queues only:

        * ContentBasedDeduplication - enables content-based deduplication:
          when set, a SHA-256 hash of the message body substitutes for a
          missing MessageDeduplicationId, and an explicit id overrides the
          generated one; identical content within the deduplication
          interval is delivered only once.

        Read-only attributes (ApproximateNumberOfMessages,
        ApproximateNumberOfMessagesDelayed,
        ApproximateNumberOfMessagesNotVisible, CreatedTimestamp,
        LastModifiedTimestamp, QueueArn) are ignored if supplied.
    """
    # Documentation stub: the body is intentionally empty (`pass`).
    pass
| 59.996406 | 577 | 0.694561 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65,098 | 0.974871 |
1b959493f163006782c0b22480f9fb53e207608f | 883 | py | Python | disjoint_set.py | sciatti/mazesolving | 5b7fb11e52e57112bca7d8051d84bd3c6c01fe16 | [
"Unlicense"
] | null | null | null | disjoint_set.py | sciatti/mazesolving | 5b7fb11e52e57112bca7d8051d84bd3c6c01fe16 | [
"Unlicense"
] | null | null | null | disjoint_set.py | sciatti/mazesolving | 5b7fb11e52e57112bca7d8051d84bd3c6c01fe16 | [
"Unlicense"
] | null | null | null | import numpy as np
class disjoint_set:
    """Disjoint-set (union-find) structure over a fixed number of cells,
    backed by a numpy parent array.

    A parent value of -1 marks the root of a set.  `find` uses path
    compression, so repeated queries flatten the trees toward O(1) lookups.
    """
    def __init__(self, numCells, row_size):
        # row_size is stored for callers that map 1-D cell indices back to a
        # grid; it is not used by the union-find operations themselves.
        self.rowSize = row_size
        self.numCells = numCells
        # parents[i] == -1  <=>  cell i is the root of its set
        self.parents = np.full(self.numCells, -1)
    #Returns the highest ancestor index of cell passed to it and updates parents of things in set
    def find(self, cell_index):
        # Iterative two-pass path compression.  The original recursive
        # version could raise RecursionError on long parent chains (easy to
        # hit on large mazes); iterating removes that limit while keeping
        # the same result and the same compression effect.
        root = cell_index
        while self.parents[root] != -1:
            root = self.parents[root]
        # Second pass: re-point every node on the walked path at the root.
        node = cell_index
        while node != root:
            next_node = self.parents[node]
            self.parents[node] = root
            node = next_node
        return root
    #returns True if items from distinct sets, false if the items were in the same set
    def union(self, cell_indexA, cell_indexB):
        ancestorA = self.find(cell_indexA)
        ancestorB = self.find(cell_indexB)
        if ancestorA == ancestorB:
            return False
        self.parents[ancestorB] = ancestorA
        return True
| 35.32 | 97 | 0.643262 | 862 | 0.976217 | 0 | 0 | 0 | 0 | 0 | 0 | 175 | 0.198188 |
1b986bdba6c54856abc4ac0745d2acf81a33c07d | 1,356 | py | Python | example/schema/tests/unittest/toys.py | donghak-shin/dp-tornado | 095bb293661af35cce5f917d8a2228d273489496 | [
"MIT"
] | 18 | 2015-04-07T14:28:39.000Z | 2020-02-08T14:03:38.000Z | example/schema/tests/unittest/toys.py | donghak-shin/dp-tornado | 095bb293661af35cce5f917d8a2228d273489496 | [
"MIT"
] | 7 | 2016-10-05T05:14:06.000Z | 2021-05-20T02:07:22.000Z | example/schema/tests/unittest/toys.py | donghak-shin/dp-tornado | 095bb293661af35cce5f917d8a2228d273489496 | [
"MIT"
] | 11 | 2015-12-15T09:49:39.000Z | 2021-09-06T18:38:21.000Z | # -*- coding: utf-8 -*-
from dp_tornado.engine.schema import Table as dpTable
from dp_tornado.engine.schema import Schema as dpSchema
from dp_tornado.engine.schema import Attribute as dpAttribute
class ToysSchema(dpTable):
    """dp_tornado table schema for the `toys` table.

    Declares the table engine/charset, five columns, two indexes, and two
    dummy rows used by the unit tests.
    """
    # Physical table settings.
    __table_name__ = 'toys'
    __engine__ = 'InnoDB'
    __charset__ = 'euckr'
    # Column definitions.  NOTE(review): the short keyword flags presumably
    # map to MySQL column options -- ai=AUTO_INCREMENT, pk=PRIMARY KEY,
    # nn=NOT NULL, un=UNSIGNED, uq=UNIQUE, zf=ZEROFILL -- confirm against
    # the dp_tornado schema documentation.
    toy_id = dpAttribute.field(dpAttribute.DataType.BIGINT, ai=True, pk=True, nn=True, un=True, comment='Toy ID')
    # `name='toy_code'` overrides the attribute name, so the physical column
    # is `toy_code` even though the Python attribute is `toy_cd`.
    toy_cd = dpAttribute.field(dpAttribute.DataType.BIGINT(20), uq=True, nn=True, zf=True, un=True, name='toy_code', comment='Toy Code')
    toy_name = dpAttribute.field(dpAttribute.DataType.VARCHAR(128), nn=True, comment='Toy Name')
    toy_summary = dpAttribute.field(dpAttribute.DataType.TEXT, nn=True, comment='Toy Summary')
    toy_description = dpAttribute.field(dpAttribute.DataType.LONGTEXT, nn=True, comment='Toy Description')
    # Index definitions: primary key on toy_id, secondary index on toy_name.
    primary_key = dpAttribute.index(dpAttribute.IndexType.PRIMARY, 'toy_id')
    idx_toys_toy_name = dpAttribute.index(dpAttribute.IndexType.INDEX, 'toy_name')
    # Seed rows (keys use the physical column name `toy_code`, not `toy_cd`).
    __dummy_data__ = [
        {'toy_id': 1, 'toy_code': 1000, 'toy_name': 'Lego', 'toy_summary': 'Lego Limited Edition', 'toy_description': 'Lego Limited Edition.'},
        {'toy_id': 2, 'toy_code': 2000, 'toy_name': 'Teddy Bear', 'toy_summary': 'Teddy Bear Limited Edition', 'toy_description': 'Teddy Bear Limited Edition.'}
    ]
1b99922b55a031842af64c9ed62df780101dafad | 2,278 | py | Python | scripts/pre_solve_path.py | xuzhiying9510/ncflow | 3f6f4a5b2c13ac8f6375b097b35f6c55b18d212e | [
"Artistic-1.0-cl8"
] | 10 | 2021-02-09T19:25:46.000Z | 2022-03-29T13:49:23.000Z | scripts/pre_solve_path.py | xuzhiying9510/ncflow | 3f6f4a5b2c13ac8f6375b097b35f6c55b18d212e | [
"Artistic-1.0-cl8"
] | null | null | null | scripts/pre_solve_path.py | xuzhiying9510/ncflow | 3f6f4a5b2c13ac8f6375b097b35f6c55b18d212e | [
"Artistic-1.0-cl8"
] | 5 | 2020-12-23T15:24:40.000Z | 2022-01-06T09:42:38.000Z | #! /usr/bin/env python
import os
import pickle
from pathos import multiprocessing
import sys
sys.path.append('..')
from lib.problems import get_problem
from lib.algorithms.path_formulation import PathFormulation, PATHS_DIR
from lib.path_utils import graph_copy_with_edge_weights, find_paths
# Module-level names read by the multiprocessing workers below:
#
#   G             -- weighted graph copy, assigned in the __main__ block
#   num_paths     -- number of paths to compute per commodity (argv[2])
#   edge_disjoint -- whether the paths must be edge-disjoint (argv[4])
#
# (The `global G` / `global num_paths` / etc. statements that used to sit
# here were no-ops: a name assigned at module top level is already global,
# so the declarations are replaced by this comment.)
#
# When True, previously computed paths are loaded from the pickle on disk
# and only missing (source, target) pairs are recomputed.
LOAD_FROM_DISK = True
def find_paths_wrapper(commod):
    """Compute paths for a single commodity.

    Returns ((source, target), paths), or None when LOAD_FROM_DISK is set
    and the pair is already present in the on-disk cache (paths_dict) --
    the caller skips None results.
    """
    k, (s_k, t_k, d_k) = commod
    if LOAD_FROM_DISK and (s_k, t_k) in paths_dict:
        # Already cached on disk; nothing to recompute for this pair.
        return None
    return ((s_k, t_k), find_paths(G, s_k, t_k, num_paths, edge_disjoint))
if __name__ == '__main__':
    # Usage: pre_solve_path.py <problem-name> <num-paths> <dist-metric> <edge-disjoint>
    # Builds (or extends) the cached path dictionary for every commodity of
    # a full-traffic-matrix problem and pickles it under PATHS_DIR.
    problem = get_problem(sys.argv[1], model='gravity', random=False)
    assert problem.traffic_matrix.is_full
    # NOTE(review): `global` at module top level is a no-op -- these
    # statements can be dropped without changing behavior.
    global num_paths
    num_paths = int(sys.argv[2])
    dist_metric = sys.argv[3]
    global edge_disjoint
    # argv[4] must be the literal string 'True' or 'False'.
    if sys.argv[4] == 'True':
        edge_disjoint = True
    elif sys.argv[4] == 'False':
        edge_disjoint = False
    else:
        raise Exception('invalid argument for edge_disjoint: {}'.format(
            sys.argv[4]))
    if not os.path.exists(PATHS_DIR):
        os.makedirs(PATHS_DIR)
    paths_fname = PathFormulation.paths_full_fname(problem, num_paths,
                                                   edge_disjoint, dist_metric)
    # Warm-start from the existing pickle when possible; fall back to an
    # empty dict if the file does not exist yet.
    # NOTE(review): if LOAD_FROM_DISK were ever False, paths_dict would be
    # undefined when the merge loop below runs -- confirm the flag is meant
    # to stay hard-coded to True.
    if LOAD_FROM_DISK:
        print('Loading paths from pickle file', paths_fname)
        try:
            with open(paths_fname, 'rb') as f:
                paths_dict = pickle.load(f)
                print('paths_dict: ', len(paths_dict))
        except FileNotFoundError:
            print('Unable to find {}'.format(paths_fname))
            paths_dict = {}
    global G
    G = graph_copy_with_edge_weights(problem.G, dist_metric)
    # Fan the per-commodity path search out over 28 worker processes.
    # NOTE(review): the pool is never closed/joined; consider
    # pool.close(); pool.join() (or a context manager) to release workers.
    pool = multiprocessing.ProcessPool(28)
    new_paths_dict = pool.map(find_paths_wrapper, problem.commodity_list)
    # Merge the newly computed entries; None means the pair was cached.
    for ret_val in new_paths_dict:
        if ret_val is not None:
            k, v = ret_val
            paths_dict[k] = v
    print('paths_dict: ', len(paths_dict))
    print('Saving paths to pickle file')
    with open(paths_fname, 'wb') as w:
        pickle.dump(paths_dict, w)
1b99c9f74b3b415c9c0733d8df3d31427c5b3cde | 2,628 | py | Python | YouTube-PyQt5/09_radio_button.py | dloperab/Python-GUI | ab71f14c7fbb011af0735f48f5196146de11ea23 | [
"MIT"
] | 2 | 2019-03-17T16:08:09.000Z | 2019-03-29T06:37:16.000Z | YouTube-PyQt5/09_radio_button.py | dloperab/OpenCV-GUI | ab71f14c7fbb011af0735f48f5196146de11ea23 | [
"MIT"
] | null | null | null | YouTube-PyQt5/09_radio_button.py | dloperab/OpenCV-GUI | ab71f14c7fbb011af0735f48f5196146de11ea23 | [
"MIT"
] | 2 | 2020-03-14T04:32:19.000Z | 2021-03-09T17:21:33.000Z | import sys
from PyQt5 import QtGui, QtCore
from PyQt5.QtWidgets import QApplication, QDialog, QGroupBox, QVBoxLayout, QHBoxLayout, QRadioButton, QLabel
class MainWindow(QDialog):
    """Demo dialog showing a QGroupBox of three radio buttons; the label
    below reflects whichever sport is currently selected."""
    def __init__(self):
        super().__init__()
        # Window geometry and chrome settings, consumed by _init_window().
        self.title = "PyQt5 Radio Button"
        self.top = 400
        self.left = 200
        self.width = 400
        self.height = 150
        self.icon_name = "images/home.png"
        self._init_window()
    def _init_window(self):
        """Configure the window, build the widgets, lay them out, and show."""
        self.setWindowIcon(QtGui.QIcon(self.icon_name))
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        # Builds self.group_box (must run before it is added to the layout).
        self._create_ui_components()
        vbox = QVBoxLayout()
        vbox.addWidget(self.group_box)
        # Status label updated by _on_radiobutton_checked().
        self.lbl_Info = QLabel(self)
        self.lbl_Info.setFont(QtGui.QFont("Sanserif", 15))
        vbox.addWidget(self.lbl_Info)
        self.setLayout(vbox)
        self.show()
    def _create_ui_components(self):
        """Create the radio-button group; each button's `toggled` signal is
        wired to the shared _on_radiobutton_checked handler."""
        self.group_box = QGroupBox("What is your favorite sport?")
        self.group_box.setFont(QtGui.QFont("Sanserif", 13))
        hbox_layout = QHBoxLayout()
        self.rdbtn_soccer = QRadioButton("Soccer")
        self.rdbtn_soccer.setIcon(QtGui.QIcon("images/soccer.png"))
        self.rdbtn_soccer.setIconSize(QtCore.QSize(30, 30))
        self.rdbtn_soccer.setFont(QtGui.QFont("Sanserif", 13))
        self.rdbtn_soccer.toggled.connect(self._on_radiobutton_checked)
        hbox_layout.addWidget(self.rdbtn_soccer)
        self.rdbtn_basketball = QRadioButton("Basketball")
        self.rdbtn_basketball.setIcon(QtGui.QIcon("images/basketball.png"))
        self.rdbtn_basketball.setIconSize(QtCore.QSize(30, 30))
        self.rdbtn_basketball.setFont(QtGui.QFont("Sanserif", 13))
        self.rdbtn_basketball.toggled.connect(self._on_radiobutton_checked)
        hbox_layout.addWidget(self.rdbtn_basketball)
        self.rdbtn_tennis = QRadioButton("Tennis")
        self.rdbtn_tennis.setIcon(QtGui.QIcon("images/tennis.png"))
        self.rdbtn_tennis.setIconSize(QtCore.QSize(30, 30))
        self.rdbtn_tennis.setFont(QtGui.QFont("Sanserif", 13))
        self.rdbtn_tennis.toggled.connect(self._on_radiobutton_checked)
        hbox_layout.addWidget(self.rdbtn_tennis)
        self.group_box.setLayout(hbox_layout)
    def _on_radiobutton_checked(self):
        """Slot for every radio button's `toggled` signal.

        `toggled` fires for both the button being unchecked and the one
        being checked, so the isChecked() guard keeps only the new choice.
        """
        rdbtn = self.sender()
        if rdbtn.isChecked():
            self.lbl_Info.setText(f"Sport selected: {rdbtn.text()}")
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the dialog, and
    # hand control to the event loop, propagating its exit code.
    # (Non-code residue that was fused onto the final line has been removed.)
    app = QApplication(sys.argv)
    # Keep a reference so the dialog is not garbage-collected while running.
    window = MainWindow()
    sys.exit(app.exec())
1b9b5b9e063185961a0c23077b632b02171d2fdb | 1,233 | py | Python | source/guides/migrations/0002_auto_20170123_1809.py | OpenNews/opennews-source | 71b557275bc5d03c75eb471fc3293efa492c0ac7 | [
"MIT"
] | 6 | 2017-01-05T00:51:48.000Z | 2021-11-08T10:26:04.000Z | source/guides/migrations/0002_auto_20170123_1809.py | OpenNews/opennews-source | 71b557275bc5d03c75eb471fc3293efa492c0ac7 | [
"MIT"
] | 146 | 2017-01-03T16:06:43.000Z | 2022-03-11T23:25:43.000Z | source/guides/migrations/0002_auto_20170123_1809.py | OpenNews/opennews-source | 71b557275bc5d03c75eb471fc3293efa492c0ac7 | [
"MIT"
] | 3 | 2017-02-16T22:52:47.000Z | 2019-08-15T16:49:47.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-01-23 18:09
from __future__ import unicode_literals
from django.db import migrations, models
import sorl.thumbnail.fields
class Migration(migrations.Migration):
dependencies = [
('guides', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='guide',
name='author_bio',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='guide',
name='author_name',
field=models.CharField(blank=True, max_length=128),
),
migrations.AddField(
model_name='guide',
name='author_photo',
field=sorl.thumbnail.fields.ImageField(blank=True, null=True, upload_to='img/uploads/guide_author_images'),
),
migrations.AddField(
model_name='guidearticle',
name='external_author_name',
field=models.CharField(blank=True, max_length=128),
),
migrations.AddField(
model_name='guidearticle',
name='external_organization_name',
field=models.CharField(blank=True, max_length=128),
),
]
| 29.357143 | 119 | 0.596107 | 1,046 | 0.848337 | 0 | 0 | 0 | 0 | 0 | 0 | 264 | 0.214112 |
1b9c385313c2968917d83f61b972f57ca0977ba1 | 5,609 | py | Python | NLP/TextClassfication/task1/train.py | MobtgZhang/ModelZoos | 1fe2c983737bd35f6acffe092c21b212930ab6dc | [
"MIT"
] | null | null | null | NLP/TextClassfication/task1/train.py | MobtgZhang/ModelZoos | 1fe2c983737bd35f6acffe092c21b212930ab6dc | [
"MIT"
] | null | null | null | NLP/TextClassfication/task1/train.py | MobtgZhang/ModelZoos | 1fe2c983737bd35f6acffe092c21b212930ab6dc | [
"MIT"
] | null | null | null | import os
import logging
import yaml
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from utils import build_simplifyweibo_4_moods_dataset
from utils import LabelReviewSplitWordsDataset
from utils import bacthfy,to_device
from model import TextCNN, TextRNN, RCNN,CapsAttNet,DPCNN
from config import get_args,check_args
from evaluate import Evaluator
# Configure root logging once for the whole training script: INFO level,
# timestamp / logger-name / level / message format.
logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# NOTE(review): this grabs the *root* logger; `logging.getLogger(__name__)`
# is the usual convention, but switching would change the emitted logger
# name, so it is left as-is.
logger = logging.getLogger()
# Model registry: model key -> (model class, YAML config file in args.config_dir).
_MODEL_REGISTRY = {
    "textcnn": (TextCNN, "TextCNN.yaml"),
    "textrnn": (TextRNN, "TextRNN.yaml"),
    "rcnn": (RCNN, "RCNN.yaml"),
    "capsattnet": (CapsAttNet, "CapsAttNet.yaml"),
    "dpcnn": (DPCNN, "DPCNN.yaml"),
}

def _build_model(args, vocab_size):
    """Instantiate the model selected by ``args.model``.

    Loads the model's hyper-parameters from its YAML file in ``args.config_dir``
    and returns ``(model, kwargs)`` where ``kwargs`` is the parsed YAML dict.

    Raises:
        ValueError: if ``args.model`` names no known model.
    """
    try:
        model_class, yaml_name = _MODEL_REGISTRY[args.model.lower()]
    except KeyError:
        raise ValueError("Unknown model name %s" % args.model)
    # Bug fix: the TextRNN branch previously read ``args.config`` while every
    # other branch read ``args.config_dir``; all YAML configs live in config_dir.
    yaml_path = os.path.join(args.config_dir, yaml_name)
    with open(yaml_path, mode="r", encoding="utf-8") as rfp:
        kwargs = yaml.safe_load(rfp.read())
    model = model_class(args.n_class, vocab_size=vocab_size, **kwargs)
    return model, kwargs

def train_simplifyweibo_4_moods(args):
    """Train a text classifier on the simplifyweibo_4_moods dataset.

    Builds the train/test CSVs and the word dictionary on first run, trains
    for ``args.epoch_times`` epochs, evaluates on the test split after every
    epoch, and finally saves the loss curve and the per-epoch metrics under
    the result directory.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # --- prepare the dataset (create CSV/dictionary files on first run) ---
    data_path = os.path.join(args.data_dir, args.dataset)
    result_path = os.path.join(args.result_dir, args.dataset)
    train_file_name = os.path.join(result_path, "train.csv")
    test_file_name = os.path.join(result_path, "test.csv")
    dict_file_name = os.path.join(result_path, "dictionary.json")
    if not os.path.exists(train_file_name) or \
        not os.path.exists(test_file_name) or \
        not os.path.exists(dict_file_name):
        # create the dataset files
        build_simplifyweibo_4_moods_dataset(data_path, result_path)
    train_dataset = LabelReviewSplitWordsDataset(train_file_name, dict_file_name, max_seq_len=args.max_seq_len)
    train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, collate_fn=bacthfy, num_workers=args.num_workers)
    test_dataset = LabelReviewSplitWordsDataset(test_file_name, dict_file_name, max_seq_len=args.max_seq_len)
    test_dataloader = DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=True, collate_fn=bacthfy, num_workers=args.num_workers)
    # --- build the model, optimizer and loss ---
    model, kwargs = _build_model(args, vocab_size=len(train_dataset.data_dict))
    kwargs["max_seq_len"] = args.max_seq_len
    args.learning_rate = kwargs["learning_rate"]  # lr always comes from the YAML config
    model.to(device)
    optimizer = optim.Adamax(model.parameters(), lr=args.learning_rate)
    loss_fn = nn.CrossEntropyLoss()
    evaluator = Evaluator()
    # --- training loop ---
    for epoch in range(args.epoch_times):
        loss_all = 0.0
        model.train()
        evaluator.begin_time()
        for item in train_dataloader:
            optimizer.zero_grad()
            item = to_device(item, device)
            input_tensor = item[0]
            target_label = item[1]
            predict_probability = model(input_tensor)
            loss = loss_fn(predict_probability, target_label)
            loss_all += loss.cpu().detach().numpy()
            loss.backward()
            optimizer.step()
        # Average the accumulated batch losses over the epoch.
        loss_all /= len(train_dataloader)
        corr, f1_score_value, acc_score_value, jac_score_value = evaluator.evaluate(model, test_dataloader, device)
        evaluator.add_loss(loss_all)
        logger.info("Epoches %d, complete!, avg loss %0.4f,f1-score %0.4f,accuracy-score %0.4f,jaccard-score %0.4f."%(epoch + 1,loss_all,f1_score_value,acc_score_value,jac_score_value))
    # Bug fix: a second, dead ``loss_all /= len(train_dataloader)`` used to sit
    # here — the value was never read again — and has been removed.
    evaluator.end_time()
    evaluator.draw(args.model_name, result_path)
    evaluate_file_name = os.path.join(result_path, args.model_name + "_results.csv")
    evaluator.save(evaluate_file_name)
def main():
    """Entry point: parse CLI arguments, set up file logging, dispatch training."""
    args = get_args()
    check_args(args)
    # Configure the root logger to record INFO and above.
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    # A per-run file handler captures a verbose (DEBUG-level) log.
    log_path = os.path.join(args.log_dir, args.model_name + '.log')
    file_handler = logging.FileHandler(log_path, mode='w')
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter(
        "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"))
    root_logger.addHandler(file_handler)
    # Record the full argument set for reproducibility.
    root_logger.info(str(args))
    # Dispatch per dataset; only one dataset is currently supported.
    if args.dataset == "simplifyweibo_4_moods":
        args.n_class = 4
        train_simplifyweibo_4_moods(args)
# Run the training pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| 47.940171 | 185 | 0.702443 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 792 | 0.141101 |
1b9c5cc86b543fca12e20fbcf7f9c14dd78ad47e | 374 | py | Python | hipyelp/migrations/0014_rename_tagname_drinktag_drinktagname.py | buggydev1/Hip_yelp_Back | f80407d898d267465e5cfbf7f73905b776e91b43 | [
"MIT"
] | null | null | null | hipyelp/migrations/0014_rename_tagname_drinktag_drinktagname.py | buggydev1/Hip_yelp_Back | f80407d898d267465e5cfbf7f73905b776e91b43 | [
"MIT"
] | null | null | null | hipyelp/migrations/0014_rename_tagname_drinktag_drinktagname.py | buggydev1/Hip_yelp_Back | f80407d898d267465e5cfbf7f73905b776e91b43 | [
"MIT"
] | 2 | 2021-04-22T14:42:57.000Z | 2021-05-18T01:11:58.000Z | # Generated by Django 3.2 on 2021-04-25 02:49
from django.db import migrations
class Migration(migrations.Migration):
    """Rename DrinkTag.tagName to drinktagName.

    Auto-generated by Django 3.2; a pure column rename, no data migration
    is required.
    """
    dependencies = [
        ('hipyelp', '0013_alter_drinktag_tagname'),
    ]
    operations = [
        migrations.RenameField(
            model_name='drinktag',
            old_name='tagName',
            new_name='drinktagName',
        ),
    ]
| 19.684211 | 51 | 0.59893 | 291 | 0.778075 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.31016 |
1b9dd876251bf4f024da196a2aaf6f17236c213b | 2,856 | py | Python | basic_YOLO/obj_vid_cuda.py | msmcs-robotics/pi-cv-vision | 0752fe8b4b754156d0920e202fc147cfee185fdb | [
"ISC"
] | null | null | null | basic_YOLO/obj_vid_cuda.py | msmcs-robotics/pi-cv-vision | 0752fe8b4b754156d0920e202fc147cfee185fdb | [
"ISC"
] | null | null | null | basic_YOLO/obj_vid_cuda.py | msmcs-robotics/pi-cv-vision | 0752fe8b4b754156d0920e202fc147cfee185fdb | [
"ISC"
] | 1 | 2022-02-05T04:06:33.000Z | 2022-02-05T04:06:33.000Z | '''
Reference:
https://thinkinfi.com/use-opencv-with-gpu-python/
Download cfg:
> wget https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg
Download weights:
> wget https://pjreddie.com/media/files/yolov3.weights
Dataset:
> wget https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names
'''
import cv2
import numpy as np
# --- video source selection -------------------------------------------------
# windows / pi
#cap = cv2.VideoCapture(0)
# linux
cam_device = "/dev/video0"
cap = cv2.VideoCapture(cam_device)
# capture from file
#vid = "path/to/video.mp4"
#cap = cv2.VideoCapture(vid)

# --- load the YOLOv3 network and the COCO class names -----------------------
yolo_weight = "model/yolov3.weights"
yolo_config = "model/yolov3.cfg"
coco_labels = "model/coco.names"
net = cv2.dnn.readNet(yolo_weight, yolo_config)

# The Difference: offload inference onto the GPU via the CUDA DNN backend.
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)

with open(coco_labels, "r") as f:
    classes = [line.strip() for line in f.readlines()]

layer_names = net.getLayerNames()
output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()]
# One random BGR colour per class, so boxes of the same class share a colour.
colors = np.random.uniform(0, 255, size=(len(classes), 3))

# Defining desired shape (currently unused by the pipeline below).
fWidth = 256
fHeight = 256

font = cv2.FONT_HERSHEY_DUPLEX  # hoisted out of the loop: it is a constant

while True:
    read_ok, img = cap.read()
    # Bug fix: a failed grab (camera unplugged / end of stream) returns
    # (False, None); dereferencing img.shape then crashed. Stop cleanly instead.
    if not read_ok:
        break
    height, width, channels = img.shape
    # Detecting objects: forward pass over a 416x416 normalised blob.
    blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)
    # Collect every detection above the 0.5 confidence threshold.
    class_ids = []
    confidences = []
    boxes = []
    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.5:
                # Detections are (cx, cy, w, h) normalised to the frame size.
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                # Rectangle coordinates (top-left corner).
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                boxes.append([x, y, w, h])
                confidences.append(float(confidence))
                class_ids.append(class_id)
    # Non-maximum suppression removes overlapping duplicate boxes.
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
    for i in range(len(boxes)):
        if i in indexes:
            x, y, w, h = boxes[i]
            label = str(classes[class_ids[i]])
            confidence_label = int(confidences[i] * 100)
            # Bug fix: the colour was looked up with the *box* index, which
            # raises IndexError once there are more boxes than classes; index
            # by class id so each class also gets a stable colour.
            color = colors[class_ids[i]]
            cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
            cv2.putText(img, f'{label, confidence_label}', (x-25, y + 75), font, 2, color, 2)
    cv2.imshow("Image", img)
    # Close video window by pressing 'q'
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the capture device and close the preview window on the way out.
cap.release()
cv2.destroyAllWindows()
1b9ddc52beb4e9bc35334488c0af98f0f61d54c2 | 1,998 | py | Python | examples/container/tabular_benchmark_example.py | pfistfl/HPOBench | a7ad8807bd2e058ff99f703ad057b64ecadd4b66 | [
"Apache-2.0"
] | 78 | 2017-01-14T14:25:55.000Z | 2020-09-30T22:57:14.000Z | examples/container/tabular_benchmark_example.py | pfistfl/HPOBench | a7ad8807bd2e058ff99f703ad057b64ecadd4b66 | [
"Apache-2.0"
] | 84 | 2016-11-24T15:19:20.000Z | 2020-11-09T11:34:19.000Z | examples/container/tabular_benchmark_example.py | pfistfl/HPOBench | a7ad8807bd2e058ff99f703ad057b64ecadd4b66 | [
"Apache-2.0"
] | 31 | 2016-11-29T19:56:06.000Z | 2020-07-10T04:13:33.000Z | """
Tabular benchmark
=================
This examples shows the usage of the containerized tabular benchmark.
To note: You don't have to pass the container name to the Benchmark-Constructor. It is automatically set, but for
demonstration purpose, we show how to set it.
container_source can be either a path to a registry (e.g. sylabs.io, singularity_hub.org) or a local path on your local
file system. If it is a link to a registry, the container will be downloaded to the default data dir, set in the
hpobenchrc. A second call, will first look into the data directory, if the container is already available, so it will not
be downloaded twice.
Please install the necessary dependencies via ``pip install .`` and singularity (v3.5).
https://sylabs.io/guides/3.5/user-guide/quick_start.html#quick-installation-steps
"""
import argparse
from hpobench.container.benchmarks.nas.tabular_benchmarks import SliceLocalizationBenchmark as TabBenchmarkContainer
def run_experiment(on_travis=False):
    """Instantiate the containerized tabular benchmark, sample one random
    configuration and evaluate it twice: once passed as a plain dictionary and
    once as a ConfigSpace configuration object."""
    # container_name / container_source are shown for demonstration purposes;
    # both have sensible defaults and could be omitted (see module docstring).
    benchmark = TabBenchmarkContainer(container_name='tabular_benchmarks',
                                      container_source='library://phmueller/automl',
                                      rng=1)
    configuration_space = benchmark.get_configuration_space(seed=1)
    sampled_config = configuration_space.sample_configuration()
    print(sampled_config)
    # Both call styles evaluate the very same configuration.
    result_as_dict = benchmark.objective_function(configuration=sampled_config.get_dictionary())
    result_as_config = benchmark.objective_function(configuration=sampled_config)
    print(result_as_dict, result_as_config)
# Script entry point: parse the --on_travis flag and run the demo experiment.
# NOTE(review): prog='TabularNad' looks like a typo for 'TabularNas' — confirm
# before changing, as it only affects the usage string.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(prog='TabularNad')
    parser.add_argument('--on_travis', action='store_true',
                        help='Flag to speed up the run on the continuous integration tool \"travis\". This flag can be'
                             'ignored by the user')
    args = parser.parse_args()
    run_experiment(on_travis=args.on_travis)
| 41.625 | 121 | 0.723724 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,114 | 0.557558 |
1ba07eaa9253946b4f345408b4a379ec82d0ede6 | 375 | py | Python | recip/util/Address.py | anthonybuckle/Reciprocity-Core | 3254073f44e8fe2222aed9879885a2e609d4044a | [
"MIT"
] | null | null | null | recip/util/Address.py | anthonybuckle/Reciprocity-Core | 3254073f44e8fe2222aed9879885a2e609d4044a | [
"MIT"
] | null | null | null | recip/util/Address.py | anthonybuckle/Reciprocity-Core | 3254073f44e8fe2222aed9879885a2e609d4044a | [
"MIT"
] | null | null | null | from recip.util import DataType
from recip.util import Validator
def toAddressBytes(address):
    """Decode a hex address string, with or without a leading '0x', to bytes."""
    hex_part = address[2:] if address.startswith('0x') else address
    return DataType.fromHex(hex_part)
def toAddressStr(addressBytes):
    """Return the hex-string form of *addressBytes* (no '0x' prefix)."""
    return DataType.toHex(addressBytes)
def to0xAddress(addressBytes):
    """Return the hex-string form of *addressBytes* with a '0x' prefix."""
    return "0x" + toAddressStr(addressBytes)
1ba1f83792d3dfefb74bfb86f0f1b1667d473d05 | 542 | py | Python | webapp/models/req_error_log.py | myfreshcity/mystock | 3a8832e8c498128683b6af528da92d7fda32386d | [
"MIT"
] | 2 | 2016-09-19T09:18:17.000Z | 2022-02-16T14:55:51.000Z | webapp/models/req_error_log.py | myfreshcity/mystock | 3a8832e8c498128683b6af528da92d7fda32386d | [
"MIT"
] | 2 | 2020-04-29T13:01:45.000Z | 2020-04-29T13:01:45.000Z | webapp/models/req_error_log.py | myfreshcity/mystock | 3a8832e8c498128683b6af528da92d7fda32386d | [
"MIT"
] | 2 | 2018-06-29T15:09:36.000Z | 2019-09-05T09:26:06.000Z | from datetime import datetime
from webapp.services import db
class ReqErrorLog(db.Model):
    """ORM model recording a failed request: the action attempted, the
    request key, the error message and the creation timestamp."""
    __tablename__ = 'req_error_log'
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Name of the action that failed.
    action = db.Column(db.String(50))
    # Request key — presumably the identifier of the item being processed;
    # confirm against callers.
    key = db.Column(db.String(255))
    # Error message text (up to 2000 characters).
    msg = db.Column(db.String(2000))
    # Row creation time; defaults to "now" when the row is inserted.
    created_time = db.Column(db.DateTime, default=datetime.now)
    def __init__(self, action, key, msg):
        """Create a log entry for *action* with the request *key* and error *msg*."""
        self.action = action
        self.key = key
        self.msg = msg
    def __repr__(self):
        return '<ReqErrorLog %r>' % self.id
| 23.565217 | 63 | 0.653137 | 476 | 0.878229 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.060886 |
1ba5d06c5362ae2cf5a563053ad286ea87461d19 | 632 | py | Python | frappe-bench/apps/erpnext/erpnext/buying/doctype/supplier_scorecard_standing/supplier_scorecard_standing.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | frappe-bench/apps/erpnext/erpnext/buying/doctype/supplier_scorecard_standing/supplier_scorecard_standing.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | [
"MIT"
] | null | null | null | frappe-bench/apps/erpnext/erpnext/buying/doctype/supplier_scorecard_standing/supplier_scorecard_standing.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class SupplierScorecardStanding(Document):
pass
@frappe.whitelist()
def get_scoring_standing(standing_name):
standing = frappe.get_doc("Supplier Scorecard Standing", standing_name)
return standing
@frappe.whitelist()
def get_standings_list():
standings = frappe.db.sql("""
SELECT
scs.name
FROM
`tabSupplier Scorecard Standing` scs""",
{}, as_dict=1)
return standings | 21.793103 | 72 | 0.761076 | 48 | 0.075949 | 0 | 0 | 336 | 0.531646 | 0 | 0 | 243 | 0.384494 |
1ba6078890512ae4e2a9c49facfe629996e04ba9 | 1,405 | py | Python | Server/Protocol/Messages/Server/LoginOkMessage.py | Voeed/Brawl-stars-v11 | b60743c306ab471053d81aa59f812b19a2c7f8f3 | [
"MIT"
] | 3 | 2021-04-14T18:33:53.000Z | 2021-09-26T13:53:54.000Z | Server/Protocol/Messages/Server/LoginOkMessage.py | ImColette-dev/Brawl-stars-v11-server | b3232b574c5b2fcc29803e5c8c9e9650d0d59d73 | [
"MIT"
] | 1 | 2021-09-09T12:23:34.000Z | 2021-09-09T12:23:34.000Z | Server/Protocol/Messages/Server/LoginOkMessage.py | ImColette-dev/Brawl-stars-v11-server | b3232b574c5b2fcc29803e5c8c9e9650d0d59d73 | [
"MIT"
] | 1 | 2021-07-28T16:15:29.000Z | 2021-07-28T16:15:29.000Z | from DataStream.ByteStream import ByteStream
from Logic.Player import Player
class LoginOkMessage(ByteStream):
def __init__(self, client, player):
super().__init__(client)
self.id = 20104
self.version = 1
self.player = player;
def encode(self):
self.writeInt(self.player.HighID)
self.writeInt(self.player.LowID)
self.writeInt(self.player.HighID)
self.writeInt(self.player.LowID)
self.writeString(self.player.Token)
self.writeString()
self.writeString()
self.writeInt(11)
self.writeInt(112)
self.writeInt(1)
self.writeString("integration")
self.writeInt(0) #1
self.writeInt(0) #1
self.writeInt(0) #61
self.writeString()
#isAtEnd
self.writeString()
self.writeString()
#isAtEnd
self.writeInt(0)
self.writeString()
self.writeString()
self.writeString()
self.writeInt(0)
self.writeString()
self.writeString()
self.writeString()
#isAtEnd
self.writeVInt(0)
#isAtEnd
#TODO: stringReference
print("[INFO] Message LoginOkMessage has been sent.") | 25.089286 | 61 | 0.525267 | 1,324 | 0.942349 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.091103 |
1ba69d7263d302075677d86a662b9416bf88de61 | 11,790 | py | Python | cronjob/python/Loan_bak_-13122019_1118AM/saveDPWorkingDay.py | heodat234/worldfone4xs_ibm | 6b508c3d99c48c5b8c9f1d979c356fc573e999a2 | [
"MIT"
] | null | null | null | cronjob/python/Loan_bak_-13122019_1118AM/saveDPWorkingDay.py | heodat234/worldfone4xs_ibm | 6b508c3d99c48c5b8c9f1d979c356fc573e999a2 | [
"MIT"
] | null | null | null | cronjob/python/Loan_bak_-13122019_1118AM/saveDPWorkingDay.py | heodat234/worldfone4xs_ibm | 6b508c3d99c48c5b8c9f1d979c356fc573e999a2 | [
"MIT"
] | null | null | null | #!/usr/bin/python3.6
# -*- coding: utf-8 -*-
import re
import ftplib
import calendar
import time
import sys
import os
import json
from pprint import pprint
from datetime import datetime
from datetime import date, timedelta
from bson import ObjectId
from helper.ftp import Ftp
from helper.mongod import Mongodb
from helper.excel import Excel
from helper.jaccs import Config
from helper.common import Common
from helper.mongodbaggregate import Mongodbaggregate
# Shared service/helper singletons used throughout this cron script.
mongodb = Mongodb("worldfone4xs")
_mongodb = Mongodb("_worldfone4xs")
excel = Excel()
config = Config()
ftp = Ftp()
common = Common()
mongodbaggregate = Mongodbaggregate("worldfone4xs")
base_url = common.base_url()
wff_env = common.wff_env(base_url)
# NOTE(review): the two Mongodb handles above are immediately replaced by the
# environment-aware handles below; the first pair is redundant.
mongodb = Mongodb(MONGODB="worldfone4xs", WFF_ENV=wff_env)
_mongodb = Mongodb(MONGODB="_worldfone4xs", WFF_ENV=wff_env)
# Append-only log file handle.  NOTE(review): never written or closed in this
# script (the only write is commented out in the except clause below).
log = open(base_url + "cronjob/python/Loan/log/saveDailyProdProdEachUserGroup.txt","a")
now = datetime.now()
subUserType = 'LO'  # sub-user type code for the Loan module
collection = common.getSubUser(subUserType, 'Daily_prod_working_day')
# Build/refresh today's rows of the "Daily prod working day" report for every
# (debt group, product, collection team) combination.  The whole job runs in
# one try/except so a cron failure is printed instead of dying silently.
# Note: sys.exit() raises SystemExit, which ``except Exception`` does NOT
# catch, so the holiday/weekend early exit below still terminates the script.
try:
    listDebtGroup = []
    # Report row label for each single-character "day code".
    dpWorkingdaysdaycol = {'1': 'No. of Overdue accounts', '2': 'No. of Paid accounts end of day', '3': 'No. of Paid accounts Accumulated', '4': 'Collected ratio (account)', '5': 'Overdue outstanding balance', '6': 'Collected amount (end of day)', '7': 'Collected amount Accumulated', '8': 'Collected ratio (amount)'}
    # Human-readable due day for each due-date code.
    due = {
        '01' : '12th',
        '02' : '22nd',
        '03' : '31st'
    }
    today = date.today()
    # today = datetime.strptime('20/11/2019', "%d/%m/%Y").date()
    day = today.day
    month = today.month
    year = today.year
    weekday = today.weekday()
    lastDayOfMonth = calendar.monthrange(year, month)[1]
    todayString = today.strftime("%d/%m/%Y")
    todayTimeStamp = int(time.mktime(time.strptime(str(todayString + " 00:00:00"), "%d/%m/%Y %H:%M:%S")))
    startMonth = int(time.mktime(time.strptime(str('01/' + str(month) + '/' + str(year) + " 00:00:00"), "%d/%m/%Y %H:%M:%S")))
    endMonth = int(time.mktime(time.strptime(str(str(lastDayOfMonth) + '/' + str(month) + '/' + str(year) + " 23:59:59"), "%d/%m/%Y %H:%M:%S")))
    holidayOfMonth = mongodb.get(MONGO_COLLECTION=common.getSubUser(subUserType, 'Report_off_sys'))
    # Bug fix: this was ``map(lambda row: {row['off_date']}, ...)``, which built
    # single-element *sets*, so the membership test below could never match
    # (and the lazy map iterator was single-use anyway).  Collect raw values.
    listHoliday = [offDateRow['off_date'] for offDateRow in holidayOfMonth]
    dueDateThisMonth = mongodb.get(MONGO_COLLECTION=common.getSubUser(subUserType, 'Report_due_date'))
    # Nothing to do on holidays or weekends (weekday 5 = Saturday, 6 = Sunday).
    if todayTimeStamp in listHoliday or (weekday == 5) or weekday == 6:
        sys.exit()
    todayString = today.strftime("%d/%m/%Y")
    starttime = int(time.mktime(time.strptime(str(todayString + " 00:00:00"), "%d/%m/%Y %H:%M:%S")))
    endtime = int(time.mktime(time.strptime(str(todayString + " 23:59:59"), "%d/%m/%Y %H:%M:%S")))
    yesterday_starttime = starttime - 86400
    yesterday_endtime = endtime - 86400
    # code -> name lookup of the main products (not referenced further below;
    # kept for parity with the sibling reporting scripts).
    mainProduct = {}
    mainProductRaw = mongodb.get(MONGO_COLLECTION=common.getSubUser(subUserType, 'Product'))
    for prod in mainProductRaw:
        mainProduct[prod['code']] = prod['name']
    # Cartesian product of debt-group codes and due-date codes, e.g. 'A' + '01'.
    debtGroup = _mongodb.getOne(MONGO_COLLECTION=common.getSubUser(subUserType, 'Jsondata'), WHERE={'tags': ['debt', 'group']})
    dueDate = _mongodb.getOne(MONGO_COLLECTION=common.getSubUser(subUserType, 'Jsondata'), WHERE={'tags': ['debt', 'duedate']})
    for group in debtGroup['data']:
        for duedate in dueDate['data']:
            listDebtGroup.append(group['text'] + duedate['text'])
    listDebtGroup = sorted(listDebtGroup)
    listGroupProductRaw = _mongodb.getOne(MONGO_COLLECTION=common.getSubUser(subUserType, 'Jsondata'), WHERE={'tags': ['group', 'debt', 'product']})
    listGroupProduct = listGroupProductRaw['data']
    # Portfolio-wide totals of the SIBS (loan) accounts ...
    lnjc05 = mongodb.get(MONGO_COLLECTION=common.getSubUser(subUserType, 'LNJC05'))
    total_lnjc05 = 0
    total_cur_bal_lnjc05 = 0
    for lnjc05_row in lnjc05:
        total_lnjc05 += 1
        total_cur_bal_lnjc05 += lnjc05_row['current_balance']
    # ... and of the card accounts in collection.
    list_acc = mongodb.get(MONGO_COLLECTION=common.getSubUser(subUserType, 'List_of_account_in_collection'))
    total_list_acc = 0
    total_cur_bal_list_acc = 0
    for list_acc_row in list_acc:
        total_list_acc += 1
        total_cur_bal_list_acc += list_acc_row['cur_bal']
    for debtGroupCell in list(listDebtGroup):
        # Bug fix: was ``debtGroupCell[0:1] is not 'F'`` — an identity test
        # against a string literal, which only worked by interning accident.
        if debtGroupCell[0:1] != 'F':
            dueDayOfMonth = mongodb.getOne(MONGO_COLLECTION=common.getSubUser(subUserType, 'Report_due_date'), WHERE={'for_month': str(month), 'debt_group': debtGroupCell[1:3]})
            dueDayLastMonth = mongodb.getOne(MONGO_COLLECTION=common.getSubUser(subUserType, 'Report_due_date'), WHERE={'for_month': str(month - 1), 'debt_group': debtGroupCell[1:3]})
            # Working-day index of today counted from the applicable "due date + 1".
            if todayTimeStamp > dueDayOfMonth['due_date_add_1']:
                todayIndexNum = common.countWorkingDaysBetweendate(starttime = dueDayOfMonth['due_date_add_1'], endtime = todayTimeStamp, mongodb=mongodb)
            else:
                todayIndexNum = common.countWorkingDaysBetweendate(starttime = dueDayLastMonth['due_date_add_1'], endtime = todayTimeStamp, mongodb=mongodb)
            # Keep the numeric value: the "yesterday" update below needs
            # ``todayIndexNum - 1``.  The old code stored only the string and
            # then computed ``str - 1``, raising TypeError at run time.
            todayIndex = str(todayIndexNum)
            for groupProductCell in listGroupProduct:
                for key in dpWorkingdaysdaycol:
                    groupInfoByDueDate = list(mongodb.get(MONGO_COLLECTION=common.getSubUser(subUserType, 'Group'), WHERE={'debt_groups': debtGroupCell, 'name': {"$regex": groupProductCell['text'] + '.*'}}))
                    # Sentinel pseudo-team for the portfolio-wide totals row.
                    groupInfoByDueDate.extend([{'name': 'Total'}])
                    for groupCell in groupInfoByDueDate:
                        cur_bal = 0
                        # Bug fix: count_acc was never reset, so an unknown
                        # product value reused the previous iteration's count
                        # (or raised NameError on the very first pass).
                        count_acc = 0
                        if groupProductCell['value'] == 'SIBS':
                            count_acc = total_lnjc05
                            cur_bal = total_cur_bal_lnjc05
                        if groupProductCell['value'] == 'Card':
                            count_acc = total_list_acc
                            cur_bal = total_cur_bal_list_acc
                        no_overdue = count_acc
                        no_paid_acc_accumulayed = 0
                        no_overdue_amt = cur_bal
                        no_paid_acc_accumulayed_amt = 0
                        if(groupCell['name'] != 'Total'):
                            temp = {
                                'group' : debtGroupCell[0:1] + ' GROUP',
                                'month' : today.strftime("%b-%y"),
                                'due' : due[debtGroupCell[1:3]],
                                'product' : groupProductCell['value'],
                                'day' : dpWorkingdaysdaycol[key],
                                'day_code' : key,
                                'team_name' : groupCell['name'],
                                'team_id' : str(groupCell['_id']),
                            }
                            # Before this month's "due date + 1" the row still
                            # belongs to last month's collection cycle.
                            if todayTimeStamp < dueDayOfMonth['due_date_add_1']:
                                temp['due_date'] = dueDayLastMonth['due_date'] if dueDayLastMonth is not None else ''
                            else:
                                temp['due_date'] = dueDayOfMonth['due_date']
                            if key == '2':
                                temp['index_' + todayIndex] = 0
                            if key == '6':
                                temp['index_' + todayIndex] = 0
                            if key == '1':
                                temp['index_' + todayIndex] = no_overdue
                            if key == '3':
                                temp['index_' + todayIndex] = no_paid_acc_accumulayed
                            if key == '5':
                                temp['index_' + todayIndex] = no_overdue_amt
                            if key == '7':
                                temp['index_' + todayIndex] = no_paid_acc_accumulayed_amt
                            # Targets are deliberately NOT computed here; they
                            # are taken from the start-of-month table instead.
                            temp['start_acc'] = 0
                            temp['start_amt'] = 0
                            if key == '4':
                                temp['index_' + todayIndex] = 0
                            if key == '8':
                                temp['index_' + todayIndex] = 0
                            if todayTimeStamp != dueDayOfMonth['due_date_add_1']:
                                # Not the first working day of the cycle:
                                # complete yesterday's row and add today's value.
                                yesterdayData = mongodb.getOne(MONGO_COLLECTION=common.getSubUser(subUserType, 'Daily_prod_working_day'), WHERE={'team_id': str(groupCell['_id']), 'day_code': key, 'updated_at': {'$gte': yesterday_starttime, '$lte': yesterday_endtime}})
                                no_acc_end_date = 0
                                no_acc_end_date_amt = 0
                                if yesterdayData is not None:
                                    # NOTE(review): rows are built from ``temp``
                                    # above, which never sets 'no_overdue' /
                                    # 'no_overdue_amt' — confirm these fields
                                    # actually exist on stored documents.
                                    no_acc_end_date = yesterdayData['no_overdue'] - no_overdue
                                    no_acc_end_date_amt = yesterdayData['no_overdue_amt'] - no_overdue_amt
                                updateDataYesterday = {}
                                if key == '2':
                                    # Bug fix: was ``'index_' + (todayIndex - 1)``
                                    # with a *string* todayIndex -> TypeError.
                                    updateDataYesterday['index_' + str(todayIndexNum - 1)] = no_acc_end_date
                                if key == '6':
                                    updateDataYesterday['index_' + str(todayIndexNum - 1)] = no_acc_end_date_amt
                                updateDataYesterday['index_' + todayIndex] = temp['index_' + todayIndex]
                                updateDataYesterday['updated_at'] = time.time()
                                mongodb.update(MONGO_COLLECTION=common.getSubUser(subUserType, 'Daily_prod_working_day'), WHERE={'team_id': str(groupCell['_id']), 'day_code': key, 'updated_at': {'$gte': yesterday_starttime, '$lte': yesterday_endtime}}, VALUE=updateDataYesterday)
                            else:
                                # First working day of the cycle: insert a fresh row.
                                pprint(temp)
                                mongodb.insert(MONGO_COLLECTION=common.getSubUser(subUserType, 'Daily_prod_working_day'), insert_data=temp)
    print('DONE')
except Exception as e:
    # log.write(now.strftime("%d/%m/%Y, %H:%M:%S") + ': ' + str(e) + '\n')
    pprint(str(e))
| 53.590909 | 318 | 0.537065 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,719 | 0.230326 |
1baabd5bd4355d53876aed7f693222e894bfb08a | 5,524 | py | Python | backend/hqlib/metric_source/issue_log/jira_action_list.py | ICTU/quality-report | f6234e112228ee7cfe6476c2d709fe244579bcfe | [
"Apache-2.0"
] | 25 | 2016-11-25T10:41:24.000Z | 2021-07-03T14:02:49.000Z | backend/hqlib/metric_source/issue_log/jira_action_list.py | ICTU/quality-report | f6234e112228ee7cfe6476c2d709fe244579bcfe | [
"Apache-2.0"
] | 783 | 2016-09-19T12:10:21.000Z | 2021-01-04T20:39:15.000Z | backend/hqlib/metric_source/issue_log/jira_action_list.py | ICTU/quality-report | f6234e112228ee7cfe6476c2d709fe244579bcfe | [
"Apache-2.0"
] | 15 | 2015-03-25T13:52:49.000Z | 2021-03-08T17:17:56.000Z | """
Copyright 2012-2019 Ministerie van Sociale Zaken en Werkgelegenheid
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import datetime
from typing import List, Tuple
import dateutil.parser
from dateutil.relativedelta import relativedelta
from hqlib import utils
from hqlib.metric_source.abstract.issue_log import ActionLog
from hqlib.metric_source import JiraFilter
class JiraActionList(ActionLog):
    """ Jira used as an action list.

        Issues come from one or more Jira filters.  An issue is "overdue" when
        the configured date field (``duedate`` by default) lies before now, and
        "inactive" when it is overdue and unchanged for two weeks.  Issue
        tuples appear to be laid out as
        ``(label, url, <field>, updated, created, *ignored-fields)`` — see the
        ``extra_fields`` argument below — confirm against JiraFilter. """
    metric_source_name = 'Jira Action List'

    def __init__(self, url: str, username: str, password: str, field_name: str = 'duedate', *args, **kwargs) -> None:
        # Optional list of one-entry {field-id: value} dicts; issues whose
        # extra fields match any of these values are excluded from all reports.
        self._fields_to_ignore = kwargs.pop('fields_to_ignore', [])
        self.__url = url
        self.__field_name = field_name
        self.__jira_filter = JiraFilter(self.__url, username, password, self.__field_name)
        super().__init__(*args, **kwargs)

    @classmethod
    def _is_str_date_before(cls, str_date: str, limit_date: datetime.datetime) -> bool:
        """ Return whether the ISO date string lies strictly before limit_date. """
        return utils.parse_iso_datetime_local_naive(str_date) < limit_date

    def _get_issues_older_than(self, *metric_source_ids: str,
                               limit_date: datetime.datetime) -> List[Tuple[str, str, str]]:
        """ Return the non-ignored issues whose date field precedes limit_date,
            or None when the Jira filter result was inadequate. """
        try:
            extra_fields = ['updated', 'created'] + [list(field.keys())[0] for field in self._fields_to_ignore]
            issues = self.__jira_filter.issues_with_field_exceeding_value(
                *metric_source_ids,
                extra_fields=extra_fields,
                compare=self._is_str_date_before, limit_value=limit_date)
            return [issue for issue in issues if not self.__should_issue_be_ignored(issue)]
        except IndexError as reason:
            logging.error("Jira filter result for overdue issues inadequate. Reason: %s.", reason)
            return None

    def __should_issue_be_ignored(self, issue) -> bool:
        """ Return whether any of the issue's extra fields (starting at index 5)
            matches the configured ignore values. """
        for index, ignore in enumerate(self._fields_to_ignore):
            if issue[index + 5] == list(ignore.values())[0]:
                return True
        return False

    def ignored_lists(self) -> List[str]:
        """ Return the ignored lists. """
        return self._fields_to_ignore

    def nr_of_over_due_actions(self, *metric_source_ids: str) -> int:
        """ Return the number of over due actions, or -1 when they could not
            be retrieved. """
        issue_list = self._get_issues_older_than(*metric_source_ids, limit_date=datetime.datetime.now())
        return len(issue_list) if issue_list is not None else -1

    def over_due_actions_url(self, *metric_source_ids: str) -> List[Tuple[str, str, str]]:
        """ Return (label, url, formatted overdue-duration) for the over due
            actions; an empty list when they could not be retrieved. """
        issue_list = self._get_issues_older_than(*metric_source_ids, limit_date=datetime.datetime.now())
        return [(issue[0], issue[1], self.__get_formatted_time_delta(issue[2])) for issue in issue_list] \
            if issue_list is not None else []

    def nr_of_inactive_actions(self, *metric_source_ids: str) -> int:
        """ Return the number of inactive actions, or -1 when they could not
            be retrieved. """
        issue_list = self._get_issues_inactive_for(*metric_source_ids)
        return len(issue_list) if issue_list is not None else -1

    def inactive_actions_url(self, *metric_source_ids: str) -> List[Tuple[str, str, str]]:
        """ Return (label, url, formatted time-since-update) for the inactive
            actions; an empty list when they could not be retrieved. """
        issue_list = self._get_issues_inactive_for(*metric_source_ids)
        return [(issue[0], issue[1], self.__get_formatted_time_delta(issue[3])) for issue in issue_list] \
            if issue_list is not None else []

    def _get_issues_inactive_for(self, *metric_source_ids, delta: relativedelta = relativedelta(days=14)):
        """ Return the overdue issues without an update during *delta*, or
            None when the issues could not be retrieved. """
        overdue_issue_list = self._get_issues_older_than(*metric_source_ids, limit_date=datetime.datetime.now())
        if overdue_issue_list is None:
            # Bug fix: iterating None raised TypeError (the except clause below
            # only catches IndexError); propagate the "no data" marker instead.
            return None
        try:
            inactive_since = datetime.datetime.now() - delta
            return [issue for issue in overdue_issue_list if issue[3]
                    is not None and utils.parse_iso_datetime_local_naive(issue[3]) <= inactive_since]
        except IndexError as reason:
            logging.error("Jira filter result for inactive issues inadequate. Reason: %s.", reason)
            return None

    @classmethod
    def __get_formatted_time_delta(cls, date_to_parse) -> str:
        """ Return a human-readable duration between now and the given date string. """
        return utils.format_timedelta(datetime.datetime.now().astimezone() - dateutil.parser.parse(date_to_parse))

    def metric_source_urls(self, *metric_source_ids: str) -> List[str]:
        """ Return the url(s) to the metric source for the metric source id. """
        return self.__jira_filter.metric_source_urls(*metric_source_ids)

    def datetime(self, *metric_source_ids: str) -> datetime.datetime:  # pylint: disable=unused-argument,no-self-use
        """ Return the date and time of the last measurement: the most recent
            created/updated timestamp over the overdue issues, or datetime.min
            when there are no (retrievable) issues. """
        issue_list = self._get_issues_older_than(*metric_source_ids, limit_date=datetime.datetime.now())
        if not issue_list:
            # Bug fix: max() over None (filter error) or an empty list crashed
            # with TypeError/ValueError.
            return datetime.datetime.min
        return max(max(utils.parse_iso_datetime_local_naive(issue[4]),
                       utils.parse_iso_datetime_local_naive(issue[3]) if issue[3] else datetime.datetime.min)
                   for issue in issue_list)
| 48.884956 | 120 | 0.696235 | 4,656 | 0.842867 | 0 | 0 | 365 | 0.066075 | 0 | 0 | 1,221 | 0.221035 |
1babb441d317176604ca786715349466258b3690 | 651 | py | Python | salt/urls.py | mentix02/Saltan | 9ab55be8b36ea6d8c73fcb87e8f936e9b9bb28c0 | [
"Apache-2.0"
] | 4 | 2017-04-01T15:33:39.000Z | 2017-09-27T11:25:36.000Z | salt/urls.py | mentix02/Saltan | 9ab55be8b36ea6d8c73fcb87e8f936e9b9bb28c0 | [
"Apache-2.0"
] | null | null | null | salt/urls.py | mentix02/Saltan | 9ab55be8b36ea6d8c73fcb87e8f936e9b9bb28c0 | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import url, include
from . import views, salt
from django.contrib.auth.views import logout
# URL namespace used when reversing these routes, e.g. reverse('salt:detail').
app_name = 'salt'
# NOTE(review): `url()` (removed in Django 4.0) and the function-based
# `logout` view (removed in Django 2.1) are used below, so this module
# assumes an older Django release - confirm the pinned version.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^visitor/', include('salt.visitor_urls')),
    url(r'^profile/', include('salt.profile')),
    url(r'^salts/$', salt.salts, name='salts'),
    url(r'^add_salt/$', salt.AddSalt.as_view(), name='add_salt'),
    url(r'^salt/(?P<salt_id>[0-9]+)/$', salt.SaltDetail.as_view(), name='detail'),
    url(r'^salt/delete/(?P<pk>[0-9]+)/$', salt.delete_salt, name='delete_salt'),
    url(r'^logout/$', logout, {'template_name': 'salt/index.html', 'next_page': '/'}, name='logout'),
]
| 38.294118 | 98 | 0.65745 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 266 | 0.408602 |
1baea6433c91aefdeee1385fa3f02122e8c0dac4 | 598 | py | Python | v2x_solution/car/admin.py | Michaelwwgo/V2X_Project | d26f476329dd7f6083e9275e01e2748d38918afc | [
"MIT"
] | 1 | 2021-02-03T08:15:59.000Z | 2021-02-03T08:15:59.000Z | v2x_solution/car/admin.py | Michaelwwgo/V2X_Project | d26f476329dd7f6083e9275e01e2748d38918afc | [
"MIT"
] | null | null | null | v2x_solution/car/admin.py | Michaelwwgo/V2X_Project | d26f476329dd7f6083e9275e01e2748d38918afc | [
"MIT"
] | null | null | null | from django.contrib import admin
from . import models
@admin.register(models.Car)
class CarAdmin(admin.ModelAdmin):
    """Admin changelist configuration for Car records."""
    list_display = (
        'number',
        'isFind',
        'created_at',
        'updated_at',
    )
@admin.register(models.CriminalCar)
class CriminalCarAdmin(admin.ModelAdmin):
    """Admin changelist configuration for CriminalCar records."""
    list_display = (
        'car',
        'image',
        'created_at',
        'updated_at',
    )
@admin.register(models.PostCar)
class PostCarAdmin(admin.ModelAdmin):
    """Admin changelist configuration for PostCar records."""
    list_display = (
        'name',
        'number',
        'owner',
        'created_at',
        'updated_at',
    )
1bb0b59411150c5bdec442937feec3008a2498ad | 2,452 | py | Python | fate/filecommands.py | Mattias1/fate | 10266406336bc4c683ff5b23af32ac3447f7f054 | [
"MIT"
] | null | null | null | fate/filecommands.py | Mattias1/fate | 10266406336bc4c683ff5b23af32ac3447f7f054 | [
"MIT"
] | null | null | null | fate/filecommands.py | Mattias1/fate | 10266406336bc4c683ff5b23af32ac3447f7f054 | [
"MIT"
] | null | null | null | """
This module contains commands for opening, closing, saving and loading files.
"""
import logging
from . import commands
from .operation import Operation
from . import document
from .commandtools import Compose
from .selection import Selection, Interval
import selectors # Depend on selectors to be loaded
from .prompt import prompt
def save(doc, filename=None):
    """Save document text to file."""
    target = filename if filename else doc.filename
    if not target:
        logging.error('No filename')
        return
    try:
        with open(target, 'w') as fd:
            fd.write(doc.text)
    except (FileNotFoundError, PermissionError) as e:
        logging.error(str(e))
    else:
        # Only mark clean and fire the hook when the write actually succeeded.
        doc.saved = True
        doc.OnWrite.fire(doc)
commands.save = save
def load(doc, filename=None):
    """Load document text from file."""
    source = filename if filename else doc.filename
    if not source:
        logging.error('No filename')
        return
    try:
        with open(source, 'r') as fd:
            newtext = fd.read()
    except (FileNotFoundError, PermissionError) as e:
        logging.error(str(e))
    else:
        # Replace the whole buffer through an Operation so undo history works,
        # then reset the selection and mark the document clean.
        commands.selectall(doc)
        operation = Operation(doc, newcontent=[newtext])
        operation(doc)
        doc.selection = Selection(Interval(0, 0))
        doc.saved = True
        doc.OnRead.fire(doc)
commands.load = load
def open_document(doc):
    """Open a new document."""
    # The filename comes from the preceding prompt stage (see Compose below).
    filename = doc.promptinput
    document.Document(filename)
commands.open_document = Compose(prompt('Filename: '), open_document)
def quit_document(doc):
    """Close current document."""
    if doc.saved:
        doc.quit()
        return
    def check_answer(doc):
        # 'y' quits, 'n' cancels, anything else re-asks the question.
        answer = doc.promptinput
        if answer == 'y':
            doc.quit()
        elif answer != 'n':
            quit_document(doc)
    ask_quit = prompt('Unsaved changes! Really quit? (y/n)')
    ask_quit(doc, check_answer)
commands.quit_document = quit_document
def quit_all(doc=None):
    """Close all documents."""
    # Iterate over a copy: quitting mutates the document list.
    for open_doc in list(document.documentlist):
        quit_document(open_doc)
commands.quit_all = quit_all
def force_quit(doc=None):
    """Quit all documents without warning if unsaved changes."""
    # Iterate over a copy: quitting mutates the document list.
    for open_doc in list(document.documentlist):
        open_doc.quit()
commands.force_quit = force_quit
| 25.810526 | 77 | 0.619902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 415 | 0.16925 |
1bb2e3f560ed306c26ad373fdafc6aea95fe218d | 5,897 | py | Python | metrics.py | TUDelftHao/TUDelftHao-4DLongitudinal-MRI-segmentation | c028e3c8b64812a05e39efa80699a327172c095d | [
"MIT"
] | 4 | 2020-07-28T06:03:43.000Z | 2021-09-10T09:12:10.000Z | metrics.py | TUDelftHao/TUDelftHao-4DLongitudinal-MRI-segmentation | c028e3c8b64812a05e39efa80699a327172c095d | [
"MIT"
] | null | null | null | metrics.py | TUDelftHao/TUDelftHao-4DLongitudinal-MRI-segmentation | c028e3c8b64812a05e39efa80699a327172c095d | [
"MIT"
] | 1 | 2021-09-10T09:12:21.000Z | 2021-09-10T09:12:21.000Z | import numpy as np
import torch
from loss import to_one_hot
from collections import OrderedDict
from scipy.ndimage import morphology
from medpy.metric.binary import hd, asd
def indiv_dice(im1, im2, tid):
    """Return the Dice similarity coefficient for a single label id.

    :param im1: first label map (any array-like of integer labels).
    :param im2: second label map, same shape as `im1`.
    :param tid: the label id to score.
    :return: float Dice score in [0, 1); the 1e-5 smoothing term means two
        empty masks score 0 rather than raising a division error.
    :raises ValueError: when the two inputs have different shapes.
    """
    # Binary masks for the requested label.  Use the builtin ``bool`` instead
    # of ``np.bool``: that deprecated alias was removed in NumPy 1.24 and
    # raises AttributeError there.
    im1 = np.asarray(im1 == tid).astype(bool)
    im2 = np.asarray(im2 == tid).astype(bool)
    if im1.shape != im2.shape:
        raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
    # Compute Dice coefficient
    intersection = np.logical_and(im1, im2)
    dsc = 2. * intersection.sum() / ((im1.sum() + im2.sum()) + 1e-5)
    return dsc
def dice(im1, im2, ignore_idx=None):
    """Per-class Dice scores for the five segmentation labels plus the
    foreground average ('avg' excludes background, label 0)."""
    labels = ('bg', 'csf', 'gm', 'wm', 'tm')
    label_dict = OrderedDict((name, 0) for name in labels)
    if isinstance(ignore_idx, int):
        ignore_idx = [ignore_idx]
    tot = 0
    count = 0
    for tid, name in enumerate(labels):
        # Skip labels the caller asked to ignore.
        if ignore_idx is not None and tid in ignore_idx:
            continue
        dsc = indiv_dice(im1, im2, tid)
        label_dict[name] += dsc
        if tid != 0:
            tot += dsc
            count += 1
    label_dict['avg'] = tot / count
    return label_dict
def dice_coe(output, target, eps=1e-5):
    """Soft Dice coefficient per class for a batch of predictions.

    Returns a dict with per-class scores ('bg', 'csf', 'gm', 'wm', 'tm') and
    their foreground average ('avg', background excluded).
    """
    onehot = to_one_hot(target)
    out = output.contiguous().view(output.shape[0], output.shape[1], -1)
    tgt = onehot.contiguous().view(onehot.shape[0], onehot.shape[1], -1).type_as(output)
    num = 2 * torch.sum(out * tgt, dim=-1)
    den = torch.sum(out + tgt, dim=-1) + eps
    bg = torch.mean(num[:, 0] / den[:, 0]).numpy()
    csf = torch.mean(num[:, 1] / den[:, 1]).numpy()
    gm = torch.mean(num[:, 2] / den[:, 2]).numpy()
    wm = torch.mean(num[:, 3] / den[:, 3]).numpy()
    # A patch with no tumour voxels (and essentially no predicted tumour)
    # scores a perfect 1 rather than 0/eps.
    if torch.sum(tgt[:, 4]) == 0 and torch.sum(out[:, 4]) < 1:
        tm = np.ones(wm.shape)
    else:
        tm = torch.mean(num[:, 4] / den[:, 4]).numpy()
    # The average only considers the positive (non-background) classes.
    avg = (csf + gm + wm + tm) / 4
    return {'avg': avg, 'bg': bg, 'csf': csf, 'gm': gm, 'wm': wm, 'tm': tm}
def one_hot_numpy(array, num_classes=None):
    """One-hot encode an integer label map.

    :param array: numpy array of integer labels.
    :param num_classes: number of output channels.  Defaults to the number of
        distinct values in `array` (the original behaviour).  Pass it
        explicitly when some labels may be absent: with the default, a label
        map like [0, 2] only gets channels 0 and 1, so label 2 silently loses
        its channel and downstream reshapes against a fixed channel count fail.
    :return: float array of shape (num_classes,) + array.shape where channel
        i is 1 where ``array == i`` and 0 elsewhere.
    """
    if num_classes is None:
        num_classes = len(np.unique(array))
    one_hot_target = np.zeros((num_classes,) + np.shape(array))
    for i in range(num_classes):
        one_hot_target[i, ...] = np.where(array == i, 1, 0)
    return one_hot_target
def dice_coe_infernce(pred, target):
    """Hard Dice scores at inference time, rounded to three decimals.

    `pred` is a one-hot prediction of shape (C, ...); `target` is an integer
    label map that is one-hot encoded here with the same channel count.
    """
    target = one_hot_numpy(target)
    channels = pred.shape[0]
    pred = pred.reshape(channels, -1)
    target = target.reshape(channels, -1)
    num = 2 * np.sum(pred * target, axis=-1)
    den = np.sum(pred + target, axis=-1) + 1e-5
    bg = num[0] / den[0]
    csf = num[1] / den[1]
    gm = num[2] / den[2]
    wm = num[3] / den[3]
    # No tumour in ground truth and none predicted counts as a perfect score.
    if np.sum(target[4]) == 0 and np.sum(pred[4]) == 0:
        tm = 1.
    else:
        tm = num[4] / den[4]
    avg = (csf + gm + wm + tm) / 4
    return {'avg': np.round(avg, 3),
            'bg': np.round(bg, 3),
            'csf': np.round(csf, 3),
            'gm': np.round(gm, 3),
            'wm': np.round(wm, 3),
            'tm': np.round(tm, 3)}
def Hausdorf(pred, gt, replace_NaN=100):
    """Hausdorff distance per label between two label maps.

    Labels absent from the prediction get the fallback `replace_NaN` value;
    'avg' averages the four foreground classes.
    """
    present = np.unique(pred)
    scores = {}
    for name, tid in (('bg', 0), ('csf', 1), ('gm', 2), ('wm', 3), ('tm', 4)):
        # Background is always evaluated; other labels only when predicted.
        if tid == 0 or tid in present:
            scores[name] = hd(pred == tid, gt == tid)
        else:
            scores[name] = replace_NaN
    HD95_dict = {'avg': (scores['csf'] + scores['gm'] + scores['wm'] + scores['tm']) / 4}
    HD95_dict.update(scores)
    return HD95_dict
def AverageSurfaceDist(pred, gt, replace_NaN=100):
    """Average surface distance per label between two label maps.

    Labels absent from the prediction get the fallback `replace_NaN` value;
    'avg' averages the four foreground classes.
    """
    present = np.unique(pred)
    scores = {}
    for name, tid in (('bg', 0), ('csf', 1), ('gm', 2), ('wm', 3), ('tm', 4)):
        # Background is always evaluated; other labels only when predicted.
        if tid == 0 or tid in present:
            scores[name] = asd(pred == tid, gt == tid)
        else:
            scores[name] = replace_NaN
    ASD = {'avg': (scores['csf'] + scores['gm'] + scores['wm'] + scores['tm']) / 4}
    ASD.update(scores)
    return ASD
if __name__ == '__main__':
    # Ad-hoc smoke-test scratch pad; not executed on import.
    # yp = np.random.random(size=(2, 5, 3, 3, 3))
    # yp = torch.from_numpy(yp)
    # yt = np.zeros(shape=(2, 3, 3, 3))
    # yt = yt + 1
    # yt = torch.from_numpy(yt)
    # coe = dice_coe(yp, yt)
    # print(coe)
    # print(end)
    im1 = np.random.random((4,3,3,3))
    im2 = np.random.random((2,3,3,3))
    gt = np.argmax(im1, axis=0)
    im = gt ==1
    print(gt ==1)
    # sds = surfd(im1[0], gt, sampling=[1,1,1], HD95=True)
    # print(sds)
    # dict = dice(im1, im2, ignore_idx=4)
    # print(dict)
| 28.080952 | 92 | 0.545192 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 654 | 0.110904 |
1bb43f5bab8451713de15545ccf7d1fdb2033a55 | 8,770 | py | Python | apt_smart/backends/debian.py | walkonbothsides/apt-smart | 43c4838266356e98c80a274af181f6f63c34f5ad | [
"MIT"
] | null | null | null | apt_smart/backends/debian.py | walkonbothsides/apt-smart | 43c4838266356e98c80a274af181f6f63c34f5ad | [
"MIT"
] | null | null | null | apt_smart/backends/debian.py | walkonbothsides/apt-smart | 43c4838266356e98c80a274af181f6f63c34f5ad | [
"MIT"
] | null | null | null | # Automated, robust apt-get mirror selection for Debian and Ubuntu.
#
# Author: martin68 and Peter Odding
# Last Change: September 15, 2019
# URL: https://apt-smart.readthedocs.io
"""
Discovery of Debian package archive mirrors.
Here are references to some of the material that I've needed to consult while
working on this module:
- `Notes about sources.list on the Debian wiki <https://wiki.debian.org/SourcesList>`_
- `The Debian backports webpages <https://backports.debian.org/Instructions/>`_
- `Documentation about the "proposed-updates" mechanism <https://www.debian.org/releases/proposed-updates.html>`_
"""
# Standard library modules.
import logging
import json
# External dependencies.
import six
from bs4 import BeautifulSoup
from humanfriendly import Timer, format, pluralize
# Modules included in our package.
from apt_smart import CandidateMirror, mirrors_are_equal
from apt_smart.http import fetch_url
LTS_ARCHITECTURES = ('i386', 'amd64', 'armel', 'armhf')
"""The names of the architectures supported by the Debian LTS team (a tuple of strings)."""
LTS_RELEASES = {
'jessie': 1593468000, # 2020-06-30
'stretch': 1656540000, # 2022-06-30
}
"""
A dictionary with `Debian LTS`_ releases and their EOL dates.
This is needed because distro-info-data_ doesn't contain information
about Debian LTS releases but nevertheless ``archive.debian.org``
doesn't adopt a release until the LTS status expires (this was
originally reported in `issue #5`_).
.. _Debian LTS: https://wiki.debian.org/LTS
.. _issue #5: https://github.com/xolox/python-apt-mirror-updater/issues/5
"""
MIRRORS_URL = 'https://www.debian.org/mirror/list'
"""The URL of the HTML page listing all primary Debian mirrors (a string)."""
SECURITY_URL = 'http://security.debian.org/'
"""The base URL of the Debian mirror with security updates (a string)."""
OLD_RELEASES_URL = 'http://archive.debian.org/debian-archive/debian/'
"""The URL where EOL (end of life) Debian releases are hosted (a string)."""
BASE_URL = 'http://ftp.debian.org/debian/dists/codename-updates/InRelease'
"""The URL where official repo treated as base are hosted (a string).
The InRelease file contains `Date:` which can be gotten as :attr:`.base_last_updated`
to determine which mirrors are up-to-date"""
DEFAULT_SUITES = 'release', 'security', 'updates'
"""A tuple of strings with the Debian suites that are enabled by default."""
VALID_COMPONENTS = 'main', 'contrib', 'non-free'
"""A tuple of strings with the names of the components available in the Debian package repositories."""
VALID_SUITES = 'release', 'security', 'updates', 'backports', 'proposed-updates'
"""A tuple of strings with the names of the suites available in the Debian package repositories."""
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
def discover_mirrors():
    """
    Discover available Debian mirrors by querying :data:`MIRRORS_URL`.

    :returns: A set of :class:`.CandidateMirror` objects that have their
              :attr:`~.CandidateMirror.mirror_url` property set.
    :raises: If no mirrors are discovered an exception is raised.

    An example run:

    >>> from apt_smart.backends.debian import discover_mirrors
    >>> from pprint import pprint
    >>> pprint(discover_mirrors())
    set([CandidateMirror(mirror_url='http://ftp.at.debian.org/debian/'),
         CandidateMirror(mirror_url='http://ftp.au.debian.org/debian/'),
         CandidateMirror(mirror_url='http://ftp.be.debian.org/debian/'),
         ...])
    """
    timer = Timer()
    logger.info("Discovering Debian mirrors at %s ..", MIRRORS_URL)
    # Geolocate the user so mirrors in their country can be preferred.
    try:
        url = 'https://ipapi.co/json'
        response = fetch_url(url, timeout=2)
        # On py3 response is bytes and json.loads throws TypeError in py3.4 and 3.5,
        # so decode it to str
        if isinstance(response, six.binary_type):
            response = response.decode('utf-8')
        data = json.loads(response)
        country = data['country_name']
        logger.info("Found your location: %s by %s", country, url)
    except Exception:
        # Fall back to a second geolocation service (different JSON schema).
        url = 'http://ip-api.com/json'
        response = fetch_url(url, timeout=5)
        if isinstance(response, six.binary_type):
            response = response.decode('utf-8')
        data = json.loads(response)
        country = data['country']
        logger.info("Found your location: %s by %s", country, url)
    data = fetch_url(MIRRORS_URL, timeout=20, retry=True)
    soup = BeautifulSoup(data, 'html.parser')
    tables = soup.findAll('table')
    flag = False  # flag is True when the row matching the user's country was seen
    mirrors = set()
    if not tables:
        raise Exception("Failed to locate <table> element in Debian mirror page! (%s)" % MIRRORS_URL)
    else:
        for row in tables[1].findAll("tr"):  # tables[1] organises mirrors by country.
            if flag:
                if not row.a:  # End of mirrors located in that country
                    break
                else:
                    mirrors.add(CandidateMirror(mirror_url=row.a['href']))
            if row.get_text() == country:
                flag = True
    if len(mirrors) < 3:  # Too few, add tables[0] which contains Primary Debian mirror sites all around the world.
        # Bug fix: set.add() on a generator would store the generator object
        # itself as a set member; update() adds each CandidateMirror instead.
        mirrors.update(CandidateMirror(mirror_url=a['href']) for a in tables[0].findAll('a', href=True))
    if not mirrors:
        raise Exception("Failed to discover any Debian mirrors! (using %s)" % MIRRORS_URL)
    logger.info("Discovered %s in %s.", pluralize(len(mirrors), "Debian mirror"), timer)
    return mirrors
def generate_sources_list(mirror_url, codename,
                          suites=DEFAULT_SUITES,
                          components=VALID_COMPONENTS,
                          enable_sources=False):
    """
    Generate the contents of ``/etc/apt/sources.list`` for a Debian system.

    :param mirror_url: The base URL of the mirror (a string).
    :param codename: The codename of a Debian release (a string like 'wheezy'
                     or 'jessie') or a Debian release class (a string like
                     'stable', 'testing', etc).
    :param suites: An iterable of strings (defaults to
                   :data:`DEFAULT_SUITES`, refer to
                   :data:`VALID_SUITES` for details).
    :param components: An iterable of strings (refer to
                       :data:`VALID_COMPONENTS` for details).
    :param enable_sources: :data:`True` to include ``deb-src`` entries,
                           :data:`False` to omit them.
    :returns: The suggested contents of ``/etc/apt/sources.list`` (a string).
    """
    unknown_suites = [s for s in suites if s not in VALID_SUITES]
    if unknown_suites:
        raise ValueError("Invalid Debian suite(s) given! (%s)" % unknown_suites)
    unknown_components = [c for c in components if c not in VALID_COMPONENTS]
    if unknown_components:
        raise ValueError("Invalid Debian component(s) given! (%s)" % unknown_components)
    directives = ('deb', 'deb-src') if enable_sources else ('deb',)
    lines = []
    for suite in suites:
        # Pick the mirror for this suite: EOL releases live on the archive
        # mirror, security updates on the dedicated security mirror.
        if mirrors_are_equal(mirror_url, OLD_RELEASES_URL):
            mirror = OLD_RELEASES_URL
        elif suite == 'security':
            mirror = SECURITY_URL
        else:
            mirror = mirror_url
        # Translate the suite name into the repository naming scheme.
        if suite == 'release':
            distribution = codename
        elif suite == 'security':
            distribution = '%s/updates' % codename
        else:
            distribution = codename + '-' + suite
        for directive in directives:
            lines.append('%s %s %s %s' % (directive, mirror, distribution, ' '.join(components)))
    return '\n'.join(lines)
def get_eol_date(updater):
    """
    Override the EOL date for `Debian LTS`_ releases.

    :param updater: The :class:`~apt_smart.AptMirrorUpdater` object.
    :returns: The overridden EOL date (a number) or :data:`None`.
    """
    # Only architectures covered by the LTS team get an override.
    if updater.architecture not in LTS_ARCHITECTURES:
        return None
    return LTS_RELEASES.get(updater.distribution_codename)
| 42.572816 | 115 | 0.662714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,581 | 0.636374 |
1bb4d37b6f5910fd85a5d687f9ba95669ab512ff | 223 | py | Python | src/backend/tests/__init__.py | mrzzy/memento | a83db7dd769c949d9924f5ef29930d818b105ef4 | [
"MIT"
] | 1 | 2019-11-18T04:30:32.000Z | 2019-11-18T04:30:32.000Z | src/backend/tests/__init__.py | mrzzy/NP-Portfolio-2 | a83db7dd769c949d9924f5ef29930d818b105ef4 | [
"MIT"
] | 1 | 2021-03-10T06:04:20.000Z | 2021-03-10T06:04:20.000Z | src/backend/tests/__init__.py | mrzzy/NP-Portfolio-2 | a83db7dd769c949d9924f5ef29930d818b105ef4 | [
"MIT"
] | null | null | null | #
# Memento
# Backend
# Tests
#
from .models.identity import *
from .models.assignment import *
from .models.notification import *
from .ops.identity import *
from .ops.assignment import *
from .ops.notification import *
| 15.928571 | 34 | 0.744395 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.121076 |
1bb62a6e4a3e0347985f31deb3133042f6ca02e2 | 3,380 | py | Python | src/kgextractiontoolbox/entitylinking/classifier.py | HermannKroll/KGExtractionToolbox | c17a55dd1fa098f5033b7765ed0f80d3abb44cb7 | [
"MIT"
] | 6 | 2021-09-17T09:49:59.000Z | 2021-12-06T10:07:01.000Z | src/kgextractiontoolbox/entitylinking/classifier.py | HermannKroll/KGExtractionToolbox | c17a55dd1fa098f5033b7765ed0f80d3abb44cb7 | [
"MIT"
] | null | null | null | src/kgextractiontoolbox/entitylinking/classifier.py | HermannKroll/KGExtractionToolbox | c17a55dd1fa098f5033b7765ed0f80d3abb44cb7 | [
"MIT"
] | 1 | 2021-09-18T17:56:12.000Z | 2021-09-18T17:56:12.000Z | import re
from pathlib import Path
from typing import Union
from kgextractiontoolbox.document.document import TaggedDocument
class Classifier:
    """Rule-based document classifier.

    A rule file contains one rule per line; terms within a line are joined by
    ``AND``.  Each term is compiled to a case-insensitive regex where ``*``
    matches word characters and ``w/N`` matches N arbitrary words.  A document
    that matches every term of at least one rule receives ``classification``.

    ``self.rules[i][j]`` (compiled regexes) and ``self.rules_org_str[i][j]``
    (their original text) are parallel lists indexed together.
    """
    def __init__(self, classification, rule_path: Union[str, Path]):
        # classification: label written into doc.classification on a match.
        # rule_path: path to the plain-text rule file.
        self.rules = []
        self.explanations = []
        self.classification = classification
        self.rules, self.rules_org_str = Classifier.read_ruleset(rule_path)
    def classify_document(self, doc: TaggedDocument, consider_sections=False):
        """
        Classify whether a document text content matches on of the classifier rules
        :param doc: the document to classify
        :param consider_sections: should sections be considered?
        :return:
        """
        matches = []
        for content, offset in doc.iterate_over_text_elements(sections=consider_sections):
            for idx, rule in enumerate(self.rules):
                rule_match = []
                for idx2, term in enumerate(rule):
                    # the rules are split by a ' '
                    rule_org_str = self.rules_org_str[idx][idx2]
                    term_match = term.search(content)
                    if not term_match:
                        break
                    else:
                        # Record "<rule term>:<matched text>(start, end)" with
                        # positions shifted by the element's document offset.
                        pos = term_match.regs[0]
                        pos = str((pos[0] + offset, pos[1] + offset))
                        rule_match.append(f"{rule_org_str}:{term_match.group(0)}{pos}")
                # else will be executed if loop does not encounter a break
                else:
                    matches.append(' AND '.join([rm for rm in rule_match]))
        # Execute all rules - if a rule matches then add classification
        if matches:
            doc.classification[self.classification] = ';'.join([m for m in matches])
        return doc
    @staticmethod
    def compile_entry_to_regex(term):
        """Compile a single rule term into a case-insensitive regex."""
        term = term.strip()
        # replace the * operator
        term = term.replace("*", "\\w*")
        # add that the word must start with the term
        term = term + "\\b"
        # check if there is the w/1 operator for one arbitrary word
        if 'w/' in term:
            term_rule = term
            for subterm in term.split(' '):
                # replace w/1 by only one word
                # NOTE(review): an empty subterm (double space in the rule)
                # would raise IndexError here - confirm rule files avoid that.
                if subterm[0] == 'w' and subterm[1] == '/':
                    word_count = int(subterm.split('/')[1])
                    word_sequence = []
                    for i in range(0, word_count):
                        word_sequence.append(r'\w*')
                    word_sequence = ' '.join([w for w in word_sequence])
                    term_rule = term_rule.replace(subterm, word_sequence)
            # set term now to the new rule
            term = term_rule
        return re.compile(term, re.IGNORECASE)
    @staticmethod
    def compile_line_to_regex(line: str):
        """Compile one rule line into a list of term regexes (split on 'AND')."""
        return list([Classifier.compile_entry_to_regex(term) for term in line.split("AND")])
    @staticmethod
    def read_ruleset(filepath: Union[str, Path]):
        """Read a rule file; return (compiled rules, original term strings)."""
        ruleset = []
        rule_org_str = []
        with open(filepath, "r") as f:
            for line in f:
                rule_string = line.strip()
                # Keep the raw terms (minus the AND keywords) for explanations.
                rule_org_str.append(rule_string.replace('AND ', '').split(' '))
                terms = Classifier.compile_line_to_regex(rule_string)
                ruleset.append(terms)
        return ruleset, rule_org_str
| 41.219512 | 92 | 0.563609 | 3,251 | 0.961834 | 0 | 0 | 1,568 | 0.463905 | 0 | 0 | 676 | 0.2 |
1bb6d7bdf554c1d1734130831eaf1dc5e76294f2 | 3,169 | py | Python | ism_pkg/ism.py | endsley/stochastic_ISM | 6438eb17bd391e8698e989156acb9b786c8f7299 | [
"MIT"
] | 1 | 2021-11-04T06:30:22.000Z | 2021-11-04T06:30:22.000Z | ism_pkg/ism.py | endsley/stochastic_ISM | 6438eb17bd391e8698e989156acb9b786c8f7299 | [
"MIT"
] | null | null | null | ism_pkg/ism.py | endsley/stochastic_ISM | 6438eb17bd391e8698e989156acb9b786c8f7299 | [
"MIT"
] | 2 | 2021-11-04T06:30:24.000Z | 2022-02-07T02:04:14.000Z | #!/usr/bin/env python
from ism_pkg.kernels.gaussian import gaussian
from ism_pkg.optimizer.full_ism import full_ism
from ism_pkg.optimizer.stochastic_ism import stochastic_ism
from ism_pkg.tools.HSIC_IDS_optimizer import HSIC_IDS_optimizer
from ism_pkg.tools.terminal_print import *
from ism_pkg.tools.rff_layer import *
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.kernel_approximation import RBFSampler
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
class ism():
    """Iterative Spectral Method (ISM) wrapper.

    Learns a projection matrix ``W`` (and kernel width ``σ``) via the full or
    stochastic ISM optimizer; all configuration is shared with the kernel and
    optimizer objects through the ``self.db`` dict.
    """
    def __init__(self, stochastic=False, var_percentage=0.9, debug_mode=True, max_repeat=200, batch_size_per_class=10):
        # stochastic: pick the mini-batch optimizer instead of the full one.
        self.db = {}
        self.db['stochastic'] = stochastic
        self.db['batch_size_per_class'] = batch_size_per_class
        self.db['max_repeat'] = max_repeat
        self.db['var_percentage'] = var_percentage
        self.db['debug_mode'] = debug_mode
        self.db['convergence_method'] = 'use_eigen_values' # use_eigen_values is faster but gradient might not = 0 and use_W is slower but more accurate with gradient = 0
        self.db['kernel'] = gaussian(self.db) # try : gaussian, polynomial, squared, linear
        self.rff = None
        if stochastic: self.db['optimizer'] = stochastic_ism(self.db)
        else: self.db['optimizer'] = full_ism(self.db)
        print('stochastic:%s, class batch size:%d'%(self.db['stochastic'], self.db['batch_size_per_class']))
    def __del__(self):
        # Intentionally a no-op; explicit cleanup kept for reference below.
        pass
        #del self.db['kernel']
        #del self.db['optimizer']
        #self.db.clear()
    def fit(self, X, Y, use_RFF=False):
        """Run the ISM optimizer on (X, Y); stores W and σ on self."""
        db = self.db
        Optimizer = db['optimizer']
        Optimizer.initialize(X,Y)
        Optimizer.update_f(X, Y)
        self.W = self.db['W']
        self.σ = self.db['kernel'].σ
        # Optionally prepare a random Fourier feature layer on the projection.
        if use_RFF: self.rff = rff_layer(X@self.W, self.σ)
    def predict(self, X, RFF_out=False):
        """Project X; returns RFF features or [X@W, σ] depending on RFF_out."""
        if RFF_out:
            return self.rff.apply_layer(X)
        else:
            return [X@self.W, self.σ]
    def classify_fit(self, X, Y, σ, W=None, classifier_type='HSIC_optimizer', class_batch_size=200, n_components=600): #classifier_type='HSIC_optimizer', or 'svm'
        """Fit a downstream classifier on (optionally projected) data."""
        print('\nClassifier: %s'%classifier_type)
        if W is not None: X = X@W
        self.classifier_type = classifier_type
        # NOTE(review): 1/(2*σ*2) equals 1/(4σ); the usual RBF gamma is
        # 1/(2*σ**2) - confirm whether this is intentional.
        γ = 1/(2*σ*2)
        if classifier_type == 'svm':
            svm = SVC(gamma=γ)
            svm.fit(X,Y)
            self.classifier = svm
        if classifier_type == 'svm_stochastic':
            # Approximate the RBF kernel with random Fourier features + SGD.
            self.ℱₓ = RBFSampler(gamma=γ, random_state=1, n_components=n_components)
            Φx = self.ℱₓ.fit_transform(X)
            clf = SGDClassifier(max_iter=5000, tol=1e-3, verbose=False)
            clf.fit(Φx, Y)
            self.classifier = clf
        if classifier_type == 'HSIC_optimizer':
            Ƕ = HSIC_IDS_optimizer(σ, self.db['stochastic'], class_batch_size)
            Ƕ.fit(X, Y)
            self.classifier = Ƕ
        return self.classifier
    def classify_predict(self, X, Y, W=None):
        """Predict labels for X and return [predictions, accuracy vs Y]."""
        if W is not None: X = X@W
        #if self.rff is not None: X = self.predict(X, RFF_out=True)
        if self.classifier_type == 'svm_stochastic':
            X = self.ℱₓ.fit_transform(X)
        Cf = self.classifier
        Ŷ = Cf.predict(X)
        # Re-encode both label vectors consistently before scoring accuracy.
        Ŷ = LabelEncoder().fit_transform(Ŷ)
        Y = LabelEncoder().fit_transform(Y)
        ᘔ = accuracy_score(Y, Ŷ)
        return [Ŷ, ᘔ]
| 30.76699 | 164 | 0.724203 | 2,567 | 0.800936 | 0 | 0 | 0 | 0 | 0 | 0 | 675 | 0.210608 |
1bb742a4023e045284504065668d9a075bf310a7 | 3,806 | py | Python | ziggurat_foundations/ext/pyramid/sign_in.py | fmigneault/ziggurat_foundations | c4eaac1d3e0d1a5dc09e1a3450e6c6631b701c0d | [
"BSD-3-Clause"
] | null | null | null | ziggurat_foundations/ext/pyramid/sign_in.py | fmigneault/ziggurat_foundations | c4eaac1d3e0d1a5dc09e1a3450e6c6631b701c0d | [
"BSD-3-Clause"
] | null | null | null | ziggurat_foundations/ext/pyramid/sign_in.py | fmigneault/ziggurat_foundations | c4eaac1d3e0d1a5dc09e1a3450e6c6631b701c0d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import importlib
import logging
import pyramid.security
from ziggurat_foundations.models.base import get_db_session
from ziggurat_foundations.models.services.user import UserService
CONFIG_KEY = "ziggurat_foundations"
log = logging.getLogger(__name__)
class ZigguratSignInSuccess(object):
    """Context returned after a successful sign in; membership tests succeed."""
    def __init__(self, headers, came_from, user):
        self.headers = headers
        self.came_from = came_from
        self.user = user
    def __contains__(self, other):
        # Every `x in context` check succeeds on this context.
        return True
class ZigguratSignInBadAuth(object):
    """Context returned after failed authentication; membership tests fail."""
    def __init__(self, headers, came_from):
        self.headers = headers
        self.came_from = came_from
    def __contains__(self, other):
        # Every `x in context` check fails on this context.
        return False
class ZigguratSignOut(object):
    """Context returned after signing out; membership tests succeed."""
    def __init__(self, headers):
        self.headers = headers
    def __contains__(self, other):
        # Every `x in context` check succeeds on this context.
        return True
def includeme(config):
    """Pyramid ``config.include`` hook: register the sign in/out routes.

    Route patterns, form field names and the DB session provider are all
    overridable through the ``ziggurat_foundations.*`` settings namespace.
    """
    settings = config.registry.settings
    sign_in_path = settings.get("%s.sign_in.sign_in_pattern" % CONFIG_KEY, "/sign_in")
    sign_out_path = settings.get(
        "%s.sign_in.sign_out_pattern" % CONFIG_KEY, "/sign_out"
    )
    session_provider_callable_config = settings.get(
        "%s.session_provider_callable" % CONFIG_KEY
    )
    signin_came_from_key = settings.get(
        "%s.sign_in.came_from_key" % CONFIG_KEY, "came_from"
    )
    signin_username_key = settings.get("%s.sign_in.username_key" % CONFIG_KEY, "login")
    signin_password_key = settings.get(
        "%s.sign_in.password_key" % CONFIG_KEY, "password"
    )
    if not session_provider_callable_config:
        # No setting given: fall back to the package's default session getter.
        def session_provider_callable(request):
            return get_db_session()
    else:
        # Setting is a "module.path:callable_name" string; import it lazily.
        parts = session_provider_callable_config.split(":")
        _tmp = importlib.import_module(parts[0])
        session_provider_callable = getattr(_tmp, parts[1])
    endpoint = ZigguratSignInProvider(
        settings=settings,
        session_getter=session_provider_callable,
        signin_came_from_key=signin_came_from_key,
        signin_username_key=signin_username_key,
        signin_password_key=signin_password_key,
    )
    # The provider's bound methods act as route factories: the sign in/out
    # result objects become the route contexts.
    config.add_route(
        "ziggurat.routes.sign_in",
        sign_in_path,
        use_global_views=True,
        factory=endpoint.sign_in,
    )
    config.add_route(
        "ziggurat.routes.sign_out",
        sign_out_path,
        use_global_views=True,
        factory=endpoint.sign_out,
    )
class ZigguratSignInProvider(object):
    """Route factory performing the sign in / sign out logic.

    Configuration (settings, session_getter, signin_*_key) is supplied as
    keyword arguments by :func:`includeme` and stored as attributes.
    """
    def __init__(self, *args, **kwargs):
        # Store every keyword argument as an instance attribute.
        for k, v in kwargs.items():
            setattr(self, k, v)
    def sign_in(self, request):
        """Authenticate by username or email; return a success/failure context."""
        came_from = request.params.get(self.signin_came_from_key, "/")
        db_session = self.session_getter(request)
        user = UserService.by_user_name(
            request.params.get(self.signin_username_key), db_session=db_session
        )
        if user is None:
            # if no result, test to see if email exists
            user = UserService.by_email(
                request.params.get(self.signin_username_key), db_session=db_session
            )
        if user:
            password = request.params.get(self.signin_password_key)
            if UserService.check_password(user, password):
                headers = pyramid.security.remember(request, user.id)
                return ZigguratSignInSuccess(
                    headers=headers, came_from=came_from, user=user
                )
        # Unknown user or wrong password: clear any auth and report failure.
        headers = pyramid.security.forget(request)
        return ZigguratSignInBadAuth(headers=headers, came_from=came_from)
    def sign_out(self, request):
        """Forget the current user and return a sign-out context."""
        headers = pyramid.security.forget(request)
        return ZigguratSignOut(headers=headers)
    def session_getter(self, request):
        # Placeholder; replaced by the callable passed to __init__.
        raise NotImplementedError()
1bba5c530c2518836d4b1289c7bb20d6e786e823 | 856 | py | Python | controlinverilog/time_delay.py | simoore/control-in-verilog | 9b00ff48c15c8c56458d1611eaa3fec6f4c94bdb | [
"MIT"
] | null | null | null | controlinverilog/time_delay.py | simoore/control-in-verilog | 9b00ff48c15c8c56458d1611eaa3fec6f4c94bdb | [
"MIT"
] | null | null | null | controlinverilog/time_delay.py | simoore/control-in-verilog | 9b00ff48c15c8c56458d1611eaa3fec6f4c94bdb | [
"MIT"
] | null | null | null | import jinja2
class TimeDelay(object):
    """Renders the Verilog delay-line template and reports its parameters."""
    def __init__(self, name, dw, aw):
        # dw: data word length (bits); aw: address width (sets max delay).
        self.aw = aw
        self.dw = dw
        loader = jinja2.PackageLoader('controlinverilog', 'templates')
        environment = jinja2.Environment(loader=loader)
        substitutions = {'name': name, 'dw': dw, 'aw': aw}
        self.verilog = environment.get_template('delay.v').render(substitutions)
    def print_summary(self):
        """Print the delay formulas and the module parameters."""
        print('Delay formula (taps): <delay> - 1')
        print('Delay formula (s): (<delay> - 1)/<fexe>')
        print('Max delay (s): %d/<fexe>' % (2 ** self.aw))
        print('Data word length: %d' % self.dw)
    def print_verilog(self, filename=None):
        """Print the generated Verilog, or write it to `filename` when given."""
        if filename is None:
            print(self.verilog)
            return
        with open(filename, 'w') as text_file:
            text_file.write(self.verilog)
| 26.75 | 70 | 0.566589 | 839 | 0.98014 | 0 | 0 | 0 | 0 | 0 | 0 | 179 | 0.209112 |
1bbcbec7b0ff29eb9f2ca1b3c44280f498408155 | 1,026 | py | Python | app/hacks.py | qaisjp/hackupc17f | 01e26a1a671535d80645854a240bb9d000c2db98 | [
"MIT"
] | null | null | null | app/hacks.py | qaisjp/hackupc17f | 01e26a1a671535d80645854a240bb9d000c2db98 | [
"MIT"
] | null | null | null | app/hacks.py | qaisjp/hackupc17f | 01e26a1a671535d80645854a240bb9d000c2db98 | [
"MIT"
] | null | null | null | from datetime import date
def get_routes(out_city, return_city, reach_time, return_time):
    """Return placeholder SkyScanner route objects for the requested trip.

    Stub: three empty route payloads until the SkyScanner lookup is wired in.
    """
    return [dict() for _ in range(3)]
def get_hacks():
    """Return the hard-coded list of upcoming hackathon events."""
    events = [
        ("HackSheffield", "Sheffield, South Yorkshire, UK",
         date(2017, 10, 14), date(2017, 10, 15)),
        ("HackBudapest", "Budapest, Hungary",
         date(2017, 10, 21), date(2017, 10, 22)),
        ("Do You Have The GUTS", "Glasgow, UK",
         date(2017, 10, 27), date(2017, 10, 29)),
    ]
    return [
        {
            "name": name,
            "location": location,
            "begins": begins,
            "ends": ends,
            "website": "https://website.org",
        }
        for name, location, begins, ends in events
    ]
| 21.829787 | 63 | 0.432749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 360 | 0.350877 |
1bbccc04ca561c0de127406dc967af2ae1e1f9f5 | 7,845 | py | Python | codex-py/codex/config/prop.py | egustafson/home-sensors | 232b36fe6fa2a2e3bce1391a91dffa192f17b835 | [
"Apache-2.0"
] | null | null | null | codex-py/codex/config/prop.py | egustafson/home-sensors | 232b36fe6fa2a2e3bce1391a91dffa192f17b835 | [
"Apache-2.0"
] | null | null | null | codex-py/codex/config/prop.py | egustafson/home-sensors | 232b36fe6fa2a2e3bce1391a91dffa192f17b835 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
## Design notes & thoughts ##############################
##
## Goals:
## * Support nested dict and array objects.
## * Access elements & sub-trees via "obj['key.key.index']"
## * Access via JSON-Path
## - https://jsonpath-rw.readthedocs.io/en/latest/
## * Serializable to JSON
## * De-serializable from YAML & JSON
## * Support serialization for types:
## - ISO8601 timestamps -> Py datetime
## - UUID -> Py uuid.UUID
##
## Inspiration:
## * https://configtree.readthedocs.io/en/latest/
## * http://www.kr41.net/2015/06-15-about_configtree.html
##
##
from collections.abc import Mapping, MutableMapping
from collections.abc import Sequence, MutableSequence
## Note - there should be an IN-mutable base class, but the
## code using class 'Config' does not treat the Config as
## an inmutable object -- fix and replace class Config with
## ConfigDO, and then make it a 'Mapping' and derive a
## 'State' class from MutableMapping.
class PropPath:
    """A parsed dotted property path such as ``"a.b.0"``.

    Segments are split on ``.``; purely numeric segments are converted to
    ``int`` so they can later index list-like containers.

    NOTE(review): the original constructor assigned ``self.path[0]`` on an
    empty list (always raising IndexError) and all three methods were
    declared without ``self`` — this rewrite implements the intent stated
    in the original "split and put in []" comment.
    """

    def __init__(self, path):
        # Split the dotted path into segments; digit-only segments become
        # integer indices (e.g. "a.0.b" -> ["a", 0, "b"]).
        segments = []
        for segment in str(path).split('.'):
            segments.append(int(segment) if segment.isdigit() else segment)
        self.path = segments

    def next(self):
        """Return the first (current) segment of the path."""
        return self.path[0]

    def next_is_digit(self):
        """Return True when the first segment is an integer index."""
        return isinstance(self.path[0], int)

    def remainder(self):
        """Return the segments after the first, or None for a leaf path."""
        if len(self.path) > 1:
            return self.path[1:]
        else:
            return None
import re
# collections.Iterable was deprecated in 3.3 and removed in Python 3.10;
# collections.abc is the supported location (and matches the
# collections.abc imports already used at the top of this module).
from collections.abc import Iterable
# Key grammar: a key is either a word ("foo-bar") or an index ("[3]" / "3"),
# optionally followed by a chain of ".word" or index suffixes.
key_regex = r'([\w\-]+)'
idx_regex = r'((\[\d+\])|(\d+))'
sfx_regex = r'(\.'+key_regex+r')|(\.?'+idx_regex+r')'
# kre matches paths that start with a word key; ire those starting with an
# index.  Group 'key' is the head, group 'sk' the remaining sub-key text.
k_regex = r'^(?P<key>'+key_regex+r')(?P<sk>('+sfx_regex+r')*)$'
kre = re.compile(k_regex)
i_regex = r'^(?P<key>'+idx_regex+r')(?P<sk>('+sfx_regex+r')*)$'
ire = re.compile(i_regex)
def splitkey(key):
    """Split *key* into ``(head, rest)``.

    An integer key is returned unchanged with no rest.  Otherwise the key
    is matched first as an index path (``ire``) and then as a word path
    (``kre``); index heads are converted to int.  Raises KeyError for
    anything that matches neither grammar.
    """
    if isinstance(key, int):
        return (key, None)
    # Try the index grammar first, then the word grammar.
    for pattern, is_index in ((ire, True), (kre, False)):
        match = pattern.fullmatch(key)
        if match is None:
            continue
        head = match.group('key')
        rest = match.group('sk').lstrip('.')
        if head is None:
            raise KeyError(key)
        if is_index:
            return (int(head.strip('[]')), rest)
        return (head, rest)
    raise KeyError("Invalid key, '{}'".format(key))
def mksubkeytype(subkey):
    """Return an empty container suited to *subkey*'s leading segment.

    An integer head implies a list-like child (PropList); anything else
    implies a mapping child (PropMap).
    """
    head, _ = splitkey(subkey)
    return PropList() if isinstance(head, int) else PropMap()
class PropMap(MutableMapping):
    """A dict-like container addressable with dotted keys.

    Keys such as ``"a.b[0]"`` are split with :func:`splitkey`; nested
    PropMap / PropList containers are created on demand when setting a
    key with a remaining sub-key.
    """

    def __init__(self):
        self._data = dict()

    def load(self, *args, **kwargs):
        """(Re)populate from mappings and/or sequences of (key, value) pairs.

        Bug fix: keyword arguments are now iterated with ``kwargs.items()``.
        The original iterated the dict directly, which yields bare key
        strings and raised ValueError when unpacking them into (k, v).
        """
        self._data = dict()
        for a in args:
            if isinstance(a, Mapping):
                for (k, v) in a.items():
                    self.__setitem__(k, v)
            elif not isinstance(a, str) and isinstance(a, Sequence):
                for (k, v) in a:
                    self.__setitem__(k, v)
        for (k, v) in kwargs.items():
            self.__setitem__(k, v)
        return self

    def __getitem__(self, key):
        (k, sk) = splitkey(key)
        if sk:
            # Delegate the remaining sub-key to the nested container.
            return self._data[k][sk]
        return self._data[k]

    def __setitem__(self, key, value):
        (k, sk) = splitkey(key)
        v = value
        # Wrap plain mappings/sequences so nested values also support
        # dotted-key access.  ('elif' here: a value is classified as a
        # mapping or a sequence, never both — assumes no type implements
        # both protocols, which holds for the builtin containers.)
        if isinstance(value, Mapping):
            v = PropMap().load(value)
        elif not isinstance(value, str) and isinstance(value, Sequence):
            v = PropList().load(value)
        if sk:
            if k not in self._data:
                # Create the intermediate container implied by the sub-key.
                self._data[k] = mksubkeytype(sk)
            self._data[k].__setitem__(sk, v)
        else:
            self._data[k] = v

    def __delitem__(self, key):
        (k, sk) = splitkey(key)
        if sk:
            del self._data[k][sk]
        else:
            del self._data[k]

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)

    def __repr__(self):
        return dict.__repr__(dict(self))

    def _flatten(self, prefix=''):
        """Return a flat {dotted-key: leaf-value} dict for this subtree."""
        flat = dict()
        for (k, v) in self._data.items():
            key = k
            if len(prefix) > 0:
                key = ".".join((prefix, k))
            if isinstance(v, PropMap):
                flat.update(v._flatten(key))
            elif isinstance(v, PropList):
                flat.update(v._flatten(key))
            else:
                flat[key] = v
        return flat

    def as_properties(self):
        """Return the whole tree as flat java-style properties."""
        return self._flatten()

    def as_yaml(self):
        # TODO: YAML serialization not implemented yet.
        return ""

    def as_json(self):
        # TODO: JSON serialization not implemented yet.
        return ""

    def dump(self, prefix=''):
        """Print every entry as 'dotted.key: value' (debugging helper).

        PropList values are printed via repr rather than recursed into,
        matching the original behavior.
        """
        for (k, v) in self._data.items():
            key = k
            if len(prefix) > 0:
                key = ".".join((prefix, k))
            if isinstance(v, PropMap):
                v.dump(key)
            else:
                print("{}: {}".format(key, v))
class PropList(MutableSequence):
    """A list-like container addressable with dotted/index keys.

    Integer heads (``3`` or ``"[3]"``) index the list; setting an index
    beyond the current length pads the list with None.  Nested mappings
    and sequences are wrapped in PropMap / PropList on insertion.
    """

    def __init__(self):
        self._data = list()

    def load(self, *args):
        """(Re)populate from the given values.

        Mappings are wrapped in PropMap; sequences are flattened element by
        element.  Bug fix: scalar arguments are now appended — the original
        assigned ``v = a`` but silently dropped any value that was neither
        a Mapping nor a Sequence.
        """
        self._data = list()
        for a in args:
            if isinstance(a, Mapping):
                self._data.append(PropMap().load(a))
            elif not isinstance(a, str) and isinstance(a, Sequence):
                for ii in a:
                    self._append(ii)
            else:
                self._data.append(a)
        return self

    def __getitem__(self, key):
        (k, sk) = splitkey(key)
        if sk:
            # Delegate the remaining sub-key to the nested container.
            return self._data[k][sk]
        return self._data[k]

    def __setitem__(self, key, value):
        (k, sk) = splitkey(key)
        if not isinstance(k, int):
            raise KeyError("Key is not an int")
        # Pad with None so that index k exists.
        while len(self._data) < k + 1:
            self._data.append(None)
        v = value
        if isinstance(value, Mapping):
            v = PropMap().load(value)
        elif not isinstance(value, str) and isinstance(value, Sequence):
            v = PropList().load(value)
        if sk:
            # Bug fix: the original tested ``k not in self._data``, which is
            # a *value* membership test on a list, not an index check; a
            # freshly padded slot holds None, so test the slot itself.
            if self._data[k] is None:
                self._data[k] = mksubkeytype(sk)
            self._data[k].__setitem__(sk, v)
        else:
            self._data[k] = v

    def insert(self, key, value):
        """Insert *value* before index *key* (sub-keys create containers)."""
        (k, sk) = splitkey(key)
        if not isinstance(k, int):
            raise KeyError("Key is not an int")
        if sk:
            if k >= len(self._data):
                k = len(self._data)
                self._data.insert(k, None)
            # Bug fix: the original referenced an undefined name ``v`` here,
            # raising NameError on every call with a sub-key.
            self.__setitem__("{}.{}".format(k, sk), value)
        else:
            self._data.insert(k, value)

    def __delitem__(self, key):
        (k, sk) = splitkey(key)
        if sk:
            del self._data[k][sk]
        else:
            del self._data[k]

    def _append(self, value):
        # Route appends through __setitem__ so nesting rules apply.
        idx = len(self._data)
        self._data.append(None)
        self.__setitem__(idx, value)

    def __len__(self):
        return len(self._data)

    def __repr__(self):
        return list.__repr__(list(self))

    def _flatten(self, prefix=''):
        """Return a flat {"prefix[i]": leaf-value} dict for this subtree."""
        flat = dict()
        ii = 0
        for v in self._data:
            key = "{}[{}]".format(prefix, ii)
            ii += 1
            if isinstance(v, PropMap):
                flat.update(v._flatten(key))
            elif isinstance(v, PropList):
                flat.update(v._flatten(key))
            else:
                flat[key] = v
        return flat
| 27.720848 | 72 | 0.511918 | 5,812 | 0.740854 | 0 | 0 | 0 | 0 | 0 | 0 | 1,652 | 0.21058 |
1bbdb51947871969a82c9e0bfe969e843adddd28 | 419 | py | Python | victim/victim.py | blendin/findpeer_poc | 4e57e4a1fe4291a6aa874c25efc0eec8d6680fa8 | [
"MIT"
] | 3 | 2017-02-27T18:01:30.000Z | 2017-04-11T01:59:55.000Z | victim/victim.py | blendin/findpeer_poc | 4e57e4a1fe4291a6aa874c25efc0eec8d6680fa8 | [
"MIT"
] | null | null | null | victim/victim.py | blendin/findpeer_poc | 4e57e4a1fe4291a6aa874c25efc0eec8d6680fa8 | [
"MIT"
] | 1 | 2021-12-09T15:31:39.000Z | 2021-12-09T15:31:39.000Z | from flask import Flask, request
from subprocess import Popen, PIPE
import shlex
import os
app = Flask(__name__)
# SECURITY NOTE (intentional): this service executes arbitrary shell
# commands received over HTTP.  It is a deliberately vulnerable "victim"
# proof-of-concept target — never run it outside an isolated lab network.
@app.route('/', methods=['POST'])
def cmd():
    # The posted form field 'cmd' is handed straight to a shell
    # (shell=True): full remote command execution, by design.
    cmd = request.form['cmd']
    process = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
    # Block until the command finishes, capturing both output streams.
    (output, error) = process.communicate()
    exit_code = process.wait()
    print(output)
    print(error)
    # Only stdout is returned to the HTTP caller; stderr and the exit
    # code are merely logged to the server console.
    return output
# Listen on all interfaces; Flask accepts the port as a string here.
app.run(host='0.0.0.0', port='8080')
| 22.052632 | 62 | 0.670644 | 0 | 0 | 0 | 0 | 265 | 0.632458 | 0 | 0 | 29 | 0.069212 |
1bbe66e627f5fbbe75b9f195c03efc8b24413bf8 | 489 | py | Python | solutions/2019/prob_15.py | PolPtoAmo/HPCodeWarsBCN | 8a98b1feb6d8b7d2d5b8b4dace3e02af9e6bb4e8 | [
"MIT"
] | 1 | 2021-02-27T09:46:06.000Z | 2021-02-27T09:46:06.000Z | solutions/2019/prob_15.py | PolPtoAmo/HPCodeWarsBCN | 8a98b1feb6d8b7d2d5b8b4dace3e02af9e6bb4e8 | [
"MIT"
] | null | null | null | solutions/2019/prob_15.py | PolPtoAmo/HPCodeWarsBCN | 8a98b1feb6d8b7d2d5b8b4dace3e02af9e6bb4e8 | [
"MIT"
] | 1 | 2021-02-27T12:03:33.000Z | 2021-02-27T12:03:33.000Z | entrada = input()
def fibonacci(n):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1).

    Iterative two-variable form: O(n) time and O(1) space, instead of
    materialising the whole sequence in a list as the original did.
    Assumes n is a non-negative integer (the original returned arbitrary
    values for negative n via negative list indexing).
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
# Parse the four indices and print their Fibonacci numbers on one line,
# separated by single spaces (exactly four values, as the judge expects).
tokens = entrada.split(' ')
numeros = [int(token) for token in tokens]
print(" ".join(str(fibonacci(numeros[idx])) for idx in range(4)))
| 19.56 | 73 | 0.503067 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.02454 |
1bbeef824c196490b52c030d0f2de427178e48b0 | 441 | py | Python | DomJudge/practica10/EsMonti.py | Camiloasc1/AlgorithmsUNAL | 1542b8f2c170f9b5a24638f05ae50fa2c85cfc7b | [
"MIT"
] | null | null | null | DomJudge/practica10/EsMonti.py | Camiloasc1/AlgorithmsUNAL | 1542b8f2c170f9b5a24638f05ae50fa2c85cfc7b | [
"MIT"
] | null | null | null | DomJudge/practica10/EsMonti.py | Camiloasc1/AlgorithmsUNAL | 1542b8f2c170f9b5a24638f05ae50fa2c85cfc7b | [
"MIT"
] | null | null | null | import sys
def isHeap(H):
for i in xrange(1, len(H)):
if H[parent(i)] > H[i]:
return False
else:
return True
def parent(i):
return (i - 1) / 2
def read():
while True:
if int(sys.stdin.readline()) == 0:
return
line = sys.stdin.readline()
hp = map(int, line.split())
if isHeap(hp):
print 'Yes'
else:
print 'No'
read()
| 16.961538 | 42 | 0.46712 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.020408 |
1bc1d594734ef2d4bf40da4822dca6b856406167 | 8,465 | py | Python | turbustat/statistics/cramer/cramer.py | CFD-UTSA/Turbulence-stars | 354d02e38d15e3b0d1f751b43f430dbd3a14c250 | [
"MIT"
] | 42 | 2016-04-07T20:49:59.000Z | 2022-03-28T12:54:13.000Z | turbustat/statistics/cramer/cramer.py | CFD-UTSA/Turbulence-stars | 354d02e38d15e3b0d1f751b43f430dbd3a14c250 | [
"MIT"
] | 131 | 2015-03-05T21:42:27.000Z | 2021-07-22T14:59:04.000Z | turbustat/statistics/cramer/cramer.py | CFD-UTSA/Turbulence-stars | 354d02e38d15e3b0d1f751b43f430dbd3a14c250 | [
"MIT"
] | 21 | 2015-06-10T17:10:06.000Z | 2022-02-28T15:59:42.000Z | # Licensed under an MIT open source license - see LICENSE
from __future__ import print_function, absolute_import, division
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
from ..threeD_to_twoD import _format_data
from ...io import input_data, common_types, threed_types
class Cramer_Distance(object):
    """
    Compute the Cramer distance between two data cubes. The data cubes
    are flattened spatially to give 2D objects. We clip off empty channels
    and keep only the top quartile in the remaining channels.

    Parameters
    ----------
    cube1 : %(dtypes)s
        First cube to compare.
    cube2 : %(dtypes)s
        Second cube to compare.
    noise_value1 : float, optional
        Noise level in the first cube.
    noise_value2 : float, optional
        Noise level in the second cube.
    data_format : str, optional
        Method to arange cube into 2D. Only 'intensity' is currently
        implemented.
    """

    __doc__ %= {"dtypes": " or ".join(common_types + threed_types)}

    def __init__(self, cube1, cube2, noise_value1=-np.inf,
                 noise_value2=-np.inf):
        super(Cramer_Distance, self).__init__()
        self.cube1 = input_data(cube1, no_header=True)
        self.cube2 = input_data(cube2, no_header=True)

        self.noise_value1 = noise_value1
        self.noise_value2 = noise_value2

    @property
    def data_matrix1(self):
        '''
        2D representation of `cube1`. Each column contains the
        brightest N pixels in a spectral channel, set in
        `~Cramer_Distance.format_data`.
        '''
        return self._data_matrix1

    @property
    def data_matrix2(self):
        '''
        2D representation of `cube2`. Each column contains the
        brightest N pixels in a spectral channel, set in
        `~Cramer_Distance.format_data`.
        '''
        return self._data_matrix2

    def format_data(self, data_format='intensity', seed=13024, normalize=True,
                    **kwargs):
        '''
        Rearrange data into a 2D object using the given format.

        Parameters
        ----------
        data_format : {'intensity', 'spectra'}, optional
            The method to use to construct the data matrix. The default is
            intensity, which picks the brightest values in each channel. The
            other option is 'spectra', which will pick the N brightest spectra
            to compare.
        seed : int, optional
            When the data are mismatched, the larger data set is randomly
            sampled to match the size of the other.
        normalize : bool, optional
            Forces the data sets into the same interval, removing the
            effect of different ranges of intensities (or whatever unit the
            data traces).
        kwargs : Passed to `~turbustat.statistics.threeD_to_twoD._format_data`.
        '''
        self._data_matrix1 = _format_data(self.cube1, data_format=data_format,
                                          noise_lim=self.noise_value1,
                                          normalize=normalize, **kwargs)
        self._data_matrix2 = _format_data(self.cube2, data_format=data_format,
                                          noise_lim=self.noise_value2,
                                          normalize=normalize, **kwargs)

        # Need to check if the same number of samples is taken
        samps1 = self.data_matrix1.shape[1]
        samps2 = self.data_matrix2.shape[1]

        if samps1 != samps2:
            # Set the seed due to the sampling
            np.random.seed(seed)

            # Randomly subsample each row of the larger matrix down to the
            # size of the smaller one so the two are comparable.
            if samps1 < samps2:
                new_data = np.empty((self.data_matrix2.shape[0], samps1))
                for i in range(self.data_matrix2.shape[0]):
                    new_data[i, :] = \
                        np.random.choice(self.data_matrix2[i, :], samps1,
                                         replace=False)
                self._data_matrix2 = new_data
            else:
                new_data = np.empty((self.data_matrix1.shape[0], samps2))
                for i in range(self.data_matrix1.shape[0]):
                    new_data[i, :] = \
                        np.random.choice(self.data_matrix1[i, :], samps2,
                                         replace=False)
                self._data_matrix1 = new_data

    def cramer_statistic(self, n_jobs=1):
        '''
        Applies the Cramer Statistic to the datasets.

        Parameters
        ----------
        n_jobs : int, optional
            Sets the number of cores to use to calculate
            pairwise distances. Default is 1.
        '''
        # Adjust what we call n,m based on the larger dimension.
        # m is always the larger of the two channel counts.
        if self.data_matrix1.shape[0] >= self.data_matrix2.shape[0]:
            m = self.data_matrix1.shape[0]
            n = self.data_matrix2.shape[0]
            larger = self.data_matrix1
            smaller = self.data_matrix2
        else:
            n = self.data_matrix1.shape[0]
            m = self.data_matrix2.shape[0]
            larger = self.data_matrix2
            smaller = self.data_matrix1

        pairdist11 = pairwise_distances(larger, metric="euclidean",
                                        n_jobs=n_jobs)
        pairdist22 = pairwise_distances(smaller, metric="euclidean",
                                        n_jobs=n_jobs)
        pairdist12 = pairwise_distances(larger, smaller,
                                        metric="euclidean", n_jobs=n_jobs)

        # Take sqrt of each
        # We default to using the Cramer kernel in Baringhaus & Franz (2004)
        # \phi(dist) = sqrt(dist) / 2.
        # The normalization values below reflect this
        pairdist11 = np.sqrt(pairdist11)
        pairdist12 = np.sqrt(pairdist12)
        pairdist22 = np.sqrt(pairdist22)

        # The original code summed every entry of each pairwise-distance
        # matrix with triple nested Python loops (the `i < n` guard simply
        # restricted the n x n matrix to its full extent).  ndarray.sum()
        # computes the identical totals in C.
        m, n = float(m), float(n)

        term1 = pairdist12.sum() / (m * n)
        term2 = pairdist11.sum() / (2 * m ** 2.)
        term3 = pairdist22.sum() / (2 * n ** 2.)

        self._distance = (m * n / (m + n)) * (term1 - term2 - term3)

    @property
    def distance(self):
        '''
        Cramer distance between `cube1` and `cube2`.
        '''
        return self._distance

    def distance_metric(self, verbose=False, normalize=True, n_jobs=1,
                        label1="1", label2="2", save_name=None):
        '''
        Run the Cramer statistic.

        Parameters
        ----------
        verbose : bool, optional
            Enable plotting of the data matrices.
        normalize : bool, optional
            See `Cramer_Distance.format_data`.
        n_jobs : int, optional
            See `Cramer_Distance.cramer_statistic`.
        label1 : str, optional
            Object or region name for data1
        label2 : str, optional
            Object or region name for data2
        save_name : str,optional
            Save the figure when a file name is given.
        '''
        self.format_data(normalize=normalize)
        self.cramer_statistic(n_jobs=n_jobs)

        if verbose:
            import matplotlib.pyplot as plt

            # Share one intensity scale across both panels.
            all_max = max(self.data_matrix1.max(),
                          self.data_matrix2.max())
            all_min = min(self.data_matrix1.min(),
                          self.data_matrix2.min())

            plt.subplot(121)
            plt.title(label1)
            plt.imshow(self.data_matrix1.T, origin='lower',
                       vmin=all_min, vmax=all_max)
            plt.yticks([])
            plt.xticks([0, self.data_matrix1.shape[0]])
            plt.xlabel("Channel")

            plt.subplot(122)
            plt.title(label2)
            plt.imshow(self.data_matrix2.T, origin='lower',
                       vmin=all_min, vmax=all_max)
            plt.colorbar()
            plt.yticks([])
            plt.xticks([0, self.data_matrix2.shape[0]])
            plt.xlabel("Channel")

            plt.tight_layout()

            if save_name is not None:
                plt.savefig(save_name)
                plt.close()
            else:
                plt.show()

        return self
| 34.133065 | 79 | 0.553574 | 8,163 | 0.964324 | 0 | 0 | 650 | 0.076787 | 0 | 0 | 3,231 | 0.381689 |
1bc4e6e243b1a851a5186fb66f7d5e2b2427c623 | 1,761 | py | Python | examples/simple_automon_node.py | hsivan/automon | 222b17651533bdb2abce7de36a80156ab7b9cc21 | [
"BSD-3-Clause"
] | 1 | 2022-02-25T17:50:32.000Z | 2022-02-25T17:50:32.000Z | examples/simple_automon_node.py | hsivan/automon | 222b17651533bdb2abce7de36a80156ab7b9cc21 | [
"BSD-3-Clause"
] | null | null | null | examples/simple_automon_node.py | hsivan/automon | 222b17651533bdb2abce7de36a80156ab7b9cc21 | [
"BSD-3-Clause"
] | 1 | 2022-03-12T08:12:37.000Z | 2022-03-12T08:12:37.000Z | import os
import logging
from timeit import default_timer as timer
import numpy as np
from automon import AutomonNode
from automon.zmq_socket_utils import init_client_socket
from function_def import func_inner_product
# Quiet the automon library down to INFO-level logging for this example.
logging.getLogger('automon').setLevel(logging.INFO)
def time_to_wait_for_next_sample_milliseconds(start_time, num_received_samples):
    """Milliseconds until the next 1 Hz sample is due.

    Sample k is scheduled k seconds after start_time, so the remaining
    wait is (num_received_samples - elapsed) seconds; negative or zero
    means the next sample is already overdue.
    """
    elapsed_seconds = timer() - start_time
    return (num_received_samples - elapsed_seconds) * 1000
NODE_IDX = int(os.getenv('NODE_IDX', '0'))  # Change the node index for different nodes
# d=40 must match the dimension of the data vectors generated below.
node = AutomonNode(idx=NODE_IDX, func_to_monitor=func_inner_product, d=40)
# Open a client socket and connect to the server socket. Wait for 'start' message from the server.
client_socket = init_client_socket(NODE_IDX, host=os.getenv('HOST', '127.0.0.1'), port=6400)
# Wait for a message from the coordinator (local data requests or local constraint updates) and send the reply to the coordinator.
# Read new data samples every 1 second and update the node local vector. Report violations to the coordinator.
start = timer()
num_data_samples = 0
while True:
    if time_to_wait_for_next_sample_milliseconds(start, num_data_samples) <= 0:
        # Time to read the next data sample
        # Synthetic data: 40-dim Gaussian samples around 1 stand in for a
        # real sensor feed in this example.
        data = np.random.normal(loc=1, scale=0.1, size=(40,))
        message_violation = node.update_data(data)
        if message_violation:
            client_socket.send(message_violation)
        num_data_samples += 1
    # Poll for coordinator traffic, but only until the next sample is due.
    event = client_socket.poll(timeout=time_to_wait_for_next_sample_milliseconds(start, num_data_samples))
    if event != 0:
        # Received a message from the coordinator before the timeout has reached
        message = client_socket.recv()
        reply = node.parse_message(message)
        if reply:
            client_socket.send(reply)
| 45.153846 | 130 | 0.752981 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 527 | 0.299262 |
1bc7b03933c7e93ee398c95e4200d80860ba30bb | 886 | py | Python | Labs/lab6/l6e4.py | felixchiasson/ITI1520 | 4208904bf7576433313524ebd1c1bdb9f49277f2 | [
"MIT"
] | null | null | null | Labs/lab6/l6e4.py | felixchiasson/ITI1520 | 4208904bf7576433313524ebd1c1bdb9f49277f2 | [
"MIT"
] | null | null | null | Labs/lab6/l6e4.py | felixchiasson/ITI1520 | 4208904bf7576433313524ebd1c1bdb9f49277f2 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
################################################################################
# File Name : l6e4.py
# Created By : Félix Chiasson (7138723)
# Creation Date : [2015-10-20 11:40]
# Last Modified : [2015-10-20 13:46]
# Description : Count with the count method
################################################################################
def compte(word, string):
    """Count occurrences of *string* in *word*, overlapping matches included.

    Unlike str.count (non-overlapping), a window of len(string) slides one
    position at a time, so "aa" occurs twice in "aaa".
    """
    window = len(string)
    return sum(
        1
        for start in range(len(word) - window + 1)
        if word[start:start + window] == string
    )
def compteB(c, s):
    """Count non-overlapping occurrences of *s* in *c* using str.count."""
    occurrences = c.count(s)
    return occurrences
# Demo: compare the hand-rolled (overlapping) counter with str.count on
# user input; results can differ for self-overlapping substrings.
word = input("Veuillez entrer le mot: ")
print(compte(word,'a'))
print(compte(word,'de la'))
print("Avec count()")
print(compteB(word,'a'))
print(compteB(word,'de la'))
| 29.533333 | 80 | 0.460497 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 498 | 0.561443 |
1bc7d3bbb363bf058656bb36d4bb63b2262d8270 | 2,187 | py | Python | bin/convertBackup.py | Kuruchy/kuruchy.github.io | d45336739f877c4b4bf00e9556c3b220802ec74b | [
"MIT"
] | null | null | null | bin/convertBackup.py | Kuruchy/kuruchy.github.io | d45336739f877c4b4bf00e9556c3b220802ec74b | [
"MIT"
] | null | null | null | bin/convertBackup.py | Kuruchy/kuruchy.github.io | d45336739f877c4b4bf00e9556c3b220802ec74b | [
"MIT"
] | null | null | null | import os
import re
import shutil
from datetime import date
customHeader = """
---
layout: post
title: {}
categories: {}
excerpt: {}
---
"""
def ModifiedMarkDownFile():
    """Convert a Notion blog export in ./notion-backup into a Jekyll post.

    Reads the exported markdown, rebuilds its front matter from the table
    on line 5, rewrites image links, renames the file to Jekyll's
    YYYY-MM-DD-title.md convention, moves it into ../_posts and its images
    into ../images, then deletes the export.  NOTE: chdirs into the export
    folder and assumes it is run from the repository root.
    """
    # Pick the exported folder: first "Blog*.md" entry in notion-backup.
    blog = [filename for filename in os.listdir('notion-backup') if filename.startswith("Blog") and filename.endswith(".md")][0]
    os.chdir('notion-backup/{}'.format(blog.replace('.md','')))
    # Last *.md file found wins — assumes exactly one post per export
    # (TODO confirm for multi-post backups).
    for file in os.listdir():
        if file.endswith('.md'):
            notionMarkDownFile = file
    # Read the whole file; line 5 (index 4) is assumed to be the Notion
    # table row "| title | categories | excerpt | date |".
    lines = []
    with open(notionMarkDownFile, 'r') as f:
        lines = f.readlines()
    data = lines[4].split('|')
    data = data[1:-1]
    # Each cell is padded with one space on each side; strip it.
    title = data[0]
    title = title[1:-1]
    categories = data[1]
    categories = categories[1:-1]
    excerpt = data[2]
    excerpt = excerpt[1:-1]
    date = data[3]
    date = date[1:-1]
    # New file name: Jekyll convention "<date>-<slug>.md".
    fileName = title.replace(' ', '_').lower()
    newMarkdownFileName="{}-{}.md".format(date, fileName)
    # Rewrite the header and fix image links: Notion URL-encodes spaces in
    # its asset-folder name, hence the ' ' -> '%20' mapping.
    imagesOrigen = notionMarkDownFile.replace('.md','')
    notionMarkDownFolder = imagesOrigen.replace(' ', '%20')
    newHeader = customHeader.format(title, categories, excerpt)
    with open(notionMarkDownFile, 'w') as f:
        f.write(newHeader)
        # Skip the original 5 header lines; repoint image links at /images.
        for number, line in enumerate(lines[5:]):
            if line.startswith('!['):
                line = line.replace(notionMarkDownFolder, 'images')
            f.write(line)
    # Rename to the Jekyll file name.
    os.rename(notionMarkDownFile, newMarkdownFileName)
    # Move the post and its resources into the site tree (paths are
    # relative to the export folder we chdir'd into).
    shutil.move(newMarkdownFileName, '../../_posts/{}'.format(newMarkdownFileName))
    if os.path.isdir(imagesOrigen):
        allImages = os.listdir(imagesOrigen)
        for image in allImages:
            shutil.move(imagesOrigen + '/' + image, '../../images/' + image)
    # Remove the consumed backup folder entirely.
    shutil.rmtree('../../notion-backup')
if __name__ == '__main__':
    ModifiedMarkDownFile()
| 29.554054 | 128 | 0.537723 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 333 | 0.152263 |
1bc830b97a2a994105982aa14aea3d79e767f033 | 7,714 | py | Python | website/apps/monster/views.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | [
"BSD-3-Clause"
] | 61 | 2015-11-10T17:13:46.000Z | 2021-08-06T17:58:30.000Z | website/apps/monster/views.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | [
"BSD-3-Clause"
] | 13 | 2015-11-11T07:49:41.000Z | 2021-06-09T03:45:31.000Z | website/apps/monster/views.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | [
"BSD-3-Clause"
] | 18 | 2015-11-11T04:50:04.000Z | 2021-08-20T00:57:11.000Z | import random
from django.http import HttpResponseRedirect
from django.conf import settings
from apps.monster.util import public_api_method
from apps.monster.models import (MonsterPart, MonsterInvite, MonsterTileDetails, MONSTER_GROUP,
CompletedMonsterSet, mobile_details_from_queryset)
from canvas import fact, browse
from canvas.api_decorators import api_decorator
from canvas.browse import TileDetails
from canvas.cache_patterns import CachedCall
from canvas.metrics import Metrics
from canvas.models import Content, Comment, Category
from canvas.shortcuts import r2r_jinja
from canvas.util import base36encode
from canvas.view_guards import require_staff, require_user
from canvas.view_helpers import redirect_trailing, CommentViewData, tile_render_options
from configuration import Config
# URL registry for this app; the api decorator appends routes to it as a
# side effect when view functions below are declared.
urlpatterns = []
api = api_decorator(urlpatterns)
def landing(request, **kwargs):
    """Render the monster landing page: new posts in the monster group.

    WARNING: this function exposes its local variables to the template via
    ``front_data.update(locals())`` below — renaming any local changes the
    template context, so treat local names as part of the contract.
    """
    category = Category.get(name=MONSTER_GROUP)
    sort = 'new'
    # Pagination offset comes from the querystring, defaulting to page 0.
    kwargs['offset'] = request.GET.get('offset', 0)
    show_pins = False
    nav = browse.Navigation.load_json_or_404(
        kwargs,
        sort=sort,
        category=category,
        mobile=request.is_mobile,
        replies_only=True,
        public_only=True,
    )
    front_data = {
        'tiles': browse.get_browse_tiles(request.user, nav),
        'render_options': tile_render_options(sort, show_pins),
    }
    # Overrides the default nav category that gets set in a context processor.
    request.nav_category = category
    sort_types = []
    if sort in ['active', 'new']:
        sort_types.extend([
            ('active threads', '/x/%s/active' % category.name),
            ('new posts', '/x/%s/new' % category.name)
        ])
    active_sort_url = '/x/%s/%s' % (category.name, sort)
    nav_data = nav.dump_json()
    # Dump every local (sort_types, active_sort_url, nav_data, ...) into
    # the template context, then override a couple of entries explicitly.
    front_data.update(locals())
    front_data['nav_category'] = category.details()
    front_data['DOMAIN'] = settings.DOMAIN
    return r2r_jinja('monster/landing.html', front_data)
@require_user
def create(request):
    """Render the monster-creation page for a signed-in user."""
    # The blank canvas content every new monster drawing starts from.
    start_canvas = Content.all_objects.get(
        id=Content.SMALL_DRAW_FROM_SCRATCH_PK).details()
    context = {
        'request': request,
        'monster_group': MONSTER_GROUP,
        'monster_content': start_canvas,
    }
    return r2r_jinja('monster/create.html', context)
# NOTE(review): this view shadows the module-level ``import random``; the
# stdlib module is not referenced after this point in the visible code,
# but renaming the view would break external URL wiring, so it stays.
def random(request):
    """Redirect to a random incomplete monster for this user to finish.

    A ``skip`` query parameter marks the previous monster as skipped in
    metrics; if no monsters remain, render the "no more" page instead.
    """
    part = MonsterPart.get_random_new_monster(request.user)
    skip = 'skip' in request.GET
    if part:
        if skip:
            Metrics.skip_monster.record(request, monster_id=part.id)
        else:
            Metrics.random_monster_complete.record(request, monster_id=part.id)
        return HttpResponseRedirect('/monster/{0}/complete'.format(base36encode(part.id)))
    else:
        Metrics.no_more_monsters.record(request)
        ctx = {'request':request}
        return r2r_jinja('monster/nomore.html', ctx)
@redirect_trailing
def view(request, short_id, option=None):
    """Render a monster thread: the top half plus its completed bottoms.

    ``option`` is either 'complete' (arrived via an invite/complete link)
    or a reply id selecting which bottom half to show first.
    """
    # NOTE(review): monster_image_tile appears unused here; it is kept
    # because importing jinja_tags may register template tags as a side
    # effect — confirm before removing.
    from apps.monster.jinja_tags import monster_image_tile
    view_data = CommentViewData(request, short_id)
    main_comment = view_data.op_comment
    replies = [Comment.details_by_id(cid) for cid in view_data.reply_ids]
    has_replies = len(replies) > 0
    complete_link = option and (option == 'complete')
    if complete_link and request.user.is_anonymous():
        fact.record('monster_start_flow', request, {'monster_id': short_id})
    # A numeric option selects a specific bottom half to display.
    reply_id = None
    if option:
        try:
            reply_id = int(option)
        except ValueError:
            pass
    # Resolve all cached details in one multicall round trip.
    (
        (main_comment,),
        replies
    ) = CachedCall.many_multicall(
        [main_comment],
        replies,
    )
    replies = [reply for reply in replies if not reply.is_collapsed]
    monster_part = MonsterPart.get_by_comment(main_comment)
    main_comment_details = main_comment
    main_comment = TileDetails(main_comment)
    made_bottom = False
    made_top = main_comment.comment.real_author == request.user.username
    linked_monster_footer_image = ""
    current_monster_index = 0
    # Choose which bottom to show: the explicitly requested reply, or the
    # viewer's own contribution when no reply id was given.
    for i in range(len(replies)):
        reply = replies[i]
        if reply_id is not None and reply.id == int(reply_id):
            current_monster_index = i
        elif reply.real_author == request.user.username and reply_id is None:
            current_monster_index = i
            made_bottom = True
    # The selected reply's footer image is used for og:image sharing;
    # missing content/footer is simply ignored.
    try:
        if (has_replies
                and replies[current_monster_index].reply_content
                and replies[current_monster_index].reply_content.footer):
            linked_monster_footer_image = replies[current_monster_index].reply_content.footer['name']
    except (AttributeError, IndexError):
        pass
    made_part = made_top or made_bottom
    if made_part:
        CompletedMonsterSet(request.user).sadd(main_comment.comment.id)
    can_make_bottom = (not made_part) and complete_link
    can_invite = made_top
    # incomplete monster without an invite link, send to monster index
    if not has_replies and not complete_link and not can_invite:
        return HttpResponseRedirect('/monster')
    ctx = {
        'can_invite': can_invite,
        'can_make_bottom': can_make_bottom,
        'current_monster_index': current_monster_index,
        'domain': settings.DOMAIN,
        'made_bottom': made_bottom,
        'made_part': made_part,
        'made_top': made_top,
        'main_comment': main_comment,
        'monster_content': main_comment.comment.reply_content,
        # og:image must be plain http here (scheme swap on the CDN URL).
        'og_image_url': linked_monster_footer_image.replace("https", "http", 1),
        'monster_group': MONSTER_GROUP,
        'monster_name': main_comment.comment.title,
        'replies': MonsterTileDetails.from_shared_op_details_with_viewer_stickers(request.user, main_comment_details, replies),
        'request': request,
        'short_id': main_comment.comment.short_id(),
        'start_content': Content.all_objects.get(id=Content.SMALL_DRAW_FROM_SCRATCH_PK).details(),
    }
    return r2r_jinja('monster/view.html', ctx)
@public_api_method
@require_user
def api_browse_monsters(request, payload=None):
    """
    accepts posted json in the following format
        {'offset': 0, 'count': 9}

    returns client sanitized comment details
    """
    if not payload:
        payload = {'offset': 0, 'count': 9}
    # offset/count actually used come straight from the payload below; the
    # original also bound unused local copies (including a hard-coded
    # count = 9 that contradicted the payload value) — removed.
    category = Category.get(name=MONSTER_GROUP)
    sort = 'new'
    nav = browse.Navigation.load_json_or_404(
        payload,
        sort=sort,
        category=category,
        mobile=request.is_mobile,
        replies_only=True,
        public_only=True,
        offset=payload['offset'],
        count=payload['count'],
    )
    data = {
        'monsters': mobile_details_from_queryset(browse.get_front_comments(request.user, nav)),
    }
    return data
@public_api_method
@require_user
def api_monster_details(request, short_id, payload={}):
    """Return a mobile-friendly dict for one monster thread.

    NOTE(review): ``payload={}`` is a mutable default; it is never mutated
    here so it is harmless, but prefer ``payload=None`` if this is edited.
    """
    view_data = CommentViewData(request, short_id)
    main_comment = view_data.op_comment
    replies = [Comment.details_by_id(cid) for cid in view_data.reply_ids]
    has_replies = len(replies) > 0
    # Resolve all cached details in one multicall round trip.
    (
        (main_comment,),
        replies
    ) = CachedCall.many_multicall(
        [main_comment],
        replies,
    )
    treplies = []
    made_bottom = False
    for reply in replies:
        cur = reply.to_client()
        # Chained assignment: both flag the client dict and remember that
        # the viewer authored at least one bottom half.
        if reply.real_author == request.user.username:
            cur['current_user_authored'] = made_bottom = True
        treplies.append(cur)
    ctx = {
        'top': main_comment,
        'bottoms': treplies,
        'current_user_made_bottom': made_bottom,
        'current_user_made_top': main_comment.real_author == request.user.username,
        'start_content': Content.all_objects.get(id=Content.SMALL_DRAW_FROM_SCRATCH_PK).details(),
    }
    return ctx
| 31.876033 | 127 | 0.680062 | 0 | 0 | 0 | 0 | 5,156 | 0.668395 | 0 | 0 | 1,016 | 0.131709 |
1bc988441eda0d1f7534cd5783137f0074c065e7 | 802 | py | Python | samples/chapter1_projectile.py | carnieri/raytracer | 8b518c35ba6648c77d949bb60788e9aed771b1da | [
"BSD-3-Clause"
] | null | null | null | samples/chapter1_projectile.py | carnieri/raytracer | 8b518c35ba6648c77d949bb60788e9aed771b1da | [
"BSD-3-Clause"
] | null | null | null | samples/chapter1_projectile.py | carnieri/raytracer | 8b518c35ba6648c77d949bb60788e9aed771b1da | [
"BSD-3-Clause"
] | null | null | null | from dataclasses import dataclass
from raytracer.tuple import tuple, point, vector, magnitude, normalize, dot, cross
from raytracer.util import equal
@dataclass
class Projectile:
    """State of the simulated body: where it is and how fast it moves."""
    position: tuple  # a raytracer point (built with point(); w == 1)
    velocity: tuple  # a raytracer vector (built with vector(); w == 0)
@dataclass
class Environment:
    """Constant per-tick forces acting on every projectile."""
    gravity: tuple  # a raytracer vector applied each tick
    wind: tuple  # a raytracer vector applied each tick
def tick(env, proj):
    """Advance *proj* one time step under *env*'s gravity and wind.

    Returns a new Projectile; the input is left unmodified.
    """
    new_position = proj.position + proj.velocity
    new_velocity = proj.velocity + env.gravity + env.wind
    return Projectile(new_position, new_velocity)
# projectile starts one unit above the origin
p = Projectile(point(0, 1, 0), normalize(vector(1, 1, 0)))
# gravity -0.1 unit/tick, and wind is -0.01 unit/tick
e = Environment(vector(0, -0.1, 0), vector(-0.01, 0, 0))
i = 0
# Step the simulation until the projectile returns to ground level
# (y <= 0), printing each tick's state.
while p.position.y > 0:
    p = tick(e, p)
    print(f"iteration {i} {p}")
    i += 1