hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f0351c577a324e005d0a9c1acdf54fc0b4f28867 | 2,003 | py | Python | bin/varipack.py | angrydill/ItsyBitser | bf9689136748bef3d022aa7529b4529e610abbf7 | [
"MIT"
] | null | null | null | bin/varipack.py | angrydill/ItsyBitser | bf9689136748bef3d022aa7529b4529e610abbf7 | [
"MIT"
] | 1 | 2021-04-26T15:31:50.000Z | 2021-04-26T15:31:50.000Z | bin/varipack.py | angrydill/ItsyBitser | bf9689136748bef3d022aa7529b4529e610abbf7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
""" Packs/unpacks Hextream content to/from the Varipacker format """
import sys
import argparse
from itsybitser import hextream, varipacker
def main():
    """Parse command-line arguments, then pack or unpack the input stream.

    Reads the whole input, converts it (Hextream -> Varipacker when --pack
    is given, Varipacker -> Hextream when --unpack is given) and writes the
    result, optionally preceded by a comment line and optionally without a
    trailing newline.
    """
    arg_parser = argparse.ArgumentParser(
        description="Packs/unpacks Hextream content to/from the Varipacker format"
    )
    # Exactly one of --pack / --unpack must be supplied.
    mode_group = arg_parser.add_mutually_exclusive_group(required=True)
    mode_group.add_argument(
        "-p", "--pack", action="store_true",
        help="Pack Hextream content into Varipacker format")
    mode_group.add_argument(
        "-u", "--unpack", action="store_true",
        help="Unpack Varipacker content into Hextream format")
    # Positional in/out files default to the standard streams.
    arg_parser.add_argument(
        'infile', nargs='?', type=argparse.FileType('r', encoding="UTF-8"),
        help="Name of file with content to be packed/unpacked",
        default=sys.stdin)
    arg_parser.add_argument(
        'outfile', nargs='?', type=argparse.FileType('w', encoding="UTF-8"),
        help="Name of file in which to write packed/unpacked content",
        default=sys.stdout)
    arg_parser.add_argument(
        "-c", "--comment", type=str,
        help="Prepend the output with specified comment string")
    arg_parser.add_argument(
        "-n", "--omit-newline", action="store_true",
        help="The ending newline character(s) will be omitted from the output")
    options = arg_parser.parse_args()

    raw_text = options.infile.read()
    if options.pack:
        result = varipacker.encode(hextream.decode(raw_text))
    else:
        result = hextream.encode(varipacker.decode(varipacker.distill(raw_text)))

    if options.comment:
        options.outfile.write("# {}\n".format(options.comment))
    options.outfile.write(result)
    if not options.omit_newline:
        options.outfile.write("\n")
# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()
| 41.729167 | 95 | 0.642536 |
f035d65dd23cce88533c77e43cfdaf49e3f5a500 | 1,665 | py | Python | reobject/models/model.py | agusmakmun/reobject | a7689bbb37f021c6c8ea72d6984513adec0d8a17 | [
"Apache-2.0"
] | 92 | 2017-02-08T21:51:03.000Z | 2021-05-27T22:58:07.000Z | reobject/models/model.py | agusmakmun/reobject | a7689bbb37f021c6c8ea72d6984513adec0d8a17 | [
"Apache-2.0"
] | 15 | 2017-01-27T22:54:42.000Z | 2021-05-27T01:31:58.000Z | reobject/models/model.py | agusmakmun/reobject | a7689bbb37f021c6c8ea72d6984513adec0d8a17 | [
"Apache-2.0"
] | 7 | 2017-02-08T22:00:41.000Z | 2021-05-26T00:26:44.000Z | import attr
from reobject.models.manager import ManagerDescriptor, RelatedManagerDescriptor
from reobject.models.store import Store, ModelStoreMapping
| 24.850746 | 79 | 0.581381 |
f035dcaa83c43d11bb11c5eb08b76bef164f27d0 | 11,525 | py | Python | label.py | winstonwzhang/osumapper | e773b45650f8a013de48ff169a93ea1745c6f931 | [
"Apache-2.0"
] | null | null | null | label.py | winstonwzhang/osumapper | e773b45650f8a013de48ff169a93ea1745c6f931 | [
"Apache-2.0"
] | null | null | null | label.py | winstonwzhang/osumapper | e773b45650f8a013de48ff169a93ea1745c6f931 | [
"Apache-2.0"
] | null | null | null | import os
import re
import sys
import pdb
import numpy as np
from math import log, e
from scipy.signal import find_peaks
import word
from utils import *
# hit object subword integer representations from word.py
# Object-type codes: hitcircle, empty, slider begin/center/end, break.
h_int = word.obj_str2int[word.HITCIRCLE]
e_int = word.obj_str2int[word.EMPTY]
sb_int = word.obj_str2int[word.SLIDER_BEGIN]
sc_int = word.obj_str2int[word.SLIDER_CENTER]
se_int = word.obj_str2int[word.SLIDER_END]
b_int = word.obj_str2int[word.BREAK]
# Direction codes (compass-style sub-words defined in word.py).
SW_int = word.dir_str2int[word.SW]
SE_int = word.dir_str2int[word.SE]
NW_int = word.dir_str2int[word.NW]
NE_int = word.dir_str2int[word.NE]
W_int = word.dir_str2int[word.W]
E_int = word.dir_str2int[word.E]
N_int = word.dir_str2int[word.N]
# Velocity codes (crawl/slow/medium sub-words defined in word.py).
crawl_int = word.vel_str2int[word.CRAWL]
slow_int = word.vel_str2int[word.SLOW]
med_int = word.vel_str2int[word.MED]
def loadModelPred(arr_file, sec_len):
    '''
    Load model prediction labels and compute the cropped audio length.

    arr_file: path to numpy array predictions saved by model (.npy file)
    sec_len: length of original song in seconds

    Returns (label_arr, crop_sec): the loaded prediction array and sec_len
    rounded down to a whole number of 512-sample hops at a 16 kHz sample
    rate (matching the spectrogram computation).
    '''
    # predictions saved by the model (one value per spectrogram time bin)
    label_arr = np.load(arr_file)
    # cropped audio length (from spectrogram calculations) in seconds:
    # floor the sample count to a multiple of the 512-sample hop length.
    # (The unused `num_bins = len(label_arr)` local from the original has
    # been removed.)
    crop_sec = int(np.floor(sec_len * 16000 / 512) * 512) / 16000
    return label_arr, crop_sec
def label2Array(label_arr, tick_arr, time_bpm, wav_len):
    '''
    Convert per-bin hit-object probabilities into per-tick "word" rows.

    label_arr: array Nx1 with values [0,1] indicating probability of hit object
    tick_arr: ticks in ms
    time_bpm: list of lists, with each element list containing
              [offset, bpm, meter] for each uninherited timing section
    wav_len: length of cropped audio from spectrogram in seconds

    Returns word_arr, a [num ticks] x 3 uint8 array whose columns hold
    (hit-object code, direction code, velocity code) as defined in word.py.

    NOTE: direction/velocity assignments below use np.random.choice /
    np.random.randint, so the output is nondeterministic unless the global
    NumPy seed is fixed by the caller.
    '''
    ticks = np.copy(tick_arr)
    # set all objects greater than threshold to 3 (hitcircle) for now
    labels = np.copy(label_arr)
    thresh = 0.1
    labels[labels > thresh] = h_int
    labels[labels <= thresh] = e_int
    labels = labels.astype(np.uint8)
    objs = labels == h_int
    N = len(labels)
    bin_len = wav_len / (N-1) # length of each time bin in seconds
    bin_in_sec = 1 / bin_len # number of bins in every second
    # convert ticks (ms) to time bin indices
    #tick_diff = np.diff(ticks)
    # only keep tick idx with difference > bin length
    #kept_ticks = np.where(tick_diff > round(bin_len*1000))[0]
    #kept_ticks = ticks[kept_ticks]
    # search for best model predictions for given timing ticks
    # NOTE(review): 'choice' is hard-coded to 1, so the else branch below is
    # dead code -- presumably kept for experimentation; confirm before removing.
    choice = 1
    if choice == 1:
        # Shift each uninherited timing section so its ticks line up with
        # the strongest model predictions, then map ticks to bin indices.
        best_shifts = getBestShifts(time_bpm, ticks, label_arr, bin_in_sec, N, wav_len)
        for i, section in enumerate(time_bpm):
            # rg = [section start, section end) in milliseconds; the last
            # section extends to the end of the (cropped) audio.
            if i == len(time_bpm)-1:
                rg = (section[0], wav_len*1000)
            else:
                rg = (section[0], time_bpm[i+1][0])
            section_idx = np.bitwise_and(ticks >= rg[0], ticks < rg[1])
            ticks[section_idx] = ticks[section_idx] + best_shifts[i]
        tbi = getTickBins(ticks, bin_in_sec, N)
    else:
        tbi = getTickBins(ticks, bin_in_sec, N)
    # if too many hit objects, increase threshold
    while objs[tbi].sum() > len(objs)/4:
        if thresh >= 0.95:
            break
        thresh += 0.05
        labels[label_arr > thresh] = h_int
        labels[label_arr <= thresh] = e_int
        objs = labels == h_int
    # if too few hit objects, decrease threshold
    while objs[tbi].sum() < len(objs)/10:
        if thresh <= 0.05:
            break
        thresh -= 0.05
        labels[label_arr > thresh] = h_int
        labels[label_arr <= thresh] = e_int
        objs = labels == h_int
    # get final hit objects for each tick
    tick_obj = labels[tbi]
    # initialize word array: column 0 = object, 1 = direction, 2 = velocity
    word_arr = np.zeros((len(tick_obj), 3)).astype(np.uint8)
    word_arr[:,0] = tick_obj.flatten()
    word_arr[:,1] = E_int
    word_arr[:,2] = slow_int
    # now look for potential slider starts and ends
    # two consecutive hitcircles: slider start and end
    hits = np.copy(tick_obj).flatten()
    # object index
    h_idx = np.where(hits == h_int)[0]
    # object mask
    h_mask = np.zeros(h_idx.shape, dtype=bool)
    # difference in ticks between each hit object
    diff = np.diff(h_idx)
    ### JUMPS: find distribution of tick differences (exclude high tick diff)
    diff_dist, _ = np.histogram(diff[diff<10], bins=np.arange(11))
    # most common tick difference is assumed to be the
    # base time difference between hitcircle jumps
    # exclude 1 tick difference
    jump_diff = np.argmax(diff_dist[2:])+2
    # mask of all ticks with constant base tick diff
    base_mask = diff == jump_diff
    # jump sections have constant base tick diff for longer than 4 objects
    jump_starts = []
    jump_mask = np.copy(h_mask)
    jump_areas = pattwhere_sequ([True,True,True], base_mask)
    if jump_areas.any():
        jump_idx_list = consecutive(jump_areas)
        # store starting tick idx and length of every jump section (hit circles > 4)
        for jump_idx in jump_idx_list:
            sec_len = len(jump_idx)
            # section length + 2 from the extension of [True,True,True] window
            tup = (jump_idx[0], sec_len+2)
            jump_starts.append(tup)
            jump_mask[tup[0]:tup[0]+tup[1]+1] = True
    # jumps should have changing direction (either SW or SE)
    # jumps should have medium velocity
    for jtup in jump_starts:
        s_hidx = jtup[0]
        s_hlen = jtup[1]
        # first hitcircle in jump section won't change velocity
        jump_idx = h_idx[s_hidx+1:s_hidx+s_hlen+1]
        word_arr[jump_idx,1] = np.random.choice(np.array([W_int, E_int, SW_int, SE_int]))
        word_arr[jump_idx,2] = med_int
        # break up long jump sections with sliders
        # (limit chosen at random between 6 and 10 objects)
        limit = np.random.randint(6,11)
        if s_hlen > limit:
            num_breaks = s_hlen // limit
            ss_idx = np.arange(s_hidx+limit, s_hidx+s_hlen, limit)
            for ss in ss_idx:
                setSlider(tick_obj, word_arr, h_idx[ss], h_idx[ss+1])
    ### STREAMS: store starting tick idx and length of every stream (> 3 consec hitcircles)
    stream_starts = []
    stream_mask = np.copy(h_mask)
    # find all occurrences of two consecutive hitcircles
    twos = pattwhere_sequ([h_int, h_int], hits)
    if twos.any():
        # 2 consecutive twos = 3 hitcircles, 3 consec twos = 4 hitcircles, etc
        twos_idx_list = consecutive(twos)
        for twos_idx in twos_idx_list:
            tup_num = len(twos_idx)
            # >= 3 consec hitcircles (stream)
            if tup_num > 1:
                # store tuple (stream starting tick index, stream length in ticks)
                tup = (twos_idx[0], tup_num+1)
                stream_starts.append(tup)
                stream_obj_mask = np.bitwise_and(h_idx >= tup[0], h_idx < tup[0]+tup[1])
                stream_mask[stream_obj_mask] = True
    # streams should have a constant direction (either NW or NE)
    # streams should have 'c' velocity (unless spaced streams are wanted)
    for stup in stream_starts:
        sidx = stup[0]
        slen = stup[1]
        # first hitcircle in stream won't change direction or velocity
        word_arr[sidx+1:sidx+slen,1] = np.random.choice(np.array([NW_int, N_int, NE_int]))
        word_arr[sidx+1:sidx+slen,2] = np.random.choice(np.array([crawl_int, slow_int]))
    ### SLIDERS: hit objects not belonging to jump or stream sections
    slider_mask = ~(jump_mask | stream_mask)
    slider_idx = h_idx[slider_mask]
    slider_diff = np.diff(slider_idx)
    # gaps less than threshold (10 ticks) can be made sliders
    obj_avail = np.where(slider_diff < 11)[0]
    # use every other obj as slider start
    # (actually every third candidate -- step of 3 below)
    slider_starts = slider_idx[obj_avail[::3]]
    slider_ends = slider_idx[obj_avail[::3]+1]
    # sliders should have a changing direction (NW, N, NE)
    # slider centers should have 'c' velocity
    # slider ends should have 'm' slider velocity
    for ss, se in zip(slider_starts, slider_ends):
        setSlider(tick_obj, word_arr, ss, se)
    ### remove lone objects
    #circle_idx = tick_obj == 3
    #for ci in circle_idx:
    #    print(ci)
    # final update to word_arr
    word_arr[:,0] = tick_obj.flatten()
    # visualize word_arr
    #import matplotlib.pyplot as plt
    #plt.plot(word_arr[:,0])
    #plt.plot(word_arr[:,1])
    #plt.plot(word_arr[:,2])
    #plt.show()
    #pdb.set_trace()
    return word_arr
def array2Label(tick_arr, arr, wav_len, num_bins):
    '''
    Scatter per-tick word rows back onto spectrogram time bins.

    tick_arr: ticks in ms
    arr: [num ticks] x 3 array with hitobject, direction, and velocity information
    wav_len: length of cropped audio from spectrogram in seconds
    num_bins: number of bins in spectrogram

    Returns a [num_bins] x 3 float array that is zero everywhere except at
    the bins corresponding to the ticks, which receive the rows of arr.
    '''
    N = num_bins
    bin_len = wav_len / (num_bins - 1)  # length of each time bin in seconds
    bin_in_sec = 1 / bin_len            # number of bins in every second
    labels = np.zeros((N, 3))
    # convert ticks (ms) to time bin indices
    # (np.int was removed in NumPy 1.24; use the builtin int dtype instead)
    tbi = np.floor((tick_arr / 1000) * bin_in_sec).astype(int)
    # shift four bins into the future due to spectrogram window being 4 hop lengths (2048/512)
    tbi = tbi + 4
    # hit object classes
    labels[tbi, :] = arr
    return labels
f03610bfed11de35610fed3ea3d3b041e18742ab | 8,982 | py | Python | bbp/tests/AcceptTests.py | kevinmilner/bbp | d9ba291b123be4e85f76317ef23600a339b2354d | [
"Apache-2.0"
] | null | null | null | bbp/tests/AcceptTests.py | kevinmilner/bbp | d9ba291b123be4e85f76317ef23600a339b2354d | [
"Apache-2.0"
] | null | null | null | bbp/tests/AcceptTests.py | kevinmilner/bbp | d9ba291b123be4e85f76317ef23600a339b2354d | [
"Apache-2.0"
] | 1 | 2018-11-12T23:10:02.000Z | 2018-11-12T23:10:02.000Z | #!/usr/bin/env python
"""
Southern California Earthquake Center Broadband Platform
Copyright 2010-2017 Southern California Earthquake Center
These are acceptance tests for the broadband platforms
$Id: AcceptTests.py 1795 2017-02-09 16:23:34Z fsilva $
"""
from __future__ import division, print_function
# Import Python modules
import os
import new
import sys
import shutil
import optparse
import unittest
# Import Broadband modules
import bband_utils
import seqnum
import cmp_bbp
from install_cfg import InstallCfg
def find_tests(test, rerun):
    """
    Search for .xml test files in the accept_inputs directory and attach one
    test method per file to the BBPAcceptanceTests class (defined elsewhere
    in this module).

    test: optional substring filter -- only XML files whose name contains it
          are turned into tests; None means all files.
    rerun: when truthy, delete the resume file so all tests run again.

    NOTE(review): this module mixes Python 2 and 3 idioms -- raw_input and
    new.instancemethod below exist only on Python 2; confirm the intended
    interpreter before modernizing.
    """
    install = InstallCfg()
    resume = True
    accept_test_inputs = "accept_inputs"
    # NOTE(review): accept_test_refs is assigned but never used here.
    accept_test_refs = "accept_refs"
    input_dir = os.path.join(install.A_TEST_REF_DIR, accept_test_inputs)
    if not os.path.exists(input_dir):
        # These are expected to be in the dist
        print("Acceptance test inputs dir %s does not exist, aborting" %
              (input_dir))
        sys.exit()
    # Create list of test XML files
    files = os.listdir(input_dir)
    wfext = ".xml"
    # First we find all the tests
    test_files = []
    for testfile in files:
        if testfile.endswith(wfext):
            # Don't add SDSU tests on Mac OS X
            if sys.platform == 'darwin' and testfile.find("SDSU") >= 0:
                # Only announce the skip when the file would have matched.
                if test is None or (test is not None and testfile.find(test) >= 0):
                    print("*** Mac OS X detected: skipping test %s." %
                          (testfile))
                continue
            if test is None:
                test_files.append(testfile)
            else:
                if testfile.find(test) >= 0:
                    test_files.append(testfile)
    resume_file = os.path.join(install.A_OUT_LOG_DIR, "resume.txt")
    resume_list = ""
    if rerun:
        # NOTE(review): raises OSError if the resume file does not exist --
        # confirm that --rerun is only used after a previous run.
        os.remove(resume_file)
    # Check for already completed tests if not rerunning
    if resume == True and rerun == False:
        if os.path.exists(resume_file):
            resume_fp = open(resume_file, 'r')
            resume_list = resume_fp.read().splitlines()
            completed_test_count = len(resume_list)
            print("==> Completed Tests : %d" % (completed_test_count))
            resume_fp.close()
            # If every test already passed, ask the user before re-running.
            if ((test is None) and
                (completed_test_count >= len(test_files))):
                print("All the acceptance tests have passed previously!")
                proceed = raw_input("Would you like to re-run "
                                    "all the acceptance tests? (y/n)")
                if str.lower(proceed) == 'y':
                    os.remove(resume_file)
                    resume_list = ""
                else:
                    sys.exit(0)
    # Create unittest test case for each file
    for xml_file in test_files:
        # Skip test if we ran it already
        if xml_file in resume_list:
            print("==> Skipping %s" % (xml_file))
            continue
        file_base = xml_file[0:xml_file.find(wfext)]
        # pieces = file_base.split('-')
        # Adjust tolerance depending on test mode
        # NOTE(review): tolerance is set but not used in this function;
        # presumably consumed by permutation_test via closure/config -- verify.
        tolerance = 0.03
        #This defines a method that we're going to add to the
        #BBPAcceptanceTests class. The keyword binding has to
        #be done b/c Python is storing pointers to 'file' and 'file_base'
        #so w/o the keywords, 'file' and 'file_base' in the function will
        #point to the final values
        # We create a method object which is an instance method for
        # BBPAcceptanceTests which executes the code in
        # testPermutation
        method = new.instancemethod(permutation_test,
                                    None, BBPAcceptanceTests)
        # We give the method a new name in BBPAcceptanceTests
        # which contains the xml file being run
        setattr(BBPAcceptanceTests, "test_%s" % file_base, method)
if __name__ == '__main__':
    # Parse options
    parser = optparse.OptionParser()
    parser.add_option("-t", "--test",
                      dest="test",
                      help="Execute specific test",
                      metavar="TEST")
    parser.add_option("-r", "--rerun",
                      action="store_true",
                      dest="rerun",
                      help="Rerun tests already completed")
    (options, args) = parser.parse_args()
    # Normalize option values for find_tests().
    if options.test is not None:
        test = options.test
    else:
        test = None
    # optparse leaves store_true options at None when the flag is absent,
    # so this maps "flag present" -> True, "flag absent" -> False.
    if options.rerun is not None:
        rerun = True
    else:
        rerun = False
    # Attach one test method per XML input to BBPAcceptanceTests
    # (the class is defined elsewhere in this module), then run the suite.
    find_tests(test, rerun)
    suite = unittest.TestLoader().loadTestsFromTestCase(BBPAcceptanceTests)
    print("==> Number of tests to run: %d" % suite.countTestCases())
    unittest.TextTestRunner(verbosity=2).run(suite)
| 40.098214 | 83 | 0.554888 |
f038a863268c516819dbf950a745c99c6fc026b5 | 6,375 | py | Python | tutorials/03-advanced/image_captioning/model.py | xuwangyin/pytorch-tutorial | d6a29c19288c817432b3b101765596e037e01989 | [
"MIT"
] | 11 | 2017-08-20T18:12:34.000Z | 2020-03-18T18:03:16.000Z | tutorials/03-advanced/image_captioning/model.py | xuwangyin/pytorch-tutorial | d6a29c19288c817432b3b101765596e037e01989 | [
"MIT"
] | null | null | null | tutorials/03-advanced/image_captioning/model.py | xuwangyin/pytorch-tutorial | d6a29c19288c817432b3b101765596e037e01989 | [
"MIT"
] | 5 | 2017-08-10T05:15:37.000Z | 2021-12-01T08:23:30.000Z | import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from torch.autograd import Variable
| 47.574627 | 132 | 0.674039 |
f03ab886461270d772569e4546b232254bbdaeb6 | 3,525 | py | Python | .ipynb_checkpoints/main2-checkpoint.py | jcus/python-challenge | 8e00b7ae932e970a98c419e5b49fc7a0dfc3eac5 | [
"RSA-MD"
] | null | null | null | .ipynb_checkpoints/main2-checkpoint.py | jcus/python-challenge | 8e00b7ae932e970a98c419e5b49fc7a0dfc3eac5 | [
"RSA-MD"
] | null | null | null | .ipynb_checkpoints/main2-checkpoint.py | jcus/python-challenge | 8e00b7ae932e970a98c419e5b49fc7a0dfc3eac5 | [
"RSA-MD"
] | null | null | null | {
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "001887f2",
"metadata": {},
"outputs": [],
"source": [
"# import os modules to create path across operating system to load csv file\n",
"import os\n",
"# module for reading csv files\n",
"import csv"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "77c0f7d8",
"metadata": {},
"outputs": [],
"source": [
"# read csv data and load to budgetDB\n",
"csvpath = os.path.join(\"Resources\",\"budget_data.csv\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b2da0e1e",
"metadata": {},
"outputs": [],
"source": [
"# creat a txt file to hold the analysis\n",
"outputfile = os.path.join(\"Analysis\",\"budget_analysis.txt\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f3c0fd89",
"metadata": {},
"outputs": [],
"source": [
"# set var and initialize to zero\n",
"totalMonths = 0 \n",
"totalBudget = 0"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4f807576",
"metadata": {},
"outputs": [],
"source": [
"# set list to store all of the monthly changes\n",
"monthChange = [] \n",
"months = []"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ad264653",
"metadata": {},
"outputs": [],
"source": [
"# use csvreader object to import the csv library with csvreader object\n",
"with open(csvpath, newline = \"\") as csvfile:\n",
"# # create a csv reader object\n",
" csvreader = csv.reader(csvfile, delimiter=\",\")\n",
" \n",
" # skip the first row since it has all of the column information\n",
" #next(csvreader)\n",
" \n",
"#header: date, profit/losses\n",
"print(csvreader)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "27fc81c1",
"metadata": {},
"outputs": [],
"source": [
"for p in csvreader:\n",
" print(\"date: \" + p[0])\n",
" print(\"profit: \" + p[1])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "83749f03",
"metadata": {},
"outputs": [],
"source": [
"# read the header row\n",
"header = next(csvreader)\n",
"print(f\"csv header:{header}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3b441a20",
"metadata": {},
"outputs": [],
"source": [
"# move to the next row (first row)\n",
"firstRow = next(csvreader)\n",
"totalMonths = (len(f\"[csvfile.index(months)][csvfile]\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a815e200",
"metadata": {},
"outputs": [],
"source": [
"output = (\n",
" f\"Financial Anaylsis \\n\"\n",
" f\"------------------------- \\n\"\n",
" f\"Total Months: {totalMonths} \\n\")\n",
"print(output)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6bf35c14",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
| 21.759259 | 84 | 0.508936 |
f03ac5d7659f10dfb37b2809abf402ff19dbec0f | 338 | py | Python | Algorithms/Problems/MaximumSubarray/tests/maximum_substring_naive_test.py | Nalhin/AlgorithmsAndDataStructures | 2d2c87d0572e107c993c3c8866b8beefd4d22082 | [
"MIT"
] | 1 | 2021-11-16T13:02:25.000Z | 2021-11-16T13:02:25.000Z | Algorithms/Problems/MaximumSubarray/tests/maximum_substring_naive_test.py | Nalhin/AlgorithmsAndDataStructures | 2d2c87d0572e107c993c3c8866b8beefd4d22082 | [
"MIT"
] | null | null | null | Algorithms/Problems/MaximumSubarray/tests/maximum_substring_naive_test.py | Nalhin/AlgorithmsAndDataStructures | 2d2c87d0572e107c993c3c8866b8beefd4d22082 | [
"MIT"
] | null | null | null | from Algorithms.Problems.MaximumSubarray.maximum_substring_naive import (
maximum_subarray_naive,
)
| 24.142857 | 73 | 0.683432 |
f03b44b7bd155b16cda8a428f739db53cb3a8257 | 138 | py | Python | helios/workflows/__init__.py | thiagosfs/helios-server | 1616f742c0d3ab8833aab4cfbcc45d9818c68716 | [
"Apache-2.0"
] | 525 | 2015-01-04T11:51:26.000Z | 2022-03-31T17:15:20.000Z | helios/workflows/__init__.py | thiagosfs/helios-server | 1616f742c0d3ab8833aab4cfbcc45d9818c68716 | [
"Apache-2.0"
] | 238 | 2015-01-02T17:50:37.000Z | 2022-02-09T16:39:49.000Z | helios/workflows/__init__.py | thiagosfs/helios-server | 1616f742c0d3ab8833aab4cfbcc45d9818c68716 | [
"Apache-2.0"
] | 238 | 2015-01-05T23:09:20.000Z | 2022-03-21T16:47:33.000Z | """
Helios Election Workflows
"""
from helios.datatypes import LDObjectContainer
| 13.8 | 46 | 0.76087 |
f03bb71c1a6622417a2b810cfaa5541f5b608f38 | 2,210 | py | Python | tests/test_searcher.py | dbvirus/searcher | f4c2036da0a822bc9e408dcda10462d2d4335f03 | [
"MIT"
] | null | null | null | tests/test_searcher.py | dbvirus/searcher | f4c2036da0a822bc9e408dcda10462d2d4335f03 | [
"MIT"
] | null | null | null | tests/test_searcher.py | dbvirus/searcher | f4c2036da0a822bc9e408dcda10462d2d4335f03 | [
"MIT"
] | null | null | null | """
Unit tests for the searcher module. Those tests mock the Entrez class
and do not make any sort of HTTP request.
"""
# pylint: disable=redefined-outer-name
import io
from pathlib import Path
from Bio import Entrez
from dbvirus_searcher import Searcher
def test_searcher_initialization(searcher):
    """
    Verify the default database name and that a custom one is honoured.
    """
    assert isinstance(searcher, Searcher)
    assert searcher.db == "sra"

    custom_searcher = Searcher("another@test.com", db="other_db")
    assert custom_searcher.db == "other_db"
def test_searcher_searches_sra(searcher: Searcher, mocker):
    """
    A valid search string must be forwarded to Biopython's Entrez.esearch
    with the expected database and retrieval parameters.
    """
    query = '"Homo sapiens"[Organism]'
    # esearch must return a readable buffer, so hand it an in-memory stream.
    mocker.patch("Bio.Entrez.esearch")
    Entrez.esearch.return_value = io.StringIO("{}")

    searcher.search(query)

    # pylint: disable=no-member
    Entrez.esearch.assert_called_with("sra", query, retmax=10, retmode="json")
def test_searcher_configurer_entrez():
    """
    Constructing a Searcher must propagate the e-mail address and API key
    to Biopython's global Entrez configuration.
    """
    email = "test@test.com"
    api_key = "3141516"
    Searcher(email=email, api_key=api_key)
    assert Entrez.email == email
    assert Entrez.api_key == api_key
def test_searcher_returns_dictionary(searcher: Searcher, mocker):
    """
    A search result must come back parsed into a plain dictionary.
    """
    mocker.patch("Bio.Entrez.esearch")
    Entrez.esearch.return_value = io.StringIO("{}")

    assert isinstance(searcher.search("Human", max_results=3), dict)
def test_fetch_result(searcher: Searcher, mocker):
    """
    Given an Entrez UID, the searcher must acquire the related data.

    efetch is mocked to return a sample XML document. The file handle is
    managed with a context manager so it is closed deterministically
    instead of being leaked until garbage collection (the original left
    it open, triggering ResourceWarning under -W error).
    """
    mocker.patch("Bio.Entrez.efetch")
    sample_path = Path(__file__).parent.absolute().joinpath(
        "sample_efetch_result.xml"
    )
    with open(sample_path) as sample_file:
        Entrez.efetch.return_value = sample_file
        # fetch() consumes the handle while it is still open.
        data = searcher.fetch("8801091")
    # pylint: disable=no-member
    Entrez.efetch.assert_called()
    assert data
    assert isinstance(data, dict)
| 27.283951 | 78 | 0.698643 |
f03d4c226a3b3aa190f45b9620b4a20bd1deafdc | 2,296 | py | Python | isso/tests/test_html.py | Nildeala/isso | 661f2a68813e6ba5c234c9b84f440681712cdcef | [
"MIT"
] | 1 | 2017-08-24T21:10:01.000Z | 2017-08-24T21:10:01.000Z | isso/tests/test_html.py | Nildeala/isso | 661f2a68813e6ba5c234c9b84f440681712cdcef | [
"MIT"
] | null | null | null | isso/tests/test_html.py | Nildeala/isso | 661f2a68813e6ba5c234c9b84f440681712cdcef | [
"MIT"
] | null | null | null |
try:
import unittest2 as unittest
except ImportError:
import unittest
from isso.core import Config
from isso.utils import html
| 37.639344 | 108 | 0.562718 |
f03f530e1dc98bcc4544ec79667fa0181fc768d4 | 373 | py | Python | tests/data_elements/test_other_double.py | GalBenZvi/dicom_parser | fc3e892ebf99c4e5d62cb5e7de7df341baf445fe | [
"MIT"
] | 11 | 2020-08-08T21:41:54.000Z | 2021-07-27T12:48:31.000Z | tests/data_elements/test_other_double.py | GalBenZvi/dicom_parser | fc3e892ebf99c4e5d62cb5e7de7df341baf445fe | [
"MIT"
] | 45 | 2020-03-03T14:32:16.000Z | 2021-07-30T16:42:17.000Z | tests/data_elements/test_other_double.py | GalBenZvi/dicom_parser | fc3e892ebf99c4e5d62cb5e7de7df341baf445fe | [
"MIT"
] | 6 | 2021-10-19T09:19:22.000Z | 2022-03-13T19:26:10.000Z | """
Definition of the :class:`OtherDoubleTestCase` class.
"""
from dicom_parser.data_elements.other_double import OtherDouble
from tests.test_data_element import DataElementTestCase
| 23.3125 | 65 | 0.766756 |
f03ff005925224be3fdb7edb21130977774e1f37 | 14,317 | py | Python | acme_compact.py | felixfontein/acme-compact | 922df35fc70e6f157a51d572a02c12fa34caaa35 | [
"MIT"
] | 5 | 2015-12-19T20:09:53.000Z | 2017-02-06T08:13:27.000Z | acme_compact.py | felixfontein/acme-compact | 922df35fc70e6f157a51d572a02c12fa34caaa35 | [
"MIT"
] | null | null | null | acme_compact.py | felixfontein/acme-compact | 922df35fc70e6f157a51d572a02c12fa34caaa35 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Command line interface for the compact ACME library."""
import acme_lib
import argparse
import sys
import textwrap
if __name__ == "__main__":
try:
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""\
This script automates the process of getting a signed TLS certificate from
Let's Encrypt using the ACME protocol. It can both be run from the server
and from another machine (when splitting the process up in two steps).
The script needs to have access to your private account key, so PLEASE READ
THROUGH IT! It's only 265+569 lines (including docstrings), so it won't
take too long.
===Example Usage: Creating Letsencrypt account key, private key for certificate and CSR===
python acme_compact.py gen-account-key --account-key /path/to/account.key
python acme_compact.py gen-key --key /path/to/domain.key
python acme_compact.py gen-csr --key /path/to/domain.key --csr /path/to/domain.csr --domains example.com,www.example.com
===================
Note that the email address does not have to be specified.
Also note that by default, RSA keys are generated. If you want ECC keys,
please specify "--algorithm <alg>" with <alg> being "p-256" or "p-384".
===Example Usage: Creating certifiate from CSR on server===
python acme_compact.py get-certificate --account-key /path/to/account.key --email mail@example.com --csr /path/to/domain.csr --acme-dir /usr/share/nginx/html/.well-known/acme-challenge/ --cert /path/to/signed.crt 2>> /var/log/acme_compact.log
===================
===Example Usage: Creating certifiate from CSR from another machine===
python acme_compact.py get-certificate-part-1 --account-key /path/to/account.key --email mail@example.com --csr /path/to/domain.csr --statefile /path/to/state.json --acme-dir /tmp/acme-challenge/ 2>> /var/log/acme_compact.log
... copy files from /tmp/acme-challenge/ into /usr/share/nginx/html/.well-known/acme-challenge/ on the web server ...
python acme_compact.py get-certificate-part-2 --csr /path/to/domain.csr --statefile /path/to/state.json --cert /path/to/signed.crt 2>> /var/log/acme_compact.log
===================
===Example Usage: Combining signed certificate with intermediate certificate===
python acme_compact.py get-intermediate --cert /path/to/domain-intermediate.crt
cat /path/to/signed.crt /path/to/domain-intermediate.crt > /path/to/signed-with-intermediate.crt
===================
""")
)
commands = {
'gen-account-key': {
'help': 'Generates an account key.',
'requires': ["account_key"],
'optional': ["key_length", "algorithm"],
'command': _gen_account_key,
},
'gen-key': {
'help': 'Generates a certificate key.',
'requires': ["key"],
'optional': ["key_length", "algorithm"],
'command': _gen_cert_key,
},
'gen-csr': {
'help': 'Generates a certificate signing request (CSR). Under *nix, use /dev/stdin after --key to provide key via stdin.',
'requires': ["domains", "key", "csr"],
'optional': ["must_staple"],
'command': _gen_csr,
},
'print-csr': {
'help': 'Prints the given certificate signing request (CSR) in human-readable form.',
'requires': ["csr"],
'optional': [],
'command': _print_csr,
},
'get-root': {
'help': 'Retrieves the root certificate from the CA server and prints it to stdout (if --cert is not specified).',
'requires': [],
'optional': ["root_url", "cert"],
'command': _get_root,
},
'get-intermediate': {
'help': 'Retrieves the intermediate certificate from the CA server and prints it to stdout (if --cert is not specified).',
'requires': [],
'optional': ["intermediate_url", "cert"],
'command': _get_intermediate,
},
'get-certificate': {
'help': 'Given a CSR and an account key, retrieves a certificate and prints it to stdout (if --cert is not specified).',
'requires': ["account_key", "csr", "acme_dir"],
'optional': ["CA", "cert", "email"],
'command': _get_certificate,
},
'get-certificate-part-1': {
'help': 'Given a CSR and an account key, prepares retrieving a certificate. The generated challenge files must be manually uploaded to their respective positions.',
'requires': ["account_key", "csr", "acme_dir", "statefile"],
'optional': ["CA", "email"],
'command': _get_certificate_part1,
},
'get-certificate-part-2': {
'help': 'Assuming that get-certificate-part-1 ran through and the challenges were uploaded, retrieves a certificate and prints it to stdout (if --cert is not specified).',
'requires': ["csr", "statefile"],
'optional': ["cert"],
'command': _get_certificate_part2,
},
}
parser.add_argument("command", type=str, nargs='?', help="must be one of {0}".format(', '.join('"{0}"'.format(command) for command in sorted(commands.keys()))))
parser.add_argument("--account-key", required=False, help="path to your Let's Encrypt account private key")
parser.add_argument("--algorithm", required=False, default="rsa", help="the algorithm to use (rsa, ...)") # FIXME
parser.add_argument("--key-length", type=int, default=4096, required=False, help="key length for private keys")
parser.add_argument("--key", required=False, help="path to your certificate's private key")
parser.add_argument("--csr", required=False, help="path to your certificate signing request")
parser.add_argument("--acme-dir", required=False, help="path to the .well-known/acme-challenge/ directory")
parser.add_argument("--CA", required=False, default=None, help="CA to use (default: {0})".format(acme_lib.default_ca))
parser.add_argument("--use-staging-CA", required=False, default=False, action='store_true', help="Use Let's Encrypt staging CA")
parser.add_argument("--statefile", required=False, default=None, help="state file for two-part run")
parser.add_argument("-d", "--domains", required=False, default=None, help="a comma-separated list of domain names")
parser.add_argument("--cert", required=False, help="file name to store certificate into (otherwise it is printed on stdout)")
parser.add_argument("--email", required=False, help="email address (will be associated with account)")
parser.add_argument("--intermediate-url", required=False, default=acme_lib.default_intermediate_url, help="URL for the intermediate certificate (default: {0})".format(acme_lib.default_intermediate_url))
parser.add_argument("--root-url", required=False, default=acme_lib.default_root_url, help="URL for the root certificate (default: {0})".format(acme_lib.default_root_url))
parser.add_argument("--must-staple", required=False, default=False, action='store_true', help="request must staple extension for certificate")
args = parser.parse_args()
if args.command is None:
sys.stderr.write("Command must be one of {1}. More information on the available commands:\n\n".format(args.command, ', '.join('"{0}"'.format(command) for command in sorted(commands.keys()))))
for command in sorted(commands.keys()):
cmd = commands[command]
sys.stderr.write(' {0}:\n'.format(command))
sys.stderr.write('{0}\n'.format(textwrap.indent(cmd['help'], prefix=' ')))
if cmd['requires']:
sys.stderr.write(' Mandatory options: {0}\n'.format(', '.join(['--{0}'.format(opt.replace('_', '-')) for opt in cmd['requires']])))
if cmd['optional']:
sys.stderr.write(' Optional options: {0}\n'.format(', '.join(['--{0}'.format(opt.replace('_', '-')) for opt in cmd['optional']])))
sys.exit(-1)
elif args.command not in commands:
sys.stderr.write("Unknown command '{0}'! Command must be one of {1}.\n".format(args.command, ', '.join('"{0}"'.format(command) for command in sorted(commands.keys()))))
sys.exit(-1)
else:
cmd = commands[args.command]
accepted = set()
values = {}
if args.__dict__['use_staging_CA']:
if args.__dict__['CA'] is not None:
sys.stderr.write("Cannot specify both '--use-staging-CA' and provide '--CA'!\n")
sys.exit(-1)
args.__dict__['CA'] = acme_lib.staging_ca
for req in cmd['requires']:
accepted.add(req)
if args.__dict__[req] is None:
sys.stderr.write("Command '{0}' requires that option '{1}' is set!\n".format(args.command, req))
sys.exit(-1)
values[req] = args.__dict__[req]
for opt in cmd['optional']:
accepted.add(opt)
values[opt] = args.__dict__[opt]
for opt in args.__dict__:
if opt == 'command':
continue
if args.__dict__[opt] is not parser.get_default(opt):
if opt not in accepted:
sys.stderr.write("Warning: option '{0}' is ignored for this command.\n".format(opt))
if 'CA' in values and values['CA'] is None:
values['CA'] = acme_lib.default_ca
cmd['command'](**values)
except Exception as e:
sys.stderr.write("Error occured: {0}\n".format(str(e)))
sys.exit(-2)
| 53.823308 | 258 | 0.605155 |
f040ef34e6f7f28b762c5ac7fa85d111d72daca8 | 1,530 | py | Python | cdk/consoleme_ecs_service/nested_stacks/vpc_stack.py | avishayil/consoleme-ecs-service | 357f290c23fb74c6752961a4a4582e4cbab54e0a | [
"MIT"
] | 2 | 2021-06-19T04:28:43.000Z | 2021-06-19T06:12:25.000Z | cdk/consoleme_ecs_service/nested_stacks/vpc_stack.py | avishayil/consoleme-ecs-service | 357f290c23fb74c6752961a4a4582e4cbab54e0a | [
"MIT"
] | 10 | 2021-06-19T08:12:41.000Z | 2021-06-20T22:00:34.000Z | cdk/consoleme_ecs_service/nested_stacks/vpc_stack.py | avishayil/consoleme-ecs-service | 357f290c23fb74c6752961a4a4582e4cbab54e0a | [
"MIT"
] | null | null | null | """
VPC stack for running ConsoleMe on ECS
"""
import urllib.request
from aws_cdk import (
aws_ec2 as ec2,
core as cdk
)
| 25.081967 | 82 | 0.578431 |
f0425b1ddda33471bcd698350aad4a8f84b9b335 | 1,837 | py | Python | mnt/us/kapps/apps/gallery/gallery.py | PhilippMundhenk/kapps | eed07669d8554393bfbd40acd8d255475e90b88e | [
"MIT"
] | 1 | 2021-11-19T08:40:44.000Z | 2021-11-19T08:40:44.000Z | mnt/us/kapps/apps/gallery/gallery.py | PhilippMundhenk/kapps | eed07669d8554393bfbd40acd8d255475e90b88e | [
"MIT"
] | null | null | null | mnt/us/kapps/apps/gallery/gallery.py | PhilippMundhenk/kapps | eed07669d8554393bfbd40acd8d255475e90b88e | [
"MIT"
] | null | null | null | from core.kapp import Kapp
from core.httpResponse import HTTPResponse
from core.Kcommand import Kcommand
import uuid
import os
def register(appID, appPath, ctx):
print("register " + GalleryApp.name)
app = GalleryApp(appID, appPath, ctx)
app.subscribe(GetImage(), app.getImageCallback)
app.subscribe(ViewImage(), app.viewImageCallback)
return app
| 30.114754 | 138 | 0.619488 |
f043daf48c42d7f929f4d25cb52dddbc3fe2c981 | 2,356 | py | Python | adding/adding_task.py | tk-rusch/coRNN | afd81744d108a2d623761b635b4ba56770d9e05d | [
"MIT"
] | 24 | 2020-10-06T22:25:39.000Z | 2021-11-28T09:33:30.000Z | adding/adding_task.py | tk-rusch/coRNN | afd81744d108a2d623761b635b4ba56770d9e05d | [
"MIT"
] | 2 | 2020-12-02T16:44:10.000Z | 2021-08-20T11:59:49.000Z | adding/adding_task.py | tk-rusch/coRNN | afd81744d108a2d623761b635b4ba56770d9e05d | [
"MIT"
] | 5 | 2020-10-20T13:54:59.000Z | 2021-09-23T06:21:49.000Z | from torch import nn, optim
import torch
import model
import torch.nn.utils
import utils
import argparse
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser(description='training parameters')
parser.add_argument('--n_hid', type=int, default=128,
help='hidden size of recurrent net')
parser.add_argument('--T', type=int, default=100,
help='length of sequences')
parser.add_argument('--max_steps', type=int, default=60000,
help='max learning steps')
parser.add_argument('--log_interval', type=int, default=100,
help='log interval')
parser.add_argument('--batch', type=int, default=50,
help='batch size')
parser.add_argument('--batch_test', type=int, default=1000,
help='size of test set')
parser.add_argument('--lr', type=float, default=2e-2,
help='learning rate')
parser.add_argument('--dt',type=float, default=6e-2,
help='step size <dt> of the coRNN')
parser.add_argument('--gamma',type=float, default=66,
help='y controle parameter <gamma> of the coRNN')
parser.add_argument('--epsilon',type=float, default = 15,
help='z controle parameter <epsilon> of the coRNN')
args = parser.parse_args()
n_inp = 2
n_out = 1
model = model.coRNN(n_inp, args.n_hid, n_out, args.dt, args.gamma, args.epsilon).to(device)
objective = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=args.lr)
if __name__ == '__main__':
train()
| 31.837838 | 91 | 0.617997 |
f04422d2ac94f3225baf51c6adc62d54d1588ade | 4,904 | py | Python | notebooks/KJIO/kapi_io.py | Eclasik/kinetica-jupyterlab | c94b7e2e182e500e1c34ccef3af146a9c0a21bd6 | [
"Xnet",
"X11",
"CECILL-B"
] | 2 | 2019-11-24T23:49:20.000Z | 2021-09-19T23:05:01.000Z | notebooks/KJIO/kapi_io.py | Eclasik/kinetica-jupyterlab | c94b7e2e182e500e1c34ccef3af146a9c0a21bd6 | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | notebooks/KJIO/kapi_io.py | Eclasik/kinetica-jupyterlab | c94b7e2e182e500e1c34ccef3af146a9c0a21bd6 | [
"Xnet",
"X11",
"CECILL-B"
] | 3 | 2019-11-24T23:49:39.000Z | 2021-02-10T17:49:16.000Z | # File: kapi_io.py
# Purpose: I/O of between dataframes and Kinetica with native API.
# Author: Chad Juliano
# Date: 07/20/2018
###############################################################################
import numpy as np
import pandas as pd
import gpudb
import sys
KDBC = gpudb.GPUdb(encoding='BINARY', host='127.0.0.1', port='9191')
KAPI_TYPE_MAP = { 'int64' : gpudb.GPUdbRecordColumn._ColumnType.LONG,
'int32' : gpudb.GPUdbRecordColumn._ColumnType.INT,
'int16' : gpudb.GPUdbRecordColumn._ColumnType.INT,
'float64' : gpudb.GPUdbRecordColumn._ColumnType.DOUBLE,
'float32' : gpudb.GPUdbRecordColumn._ColumnType.FLOAT,
'object' : gpudb.GPUdbRecordColumn._ColumnType.STRING }
def get_coldef(_col_name, _np_dtype, _col_props):
"""Convert a Numpy type to Kinetica type."""
if(str(_np_dtype) not in KAPI_TYPE_MAP):
raise Exception('Type not supported: {}'.format(_np_dtype))
_k_type = KAPI_TYPE_MAP[str(_np_dtype)]
_k_properties = []
if(_col_name in _col_props):
_k_properties = _col_props[_col_name]
if(_k_type == gpudb.GPUdbRecordColumn._ColumnType.STRING and len(_k_properties) == 0):
_k_properties.append(gpudb.GPUdbColumnProperty.CHAR16)
return gpudb.GPUdbRecordColumn(_col_name, _k_type, _k_properties)
def save_df(_df, _table_name, _schema, _kdbc=KDBC, _col_props={}, _is_replicated=False):
"""Save a Dataframe to a Kinetica table."""
# Should index be used to create a column?
_use_index = (_df.index.name != None)
# Construct the type to use for creating the table.
_result_type = []
if(_use_index):
_idx_type = get_coldef(_df.index.name, _df.index.dtype, _col_props)
_idx_type.column_properties.append('shard_key')
_result_type.append(_idx_type)
for _idx in range(_df.columns.size):
_col_name = _df.columns[_idx]
_dtype = _df.dtypes[_idx]
_result_type.append(get_coldef(_col_name, _dtype, _col_props))
print('Dropping table: <{}>'.format(_table_name))
_kdbc.clear_table(_table_name, options={ 'no_error_if_not_exists':'true' })
_print_replicated = ''
if(_is_replicated):
_print_replicated = 'replicated '
print('Creating {} table: <{}>'.format(_print_replicated, _table_name))
for _idx, _coldef in enumerate(_result_type):
print('Column {}: <{}> ({}) {}'.format(_idx, _coldef.name, _coldef.column_type, _coldef.column_properties))
#_is_replicated = 'false'
_type_obj = gpudb.GPUdbRecordType(columns=_result_type, label=_table_name)
_result_table = gpudb.GPUdbTable(db=_kdbc, _type=_type_obj, name=_table_name,
options={'collection_name': _schema,
'is_replicated': _is_replicated} )
# Convert to records so we can preserve the column dtypes
_insert_records = _df.to_records(index=_use_index)
# Call item() so the types are converted to python native types
_insert_rows = [ list(x.item()) for x in _insert_records ]
if(len(_insert_rows) > 0):
_result_table.insert_records(_insert_rows)
print('Inserted rows into <{}.{}>: {}'.format(_schema, _table_name, len(_insert_rows)))
def load_df(_input_table, _kdbc=KDBC):
"""Load a dataframe from a Kinetica table."""
_table = gpudb.GPUdbTable(_type=None, name=_input_table , db=_kdbc)
_type = _table.get_table_type()
_columns = [_col.name for _col in _type.columns]
#print('Getting records from <{}>'.format(_input_table), end='', flush=True)
sys.stdout.write('Getting {} records from <{}>'.format(_table.count, _input_table))
BATCH_SIZE=10000
_offset = 0
_table_df = pd.DataFrame()
while True:
_response = _kdbc.get_records(table_name=_input_table,
offset=_offset, limit=BATCH_SIZE)
check_response(_response)
_res_decoded = gpudb.GPUdbRecord.decode_binary_data(
_response['type_schema'],
_response['records_binary'])
# print something to show we are working
#print('.', end='', flush=True)
sys.stdout.write('.')
_offset += len(_res_decoded)
_table_df = _table_df.append(_res_decoded)
if _response['has_more_records'] == False:
break;
# reorder dataframe columns
_table_df = _table_df[_columns]
print('')
print('Records Retrieved: {}'.format(_table_df.shape))
return _table_df
| 35.79562 | 115 | 0.636419 |
f0466389a763d8464eb0947ea583db3f0c84a014 | 2,581 | py | Python | batch/batch/driver/k8s_cache.py | MariusDanner/hail | 5ca0305f8243b5888931b1afaa1fbfb617dee097 | [
"MIT"
] | 2 | 2020-12-15T21:20:24.000Z | 2020-12-21T19:46:26.000Z | batch/batch/driver/k8s_cache.py | MariusDanner/hail | 5ca0305f8243b5888931b1afaa1fbfb617dee097 | [
"MIT"
] | 3 | 2017-06-16T18:10:45.000Z | 2017-07-21T17:44:13.000Z | batch/batch/driver/k8s_cache.py | MariusDanner/hail | 5ca0305f8243b5888931b1afaa1fbfb617dee097 | [
"MIT"
] | 2 | 2020-07-28T18:55:19.000Z | 2020-10-19T16:43:03.000Z | import time
import asyncio
import sortedcontainers
from hailtop.utils import retry_transient_errors
| 31.864198 | 79 | 0.588532 |
f0468dc014eafb01e69ffc6248b5e5de49dc570a | 3,376 | py | Python | tools/nntool/interpreter/commands/imageformat.py | gemenerik/gap_sdk | afae64d239db6d73f79c90c2ca2c832b6361f109 | [
"Apache-2.0"
] | null | null | null | tools/nntool/interpreter/commands/imageformat.py | gemenerik/gap_sdk | afae64d239db6d73f79c90c2ca2c832b6361f109 | [
"Apache-2.0"
] | null | null | null | tools/nntool/interpreter/commands/imageformat.py | gemenerik/gap_sdk | afae64d239db6d73f79c90c2ca2c832b6361f109 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import argparse
from copy import deepcopy
from cmd2 import Cmd2ArgumentParser, with_argparser
from interpreter.nntool_shell_base import NNToolShellBase
from quantization.qtype import QType
from utils.node_id import NodeId
from graph.types import ImageFormatParameters, NNEdge, TransposeParameters
from graph.manipulations.formatter import insert_formatter, remove_formatter
| 43.844156 | 98 | 0.672393 |
f0476c6057fe6e189aeed8a5c7b88b67234d582d | 78 | py | Python | svae/__init__.py | APodolskiy/SentenceVAE | afe82504922de700810b24638f7df0dbf1d8fa11 | [
"MIT"
] | null | null | null | svae/__init__.py | APodolskiy/SentenceVAE | afe82504922de700810b24638f7df0dbf1d8fa11 | [
"MIT"
] | null | null | null | svae/__init__.py | APodolskiy/SentenceVAE | afe82504922de700810b24638f7df0dbf1d8fa11 | [
"MIT"
] | null | null | null | import torch.nn as nn
RNN_TYPES = {
'lstm': nn.LSTM,
'gru': nn.GRU
}
| 11.142857 | 21 | 0.564103 |
f04859b27ee91e595f5a5127a619b6f6d8f15b47 | 5,391 | py | Python | extract_embeddings.py | Artem531/opencv-face-recognition-with-YOLOv3 | 53a93711a079ea3739cab068aeaf5c684f6e53c4 | [
"MIT"
] | null | null | null | extract_embeddings.py | Artem531/opencv-face-recognition-with-YOLOv3 | 53a93711a079ea3739cab068aeaf5c684f6e53c4 | [
"MIT"
] | null | null | null | extract_embeddings.py | Artem531/opencv-face-recognition-with-YOLOv3 | 53a93711a079ea3739cab068aeaf5c684f6e53c4 | [
"MIT"
] | null | null | null | # USAGE
# python extract_embeddings.py --dataset dataset --embeddings output/embeddings.pickle \
# --detector face_detection_model --embedding-model openface_nn4.small2.v1.t7
# import the necessary packages
from imutils.face_utils import FaceAligner
from imutils import paths
import numpy as np
import argparse
import imutils
import pickle
import cv2
import os
import dlib
from PIL import Image
from yolo import YOLO, detect_video
from yolo3.utils import letterbox_image
from keras import backend as K
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--dataset", required=True,
help="path to input directory of faces + images")
ap.add_argument("-e", "--embeddings", required=True,
help="path to output serialized db of facial embeddings")
ap.add_argument("-m", "--embedding-model", required=True,
help="path to OpenCV's deep learning face embedding model")
ap.add_argument("-p", "--shape-predictor", required=True,
help="path to facial landmark predictor")
args = vars(ap.parse_args())
# load our serialized face detector from disk
print("[INFO] loading face detector...")
predictor = dlib.shape_predictor(args["shape_predictor"])
#detector = dlib.get_frontal_face_detector()
detector = YOLO()
# load our serialized face embedding model from disk
print("[INFO] loading face recognizer...")
embedder = cv2.dnn.readNetFromTorch(args["embedding_model"])
# grab the paths to the input images in our dataset
print("[INFO] quantifying faces...")
imagePaths = list(paths.list_images(args["dataset"]))
# initialize our lists of extracted facial embeddings and
# corresponding people names
knownEmbeddings = []
knownNames = []
# initialize the total number of faces processed
total = 0
# loop over the image paths
for (i, imagePath) in enumerate(imagePaths):
# extract the person name from the image path
print("[INFO] processing image {}/{}".format(i + 1,
len(imagePaths)))
name = imagePath.split(os.path.sep)[-2]
# load the image, resize it to have a width of 800 pixels (while
# maintaining the aspect ratio), and then grab the image
# dimensions
image = cv2.imread(imagePath)
image = imutils.resize(image, width=800)
#try to rise resolution
#gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#blurred = cv2.GaussianBlur(gray, (5, 5), 0)
#image = blurred
#clahe = cv2.createCLAHE(clipLimit=4.0, tileGridSize=(8,8))
#image = clahe.apply(image)
#image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
(h, w) = image.shape[:2]
# we're making the assumption that each image has only ONE
# face, so find the bounding box with the largest probability
#align_faces
fa = FaceAligner(predictor, desiredFaceWidth=256)
#gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#rects = detector(gray, 2)
rects = []
out_boxes, out_scores, out_classes = detect_image(detector, Image.fromarray(image))
for i, c in reversed(list(enumerate(out_classes))):
(x, y, x1, y1) = out_boxes[i]
w = abs(x - x1)
h = abs(y - y1)
startX = int(min(x1, x))
endX = startX + w
startY = int(min(y1, y))
endY = startY + h
left, right, bottom, top = startX, endX, endY, startY
rect = dlib.rectangle(int(top), int(left), int(bottom) , int(right))
rects.append(rect)
for rect in rects:
faceAligned = fa.align(image, gray, rect)
print(faceAligned)
cv2.imshow("Aligned", np.asarray(faceAligned))
cv2.waitKey(0)
face = faceAligned
(fH, fW) = face.shape[:2]
# ensure the face width and height are sufficiently large
if fW < 20 or fH < 20:
continue
# construct a blob for the face ROI, then pass the blob
# through our face embedding model to obtain the 128-d
# quantification of the face
faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255,
(96, 96), (0, 0, 0), swapRB=True, crop=False)
embedder.setInput(faceBlob)
vec = embedder.forward()
# add the name of the person + corresponding face
# embedding to their respective lists
knownNames.append(name)
knownEmbeddings.append(vec.flatten())
total += 1
# dump the facial embeddings + names to disk
print("[INFO] serializing {} encodings...".format(total))
data = {"embeddings": knownEmbeddings, "names": knownNames}
f = open(args["embeddings"], "wb")
f.write(pickle.dumps(data))
f.close() | 32.475904 | 88 | 0.708959 |
f04885c174b83de4f873553053e8d7a0c7d4a2dc | 5,628 | py | Python | alf/algorithms/diayn_algorithm.py | runjerry/alf | 7e83a29a3102ff04a6ce2c3105ae36f28b090e65 | [
"Apache-2.0"
] | 1 | 2021-03-22T10:53:55.000Z | 2021-03-22T10:53:55.000Z | alf/algorithms/diayn_algorithm.py | Haichao-Zhang/alf | 38a3621337a030f74bb3944d7695e7642e777e10 | [
"Apache-2.0"
] | null | null | null | alf/algorithms/diayn_algorithm.py | Haichao-Zhang/alf | 38a3621337a030f74bb3944d7695e7642e777e10 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import gin.tf
import tensorflow as tf
from tf_agents.networks.network import Network
import tf_agents.specs.tensor_spec as tensor_spec
from alf.algorithms.algorithm import Algorithm, AlgorithmStep, LossInfo
from alf.utils.normalizers import ScalarAdaptiveNormalizer
from alf.utils.encoding_network import EncodingNetwork
from alf.data_structures import StepType, ActionTimeStep
DIAYNInfo = namedtuple("DIAYNInfo", ["reward", "loss"])
| 38.813793 | 80 | 0.648543 |
f04907145e0f5329d91ce6e7245421c294f51891 | 16,781 | py | Python | src/genie/libs/parser/linux/route.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/linux/route.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/linux/route.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z | """route.py
Linux parsers for the following commands:
* route
"""
# python
import re
# metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, Any, Optional
from netaddr import IPAddress, IPNetwork
# =======================================================
# Schema for 'route'
# =======================================================
# =======================================================
# Parser for 'route'
# =======================================================
# =======================================================
# Parser for 'netstat -rn'
# =======================================================
# =====================================================
# Parser for ip route show table all
# =====================================================
| 38.052154 | 100 | 0.362255 |
f04ab36ef3e94f8716214625f760733bb0b62c82 | 1,437 | py | Python | chapter-7/chassis/demo.py | wallacei/microservices-in-action-copy | f9840464a1f9ec40622989e9e5377742246244f3 | [
"MIT"
] | 115 | 2017-11-06T08:12:07.000Z | 2022-02-25T09:56:59.000Z | chapter-7/chassis/demo.py | wallacei/microservices-in-action-copy | f9840464a1f9ec40622989e9e5377742246244f3 | [
"MIT"
] | 12 | 2017-08-05T14:51:35.000Z | 2020-12-01T11:05:14.000Z | chapter-7/chassis/demo.py | wallacei/microservices-in-action-copy | f9840464a1f9ec40622989e9e5377742246244f3 | [
"MIT"
] | 82 | 2017-08-05T09:41:12.000Z | 2022-02-18T00:57:39.000Z | import json
import datetime
import requests
from nameko.web.handlers import http
from nameko.timer import timer
from statsd import StatsClient
from circuitbreaker import circuit
| 31.933333 | 85 | 0.701461 |
f04b25d10196843175ed158d8658c6dd85f4722b | 2,009 | py | Python | src/autodoc/python/rst/base/block_quote.py | LudditeLabs/autodoc-tool | b4ae7e3b61907e7e9c3a1b534fce055e5860ffab | [
"Apache-2.0"
] | null | null | null | src/autodoc/python/rst/base/block_quote.py | LudditeLabs/autodoc-tool | b4ae7e3b61907e7e9c3a1b534fce055e5860ffab | [
"Apache-2.0"
] | null | null | null | src/autodoc/python/rst/base/block_quote.py | LudditeLabs/autodoc-tool | b4ae7e3b61907e7e9c3a1b534fce055e5860ffab | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Luddite Labs Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Block Quotes
------------
Line blocks are groups of lines beginning with vertical bar ("|") prefixes.
Each vertical bar prefix indicates a new line, so line breaks are preserved.
Initial indents are also significant, resulting in a nested structure.
Inline markup is supported. Continuation lines are wrapped portions
of long lines; they begin with a space in place of the vertical bar.
The left edge of a continuation line must be indented, but need not be aligned
with the left edge of the text above it. A line block ends with a blank line.
Syntax diagram:
+------------------------------+
| (current level of |
| indentation) |
+------------------------------+
+---------------------------+
| block quote |
| (body elements)+ |
| |
| -- attribution text |
| (optional) |
+---------------------------+
http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html#block-quotes
"""
| 34.637931 | 79 | 0.621702 |
f04b2efa0372d0580af551921d46f98895a3f1a0 | 11,037 | py | Python | userdocker/subcommands/run.py | jsteffen/userdocker | eb3b6a2421ca392ec4485744244d913e51687040 | [
"MIT"
] | null | null | null | userdocker/subcommands/run.py | jsteffen/userdocker | eb3b6a2421ca392ec4485744244d913e51687040 | [
"MIT"
] | null | null | null | userdocker/subcommands/run.py | jsteffen/userdocker | eb3b6a2421ca392ec4485744244d913e51687040 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import argparse
import logging
import os
import re
from .. import __version__
from ..config import ALLOWED_IMAGE_REGEXPS
from ..config import ALLOWED_PORT_MAPPINGS
from ..config import CAPS_ADD
from ..config import CAPS_DROP
from ..config import ENV_VARS
from ..config import ENV_VARS_EXT
from ..config import NV_ALLOW_OWN_GPU_REUSE
from ..config import NV_ALLOWED_GPUS
from ..config import NV_DEFAULT_GPU_COUNT_RESERVATION
from ..config import NV_MAX_GPU_COUNT_RESERVATION
from ..config import PROBE_USED_MOUNTS
from ..config import RUN_PULL
from ..config import USER_IN_CONTAINER
from ..config import VOLUME_MOUNTS_ALWAYS
from ..config import VOLUME_MOUNTS_AVAILABLE
from ..config import VOLUME_MOUNTS_DEFAULT
from ..config import gid
from ..config import gids
from ..config import uid
from ..config import user_name
from ..helpers.cmd import init_cmd
from ..helpers.exceptions import UserDockerException
from ..helpers.execute import exec_cmd
from ..helpers.execute import exit_exec_cmd
from ..helpers.logger import logger
from ..helpers.nvidia import nvidia_get_available_gpus
from ..helpers.parser import init_subcommand_parser
| 33.144144 | 80 | 0.581317 |
f04ba688518b293c3f58f28b313a6e8e7fd63f49 | 214 | py | Python | URI 1017.py | Azefalo/Cluble-de-Programacao-UTFPR | f4a457bae36ac61378766035abc0633f5b3492db | [
"MIT"
] | 1 | 2021-04-19T22:42:00.000Z | 2021-04-19T22:42:00.000Z | URI 1017.py | Azefalo/Cluble-de-Programacao-UTFPR | f4a457bae36ac61378766035abc0633f5b3492db | [
"MIT"
] | null | null | null | URI 1017.py | Azefalo/Cluble-de-Programacao-UTFPR | f4a457bae36ac61378766035abc0633f5b3492db | [
"MIT"
] | null | null | null | # https://www.beecrowd.com.br/judge/en/problems/view/1017
car_efficiency = 12 # Km/L
time = int(input())
average_speed = int(input())
liters = (time * average_speed) / car_efficiency
print(f"{liters:.3f}") | 26.75 | 58 | 0.686916 |
f04ce64c795e8f616352eaaa159edec4673a3240 | 1,432 | py | Python | src/cpp/convert.py | shindavid/splendor | b51b0408967627dbd61f60f57031d1fe21aa9d8f | [
"MIT"
] | 1 | 2017-11-02T18:32:51.000Z | 2017-11-02T18:32:51.000Z | src/cpp/convert.py | shindavid/splendor | b51b0408967627dbd61f60f57031d1fe21aa9d8f | [
"MIT"
] | 1 | 2018-07-05T09:07:40.000Z | 2018-07-05T09:07:40.000Z | src/cpp/convert.py | shindavid/splendor | b51b0408967627dbd61f60f57031d1fe21aa9d8f | [
"MIT"
] | null | null | null | filename = '../py/cards.py'
f = open(filename)
color_map = {
'W' : 'eWhite',
'U' : 'eBlue',
'G' : 'eGreen',
'R' : 'eRed',
'B' : 'eBlack',
'J' : 'eGold',
}
color_index_map = {
'W' : 0,
'U' : 1,
'G' : 2,
'R' : 3,
'B' : 4
}
ID = 0
first = True
for line in f:
if line.count('_add_card'):
if first:
first = False
continue
lp = line.find('(')
rp = line.find(')')
lb = line.find('{')
rb = line.find('}')
cost_str = line[lb+1:rb]
tokens = line[lp+1:rp].split(',')
level = int(tokens[0].strip()) - 1
points = int(tokens[1].strip())
color = color_map[tokens[2].strip()]
print ' {%2d, {%s}, %s, %s, %s},' % (ID, convert(cost_str), points, level, color)
ID += 1
ID = 0
f = open(filename)
first = True
for line in f:
if line.count('_add_noble'):
if first:
first = False
continue
lp = line.find('(')
rp = line.find(')')
lb = line.find('{')
rb = line.find('}')
cost_str = line[lb+1:rb]
print ' {%s, 3, {%s}},' % (ID, convert(cost_str))
ID += 1
| 21.058824 | 88 | 0.513966 |
f04d3917118eeab7daa5965475644fcb12277751 | 230 | py | Python | code_examples/projections/cyl.py | ezcitron/BasemapTutorial | 0db9248b430d39518bdfdb25d713145be4eb966a | [
"CC0-1.0"
] | 99 | 2015-01-14T21:20:48.000Z | 2022-01-25T10:38:37.000Z | code_examples/projections/cyl.py | ezcitron/BasemapTutorial | 0db9248b430d39518bdfdb25d713145be4eb966a | [
"CC0-1.0"
] | 1 | 2017-08-31T07:02:20.000Z | 2017-08-31T07:02:20.000Z | code_examples/projections/cyl.py | ezcitron/BasemapTutorial | 0db9248b430d39518bdfdb25d713145be4eb966a | [
"CC0-1.0"
] | 68 | 2015-01-14T21:21:01.000Z | 2022-01-29T14:53:38.000Z | from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
map = Basemap(projection='cyl')
map.drawmapboundary(fill_color='aqua')
map.fillcontinents(color='coral',lake_color='aqua')
map.drawcoastlines()
plt.show() | 23 | 51 | 0.795652 |
f04d3f1d74f6c269738f89d87766211a982c25ba | 3,926 | py | Python | applications/ShapeOptimizationApplication/python_scripts/analyzer_internal.py | AndreaVoltan/MyKratos7.0 | e977752722e8ef1b606f25618c4bf8fd04c434cc | [
"BSD-4-Clause"
] | 2 | 2020-04-30T19:13:08.000Z | 2021-04-14T19:40:47.000Z | applications/ShapeOptimizationApplication/python_scripts/analyzer_internal.py | AndreaVoltan/MyKratos7.0 | e977752722e8ef1b606f25618c4bf8fd04c434cc | [
"BSD-4-Clause"
] | 1 | 2020-04-30T19:19:09.000Z | 2020-05-02T14:22:36.000Z | applications/ShapeOptimizationApplication/python_scripts/analyzer_internal.py | AndreaVoltan/MyKratos7.0 | e977752722e8ef1b606f25618c4bf8fd04c434cc | [
"BSD-4-Clause"
] | 1 | 2020-06-12T08:51:24.000Z | 2020-06-12T08:51:24.000Z | # ==============================================================================
# KratosShapeOptimizationApplication
#
# License: BSD License
# license: ShapeOptimizationApplication/license.txt
#
# Main authors: Baumgaertner Daniel, https://github.com/dbaumgaertner
# Geiser Armin, https://github.com/armingeiser
#
# ==============================================================================
# Making KratosMultiphysics backward compatible with python 2.6 and 2.7
from __future__ import print_function, absolute_import, division
# Kratos Core and Apps
from KratosMultiphysics import *
from KratosMultiphysics.StructuralMechanicsApplication import *
# Additional imports
import response_function_factory
import time as timer
# ==============================================================================
# ==============================================================================
| 50.333333 | 156 | 0.609272 |
f04e299a6b487778e8fe610c813dd85847139172 | 529 | py | Python | tests/frontend/detector/test_fast.py | swershrimpy/gtsfm | 8d301eb3ef9172345a1ac1369fd4e19764d28946 | [
"Apache-2.0"
] | 122 | 2021-02-07T23:01:58.000Z | 2022-03-30T13:10:35.000Z | tests/frontend/detector/test_fast.py | swershrimpy/gtsfm | 8d301eb3ef9172345a1ac1369fd4e19764d28946 | [
"Apache-2.0"
] | 273 | 2021-01-30T16:45:26.000Z | 2022-03-16T15:02:33.000Z | tests/frontend/detector/test_fast.py | swershrimpy/gtsfm | 8d301eb3ef9172345a1ac1369fd4e19764d28946 | [
"Apache-2.0"
] | 13 | 2021-03-12T03:01:27.000Z | 2022-03-11T03:16:54.000Z | """Tests for frontend's FAST detector class.
Authors: Ayush Baid
"""
import unittest
import tests.frontend.detector.test_detector_base as test_detector_base
from gtsfm.frontend.detector.fast import Fast
# Allow running this test module directly: discovers and runs the
# unittest cases defined in this file.
if __name__ == "__main__":
    unittest.main()
| 22.041667 | 78 | 0.729679 |
f04f803f10f18e34c63851533b89db8888254793 | 2,135 | py | Python | problem/13_Roman_to_Integer.py | YoungYoung619/leetcode | 973fd4a971ddc80a8ceb7b8aff08a2104477e768 | [
"Apache-2.0"
] | null | null | null | problem/13_Roman_to_Integer.py | YoungYoung619/leetcode | 973fd4a971ddc80a8ceb7b8aff08a2104477e768 | [
"Apache-2.0"
] | null | null | null | problem/13_Roman_to_Integer.py | YoungYoung619/leetcode | 973fd4a971ddc80a8ceb7b8aff08a2104477e768 | [
"Apache-2.0"
] | null | null | null | """
Copyright (c) College of Mechatronics and Control Engineering, Shenzhen University.
All rights reserved.
Description :
AuthorTeam Li
"""
"""
13. Roman to Integer
Easy
Roman numerals are represented by seven different symbols: I, V, X, L, C, D and M.
Symbol Value
I 1
V 5
X 10
L 50
C 100
D 500
M 1000
For example, two is written as II in Roman numeral, just two one's added together. Twelve is written as, XII, which is simply X + II. The number twenty seven is written as XXVII, which is XX + V + II.
Roman numerals are usually written largest to smallest from left to right. However, the numeral for four is not IIII. Instead, the number four is written as IV. Because the one is before the five we subtract it making four. The same principle applies to the number nine, which is written as IX. There are six instances where subtraction is used:
I can be placed before V (5) and X (10) to make 4 and 9.
X can be placed before L (50) and C (100) to make 40 and 90.
C can be placed before D (500) and M (1000) to make 400 and 900.
Given a roman numeral, convert it to an integer. Input is guaranteed to be within the range from 1 to 3999.
Example 1:
Input: "III"
Output: 3
Example 2:
Input: "IV"
Output: 4
Example 3:
Input: "IX"
Output: 9
Example 4:
Input: "LVIII"
Output: 58
Explanation: L = 50, V= 5, III = 3.
Example 5:
Input: "MCMXCIV"
Output: 1994
Explanation: M = 1000, CM = 900, XC = 90 and IV = 4.
"""
def romanToInt(s):
    """Convert a Roman numeral string to its integer value.

    Handles the six subtractive pairs (IV, IX, XL, XC, CD, CM) by checking
    two-character prefixes before falling back to single symbols.

    :type s: str
    :rtype: int (0 for an empty string)
    """
    # BUGFIX: the original returned sub_roman2int(0, 0), a name that is never
    # defined, and ignored `s` entirely -- it could not run.
    c2num_normal = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    c2num_sp = {'IV': 4, 'IX': 9, 'XL': 40, 'XC': 90, 'CD': 400, 'CM': 900}
    total = 0
    i = 0
    while i < len(s):
        pair = s[i:i + 2]
        if pair in c2num_sp:
            # Subtractive notation consumes two characters at once.
            total += c2num_sp[pair]
            i += 2
        else:
            total += c2num_normal[s[i]]
            i += 1
    return total
f04fe6a0ae66518e152066d2f7472e765f8bd343 | 2,173 | py | Python | tests/test_cli.py | KoichiYasuoka/pynlpir | 8d5e994796a2b5d513f7db8d76d7d24a85d531b1 | [
"MIT"
] | 537 | 2015-01-12T09:59:57.000Z | 2022-03-29T09:22:30.000Z | tests/test_cli.py | KoichiYasuoka/pynlpir | 8d5e994796a2b5d513f7db8d76d7d24a85d531b1 | [
"MIT"
] | 110 | 2015-01-02T13:17:56.000Z | 2022-03-24T07:43:02.000Z | tests/test_cli.py | KoichiYasuoka/pynlpir | 8d5e994796a2b5d513f7db8d76d7d24a85d531b1 | [
"MIT"
] | 150 | 2015-01-21T01:58:56.000Z | 2022-02-23T16:16:40.000Z | """Unit tests for pynlpir's cli.py file."""
import os
import shutil
import stat
import unittest
try:
from urllib.error import URLError
from urllib.request import urlopen
except ImportError:
from urllib2 import URLError, urlopen
from click.testing import CliRunner
from pynlpir import cli
# Directory containing this test module; used to locate bundled test data.
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
# Sample NLPIR license file shipped under the test data directory.
LICENSE_FILE = os.path.join(TEST_DIR, 'data', 'NLPIR.user')
def can_reach_github():
    """Return True if an HTTP request to GitHub succeeds, False on URLError."""
    try:
        urlopen('http://github.com')
    except URLError:
        return False
    return True
| 32.432836 | 73 | 0.645651 |
f05121798a0e9a1fc1209e3a2d1e90910d1020e8 | 13,746 | py | Python | src/old/parsemod/cft_expr.py | TeaCondemns/cofty | 0228c8f0e85c8b977586cc88123e198c64953dcb | [
"Apache-2.0",
"MIT"
] | 1 | 2022-02-17T18:14:58.000Z | 2022-02-17T18:14:58.000Z | src/old/parsemod/cft_expr.py | TeaCondemns/cofty | 0228c8f0e85c8b977586cc88123e198c64953dcb | [
"Apache-2.0",
"MIT"
] | 2 | 2022-03-12T19:33:44.000Z | 2022-03-13T11:14:12.000Z | src/old/parsemod/cft_expr.py | TeaCondemns/cofty | 0228c8f0e85c8b977586cc88123e198c64953dcb | [
"Apache-2.0",
"MIT"
] | null | null | null | from cft_namehandler import NameHandler, get_value_returned_type, get_local_name, get_abs_composed_name
from parsemod.cft_name import is_name, is_kw, compose_name
from parsemod.cft_syntaxtree_values import str_type
from parsemod.cft_others import extract_tokens
from cft_errors_handler import ErrorsHandler
from compile.cft_compile import get_num_type
from py_utils import isnotfinished
from lexermod.cft_token import *
import parsemod.cft_ops as ops
from copy import deepcopy
def _is_value_expression(
        tokens: list[Token] | Token,
        errors_handler: ErrorsHandler,
        path: str,
        namehandler: NameHandler,
        i: int = 0
) -> bool:
    """Return True when the token span starting at index `i` can be read as a
    value expression `<expr>`: a literal, a name, a bracketed group, a unary
    or binary operator expression, or a name-call expression.

    NOTE(review): relies on helpers imported/defined elsewhere in the project
    (`extract_tokens`, `is_name`, `is_kw`, `ops.is_op`,
    `_is_name_call_expression`); their exact semantics are not visible here.
    """
    # Normalize to the sub-list of tokens starting at `i`; None means there
    # is nothing left to examine.
    tokens = extract_tokens(tokens, i)
    if tokens is None:
        return False
    if len(tokens) == 1:
        # Single token: number/string literal, a known name, or True/False.
        if tokens[0].type in (TokenTypes.NUMBER, TokenTypes.STRING) or is_name(
                tokens[0], errors_handler, path, namehandler, debug_info=_is_value_expression.__name__
        ) or is_kw(tokens[0], ('True', 'False')):
            return True
        if tokens[0].type == TokenTypes.TUPLE:
            # A tuple is a value expression only if every element is one.
            for item in tokens[0].value:
                if not _is_value_expression(item, errors_handler, path, namehandler):
                    return False
            return True
        if tokens[0].type in (TokenTypes.PARENTHESIS, TokenTypes.SQUARE_BRACKETS, TokenTypes.CURLY_BRACES):
            # Empty brackets count as a value; otherwise recurse into content.
            return not tokens[0].value or _is_value_expression(tokens[0].value, errors_handler, path, namehandler)
    elif ops.is_op(tokens[0], source=ops.LOPS) and _is_value_expression(tokens, errors_handler, path, namehandler, 1):
        # LOPS check: leading (unary) operator followed by a value expression.
        return True
    else:
        # Look for the first middle (binary) operator in the span.
        iop = -1
        for k in range(len(tokens)):
            if ops.is_op(tokens[k], source=ops.MIDDLE_OPS):
                iop = k
                break
        if iop != -1:
            # Binary form: left side must be a call or a value expression,
            # and everything after the operator must be a value expression.
            if (_is_name_call_expression(
                    tokens[:iop], errors_handler, path, namehandler, without_tail=True
            ) or _is_value_expression(
                tokens[:iop], errors_handler, path, namehandler
            )) and _is_value_expression(tokens, errors_handler, path, namehandler, iop + 1):
                return True
        elif _is_name_call_expression(tokens, errors_handler, path, namehandler, without_tail=True):
            # calling name expression check
            return True
    return False
# Public names exported by this module.
# NOTE(review): `_generate_expression_syntax_object` and `_is_type_expression`
# are listed here but not defined in this part of the file -- presumably
# defined elsewhere in the module; confirm before relying on them.
__all__ = (
    '_is_value_expression',
    '_generate_expression_syntax_object',
    '_is_type_expression'
)
f05136d88821cb5ba7ae7358c44d2b30837eb2b2 | 938 | py | Python | src/ychaos/cli/exceptions/__init__.py | vanderh0ff/ychaos | 5148c889912b744ee73907e4dd30c9ddb851aeb3 | [
"Apache-2.0"
] | 8 | 2021-07-21T15:37:48.000Z | 2022-03-03T14:43:09.000Z | src/ychaos/cli/exceptions/__init__.py | vanderh0ff/ychaos | 5148c889912b744ee73907e4dd30c9ddb851aeb3 | [
"Apache-2.0"
] | 102 | 2021-07-20T16:08:29.000Z | 2022-03-25T07:28:37.000Z | src/ychaos/cli/exceptions/__init__.py | vanderh0ff/ychaos | 5148c889912b744ee73907e4dd30c9ddb851aeb3 | [
"Apache-2.0"
] | 8 | 2021-07-20T13:37:46.000Z | 2022-02-18T01:44:52.000Z | # Copyright 2021, Yahoo
# Licensed under the terms of the Apache 2.0 license. See the LICENSE file in the project root for terms
from abc import abstractmethod
from typing import Any, Dict
| 33.5 | 105 | 0.687633 |
f051fa84986a7c9f1470f505282adadb784afefe | 5,289 | py | Python | ibmsecurity/isam/base/fips.py | zone-zero/ibmsecurity | 7d3e38104b67e1b267e18a44845cb756a5302c3d | [
"Apache-2.0"
] | 46 | 2017-03-21T21:08:59.000Z | 2022-02-20T22:03:46.000Z | ibmsecurity/isam/base/fips.py | zone-zero/ibmsecurity | 7d3e38104b67e1b267e18a44845cb756a5302c3d | [
"Apache-2.0"
] | 201 | 2017-03-21T21:25:52.000Z | 2022-03-30T21:38:20.000Z | ibmsecurity/isam/base/fips.py | zone-zero/ibmsecurity | 7d3e38104b67e1b267e18a44845cb756a5302c3d | [
"Apache-2.0"
] | 91 | 2017-03-22T16:25:36.000Z | 2022-02-04T04:36:29.000Z | import logging
import ibmsecurity.utilities.tools
import time
# Module-level logger for the FIPS configuration helpers.
logger = logging.getLogger(__name__)
# Deployment model required by the /fips_cfg endpoints; compared against
# isamAppliance.facts['model'] (see restart_and_wait) and passed to the
# invoke_* calls as `requires_model`.
requires_model = "Appliance"
def get(isamAppliance, check_mode=False, force=False):
    """Retrieve the current FIPS Mode configuration (GET /fips_cfg).

    `check_mode` and `force` are accepted for interface consistency with the
    other functions in this module but are not used by this read-only call.
    """
    description = "Retrieving the current FIPS Mode configuration"
    return isamAppliance.invoke_get(description, "/fips_cfg",
                                    requires_model=requires_model)
def set(isamAppliance, fipsEnabled=True, tlsv10Enabled=True, tlsv11Enabled=False, check_mode=False, force=False):
    """Update the FIPS Mode configuration (PUT /fips_cfg).

    The PUT is only issued when `_check` reports a difference from the
    current appliance state, or when `force` is True. In check mode the
    change is reported but not applied.
    """
    # `_check` compares the desired settings against the current state;
    # obj['value'] is False when an update is needed.
    obj = _check(isamAppliance, fipsEnabled, tlsv10Enabled, tlsv11Enabled)
    if force is not True and obj['value'] is not False:
        # Already in the desired state -- nothing to do.
        return isamAppliance.create_return_object(warnings=obj['warnings'])
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True, warnings=obj['warnings'])
    payload = {
        "fipsEnabled": fipsEnabled,
        "tlsv10Enabled": tlsv10Enabled,
        "tlsv11Enabled": tlsv11Enabled
    }
    return isamAppliance.invoke_put(
        "Updating the FIPS Mode configuration",
        "/fips_cfg",
        payload,
        requires_model=requires_model
    )
def restart(isamAppliance, check_mode=False, force=False):
    """Reboot the appliance to enable the FIPS Mode configuration
    (PUT /fips_cfg/restart with an empty body).

    :param isamAppliance: appliance connection object
    :param check_mode: when True, report the change without issuing the PUT
    :param force: accepted for interface consistency; unused here
    :return: return-object from the appliance call
    """
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    return isamAppliance.invoke_put(
        "Rebooting and enabling the FIPS Mode configuration",
        "/fips_cfg/restart",
        {}, requires_model=requires_model
    )
def restart_and_wait(isamAppliance, wait_time=300, check_freq=5, check_mode=False, force=False):
    """
    Restart after FIPS configuration changes, then poll until the appliance
    comes back with a new boot time or `wait_time` is exceeded.

    NOTE(review): this function calls `ibmsecurity.isam.base.firmware.get`,
    but only `ibmsecurity.utilities.tools` is imported at the top of this
    module -- confirm the firmware submodule is importable at runtime.

    :param isamAppliance: appliance connection object
    :param wait_time: max seconds to wait for the appliance to come back
    :param check_freq: seconds between polls
    :param check_mode: when True, report the change without restarting
    :param force: passed through to the firmware `get` calls
    :return: return-object, with a warning appended on timeout
    """
    # These endpoints only exist on appliance-model deployments.
    if isamAppliance.facts['model'] != "Appliance":
        return isamAppliance.create_return_object(
            warnings="API invoked requires model: {0}, appliance is of deployment model: {1}.".format(
                requires_model, isamAppliance.facts['model']))
    warnings = []
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    else:
        # Snapshot the firmware info (incl. last_boot) before restarting so
        # we can detect when the appliance has actually rebooted.
        firmware = ibmsecurity.isam.base.firmware.get(isamAppliance, check_mode=check_mode, force=force)
        ret_obj = restart(isamAppliance)
        if ret_obj['rc'] == 0:
            sec = 0
            # Now check if it is up and running
            while 1:
                ret_obj = ibmsecurity.isam.base.firmware.get(isamAppliance, check_mode=check_mode, force=force,
                                                             ignore_error=True)
                # check partition last_boot time: either partition having a
                # changed last_boot AND being active means the reboot finished
                if ret_obj['rc'] == 0 and isinstance(ret_obj['data'], list) and len(ret_obj['data']) > 0 and \
                        (('last_boot' in ret_obj['data'][0] and ret_obj['data'][0]['last_boot'] != firmware['data'][0][
                            'last_boot'] and ret_obj['data'][0]['active'] == True) or (
                                 'last_boot' in ret_obj['data'][1] and ret_obj['data'][1]['last_boot'] !=
                                 firmware['data'][1]['last_boot'] and ret_obj['data'][1]['active'] == True)):
                    logger.info("Server is responding and has a different boot time!")
                    return isamAppliance.create_return_object(warnings=warnings)
                else:
                    # Not back yet: sleep, accumulate elapsed time, and bail
                    # out with a warning once wait_time is reached.
                    time.sleep(check_freq)
                    sec += check_freq
                    logger.debug(
                        "Server is not responding yet. Waited for {0} secs, next check in {1} secs.".format(sec,
                                                                                                            check_freq))
                    if sec >= wait_time:
                        warnings.append(
                            "The FIPS restart not detected or completed, exiting... after {0} seconds".format(sec))
                        break
    return isamAppliance.create_return_object(warnings=warnings)
| 36.729167 | 120 | 0.592362 |
f052b9fc28af42e699049bdfe2b0ac01d467c316 | 187 | py | Python | user_details/give_default.py | Shreyanshsachan/College-Predictor | 87068aa1d1a889ced586ff155bc2b5d9a78340f7 | [
"MIT"
] | null | null | null | user_details/give_default.py | Shreyanshsachan/College-Predictor | 87068aa1d1a889ced586ff155bc2b5d9a78340f7 | [
"MIT"
] | null | null | null | user_details/give_default.py | Shreyanshsachan/College-Predictor | 87068aa1d1a889ced586ff155bc2b5d9a78340f7 | [
"MIT"
] | null | null | null | preference_list_of_user=[] | 20.777778 | 31 | 0.84492 |
f0543d6f05ce0f050bd473fdf2e6349ee2b5262a | 953 | py | Python | face_recognition/project/type_hints.py | dgr113/face-recognition | edda6ca8fef567d24ae740afd2399b66166f3431 | [
"MIT"
] | null | null | null | face_recognition/project/type_hints.py | dgr113/face-recognition | edda6ca8fef567d24ae740afd2399b66166f3431 | [
"MIT"
] | null | null | null | face_recognition/project/type_hints.py | dgr113/face-recognition | edda6ca8fef567d24ae740afd2399b66166f3431 | [
"MIT"
] | null | null | null | # coding: utf-8
import numpy as np
import keras.utils
from pathlib import Path
from typing import Tuple, Sequence, Union, Hashable, Iterable, Mapping, Any
### COMMON TYPES
# Filesystem path supplied either as a pathlib.Path or a plain string.
UNIVERSAL_PATH_TYPE = Union[Path, str]
# A data source: either a path or an already-loaded mapping.
UNIVERSAL_SOURCE_TYPE = Union[UNIVERSAL_PATH_TYPE, Mapping]
# Paired chunks: a sequence of arrays with a parallel sequence of ints.
CHUNKED_DATA_TYPE = Tuple[Sequence[np.ndarray], Sequence[int]]
# One key, or an iterable of keys.
ONE_MORE_KEYS = Union[Hashable, Iterable[Hashable]]
# Validation result: (message, optional details mapping).
# NOTE(review): name contains a typo ("RESUTS"); kept as-is because renaming
# would break importers of this module.
VALIDATE_RESUTS_TYPE = Tuple[str, Union[Mapping, None]]
# Four integers -- presumably a bounding box (x, y, w, h); TODO confirm.
COORDS_TYPE = Tuple[int, int, int, int]
# A sequence of keys, or None.
KEYS_OR_NONE_TYPE = Union[Sequence[Hashable], None]
### USER INPUT DATA TYPES
# Person records keyed by id/name mapping to string attributes.
PERSONS_DATA_TYPE = Mapping[str, Mapping[str, str]]
# Free-form camera configuration mapping.
CAMERA_DATA_TYPE = Mapping[str, Any]
# Frame shape as (height, width, channels) -- TODO confirm axis order.
FRAME_SHAPE_TYPE = Tuple[int, int, int]
### LEARN MODEL TYPES
# Model configuration values: strings, ints, lists, or None.
MODEL_CONFIG_TYPE = Mapping[str, Union[str, int, list, None]]
# Training inputs / labels as sequences of numpy arrays.
TRAIN_DATA_TYPE = Sequence[np.ndarray]
TRAIN_LABELS_TYPE = Sequence[np.ndarray]
# Either an in-memory (data, labels) pair or a keras Sequence generator.
TRAIN_DATA_GEN_TYPE = Union[Tuple[TRAIN_DATA_TYPE, TRAIN_LABELS_TYPE], keras.utils.Sequence]
| 31.766667 | 92 | 0.781742 |
f056587ea945052209153f421a5cb7898e82cd98 | 4,021 | py | Python | modules/augmentation.py | AdamMiltonBarker/hias-all-oneapi-classifier | 7afdbcde0941b287df2e153d64e14d06f2341aa2 | [
"MIT"
] | 1 | 2021-04-30T21:13:11.000Z | 2021-04-30T21:13:11.000Z | modules/augmentation.py | AdamMiltonBarker/hias-all-oneapi-classifier | 7afdbcde0941b287df2e153d64e14d06f2341aa2 | [
"MIT"
] | 3 | 2021-09-18T20:02:05.000Z | 2021-09-21T19:18:16.000Z | modules/augmentation.py | AIIAL/oneAPI-Acute-Lymphoblastic-Leukemia-Classifier | 05fb9cdfa5069b16cfe439be6d94d21b9eb21723 | [
"MIT"
] | 1 | 2021-09-19T01:19:40.000Z | 2021-09-19T01:19:40.000Z | #!/usr/bin/env python
""" HIAS AI Model Data Augmentation Class.
Provides data augmentation methods.
MIT License
Copyright (c) 2021 Asociacin de Investigacion en Inteligencia Artificial
Para la Leucemia Peter Moss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files(the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Contributors:
- Adam Milton-Barker - First version - 2021-5-2
"""
import cv2
import random
import numpy as np
from numpy.random import seed
from scipy import ndimage
from skimage import transform as tm
| 32.691057 | 99 | 0.665257 |
f05664f755b3f673d926831f9475fb250a901f2c | 792 | py | Python | pytglib/api/types/rich_text_phone_number.py | iTeam-co/pytglib | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 6 | 2019-10-30T08:57:27.000Z | 2021-02-08T14:17:43.000Z | pytglib/api/types/rich_text_phone_number.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 1 | 2021-08-19T05:44:10.000Z | 2021-08-19T07:14:56.000Z | pytglib/api/types/rich_text_phone_number.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 5 | 2019-12-04T05:30:39.000Z | 2021-05-21T18:23:32.000Z |
from ..utils import Object
| 21.405405 | 54 | 0.585859 |
f0571f9fa4afbc250ffbbd1dd3bed40b228bfe8d | 46 | py | Python | cne/__init__.py | BartWojtowicz/cne | 16612292c1c938fc9ec53a14642fb7d40bcc9e25 | [
"Apache-2.0"
] | null | null | null | cne/__init__.py | BartWojtowicz/cne | 16612292c1c938fc9ec53a14642fb7d40bcc9e25 | [
"Apache-2.0"
] | null | null | null | cne/__init__.py | BartWojtowicz/cne | 16612292c1c938fc9ec53a14642fb7d40bcc9e25 | [
"Apache-2.0"
] | null | null | null | from .cne import CNE
__version__ = "0.0.dev"
| 11.5 | 23 | 0.695652 |
f0574e6e18439c3b0140a8ed7ddefec8cd1bf416 | 299 | py | Python | tests/core/scenario_finder/file_filters/test_file_filter.py | nikitanovosibirsk/vedro | e975a1c1eb065bc6caa32c41c0d7576ee6d284db | [
"Apache-2.0"
] | 2 | 2021-08-24T12:49:30.000Z | 2022-01-23T07:21:25.000Z | tests/core/scenario_finder/file_filters/test_file_filter.py | nikitanovosibirsk/vedro | e975a1c1eb065bc6caa32c41c0d7576ee6d284db | [
"Apache-2.0"
] | 20 | 2015-12-09T11:04:23.000Z | 2022-03-20T09:18:17.000Z | tests/core/scenario_finder/file_filters/test_file_filter.py | nikitanovosibirsk/vedro | e975a1c1eb065bc6caa32c41c0d7576ee6d284db | [
"Apache-2.0"
] | 3 | 2015-12-09T07:31:23.000Z | 2022-01-28T11:03:24.000Z | from pytest import raises
from vedro._core._scenario_finder._file_filters import FileFilter
| 24.916667 | 79 | 0.769231 |
f058c8ff047b3d9464b8a867f927fcd03d622d8f | 31,894 | py | Python | concept_disc/pubmed_dump.py | nmonath/concept_discovery | 766905684e598159cc6fb58967ed411888b93ce5 | [
"MIT"
] | 3 | 2020-09-10T13:48:23.000Z | 2021-08-19T21:42:50.000Z | concept_disc/pubmed_dump.py | nmonath/concept_discovery | 766905684e598159cc6fb58967ed411888b93ce5 | [
"MIT"
] | null | null | null | concept_disc/pubmed_dump.py | nmonath/concept_discovery | 766905684e598159cc6fb58967ed411888b93ce5 | [
"MIT"
] | 3 | 2020-10-16T21:57:04.000Z | 2020-12-26T00:59:32.000Z | """
Parse PubMed Dump
Ref:
https://www.nlm.nih.gov/databases/download/pubmed_medline.html
https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html
https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html#medlinecitation
"""
from collections import defaultdict
from concurrent import futures
import glob
import gzip
import multiprocessing
import os
from pathlib import Path
import re
from threading import Thread
from typing import Dict, Generator, List, Optional, Sequence, Set, Union
# noinspection PyPep8Naming
import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
from .misc import PersistentObject
# -----------------------------------------------------------------------------
# Globals
# -----------------------------------------------------------------------------
# Root of the project tree on the developer's machine.
BASE_DIR = os.path.expanduser('~/Home/Projects/ConceptRecogn')
# Location of the Ab3P abbreviation-detection tool and its executable name.
AB3P_DIR = os.path.join(BASE_DIR, 'Tools', 'Ab3P')
AB3P_CMD = './identify_abbr'
# Pre-compiled pattern matching any run of whitespace.
SPACES_PATT = re.compile(r'\s+')
# Marker value used to signal end-of-stream / placeholder entries.
SENTINEL = '_SENTINEL_'
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
# /
def get_mesh_headings_xml(self) -> List[ET.Element]:
return self.article_xml.findall("./MedlineCitation/MeshHeadingList")
def get_supplemental_mesh_xml(self) -> List[ET.Element]:
"""
This info includes Supplemental Records on: Protocols, Diseases, Organisms
"""
return self.article_xml.findall("./MedlineCitation/SupplMeshList")
def get_chemicals_xml(self) -> List[ET.Element]:
return self.article_xml.findall("./MedlineCitation/ChemicalList")
def get_keywords_xml(self) -> List[ET.Element]:
return self.article_xml.findall("./MedlineCitation/KeywordList")
    def _parse_title_abstract(self):
        """Lazily populate self._title, self._is_english and self._abstract
        from the article XML. Runs at most once, guarded by _title_parsed.
        """
        if self._title_parsed:
            return
        is_english = True
        title = extract_subelem_text(self.article_xml.find("./MedlineCitation/Article/ArticleTitle"))
        # Fall back to the all-caps tag variant when the title is missing or
        # the "Not Available" placeholder.
        if not title or title == "Not Available":
            title = extract_subelem_text(self.article_xml.find("./MedlineCitation/Article/ARTICLETITLE"))
        if title:
            title = title.strip()
            # Bracketed titles are treated as translations (non-English);
            # presumably following PubMed's convention -- TODO confirm.
            if title.startswith("[") and title.endswith("]"):
                title = title.strip("[]")
                is_english = False
            # Drop the trailing "(author's transl)" annotation if present.
            if title.endswith("(author's transl)"):
                title = title[:-len("(author's transl)")].strip()
            # "In Process Citation" is a placeholder, not a real title.
            if title == "In Process Citation":
                title = ""
        self._title = title
        self._is_english = is_english
        self._abstract = extract_subelem_text(self.article_xml.find("./MedlineCitation/Article/Abstract"))
        self._title_parsed = True
        return
def to_xml(self) -> ET.Element:
"""
Output format as parsed by `Article`
"""
doc = ET.Element("Article", pmid=self.pmid)
if self.source:
doc.set("source", self.source)
ET.SubElement(doc, "Title").text = self.title
ET.SubElement(doc, "Abstract").text = self.abstract
for children in [self.get_mesh_headings_xml(),
self.get_supplemental_mesh_xml(),
self.get_chemicals_xml(),
self.get_keywords_xml()]:
if children:
doc.extend(children)
return doc
def __str__(self):
return "pmid = {:s}\ntitle = {:s}\nabstract = {:s}".format(self.pmid, self.title, self.abstract)
# /
# /
# -----------------------------------------------------------------------------
# Article - from PubMed or MeSH-Dump
# -----------------------------------------------------------------------------
# /
# /
# /
# /
# /
# /
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
def extract_subelem_text(xelem):
    """
    Extracts and combines text from sub-elements of `xelem`.
    :param xml.etree.ElementTree.Element xelem: xml.etree.ElementTree.Element.
    :return: str, or None when `xelem` is None or yields no text.

    Special Cases
    -------------
    <title>GeneReviews<sup></sup></title> => 'GeneReviews '
    R<sub>0</sub> => R0
    <i>text</i> => text
    <b>text</b> => text
    <u>text</u> => text

    will be extracted as 'GeneReviews '.
    This is not strictly correct, but when tokenizing, will generate separate token for 'GeneReviews',
    which is desirable.
    """
    txt = None
    if xelem is not None:
        txt = ''
        # Walk all descendants (incl. xelem itself) in document order.
        for subelem in xelem.iter():
            if subelem.tag in ('abstract', 'title', 'p', 'sup', 'list-item):
                if txt and not txt.endswith(' '):
                    txt += ' '
            elif subelem.tag == 'AbstractText':
                # Structured abstracts: each section starts on a new line,
                # prefixed with its Label (unless it is "UNLABELLED").
                if txt and not txt.endswith('\n'):
                    txt += '\n'
                label = subelem.get("Label")
                if label and label.upper() != "UNLABELLED":
                    txt += label + ":\n"
            elif subelem.tag == "CopyrightInformation":
                # Skip copyright boilerplate entirely.
                continue
            if subelem.text:
                txt += subelem.text
            # Nested titles act as section headers: append ': ' separator.
            if subelem is not xelem and subelem.tag == 'title' and not txt.endswith(('. ', ': ')):
                txt += ': '
            if subelem.tail:
                # Remove "\n" from subelem.tail
                txt += re.sub(r"\s+", " ", subelem.tail)
        if not txt:
            txt = None
    # Final whitespace/punctuation normalization (see clean_text).
    return clean_text(txt)
def clean_text(txt):
    """Normalize whitespace in `txt`; returns None unchanged.

    Steps (order matters):
      1. strip, then collapse runs of non-newline whitespace to one space;
      2. drop spaces adjacent to newlines;
      3. collapse runs of newlines to a single newline;
      4. remove the space before [,:.] when a space also follows it.
    """
    if txt is None:
        return None
    rules = (
        (r'((?!\n)\s)+', ' '),
        (r' ?\n ?', '\n'),
        (r'\n+', '\n'),
        (r' ([,:.]) ', r'\1 '),
    )
    cleaned = txt.strip()
    for pattern, replacement in rules:
        cleaned = re.sub(pattern, replacement, cleaned)
    return cleaned
def parse_dump_file(pubmed_dump_file: str) -> List[PubmedDocument]:
    """Parse one PubMed dump file (optionally gzipped) into a list of
    PubmedDocument instances, one per <PubmedArticle>.

    Elements of type "PubmedBookArticle" are ignored.
    """
    if pubmed_dump_file.endswith(".gz"):
        with gzip.open(pubmed_dump_file) as f:
            ftxt = f.read().decode("UTF-8")
    else:
        with open(pubmed_dump_file) as f:
            ftxt = f.read()
    root = ET.fromstring(ftxt)
    return [PubmedDocument.from_xml(doc_root)
            for doc_root in root.iterfind("./PubmedArticle")]
def lazy_parse_dump_file(pubmed_dump_file: str):
    """Yield LazyPubmedDocument instances for each <PubmedArticle> in a
    (possibly gzipped) PubMed dump file.

    Elements of type "PubmedBookArticle" are ignored.

    :param pubmed_dump_file: path to the dump file
    """
    if pubmed_dump_file.endswith(".gz"):
        with gzip.open(pubmed_dump_file) as f:
            ftxt = f.read().decode("UTF-8")
    else:
        with open(pubmed_dump_file) as f:
            ftxt = f.read()
    root = ET.fromstring(ftxt)
    for doc_root in root.iterfind("./PubmedArticle"):
        yield LazyPubmedDocument.from_pubmed_xml(doc_root, source=pubmed_dump_file)
def extract_from_pubmed_dump(pubmed_dump_file: str,
                             output_dir: str,
                             pmids_file: str = None,
                             max_docs: int = 0,
                             verbose=False):
    """Extract documents from one PubMed dump file, writing each to
    "{output_dir}/{pmid}.xml".

    :param pubmed_dump_file: path to the (possibly gzipped) dump file
    :param output_dir: destination directory; created if missing
    :param pmids_file: optional file with one PMID per line -- when given,
        only those articles are written
    :param max_docs: stop after this many docs (0 = no limit)
    :param verbose: print progress to stdout
    :return: number of documents written
    """
    # Build the PMID allow-list, if one was supplied.
    wanted_pmids = None
    if pmids_file is not None:
        with open(os.path.expanduser(pmids_file)) as pmids_f:
            wanted_pmids = {line.strip() for line in pmids_f}
    output_dir = os.path.expanduser(output_dir)
    if not Path(output_dir).exists():
        print("Creating dir:", output_dir)
        Path(output_dir).mkdir()
    if verbose:
        print("Extracting from pubmed dump:", pubmed_dump_file, flush=True)
    written = 0
    for doc in lazy_parse_dump_file(pubmed_dump_file):
        if wanted_pmids and doc.pmid not in wanted_pmids:
            continue
        out_path = f"{output_dir}/{doc.pmid}.xml"
        ET.ElementTree(doc.to_xml()).write(out_path, encoding="unicode", xml_declaration=True)
        if verbose:
            print("  ", doc.pmid, flush=True)
        written += 1
        if 0 < max_docs <= written:
            break
    return written
def extract_proc_one(pubmed_dump_files_or_patt: Union[str, List[str]],
                     output_dir: str,
                     pmids_file: str,
                     proc_nbr: int,
                     nprocs: int):
    """
    Called from `extract_from_pubmed_dump_mp`; handles the work of process
    `proc_nbr` out of `nprocs` by processing every `nprocs`-th dump file
    starting at index `proc_nbr` (after a stable sort).

    :param pubmed_dump_files_or_patt: glob pattern or list of dump paths
    :param output_dir: destination directory for extracted docs
    :param pmids_file: file of PMIDs to extract, one per line
    :param proc_nbr: in range [0, nprocs - 1]
    :param nprocs: >= 1
    :return: proc_nbr, Nbr docs written
    """
    assert 0 <= proc_nbr < nprocs
    if isinstance(pubmed_dump_files_or_patt, List):
        pubmed_dump_files = [os.path.expanduser(f) for f in pubmed_dump_files_or_patt]
    else:
        print(f"extract_proc_one[{proc_nbr}]: pubmed_dump_files_or_patt =", pubmed_dump_files_or_patt,
              flush=True)
        pubmed_dump_files = glob.glob(os.path.expanduser(pubmed_dump_files_or_patt))
        print("extract_proc_one[{}]: nbr dump files = {:,d}".format(proc_nbr, len(pubmed_dump_files)), flush=True)
    # Sort so every process sees the same ordering, then take this worker's
    # interleaved slice: indices proc_nbr, proc_nbr + nprocs, ...
    my_files = sorted(pubmed_dump_files)[proc_nbr::nprocs]
    tot_docs_found = sum(
        extract_from_pubmed_dump(dump_file, output_dir, pmids_file, verbose=False)
        for dump_file in my_files)
    return proc_nbr, tot_docs_found
def extract_from_pubmed_dump_mp(pubmed_dump_files_or_patt: Union[str, List[str]],
                                output_dir: str,
                                pmids_file: str,
                                nprocs: int):
    """
    Run `nprocs` processes to extract docs of specified PMID.
    :param pubmed_dump_files_or_patt: Glob pattern or list of paths containing Pubmed-Dump
    :param output_dir: Where each doc will be written as a file: "{output_dir}/{pmid}.xml"
    :param pmids_file: One PMID per line
    :param nprocs: number of worker processes to start
    """
    print("extract_from_pubmed_dump_mp:")
    print("    pubmed_dump_files_or_patt =", pubmed_dump_files_or_patt)
    print("    output_dir =", output_dir)
    print("    pmids_file =", pmids_file)
    # Create the output dir up front so the workers don't race to create it.
    output_dir = os.path.expanduser(output_dir)
    if not Path(output_dir).exists():
        print("Creating dir:", output_dir)
        Path(output_dir).mkdir()
    print('Starting {} processes ...'.format(nprocs), flush=True)
    # Using a process pool to start the sub-processes. Allows gathering return values.
    # With this method, Queue instance must be inherited by the sub-processes (e.g. as a global);
    # passing queue as an arg results in RuntimeError.
    with futures.ProcessPoolExecutor(max_workers=nprocs) as executor:
        # Each worker i processes every nprocs-th dump file (see extract_proc_one).
        results = executor.map(extract_proc_one,
                               [pubmed_dump_files_or_patt] * nprocs,
                               [output_dir] * nprocs,
                               [pmids_file] * nprocs,
                               range(nprocs), [nprocs] * nprocs)
    # Get return values ... possible if processes started using ProcessPoolExecutor
    tot_docs_found = 0
    for (proc_nbr, docs_found) in results:
        print('... Subprocess {:d} found {:,d} docs'.format(proc_nbr, docs_found))
        tot_docs_found += docs_found
    print('Total nbr docs written = {:,d}'.format(tot_docs_found))
    return
def build_index(pubmed_dump_files_or_patt: Union[str, List[str]],
                output_file: str,
                nprocs: int):
    """Build a PubmedDumpIndex over the given dump files and save it as a
    pickle to `output_file`, using `nprocs` worker processes.
    """
    # Import class here so that load from pickle does not report errors
    # noinspection PyUnresolvedReferences
    from cr.pubmed.pubmed_dump import PubmedDumpIndex
    PubmedDumpIndex.build_save_index(pubmed_dump_files_or_patt, output_file, nprocs)
    return
# ======================================================================================================
# Main
# ======================================================================================================
# Invoke as: python -m pubmed_dump CMD ...
# Command-line entry point. Invoke as: python -m pubmed_dump CMD ...
if __name__ == '__main__':
    import argparse
    from datetime import datetime
    from .misc import print_cmd
    _argparser = argparse.ArgumentParser(
        description='PubMed Dump Parser.',
    )
    _subparsers = _argparser.add_subparsers(dest='subcmd',
                                            title='Available commands',
                                            )
    # Make the sub-commands required
    _subparsers.required = True
    # ... extract [-n NBR_PROCS] DUMP_PATH_PATTERN PMIDS_FILE OUTPUT_DIR
    _sub_cmd_parser = _subparsers.add_parser('extract', help="Extract articles for specific PMIDs.")
    _sub_cmd_parser.add_argument('-n', '--nbr_procs', type=int, default=4,
                                 help="Nbr of sub-processes.")
    _sub_cmd_parser.add_argument('dump_path_pattern', type=str,
                                 help="Pattern for path to PubMed Dump files")
    _sub_cmd_parser.add_argument('pmids_file', type=str,
                                 help="Path to file containing PMIDs")
    _sub_cmd_parser.add_argument('output_dir', type=str,
                                 help="Output dir")
    # ... build_index [-n NBR_PROCS] DUMP_PATH_PATTERN PMIDS_FILE OUTPUT_DIR
    _sub_cmd_parser = _subparsers.add_parser('build_index',
                                             help="Build and save PubmedDumpIndex.",
                                             description=("e.g.: " +
                                                          "python -m pubmed_dump build_index -n 10 " +
                                                          "'../../PubMed/Data/D20191215/*.xml.gz' " +
                                                          "../../PubMed/Data/D20191215/pubmed_dump_index.pkl"))
    _sub_cmd_parser.add_argument('-n', '--nbr_procs', type=int, default=4,
                                 help="Nbr of sub-processes.")
    _sub_cmd_parser.add_argument('dump_path_pattern', type=str,
                                 help="Pattern for path to PubMed Dump files")
    _sub_cmd_parser.add_argument('output_file', type=str,
                                 help="Path to where PubmedDumpIndex will be written as a Pickle file")
    # ...
    _args = _argparser.parse_args()
    # .................................................................................................
    start_time = datetime.now()
    print()
    print_cmd()
    # Dispatch to the implementation matching the chosen sub-command.
    if _args.subcmd == 'extract':
        extract_from_pubmed_dump_mp(_args.dump_path_pattern, _args.output_dir, _args.pmids_file, _args.nbr_procs)
    elif _args.subcmd == 'build_index':
        build_index(_args.dump_path_pattern, _args.output_file, _args.nbr_procs)
    else:
        raise NotImplementedError(f"Command not implemented: {_args.subcmd}")
    # /
    print('\nTotal Run time =', datetime.now() - start_time)
f05afdbd5aec954079117e24e6a1f75f80dba71c | 1,523 | py | Python | Consumer_test.py | image-store-org/image-store-py-web-api-consumer-test | 59d805e8a7b459a97ede7285f6e4a67e87cfba02 | [
"MIT"
] | null | null | null | Consumer_test.py | image-store-org/image-store-py-web-api-consumer-test | 59d805e8a7b459a97ede7285f6e4a67e87cfba02 | [
"MIT"
] | null | null | null | Consumer_test.py | image-store-org/image-store-py-web-api-consumer-test | 59d805e8a7b459a97ede7285f6e4a67e87cfba02 | [
"MIT"
] | null | null | null | import sys
sys.path.append('dependencies/image-store-py-web-api-consumer')
from Consumer import Consumer
# Smoke-test driver exercising the consumer's HTTP endpoints in sequence.
if __name__ == '__main__':
    # NOTE(review): only `Consumer` is imported above, but `Consumer_test`
    # is instantiated here -- its definition is not visible in this excerpt;
    # confirm it exists elsewhere (or that `Consumer` was intended).
    consumer = Consumer_test()
    consumer.get()
    consumer.get_id(1)
    consumer.post()
    consumer.get_latest()
consumer.put(10) | 30.46 | 69 | 0.570584 |
f05b7050495370891bf951394304c4e6b993404b | 864 | py | Python | Algorithms/MostCommonWord/mostCommonWord.py | riddhi-27/HacktoberFest2020-Contributions | 0a5c39169723b3ea3b6447d4005896900dd789bc | [
"MIT"
] | null | null | null | Algorithms/MostCommonWord/mostCommonWord.py | riddhi-27/HacktoberFest2020-Contributions | 0a5c39169723b3ea3b6447d4005896900dd789bc | [
"MIT"
] | null | null | null | Algorithms/MostCommonWord/mostCommonWord.py | riddhi-27/HacktoberFest2020-Contributions | 0a5c39169723b3ea3b6447d4005896900dd789bc | [
"MIT"
] | null | null | null | """Returns words from the given paragraph which has been repeated most,
incase of more than one words, latest most common word is returned. """
import string
print(mostCommonWord("HacktoberFest is live! Riddhi is participating in HACKtoBERfEST.Happy Coding.")) #Output: hacktoberfest | 34.56 | 125 | 0.66088 |
f05bdaed59cf5073cab62db01710a16ba5ff7771 | 7,597 | py | Python | app/views.py | PaulMurrayCbr/GameNight | 838c19dda765027abbe8e12e331268b01cb859c2 | [
"Unlicense"
] | null | null | null | app/views.py | PaulMurrayCbr/GameNight | 838c19dda765027abbe8e12e331268b01cb859c2 | [
"Unlicense"
] | null | null | null | app/views.py | PaulMurrayCbr/GameNight | 838c19dda765027abbe8e12e331268b01cb859c2 | [
"Unlicense"
] | null | null | null | from app import app, db
from flask import render_template, flash, redirect, get_flashed_messages
import forms
import models
import Character
from flask.globals import request
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
| 26.939716 | 94 | 0.612874 |
f05d0c3401f69142c582ade92cb02f323289bd68 | 183 | py | Python | dingomata/config/__init__.py | tigershadowclaw/discord-dingomata | 0b20d7b75a0af3387b19b17c336e5e14356d2f14 | [
"MIT"
] | null | null | null | dingomata/config/__init__.py | tigershadowclaw/discord-dingomata | 0b20d7b75a0af3387b19b17c336e5e14356d2f14 | [
"MIT"
] | null | null | null | dingomata/config/__init__.py | tigershadowclaw/discord-dingomata | 0b20d7b75a0af3387b19b17c336e5e14356d2f14 | [
"MIT"
] | null | null | null | from .bot import GuildConfig, ServiceConfig, get_logging_config, service_config
__all__ = [
"ServiceConfig",
"GuildConfig",
"get_logging_config",
"service_config",
]
| 20.333333 | 79 | 0.721311 |
f05da004efb57fa8123a5d8084bba03a6cd27ce9 | 623 | py | Python | create_tacacs.py | cromulon-actual/ise_automation | de3fbb762c3e1f4f41d81dda3bd2d33a11db1d58 | [
"MIT"
] | null | null | null | create_tacacs.py | cromulon-actual/ise_automation | de3fbb762c3e1f4f41d81dda3bd2d33a11db1d58 | [
"MIT"
] | null | null | null | create_tacacs.py | cromulon-actual/ise_automation | de3fbb762c3e1f4f41d81dda3bd2d33a11db1d58 | [
"MIT"
] | null | null | null | from ciscoisesdk import IdentityServicesEngineAPI
from ciscoisesdk.exceptions import ApiError
from dotenv import load_dotenv
import os
from pprint import pprint as ppr
load_dotenv()
admin = os.getenv("ISE_ADMIN")
pw = os.getenv("ISE_PW")
base_url = os.getenv("ISE_URL")
api = IdentityServicesEngineAPI(
username=admin, password=pw, base_url=base_url, version="3.0.0", verify=False)
print("=" * 50)
# Get Admin Users
search_result = api.admin_user.get_all()
ppr(search_result.response)
print("=" * 50)
# Get All TACACS Users
search_result = api.tacacs_profile.get_all()
ppr(search_result.response)
print("=" * 50)
| 23.074074 | 82 | 0.764045 |
f05e57abf8788d483966f72cb158032481ce2596 | 4,121 | py | Python | 8_plot_data_perstation.py | sdat2/Yellowstone2 | 4008145b7372f5f5901de584656ceea44e73934a | [
"MIT"
] | null | null | null | 8_plot_data_perstation.py | sdat2/Yellowstone2 | 4008145b7372f5f5901de584656ceea44e73934a | [
"MIT"
] | null | null | null | 8_plot_data_perstation.py | sdat2/Yellowstone2 | 4008145b7372f5f5901de584656ceea44e73934a | [
"MIT"
] | null | null | null | # Program 8_plot_data_perstation.py written by Sanne Cottaar (sc845@cam.ac.uk)
file_name= '8_plot_data_perstation.py'
# Uses receiver functions computed to produce a nice graph for every directory in DATARF
import obspy
from obspy import read
from obspy.core import Stream
from obspy.core import trace
import matplotlib.pyplot as plt
import os.path
import time
import glob
import shutil
import numpy as np
from obspy import UTCDateTime
import receiver_function as rf
direc = 'DataRF'
flag = 'SV'
filt = 'jgf1'
stadirs = glob.glob(direc+'/*')
for stadir in stadirs:
print(stadir)
with open(stadir+'/selected_RFs_jgf1.dat','r') as f:
goodrfs= f.read().replace('\n', '')
# loop through events
stalist=glob.glob(stadir+'/*.PICKLE')
print(stalist)
c=0
# Loop through data
if(len(stalist)>0):
for i in range(len(stalist)): #range(cat.count()):
print(stalist[i])
seis=read(stalist[i],format='PICKLE')
distdg=seis[0].stats['dist']
if stalist[i] in goodrfs:
good=True
print('YAY',seis[0].stats['event'].magnitudes[0].mag)
else:
good=False
print('NO',seis[0].stats['event'].magnitudes[0].mag)
tshift=UTCDateTime(seis[0].stats['starttime'])-seis[0].stats['event'].origins[0].time
#Ptime=Ptime
plt.subplot(1,3,1)
vertical = seis.select(channel='BHZ')[0]
vertical.filter('bandpass', freqmin=0.01,freqmax=.1, corners=2, zerophase=True)
windowed=vertical[np.where(vertical.times()>seis[0].stats.traveltimes['P']-100) and np.where(vertical.times()<seis[0].stats.traveltimes['P']+100)]
norm=np.max(np.abs(windowed))
if good:
plt.plot(vertical.times()-seis[0].stats.traveltimes['P'], vertical.data/norm+np.round(distdg),'k')
else:
plt.plot(vertical.times()-seis[0].stats.traveltimes['P'], vertical.data/norm+np.round(distdg),'r')
#plt.plot(seis[0].stats.traveltimes['P'],np.round(distdg),'.b')
#plt.plot(seis[0].stats.traveltimes['S'],np.round(distdg),'.g')
plt.xlim([-25,150])
plt.ylim([30,92])
plt.subplot(1,3,2)
radial = seis.select(channel='BHR')[0]
radial.filter('bandpass', freqmin=0.01,freqmax=.1, corners=2, zerophase=True)
windowed=vertical[np.where(radial.times()>seis[0].stats.traveltimes['P']-100) and np.where(radial.times()<seis[0].stats.traveltimes['P']+100)]
norm=np.max(np.abs(windowed))
if good:
plt.plot(radial.times()-seis[0].stats.traveltimes['P'], radial.data/norm+np.round(distdg),'k')
else:
plt.plot(radial.times()-seis[0].stats.traveltimes['P'], radial.data/norm+np.round(distdg),'r')
plt.xlim([-25,150])
plt.plot(seis[0].stats.traveltimes['P'],np.round(distdg),'.b')
plt.plot(seis[0].stats.traveltimes['S'],np.round(distdg),'.g')
plt.ylim([30,92])
plt.subplot(1,3,3)
RF=getattr(seis[0],filt)['iterativedeconvolution']
time=getattr(seis[0],filt)['time']
if good:
plt.plot(time, RF/np.max(np.abs(RF))+np.round(distdg),'k')
else:
plt.plot(time, RF/np.max(np.abs(RF))+np.round(distdg),'r')
plt.subplot(1,3,1)
plt.title('vertical')
plt.ylabel('distance')
plt.xlabel('time')
plt.subplot(1,3,2)
plt.title('radial')
plt.ylabel('distance')
plt.xlabel('time')
plt.subplot(1,3,3)
plt.title('receiver functions')
plt.ylabel('distance')
plt.xlabel('time')
#plt.xlim([-150,1000])
plt.show()
| 40.009709 | 162 | 0.542344 |
f061361346e5c53e6f9bfc725e3bc9a264fe2453 | 9,053 | py | Python | archon/__init__.py | HyechurnJang/archon | 2cda56436ed6dea65d38774f7c9ed6c3315dbc03 | [
"Apache-2.0"
] | 1 | 2018-03-07T08:33:23.000Z | 2018-03-07T08:33:23.000Z | archon/__init__.py | HyechurnJang/archon | 2cda56436ed6dea65d38774f7c9ed6c3315dbc03 | [
"Apache-2.0"
] | 2 | 2017-03-14T01:02:55.000Z | 2017-03-14T01:07:29.000Z | archon/__init__.py | HyechurnJang/archon | 2cda56436ed6dea65d38774f7c9ed6c3315dbc03 | [
"Apache-2.0"
] | 4 | 2017-02-03T04:53:07.000Z | 2020-04-20T07:52:47.000Z | # -*- coding: utf-8 -*-
################################################################################
# _____ _ _____ _ #
# / ____(_) / ____| | | #
# | | _ ___ ___ ___ | (___ _ _ ___| |_ ___ _ __ ___ ___ #
# | | | / __|/ __/ _ \ \___ \| | | / __| __/ _ \ '_ ` _ \/ __| #
# | |____| \__ \ (_| (_) | ____) | |_| \__ \ || __/ | | | | \__ \ #
# \_____|_|___/\___\___/ |_____/ \__, |___/\__\___|_| |_| |_|___/ #
# __/ | #
# |___/ #
# _ __ _____ _ _____ ______ #
# | |/ / / ____| | |/ ____| ____| #
# | ' / ___ _ __ ___ __ _ | (___ ___ | | (___ | |__ #
# | < / _ \| '__/ _ \/ _` | \___ \ / _ \| |\___ \| __| #
# | . \ (_) | | | __/ (_| | ____) | (_) | |____) | |____ #
# |_|\_\___/|_| \___|\__,_| |_____/ \___/|_|_____/|______| #
# #
################################################################################
# #
# Copyright (c) 2016 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
import re
import json
from pygics import Burst
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from archon.settings import SESSION_COOKIE_AGE
from archon.view import *
ARCHON_DEBUG = False
def pageview(manager_class, **async_path):
return wrapper
def modelview(model):
admin.site.register(model, admin.ModelAdmin)
| 43.946602 | 117 | 0.435436 |
f0618ae5c1b87db23e1c15aeed2890efe625454b | 283 | py | Python | src_Python/EtabsAPIaface0/a01comtypes/Excel03c.py | fjmucho/APIdeEtabsYPython | a5c7f7fe1861c4ac3c9370ef06e291f94c6fd523 | [
"MIT"
] | null | null | null | src_Python/EtabsAPIaface0/a01comtypes/Excel03c.py | fjmucho/APIdeEtabsYPython | a5c7f7fe1861c4ac3c9370ef06e291f94c6fd523 | [
"MIT"
] | null | null | null | src_Python/EtabsAPIaface0/a01comtypes/Excel03c.py | fjmucho/APIdeEtabsYPython | a5c7f7fe1861c4ac3c9370ef06e291f94c6fd523 | [
"MIT"
] | null | null | null | import sys
import comtypes
from comtypes.client import CreateObject
try:
# Connecting | coneccion
xl = CreateObject("Excel.Application")
except (OSError, comtypes.COMError):
print("No tiene instalada el programa(Excel).")
sys.exit(-1)
xl.Visible = True
print (xl) | 20.214286 | 49 | 0.720848 |
f0625e6b2d07feed6a373b43052746c1a7b2640c | 984 | py | Python | home/tests/add-remove sector.py | caggri/FOFviz | 776ab387d832a86eea1a1b9064040d9b012494a7 | [
"MIT"
] | 2 | 2020-05-24T22:28:53.000Z | 2020-05-25T21:58:24.000Z | home/tests/add-remove sector.py | caggri/FOFviz | 776ab387d832a86eea1a1b9064040d9b012494a7 | [
"MIT"
] | null | null | null | home/tests/add-remove sector.py | caggri/FOFviz | 776ab387d832a86eea1a1b9064040d9b012494a7 | [
"MIT"
] | 1 | 2021-10-16T12:26:29.000Z | 2021-10-16T12:26:29.000Z | from selenium import webdriver
import time
chromedriver = "C:/Users/deniz/chromedriver/chromedriver"
driver = webdriver.Chrome(chromedriver)
driver.get('http://127.0.0.1:8000/')
dashboard = '//*[@id="accordionSidebar"]/li[1]/a'
sectors_1 = '//*[@id="sectors"]'
sectors_1_element = '//*[@id="sectors"]/option[4]'
add_sector = '//*[@id="select_filter_form"]/div[1]/input[1]'
remove_sector = '//*[@id="select_filter_form"]/div[1]/input[2]'
sectors_2 = '//*[@id="sectors2"]'
sectors_2_element = '//*[@id="sectors2"]/option[4]'
time.sleep(2)
driver.find_element_by_xpath(dashboard).click()
time.sleep(5)
driver.find_element_by_xpath(sectors_1).click()
time.sleep(2)
driver.find_element_by_xpath(sectors_1_element).click()
time.sleep(5)
driver.find_element_by_xpath(add_sector).click()
time.sleep(5)
driver.find_element_by_xpath(sectors_2).click()
time.sleep(2)
driver.find_element_by_xpath(sectors_2_element).click()
time.sleep(5)
driver.find_element_by_xpath(remove_sector).click()
| 29.818182 | 63 | 0.747967 |
f065f569bc87da0b1005e3822cbd92500b510024 | 1,713 | py | Python | netensorflow/api_samples/ann_creation_and_usage.py | psigelo/NeTensorflow | ec8bc09cc98346484d1b682a3dfd25c68c4ded61 | [
"MIT"
] | null | null | null | netensorflow/api_samples/ann_creation_and_usage.py | psigelo/NeTensorflow | ec8bc09cc98346484d1b682a3dfd25c68c4ded61 | [
"MIT"
] | null | null | null | netensorflow/api_samples/ann_creation_and_usage.py | psigelo/NeTensorflow | ec8bc09cc98346484d1b682a3dfd25c68c4ded61 | [
"MIT"
] | null | null | null | import tensorflow as tf
from netensorflow.ann.ANN import ANN
from netensorflow.ann.macro_layer.MacroLayer import MacroLayer
from netensorflow.ann.macro_layer.layer_structure.InputLayerStructure import InputLayerStructure
from netensorflow.ann.macro_layer.layer_structure.LayerStructure import LayerStructure, LayerType
from netensorflow.ann.macro_layer.layer_structure.layers.FullConnected import FullConnected
from netensorflow.ann.macro_layer.layer_structure.layers.FullConnectedWithSoftmaxLayer import FullConnectedWithSoftmaxLayer
'''
ann Creation and simple usage, the goal of this code is simply run the most simpler artificial neural network
'''
if __name__ == '__main__':
main()
| 37.23913 | 123 | 0.782837 |
f0693b36a74b3acf0a861ff3c1c73f7355633501 | 3,449 | py | Python | auth-backend.py | alexanderbittner/spotify-tracks | 9095d0224f7e313d164a5da24add2b806afc1b31 | [
"MIT"
] | null | null | null | auth-backend.py | alexanderbittner/spotify-tracks | 9095d0224f7e313d164a5da24add2b806afc1b31 | [
"MIT"
] | null | null | null | auth-backend.py | alexanderbittner/spotify-tracks | 9095d0224f7e313d164a5da24add2b806afc1b31 | [
"MIT"
] | null | null | null | import json
from flask import Flask, request, redirect, g, render_template
import requests
from urllib.parse import quote
# Adapted from https://github.com/drshrey/spotify-flask-auth-example
# Authentication Steps, paramaters, and responses are defined at https://developer.spotify.com/web-api/authorization-guide/
app = Flask(__name__)
# Client & Token Files
CLIENT_ID_FILE = 'auth/client-id'
CLIENT_SECRET_FILE = 'auth/client-secret'
TOKEN_FILE = 'auth/token'
REFRESH_FILE = 'auth/refresh-token'
# Spotify URLS
SPOTIFY_AUTH_URL = "https://accounts.spotify.com/authorize"
SPOTIFY_TOKEN_URL = "https://accounts.spotify.com/api/token"
SPOTIFY_API_BASE_URL = "https://api.spotify.com"
API_VERSION = "v1"
SPOTIFY_API_URL = "{}/{}".format(SPOTIFY_API_BASE_URL, API_VERSION)
# Server-side Parameters
CLIENT_SIDE_URL = "http://127.0.0.1"
PORT = 876
REDIRECT_URI = "{}:{}/callback/q".format(CLIENT_SIDE_URL, PORT)
SCOPE = "user-read-playback-state user-modify-playback-state"
STATE = ""
SHOW_DIALOG_bool = True
SHOW_DIALOG_str = str(SHOW_DIALOG_bool).lower()
# Client Keys
with open(CLIENT_ID_FILE, 'r') as id:
CLIENT_ID = id.read()
with open(CLIENT_SECRET_FILE, 'r') as secret:
CLIENT_SECRET = secret.read()
auth_query_parameters = {
"response_type": "code",
"redirect_uri": REDIRECT_URI,
"scope": SCOPE,
"client_id": CLIENT_ID
}
if __name__ == "__main__":
app.run(debug=True, port=PORT) | 31.354545 | 123 | 0.701653 |
f069eb952ff8678a357b70b75757dce90f676973 | 3,342 | py | Python | Regression/utils_testing.py | saucec0de/sifu | 7924844e1737c7634016c677237bccd7e7651818 | [
"MIT"
] | 5 | 2021-03-26T08:19:43.000Z | 2021-12-18T18:04:04.000Z | Regression/utils_testing.py | saucec0de/sifu | 7924844e1737c7634016c677237bccd7e7651818 | [
"MIT"
] | null | null | null | Regression/utils_testing.py | saucec0de/sifu | 7924844e1737c7634016c677237bccd7e7651818 | [
"MIT"
] | null | null | null | import yaml
import os
### Sample Contents of config.yaml:
# 0002_info_leakage:
# category: Sifu C/C++
# points: 100
# description: Leave no trace
# vulnerability: CWE-14 * Information Leakage
# directory: Challenges/C_CPP/0002_info_leakage
# send_dir: true
# file: func_0009.c
# fname: func.c
# chal_id: c94062933919
# root: template
# root_file: chal_files.html
# run: ./run.py
# flag: f296-5420-65a9-7fc8
# type: c_makefile
# disable: false
# feedback: collect
# addHeader: |
# #define __OVERWRITE
# #include "utils.h"
# #include "deprecated.h"
# #include "redirect.h"
# #include "log.h"
localPath = os.path.join(os.path.dirname(__file__))
def FilesToJson(files, path=localPath):
"""
returns a {filename: contents} dict for
the given files on the given path
"""
contents = {}
# for multiple files, iterate over each
if type(files)==list:
for file in files:
with open(os.path.join(path, file)) as f:
contents[file]=f.read()
# for just one, do the deed
elif type(files)==str:
with open(os.path.join(path, files)) as f:
contents[files]=f.read()
# if we're here, we screwed up
else:
raise TypeError('[utils_testing] excuse me')
return contents
def makeIOforTest(path, inFileNames, outFileNames):
"""
Use to generate the test parametrization lists
----
Inputs: root path, expected input file names, expected output file names
Output: lists of one dict per param set (to be used with zip when parametrizing)
{
in_params:
[{inSet1_file1: inSet1_file1_contents, ..},
{inSet2_file2: inSet2__file2_contents}]
out_params:
[{outSet1_file1: outSet1_file1_contents, ..},
{outSet2_file2: outSet2__file2_contents}]
}
"""
test_in = []
test_out = []
for (dirpath, _, filenames) in os.walk(path):
if 'tc-' in dirpath:
files_in = {}
files_out = {}
for file in inFileNames:
files_in[file] = fileContentsToStr(os.path.join(dirpath,file))
for file in outFileNames:
files_out[file] = fileContentsToStr(os.path.join(dirpath,file))
test_in.append(files_in)
test_out.append(files_out)
return {'in_params': test_in,
'out_params': test_out}
if __name__=='__main__':
# local 'testing'
print("chalID for '0002_info_leakage' is:", chalNameToChalID('0002_info_leakage') )
print("files and filenames:\n", getFilesForChalID(chalNameToChalID('0002_info_leakage')))
print(FilesToJson(getFilesForChalID(chalNameToChalID('0002_info_leakage'))['fileNames'], path='../Challenges/C_CPP/0001_buffer_overflow'))
print("\n\n")
EgPathAsSeenByTests = '0002_info_leakage'
inFiles = ['database.json', 'func_0009.c']
outFiles = ['ai.json', 'log.txt']
outFiles_noLog = ['ai.json']
print(makeIOforTest('IO/0002_info_leakage', inFiles, outFiles))
| 28.810345 | 142 | 0.595751 |
f06a56919cbaa9b5814f0dd5b244fec4364f26b3 | 423 | py | Python | python/arachne/runtime/rpc/logger.py | fixstars/arachne | 03c00fc5105991d0d706b935d77e6f9255bae9e7 | [
"MIT"
] | 3 | 2022-03-29T03:02:20.000Z | 2022-03-29T03:48:38.000Z | python/arachne/runtime/rpc/logger.py | fixstars/arachne | 03c00fc5105991d0d706b935d77e6f9255bae9e7 | [
"MIT"
] | null | null | null | python/arachne/runtime/rpc/logger.py | fixstars/arachne | 03c00fc5105991d0d706b935d77e6f9255bae9e7 | [
"MIT"
] | 1 | 2022-03-29T05:44:12.000Z | 2022-03-29T05:44:12.000Z | import logging
| 26.4375 | 88 | 0.72104 |
f06ae02416b8f8f9bb909dbd1c4d484476e5b8f7 | 4,498 | py | Python | examples/pykey60/code-1.py | lesley-byte/pykey | ce21b5b6c0da938bf24891e5acb196d6779c433a | [
"MIT"
] | null | null | null | examples/pykey60/code-1.py | lesley-byte/pykey | ce21b5b6c0da938bf24891e5acb196d6779c433a | [
"MIT"
] | null | null | null | examples/pykey60/code-1.py | lesley-byte/pykey | ce21b5b6c0da938bf24891e5acb196d6779c433a | [
"MIT"
] | null | null | null | #pylint: disable = line-too-long
import os
import time
import board
import neopixel
import keypad
import usb_hid
import pwmio
import rainbowio
from adafruit_hid.keyboard import Keyboard
from pykey.keycode import KB_Keycode as KC
from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
# Hardware definition: GPIO where RGB LED is connected.
pixel_pin = board.NEOPIXEL
num_pixels = 61
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=1, auto_write=False)
cyclecount = 0
buzzer = pwmio.PWMOut(board.SPEAKER, variable_frequency=True)
OFF = 0
ON = 2**15
# Hardware definition: Switch Matrix Setup.
keys = keypad.KeyMatrix(
row_pins=(board.ROW1, board.ROW2, board.ROW3, board.ROW4, board.ROW5),
column_pins=(board.COL1, board.COL2, board.COL3, board.COL4, board.COL5, board.COL6, board.COL7,
board.COL8, board.COL9, board.COL10, board.COL11, board.COL12, board.COL13, board.COL14),
columns_to_anodes=True,
)
# CONFIGURABLES ------------------------
MACRO_FOLDER = '/layers'
# CLASSES AND FUNCTIONS ----------------
# Neopixel update function
def update_pixels(color):
for i in range(num_pixels):
pixels[i] = color
pixels.show()
# INITIALIZATION -----------------------
# Load all the macro key setups from .py files in MACRO_FOLDER
layers = []
files = os.listdir(MACRO_FOLDER)
files.sort()
for filename in files:
print(filename)
if filename.endswith('.py'):
try:
module = __import__(MACRO_FOLDER + '/' + filename[:-3])
layers.append(Layer(module.layer))
except (SyntaxError, ImportError, AttributeError, KeyError, NameError,
IndexError, TypeError) as err:
print(err)
pass
if not layers:
print('NO MACRO FILES FOUND')
while True:
pass
layer_count = len(layers)
# print(layer_count)
# setup variables
keyboard = Keyboard(usb_hid.devices)
keyboard_layout = KeyboardLayoutUS(keyboard)
active_keys = []
not_sleeping = True
layer_index = 0
buzzer.duty_cycle = ON
buzzer.frequency = 440 #
time.sleep(0.05)
buzzer.frequency = 880 #
time.sleep(0.05)
buzzer.frequency = 440 #
time.sleep(0.05)
buzzer.duty_cycle = OFF
while not_sleeping:
key_event = keys.events.get()
if key_event:
key_number = key_event.key_number
cyclecount = cyclecount +1
rainbow_cycle(cyclecount)
# keep track of keys being pressed for layer determination
if key_event.pressed:
active_keys.append(key_number)
else:
active_keys.remove(key_number)
# reset the layers and identify which layer key is pressed.
layer_keys_pressed = []
for active_key in active_keys:
group = layers[0].macros[active_key][2]
for item in group:
if isinstance(item, int):
if (item >= KC.LAYER_0) and (item <= KC.LAYER_F) :
layer_keys_pressed.append(item - KC.LAYER_0)
layer_index = get_active_layer(layer_keys_pressed, layer_count)
# print(layer_index)
# print(layers[layer_index].macros[key_number][1])
group = layers[layer_index].macros[key_number][2]
color = layers[layer_index].macros[key_number][0]
if key_event.pressed:
update_pixels(color)
for item in group:
if isinstance(item, int):
keyboard.press(item)
else:
keyboard_layout.write(item)
else:
for item in group:
if isinstance(item, int):
if item >= 0:
keyboard.release(item)
#update_pixels(0x000000)
time.sleep(0.002)
| 28.289308 | 106 | 0.631392 |
f06b5ca0b13a5293cc2597359395e328535fbb92 | 433 | py | Python | tags.py | Manugs51/TFM_Metaforas | 3fb459cf80c71e6fbb1c2a58d20bc03a05a760bd | [
"MIT"
] | null | null | null | tags.py | Manugs51/TFM_Metaforas | 3fb459cf80c71e6fbb1c2a58d20bc03a05a760bd | [
"MIT"
] | null | null | null | tags.py | Manugs51/TFM_Metaforas | 3fb459cf80c71e6fbb1c2a58d20bc03a05a760bd | [
"MIT"
] | null | null | null |
UNIVERSAL_POS_TAGS = {
'VERB': 'verbo',
'NOUN': 'nombre',
'PRON': 'pronombre',
'ADJ' : 'adjetivo',
'ADV' : 'adverbio',
'ADP' : 'aposicin',
'CONJ': 'conjuncin',
'DET' : 'determinante',
'NUM' : 'numeral',
'PRT' : 'partcula gramatical',
'X' : 'desconocido',
'.' : 'signo de puntuacin',
}
BABEL = {
'v': 'verbo',
'n': 'nombre',
'a': 'adjetivo',
'r': 'adverbio',
} | 19.681818 | 35 | 0.484988 |
f06b803afcbf533b08fafb38257aef61240b8c46 | 942 | py | Python | pyramid_oereb/contrib/data_sources/standard/sources/availability.py | pyramidoereb/pyramid_oereb | 764c03e98e01ebc709cd17bd0ffd817bfe318892 | [
"BSD-2-Clause"
] | 2 | 2018-01-23T13:16:12.000Z | 2018-01-26T06:27:29.000Z | pyramid_oereb/contrib/data_sources/standard/sources/availability.py | camptocamp/pyramid_oereb | 2d33aceb796f0afada6728820fa9d4691f7e273a | [
"BSD-2-Clause"
] | 298 | 2017-08-30T07:12:10.000Z | 2019-01-31T10:52:07.000Z | pyramid_oereb/contrib/data_sources/standard/sources/availability.py | pyramidoereb/pyramid_oereb | 764c03e98e01ebc709cd17bd0ffd817bfe318892 | [
"BSD-2-Clause"
] | 4 | 2017-12-01T09:51:42.000Z | 2018-11-21T11:02:47.000Z |
from pyramid_oereb.core.sources import BaseDatabaseSource
from pyramid_oereb.core.sources.availability import AvailabilityBaseSource
| 34.888889 | 108 | 0.632696 |
f06c0a436a6bd375e7b53e8c96203ec37cc92572 | 1,624 | py | Python | tests/fixtures/specification.py | FlyingBird95/openapi_generator | df4649b9723eb89fa370b02220356b7596794069 | [
"MIT"
] | 3 | 2022-01-10T12:43:36.000Z | 2022-01-13T18:08:15.000Z | tests/fixtures/specification.py | FlyingBird95/openapi_generator | df4649b9723eb89fa370b02220356b7596794069 | [
"MIT"
] | 6 | 2021-12-09T20:08:19.000Z | 2021-12-21T13:31:54.000Z | tests/fixtures/specification.py | FlyingBird95/openapi-builder | df4649b9723eb89fa370b02220356b7596794069 | [
"MIT"
] | 2 | 2021-12-17T17:26:06.000Z | 2021-12-17T17:39:00.000Z | from pytest_factoryboy import register
from tests.factories.specification import (
CallbackFactory,
ComponentsFactory,
ContactFactory,
DiscriminatorFactory,
EncodingFactory,
ExampleFactory,
ExternalDocumentationFactory,
HeaderFactory,
InfoFactory,
LicenseFactory,
LinkFactory,
MediaTypeFactory,
OAuthFlowFactory,
OAuthFlowsFactory,
OpenAPIFactory,
OperationFactory,
ParameterFactory,
PathItemFactory,
PathsFactory,
ReferenceFactory,
RequestBodyFactory,
ResponseFactory,
ResponsesFactory,
SchemaFactory,
SecurityRequirementFactory,
SecuritySchemeFactory,
ServerFactory,
ServerVariableFactory,
TagFactory,
)
register(OpenAPIFactory)
register(InfoFactory)
register(ContactFactory)
register(LicenseFactory)
register(ServerFactory)
register(ServerVariableFactory)
register(ComponentsFactory)
register(PathsFactory)
register(PathItemFactory)
register(OperationFactory)
register(ExternalDocumentationFactory)
register(ParameterFactory)
register(RequestBodyFactory)
register(MediaTypeFactory)
register(EncodingFactory)
register(ResponsesFactory)
register(ResponseFactory)
register(CallbackFactory)
register(ExampleFactory)
register(LinkFactory)
register(HeaderFactory)
register(TagFactory)
register(ReferenceFactory)
register(SchemaFactory)
register(SchemaFactory, "second_schema")
register(DiscriminatorFactory)
register(SecuritySchemeFactory)
register(OAuthFlowsFactory, "oauth_flows")
register(OAuthFlowFactory, "oauth_flow")
register(OAuthFlowFactory, "second_oauth_flow")
register(SecurityRequirementFactory)
| 24.606061 | 47 | 0.816502 |
f06e460c939f7ca739d389122382d4b13d1f8d29 | 3,856 | py | Python | app.py | samstruthers35/sqlalchemy-challenge | 0022e7459fc59a7bee85489f8d264a8aee9c01c8 | [
"ADSL"
] | null | null | null | app.py | samstruthers35/sqlalchemy-challenge | 0022e7459fc59a7bee85489f8d264a8aee9c01c8 | [
"ADSL"
] | null | null | null | app.py | samstruthers35/sqlalchemy-challenge | 0022e7459fc59a7bee85489f8d264a8aee9c01c8 | [
"ADSL"
] | null | null | null | import datetime as dt
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify, render_template
engine = create_engine('sqlite:///hawaii.sqlite')
Base = automap_base()
Base.prepare(engine, reflect=True)
Station = Base.classes.station
Measurement = Base.classes.measurement
session = Session(engine)
app = Flask(__name__)
if __name__ == "__main__":
app.run(debug=True) | 33.824561 | 117 | 0.625778 |
f06e635c1ec15823a66500dd05606d30ee6110ce | 4,096 | py | Python | WebSocket_Chat_Room/chat_room_v001/handlers/login.py | MMingLeung/Python_Study | 4ff1d02d2b6dd54e96f7179fa000548936b691e7 | [
"MIT"
] | 3 | 2017-12-27T14:08:17.000Z | 2018-02-10T13:01:08.000Z | WebSocket_Chat_Room/chat_room_v001/handlers/login.py | MMingLeung/Python_Study | 4ff1d02d2b6dd54e96f7179fa000548936b691e7 | [
"MIT"
] | 4 | 2017-05-24T10:37:05.000Z | 2021-06-10T18:35:32.000Z | WebSocket_Chat_Room/chat_room_v001/handlers/login.py | MMingLeung/Python_Study | 4ff1d02d2b6dd54e96f7179fa000548936b691e7 | [
"MIT"
] | 1 | 2018-02-14T19:05:30.000Z | 2018-02-14T19:05:30.000Z | #!/usr/bin/env python
#! -*- coding: utf-8 -*-
'''
Handler for login
'''
import tornado.web
from lib.db_controller import DBController
from lib.CUSTOMIZED_SESSION.my_session import SessionFactory
| 41.795918 | 138 | 0.541992 |
f06ebdf27eb473116d5a5a69d7c99a59502c6586 | 409 | py | Python | hackerrank/python/introduction/function.py | wingkwong/competitive-programming | e8bf7aa32e87b3a020b63acac20e740728764649 | [
"MIT"
] | 18 | 2020-08-27T05:27:50.000Z | 2022-03-08T02:56:48.000Z | hackerrank/python/introduction/function.py | wingkwong/competitive-programming | e8bf7aa32e87b3a020b63acac20e740728764649 | [
"MIT"
] | null | null | null | hackerrank/python/introduction/function.py | wingkwong/competitive-programming | e8bf7aa32e87b3a020b63acac20e740728764649 | [
"MIT"
] | 1 | 2020-10-13T05:23:58.000Z | 2020-10-13T05:23:58.000Z |
year = int(input())
print(is_leap(year)) | 34.083333 | 75 | 0.628362 |
f06f16ee399ccb9faac16cda8b08d3cc4df552cb | 1,480 | py | Python | projectenv/main/forms.py | rzsaglam/project-env | f4c02b15cf924ba5d69d8a4a89efcc686b73aa9c | [
"MIT"
] | null | null | null | projectenv/main/forms.py | rzsaglam/project-env | f4c02b15cf924ba5d69d8a4a89efcc686b73aa9c | [
"MIT"
] | null | null | null | projectenv/main/forms.py | rzsaglam/project-env | f4c02b15cf924ba5d69d8a4a89efcc686b73aa9c | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth import models
from django.db.models.base import Model
from django.forms import ModelForm, fields
from .models import Paint
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
| 27.924528 | 85 | 0.667568 |
f06f2cf97d8da48c7ae640dd4974c12d832537f5 | 3,398 | py | Python | njdate/hebdfind.py | schorrm/njdate | 5a31d944973904b75f1dbac811fc7393aaa4ed7c | [
"MIT"
] | 4 | 2019-07-16T19:58:42.000Z | 2021-11-17T14:50:17.000Z | njdate/hebdfind.py | schorrm/njdate | 5a31d944973904b75f1dbac811fc7393aaa4ed7c | [
"MIT"
] | null | null | null | njdate/hebdfind.py | schorrm/njdate | 5a31d944973904b75f1dbac811fc7393aaa4ed7c | [
"MIT"
] | null | null | null | # Takes two years, and runs an aggressive search for dates in between those two years (inclusive).
import njdate.gematria as gematria
import njdate.ej_generic as ej_generic
import string
specpunc = string.punctuation.replace('"','').replace("'","")
tr_table = str.maketrans("","",specpunc)
# For dropped Tafs etc, so we need to add 400 years after what we've found, etc
| 57.59322 | 110 | 0.700706 |
f0706f06dae68a2eb12befe8740b73ce25344c53 | 10,323 | py | Python | tests/test_cli.py | redglue/brickops | 77fbe0da295f69b2b8bfebd0ec2c8b3bfdb1046b | [
"BSD-3-Clause"
] | null | null | null | tests/test_cli.py | redglue/brickops | 77fbe0da295f69b2b8bfebd0ec2c8b3bfdb1046b | [
"BSD-3-Clause"
] | 3 | 2019-07-23T16:38:14.000Z | 2021-06-02T03:55:23.000Z | tests/test_cli.py | aquicore/apparate | bc0d9a5db2ffb863ddde4ff61ac2ac0dbc8f1bad | [
"BSD-3-Clause"
] | null | null | null | import logging
from os.path import expanduser, join
from unittest import mock
import pytest
from click.testing import CliRunner
from configparser import ConfigParser
from apparate.configure import configure
from apparate.cli_commands import upload, upload_and_update
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('apparate.cli_commands')
| 28.675 | 79 | 0.625303 |
f0726f920f21d92a489cfdea0b278639f7b0a413 | 4,632 | py | Python | pg_dicreate.py | zhuyeaini9/pytorch_test | 9654f7da144c71a65ec2665bc7128aaca5325302 | [
"Apache-2.0"
] | null | null | null | pg_dicreate.py | zhuyeaini9/pytorch_test | 9654f7da144c71a65ec2665bc7128aaca5325302 | [
"Apache-2.0"
] | null | null | null | pg_dicreate.py | zhuyeaini9/pytorch_test | 9654f7da144c71a65ec2665bc7128aaca5325302 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn.functional as F
import torch.nn as nn
import gym
from gym import spaces
import torch.optim as optim
from torch.distributions import Categorical
import random
import numpy as np
agent = PGAgent('CartPole-v0')
agent.run_n_step(300)
| 31.087248 | 98 | 0.617012 |
f072cdc953dde5ba78b66b40195edc1332c89bcf | 346 | py | Python | functions/dissectData/lambda_handler.py | zinedine-zeitnot/anomaly-detection | 2287f6488d47884d97ff618c24c379d869eb51f5 | [
"MIT"
] | 3 | 2021-04-30T12:51:01.000Z | 2021-06-04T12:51:32.000Z | functions/dissectData/lambda_handler.py | zinedine-zeitnot/anomaly-detection | 2287f6488d47884d97ff618c24c379d869eb51f5 | [
"MIT"
] | null | null | null | functions/dissectData/lambda_handler.py | zinedine-zeitnot/anomaly-detection | 2287f6488d47884d97ff618c24c379d869eb51f5 | [
"MIT"
] | null | null | null | from data_dissector import DataDissector
| 28.833333 | 69 | 0.731214 |
f0730fe6794bb60447a6e7f8e2d6de7cd6fc45d8 | 1,220 | py | Python | Chapter 3-Regression/2.py | FatiniNadhirah5/Datacamp-Machine-Learning-with-Apache-Spark-2019 | a0ef5f34c5a0aea222359a5085386f6a21611e7e | [
"FSFAP"
] | 8 | 2020-05-02T20:24:38.000Z | 2021-04-30T21:44:22.000Z | Chapter 3-Regression/2.py | FatiniNadhirah5/Machine-Learning-with-Apache-Spark | a0ef5f34c5a0aea222359a5085386f6a21611e7e | [
"FSFAP"
] | null | null | null | Chapter 3-Regression/2.py | FatiniNadhirah5/Machine-Learning-with-Apache-Spark | a0ef5f34c5a0aea222359a5085386f6a21611e7e | [
"FSFAP"
] | 9 | 2020-05-17T17:44:37.000Z | 2022-03-20T12:58:42.000Z | # Flight duration model: Just distance
# In this exercise you'll build a regression model to predict flight duration (the duration column).
# For the moment you'll keep the model simple, including only the distance of the flight (the km column) as a predictor.
# The data are in flights. The first few records are displayed in the terminal. These data have also been split into training and testing sets and are available as flights_train and flights_test.
# Instructions
# 100 XP
# Create a linear regression object. Specify the name of the label column. Fit it to the training data.
# Make predictions on the testing data.
# Create a regression evaluator object and use it to evaluate RMSE on the testing data.
from pyspark.ml.regression import LinearRegression
from pyspark.ml.evaluation import RegressionEvaluator
# Create a regression object and train on training data
regression = LinearRegression(labelCol='duration').fit(flights_train)
# Create predictions for the testing data and take a look at the predictions
predictions = regression.transform(flights_test)
predictions.select('duration', 'prediction').show(5, False)
# Calculate the RMSE
RegressionEvaluator(labelCol='duration').evaluate(predictions) | 48.8 | 195 | 0.80082 |
f07320187da09dd13226ebf15b281c23c4b206d4 | 486 | py | Python | gipsy/admin.py | marwahaha/gipsy-1 | 5d31c37cff26b9b26cd6d24e1b6de13c81ebbe6e | [
"MIT"
] | 10 | 2015-02-11T02:11:33.000Z | 2018-03-22T13:08:33.000Z | gipsy/admin.py | marwahaha/gipsy-1 | 5d31c37cff26b9b26cd6d24e1b6de13c81ebbe6e | [
"MIT"
] | 9 | 2015-01-22T15:45:44.000Z | 2015-10-19T14:18:09.000Z | gipsy/admin.py | marwahaha/gipsy-1 | 5d31c37cff26b9b26cd6d24e1b6de13c81ebbe6e | [
"MIT"
] | 7 | 2015-04-28T15:20:57.000Z | 2019-07-16T03:45:12.000Z | from django.contrib import admin
| 27 | 69 | 0.693416 |
f073a799e8b36554db301e779cfd3eed55011853 | 4,727 | py | Python | QDeblend/process/host_profiles.py | brandherd/QDeblend3D | 4e195ca027cf9fb65962ce66bf5d1f3e119b4f18 | [
"MIT"
] | null | null | null | QDeblend/process/host_profiles.py | brandherd/QDeblend3D | 4e195ca027cf9fb65962ce66bf5d1f3e119b4f18 | [
"MIT"
] | null | null | null | QDeblend/process/host_profiles.py | brandherd/QDeblend3D | 4e195ca027cf9fb65962ce66bf5d1f3e119b4f18 | [
"MIT"
] | null | null | null | import numpy, math
from scipy import special
"""
The Sersic Profile
Formulae for Sersic profile taken from Graham & Driver (2005)
bibcode: 2005PASA...22..118G
""" | 34.757353 | 80 | 0.557436 |
f07411bf6835efa66845aedc9d0915e9f4597ba2 | 1,138 | py | Python | UnitTests/FullAtomModel/CoordsTransform/test_forward.py | johahi/TorchProteinLibrary | b1fc9faa9b51c4550e5f754d075766ba38e0f8a0 | [
"MIT"
] | null | null | null | UnitTests/FullAtomModel/CoordsTransform/test_forward.py | johahi/TorchProteinLibrary | b1fc9faa9b51c4550e5f754d075766ba38e0f8a0 | [
"MIT"
] | null | null | null | UnitTests/FullAtomModel/CoordsTransform/test_forward.py | johahi/TorchProteinLibrary | b1fc9faa9b51c4550e5f754d075766ba38e0f8a0 | [
"MIT"
] | null | null | null | import sys
import os
import torch
import numpy as np
from TorchProteinLibrary.FullAtomModel.CoordsTransform import CoordsTranslate, getRandomTranslation, getBBox, CoordsRotate, getRandomRotation
from TorchProteinLibrary.FullAtomModel import Angles2Coords, Coords2TypedCoords
if __name__=='__main__':
sequences = ['GGGGGG', 'GGAARRRRRRRRR']
angles = torch.zeros(2, 7,len(sequences[1]), dtype=torch.double)
angles[:,0,:] = -1.047
angles[:,1,:] = -0.698
angles[:,2:,:] = 110.4*np.pi/180.0
a2c = Angles2Coords()
protein, res_names, atom_names, num_atoms = a2c(angles, sequences)
test_translation(protein, num_atoms)
test_rotation(protein, num_atoms)
| 25.863636 | 141 | 0.748682 |
f076212c69c217204a0f335bc5923354550eed68 | 671 | py | Python | tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_93456241.py | eduardojdiniz/CompNeuro | 20269e66540dc4e802273735c97323020ee37406 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 2,294 | 2020-05-11T12:05:35.000Z | 2022-03-28T21:23:34.000Z | tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_93456241.py | pellet/course-content | bb383857992469e0e7a9c36639ac0d05e842d9bd | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 629 | 2020-05-11T15:42:26.000Z | 2022-03-29T12:23:35.000Z | tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_93456241.py | pellet/course-content | bb383857992469e0e7a9c36639ac0d05e842d9bd | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 917 | 2020-05-11T12:47:53.000Z | 2022-03-31T12:14:41.000Z |
# Set random number generator
np.random.seed(2020)
# Initialize step_end, n, t_range, v and i
step_end = int(t_max / dt)
n = 50
t_range = np.linspace(0, t_max, num=step_end)
v_n = el * np.ones([n, step_end])
i = i_mean * (1 + 0.1 * (t_max / dt)**(0.5) * (2 * np.random.random([n, step_end]) - 1))
# Loop for step_end - 1 steps
for step in range(1, step_end):
# Compute v_n
v_n[:, step] = v_n[:, step - 1] + (dt / tau) * (el - v_n[:, step - 1] + r * i[:, step])
# Plot figure
with plt.xkcd():
plt.figure()
plt.title('Multiple realizations of $V_m$')
plt.xlabel('time (s)')
plt.ylabel('$V_m$ (V)')
plt.plot(t_range, v_n.T, 'k', alpha=0.3)
plt.show() | 25.807692 | 90 | 0.600596 |
f076aaf49a3d8fba6fb5ba17c6020bb113d2de01 | 5,417 | py | Python | src/jsonengine/main.py | youhengzhou/json-crud-engine | 8ee614af6dddbe1236a78a7debf71048f476a3ff | [
"MIT"
] | 2 | 2021-07-02T04:33:36.000Z | 2022-01-09T23:40:30.000Z | src/jsonengine/main.py | youhengzhou/json-crud-engine | 8ee614af6dddbe1236a78a7debf71048f476a3ff | [
"MIT"
] | null | null | null | src/jsonengine/main.py | youhengzhou/json-crud-engine | 8ee614af6dddbe1236a78a7debf71048f476a3ff | [
"MIT"
] | null | null | null | # JSON engine 21 9 16
# database
# eng.json
# engine
# eng.py
import os
import json
path = os.getcwd() + '\\json_engine_database\\'
path_string = ''
| 31.132184 | 75 | 0.568027 |
f076d7596035ea99f16b2ee688410ac4e2c0be9a | 2,292 | py | Python | tests/coro.py | dshean/sliderule-python | 3cf9a6c65987705354cb536d71f85a32fbb24d15 | [
"BSD-3-Clause"
] | 1 | 2021-04-09T22:01:33.000Z | 2021-04-09T22:01:33.000Z | tests/coro.py | dshean/sliderule-python | 3cf9a6c65987705354cb536d71f85a32fbb24d15 | [
"BSD-3-Clause"
] | null | null | null | tests/coro.py | dshean/sliderule-python | 3cf9a6c65987705354cb536d71f85a32fbb24d15 | [
"BSD-3-Clause"
] | null | null | null | # python
import sys
import h5coro
###############################################################################
# DATA
###############################################################################
# set resource
resource = "file:///data/ATLAS/ATL06_20200714160647_02950802_003_01.h5"
# expected single read
h_li_exp_1 = [3432.17578125, 3438.776611328125, 3451.01123046875, 3462.688232421875, 3473.559326171875]
# expected parallel read
h_li_exp_2 = { '/gt1l/land_ice_segments/h_li': [3432.17578125, 3438.776611328125, 3451.01123046875, 3462.688232421875, 3473.559326171875],
'/gt2l/land_ice_segments/h_li': [3263.659912109375, 3258.362548828125, 3.4028234663852886e+38, 3233.031494140625, 3235.200927734375],
'/gt3l/land_ice_segments/h_li': [3043.489013671875, 3187.576171875, 3.4028234663852886e+38, 4205.04248046875, 2924.724365234375]}
###############################################################################
# UTILITY FUNCTIONS
###############################################################################
###############################################################################
# MAIN
###############################################################################
if __name__ == '__main__':
# Open H5Coro File #
h5file = h5coro.file(resource)
# Perform Single Read #
h_li_1 = h5file.read("/gt1l/land_ice_segments/h_li", 0, 19, 5)
check_results(h_li_1, h_li_exp_1)
# Perform Parallel Read #
datasets = [["/gt1l/land_ice_segments/h_li", 0, 19, 5],
["/gt2l/land_ice_segments/h_li", 0, 19, 5],
["/gt3l/land_ice_segments/h_li", 0, 19, 5]]
h_li_2 = h5file.readp(datasets)
check_results(h_li_2, h_li_exp_2)
| 38.2 | 149 | 0.505236 |
f077b57af7bb1555b754ae7c06fad787a7e42f43 | 30,165 | py | Python | lib/interpreter.py | xraypy/_xraylarch_attic | a78a2d257bccb081ad15c43c831dee51d0b4845a | [
"BSD-3-Clause"
] | 1 | 2019-04-23T02:30:00.000Z | 2019-04-23T02:30:00.000Z | lib/interpreter.py | xraypy/_xraylarch_attic | a78a2d257bccb081ad15c43c831dee51d0b4845a | [
"BSD-3-Clause"
] | null | null | null | lib/interpreter.py | xraypy/_xraylarch_attic | a78a2d257bccb081ad15c43c831dee51d0b4845a | [
"BSD-3-Clause"
] | null | null | null | """
Main Larch interpreter
Safe(ish) evaluator of python expressions, using ast module.
The emphasis here is on mathematical expressions, and so
numpy functions are imported if available and used.
"""
from __future__ import division, print_function
import os
import sys
import ast
import math
import numpy
from . import builtins
from . import site_config
from .symboltable import SymbolTable, Group, isgroup
from .larchlib import LarchExceptionHolder, Procedure, DefinedVariable
from .utils import Closure
OPERATORS = {ast.Is: lambda a, b: a is b,
ast.IsNot: lambda a, b: a is not b,
ast.In: lambda a, b: a in b,
ast.NotIn: lambda a, b: a not in b,
ast.Add: lambda a, b: a + b,
ast.BitAnd: lambda a, b: a & b,
ast.BitOr: lambda a, b: a | b,
ast.BitXor: lambda a, b: a ^ b,
ast.Div: lambda a, b: a / b,
ast.FloorDiv: lambda a, b: a // b,
ast.LShift: lambda a, b: a << b,
ast.RShift: lambda a, b: a >> b,
ast.Mult: lambda a, b: a * b,
ast.Pow: lambda a, b: a ** b,
ast.Sub: lambda a, b: a - b,
ast.Mod: lambda a, b: a % b,
ast.And: lambda a, b: a and b,
ast.Or: lambda a, b: a or b,
ast.Eq: lambda a, b: a == b,
ast.Gt: lambda a, b: a > b,
ast.GtE: lambda a, b: a >= b,
ast.Lt: lambda a, b: a < b,
ast.LtE: lambda a, b: a <= b,
ast.NotEq: lambda a, b: a != b,
ast.Invert: lambda a: ~a,
ast.Not: lambda a: not a,
ast.UAdd: lambda a: +a,
ast.USub: lambda a: -a}
| 37.286774 | 87 | 0.521498 |
f077d6be5215d3bb3ca0fa34f9524a7653266e11 | 4,422 | py | Python | deepinsight/util/tetrode.py | ealmenzar/DeepInsight | 99aebb90b8183aa9f028c35e0381e73f8cd840f3 | [
"MIT"
] | null | null | null | deepinsight/util/tetrode.py | ealmenzar/DeepInsight | 99aebb90b8183aa9f028c35e0381e73f8cd840f3 | [
"MIT"
] | null | null | null | deepinsight/util/tetrode.py | ealmenzar/DeepInsight | 99aebb90b8183aa9f028c35e0381e73f8cd840f3 | [
"MIT"
] | null | null | null | """
DeepInsight Toolbox
Markus Frey
https://github.com/CYHSM/DeepInsight
Licensed under MIT License
"""
import numpy as np
import pandas as pd
import h5py
from . import hdf5
from . import stats
def read_open_ephys(fp_raw_file):
"""
Reads ST open ephys files
Parameters
----------
fp_raw_file : str
File path to open ephys file
Returns
-------
continouos : (N,M) array_like
Continous ephys with N timepoints and M channels
timestamps : (N,1) array_like
Timestamps for each sample in continous
positions : (N,5) array_like
Position of animal with two LEDs and timestamps
info : object
Additional information about experiments
"""
fid_ephys = h5py.File(fp_raw_file, mode='r')
# Load timestamps and continuous data, python 3 keys() returns view
recording_key = list(fid_ephys['acquisition']['timeseries'].keys())[0]
processor_key = list(fid_ephys['acquisition']['timeseries'][recording_key]['continuous'].keys())[0]
# Load raw ephys and timestamps
# not converted to microvolts, need to multiply by 0.195. We don't multiply here as we cant load full array into memory
continuous = fid_ephys['acquisition']['timeseries'][recording_key]['continuous'][processor_key]['data']
timestamps = fid_ephys['acquisition']['timeseries'][recording_key]['continuous'][processor_key]['timestamps']
# We can also read position directly from the raw file
positions = fid_ephys['acquisition']['timeseries'][recording_key]['tracking']['ProcessedPos']
# Read general settings
info = fid_ephys['general']['data_collection']['Settings']
return (continuous, timestamps, positions, info)
def read_tetrode_data(fp_raw_file):
"""
Read ST data from openEphys recording system
Parameters
----------
fp_raw_file : str
File path to open ephys file
Returns
-------
raw_data : (N,M) array_like
Continous ephys with N timepoints and M channels
raw_timestamps : (N,1) array_like
Timestamps for each sample in continous
output : (N,4) array_like
Position of animal with two LEDs
output_timestamps : (N,1) array_like
Timestamps for positions
info : object
Additional information about experiments
"""
(raw_data, raw_timestamps, positions, info) = read_open_ephys(fp_raw_file)
output_timestamps = positions[:, 0]
output = positions[:, 1:5]
bad_channels = info['General']['badChan']
bad_channels = [int(n) for n in bad_channels[()].decode('UTF-8').split(',')]
good_channels = np.delete(np.arange(0, 128), bad_channels)
info = {'channels': good_channels, 'bad_channels': bad_channels, 'sampling_rate': 30000}
return (raw_data, raw_timestamps, output, output_timestamps, info)
def preprocess_output(fp_hdf_out, raw_timestamps, output, output_timestamps, average_window=1000, sampling_rate=512):
"""
Write behaviours to decode into HDF5 file
Parameters
----------
fp_hdf_out : str
File path to HDF5 file
raw_timestamps : (N,1) array_like
Timestamps for each sample in continous
output : (N,1) array_like
Orientation
output_timestamps : (N,1) array_like
Timestamps for positions
average_window : int, optional
Downsampling factor for raw data and orientation, by default 1000
sampling_rate : int, optional
Sampling rate of raw ephys, by default 30000
"""
hdf5_file = h5py.File(fp_hdf_out, mode='a')
# Get size of wavelets
input_length = hdf5_file['inputs/wavelets'].shape[0]
# Get orientation and calculates alignment to the raw_data with downsampling factor average_window
raw_timestamps = raw_timestamps[()] # Slightly faster than np.array
output_orientation = np.interp(raw_timestamps[np.arange(0, raw_timestamps.shape[0],
average_window)], output_timestamps, output) #np.arange with average_window makes a reduction on the data
raw_orientation = np.array([output_orientation]).transpose()
# Create and save datasets in HDF5 File
hdf5.create_or_update(hdf5_file, dataset_name="outputs/raw_orientation",
dataset_shape=[input_length, 1], dataset_type=np.float16, dataset_value=raw_orientation[0: input_length, :])
hdf5_file.flush()
hdf5_file.close()
| 35.95122 | 161 | 0.685889 |
f0791e37af8f0e6bb45c78c7fc37667ac15c9e8a | 627 | py | Python | test/scripts/functions.py | JetBrains-Research/jpt-nb-corpus | d93ac84ff885b30ef736cd82f5ce8b09c28ef3d1 | [
"MIT"
] | 3 | 2022-03-25T10:17:22.000Z | 2022-03-27T14:13:03.000Z | test/scripts/functions.py | JetBrains-Research/Matroskin | 053ed3d7e9dffb0aee4012bc49a194e0c60217c7 | [
"MIT"
] | null | null | null | test/scripts/functions.py | JetBrains-Research/Matroskin | 053ed3d7e9dffb0aee4012bc49a194e0c60217c7 | [
"MIT"
] | 1 | 2021-07-06T16:22:11.000Z | 2021-07-06T16:22:11.000Z | # Explicit API functions
from api_functions import api_function1, api_function2
from package3 import api_function3
# API Packages
import package1, package2
import package3
from package4 import api_class1
# Defined functions
| 24.115385 | 60 | 0.77193 |
f07a7df9283116337443c3a5f4f80b400ad900a1 | 4,848 | py | Python | tests/data/test_make_dataset.py | dnsosa/drug-lit-contradictory-claims | c03faa7269050344b631b12302214a3175384e98 | [
"MIT"
] | null | null | null | tests/data/test_make_dataset.py | dnsosa/drug-lit-contradictory-claims | c03faa7269050344b631b12302214a3175384e98 | [
"MIT"
] | null | null | null | tests/data/test_make_dataset.py | dnsosa/drug-lit-contradictory-claims | c03faa7269050344b631b12302214a3175384e98 | [
"MIT"
] | null | null | null | """Tests for making datasets for contradictory-claims."""
# -*- coding: utf-8 -*-
import os
import unittest
from contradictory_claims.data.make_dataset import load_drug_virus_lexicons, load_mancon_corpus_from_sent_pairs, \
load_med_nli, load_multi_nli
from .constants import drug_lex_path, mancon_sent_pairs, mednli_dev_path, mednli_test_path, mednli_train_path, \
multinli_test_path, multinli_train_path, sample_drug_lex_path, sample_mancon_sent_pairs, \
sample_multinli_test_path, sample_multinli_train_path, sample_virus_lex_path, virus_lex_path
def test_load_mancon_corpus_from_sent_pairs_sample(self):
"""Test that ManConCorpus is loaded as expected."""
x_train, y_train, x_test, y_test = load_mancon_corpus_from_sent_pairs(sample_mancon_sent_pairs)
self.assertEqual(len(x_train), 39)
self.assertEqual(y_train.shape, (39, 3))
self.assertEqual(len(x_test), 10)
self.assertEqual(y_test.shape, (10, 3))
def test_load_drug_virus_lexicons(self):
"""Test that the virus and drug lexicons are loaded properly."""
drug_names, virus_names = load_drug_virus_lexicons(sample_drug_lex_path, sample_virus_lex_path)
drugs = ["hydroxychloroquine", "remdesivir", "ritonavir", "chloroquine", "lopinavir"]
virus_syns = ["COVID-19", "SARS-CoV-2", "Coronavirus Disease 2019"]
self.assertTrue(set(drugs).issubset(set(drug_names)))
self.assertTrue(set(virus_syns).issubset(set(virus_names)))
| 49.469388 | 114 | 0.697401 |
f07b043e271471be2b35cb22503d63d12af2440e | 3,111 | py | Python | detection.py | aar0npham/FuryColorDetection | 5a8ae6a5f9dc8ccf42c78d4f29038e4e6889a858 | [
"Apache-2.0"
] | null | null | null | detection.py | aar0npham/FuryColorDetection | 5a8ae6a5f9dc8ccf42c78d4f29038e4e6889a858 | [
"Apache-2.0"
] | null | null | null | detection.py | aar0npham/FuryColorDetection | 5a8ae6a5f9dc8ccf42c78d4f29038e4e6889a858 | [
"Apache-2.0"
] | null | null | null | # import modules
import numpy as np
import argparse
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-b", "--buffer", type=int, default=64,
help="max buffer size")
args = vars(ap.parse_args())
# define the lower and upper boundaries of the colors in the HSV color space
lower = {'red': (166, 84, 141), 'green': (66, 122, 129), 'blue': (97, 100, 117), 'yellow': (23, 59, 119), 'orange': (0, 50, 80)} # assign new item lower['blue'] = (93, 10, 0)
upper = {'red': (186, 255, 255), 'green': (86, 255, 255), 'blue': (117, 255, 255), 'yellow': (54, 255, 255), 'orange': (20, 255, 255)}
# define standard colors for circle around the object
colors = {'red': (0, 0, 255), 'green': (0, 255, 0), 'blue': (255, 0, 0), 'yellow': (0, 255, 217), 'orange': (0, 140, 255)}
camera = cv2.VideoCapture(0 + cv2.CAP_DSHOW)
# keep looping
while True:
# grab the current frame
(grabbed, frame) = camera.read()
# resize the frame, blur it, and convert it to the HSV
# color space
frame = cv2.resize(frame, (640, 480))
blurred = cv2.GaussianBlur(frame, (11, 11), 0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
# for each color in dictionary check object in frame
for key, value in upper.items():
# construct a mask for the color from dictionary`1, then perform
# a series of dilations and erosions to remove any small
# blobs left in the mask
kernel = np.ones((9, 9), np.uint8)
mask = cv2.inRange(hsv, lower[key], upper[key])
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
# find contours in the mask and initialize the current
# (x, y) center of the ball
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2]
center = None
# only proceed if at least one contour was found
if len(cnts) > 0:
# find the largest contour in the mask, then use
# it to compute the minimum enclosing circle and
# centroid
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
# only proceed if the radius meets a minimum size. Correct this value for your obect's size
if radius > 0.5:
# draw the circle and centroid on the frame,
# then update the list of tracked points
cv2.circle(frame, (int(x), int(y)), int(radius), colors[key], 2)
cv2.putText(frame, key, (int(x - radius), int(y - radius)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, colors[key], 2)
# show the frame to our screen
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the 'q' key is pressed, stop the loop
if key == ord("q"):
break
# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
| 40.402597 | 175 | 0.603664 |
f07bcc1be66ad63b427b651f681533f05db82f52 | 430 | py | Python | topics/migrations/0003_topic_word.py | acdh-oeaw/mmp | 7ef8f33eafd3a7985328d374130f1cbe31f77df0 | [
"MIT"
] | 2 | 2021-06-02T11:27:54.000Z | 2021-08-25T10:29:04.000Z | topics/migrations/0003_topic_word.py | acdh-oeaw/mmp | 7ef8f33eafd3a7985328d374130f1cbe31f77df0 | [
"MIT"
] | 86 | 2021-01-29T12:31:34.000Z | 2022-03-28T11:41:04.000Z | topics/migrations/0003_topic_word.py | acdh-oeaw/mmp | 7ef8f33eafd3a7985328d374130f1cbe31f77df0 | [
"MIT"
] | null | null | null | # Generated by Django 3.2 on 2021-10-21 19:26
from django.db import migrations, models
| 21.5 | 63 | 0.597674 |
f07d5c996cff48d3e4ce4edaac97743f3de1a7ce | 171 | py | Python | src/ihtt/__init__.py | dekoza/i-hate-time-tracking | adb6018b56c836317535f2e2346dfb8d9cce3aac | [
"Apache-2.0"
] | null | null | null | src/ihtt/__init__.py | dekoza/i-hate-time-tracking | adb6018b56c836317535f2e2346dfb8d9cce3aac | [
"Apache-2.0"
] | null | null | null | src/ihtt/__init__.py | dekoza/i-hate-time-tracking | adb6018b56c836317535f2e2346dfb8d9cce3aac | [
"Apache-2.0"
] | null | null | null | """
I Hate Time Tracking package.
Get time tracking out of your way.
"""
from typing import List
__all__: List[str] = [] # noqa: WPS410 (the only __variable__ we use)
| 17.1 | 70 | 0.695906 |
f07e0ced31d9f3b5a75c59dd3ef793ba14212ab0 | 2,831 | py | Python | tests/base.py | octue/octue-sdk-python | 31c6e9358d3401ca708f5b3da702bfe3be3e52ce | [
"MIT"
] | 5 | 2020-10-01T12:43:10.000Z | 2022-03-14T17:26:25.000Z | tests/base.py | octue/octue-sdk-python | 31c6e9358d3401ca708f5b3da702bfe3be3e52ce | [
"MIT"
] | 322 | 2020-06-24T15:55:22.000Z | 2022-03-30T11:49:28.000Z | tests/base.py | octue/octue-sdk-python | 31c6e9358d3401ca708f5b3da702bfe3be3e52ce | [
"MIT"
] | null | null | null | import os
import subprocess
import unittest
import uuid
import warnings
from tempfile import TemporaryDirectory, gettempdir
from octue.cloud.emulators import GoogleCloudStorageEmulatorTestResultModifier
from octue.mixins import MixinBase, Pathable
from octue.resources import Datafile, Dataset, Manifest
from tests import TEST_BUCKET_NAME
| 42.253731 | 118 | 0.716001 |
f07eec804d533d3b03eb1442655922fd39f8fdb2 | 6,457 | py | Python | sstcam_sandbox/d190717_alpha/plot_wobble_animation_goldfish.py | watsonjj/CHECLabPySB | 91330d3a6f510a392f635bd7f4abd2f77871322c | [
"BSD-3-Clause"
] | null | null | null | sstcam_sandbox/d190717_alpha/plot_wobble_animation_goldfish.py | watsonjj/CHECLabPySB | 91330d3a6f510a392f635bd7f4abd2f77871322c | [
"BSD-3-Clause"
] | null | null | null | sstcam_sandbox/d190717_alpha/plot_wobble_animation_goldfish.py | watsonjj/CHECLabPySB | 91330d3a6f510a392f635bd7f4abd2f77871322c | [
"BSD-3-Clause"
] | 1 | 2021-03-30T09:46:56.000Z | 2021-03-30T09:46:56.000Z | from CHECLabPy.plotting.setup import Plotter
from CHECLabPy.plotting.camera import CameraImage
from CHECLabPy.utils.files import create_directory
from CHECLabPy.utils.mapping import get_ctapipe_camera_geometry
from sstcam_sandbox import get_plot, get_data
from os.path import join
from matplotlib import pyplot as plt
from tqdm import tqdm
import numpy as np
import pandas as pd
import warnings
from CHECOnsky.calib import obtain_cleaning_mask
from CHECLabPy.calib import TimeCalibrator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from IPython import embed
if __name__ == '__main__':
main()
| 34.715054 | 81 | 0.61654 |
f07f197c27b7ad864308c5332ee3a30042155d95 | 15,797 | py | Python | tempest/cmd/run.py | Juniper/tempest | f8316c9c28e029063c036e1cf83947af068e7703 | [
"Apache-2.0"
] | null | null | null | tempest/cmd/run.py | Juniper/tempest | f8316c9c28e029063c036e1cf83947af068e7703 | [
"Apache-2.0"
] | null | null | null | tempest/cmd/run.py | Juniper/tempest | f8316c9c28e029063c036e1cf83947af068e7703 | [
"Apache-2.0"
] | 5 | 2016-06-24T20:03:52.000Z | 2020-02-05T10:14:54.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Runs tempest tests
This command is used for running the tempest tests
Test Selection
==============
Tempest run has several options:
* **--regex/-r**: This is a selection regex like what testr uses. It will run
any tests that match on re.match() with the regex
* **--smoke/-s**: Run all the tests tagged as smoke
There are also the **--blacklist-file** and **--whitelist-file** options that
let you pass a filepath to tempest run with the file format being a line
separated regex, with '#' used to signify the start of a comment on a line.
For example::
# Regex file
^regex1 # Match these tests
.*regex2 # Match those tests
The blacklist file will be used to construct a negative lookahead regex and
the whitelist file will simply OR all the regexes in the file. The whitelist
and blacklist file options are mutually exclusive so you can't use them
together. However, you can combine either with a normal regex or the *--smoke*
flag. When used with a blacklist file the generated regex will be combined to
something like::
^((?!black_regex1|black_regex2).)*$cli_regex1
When combined with a whitelist file all the regexes from the file and the CLI
regexes will be ORed.
You can also use the **--list-tests** option in conjunction with selection
arguments to list which tests will be run.
You can also use the **--load-list** option that lets you pass a filepath to
tempest run with the file format being in a non-regex format, similar to the
tests generated by the **--list-tests** option. You can specify target tests
by removing unnecessary tests from a list file which is generated from
**--list-tests** option.
Test Execution
==============
There are several options to control how the tests are executed. By default
tempest will run in parallel with a worker for each CPU present on the machine.
If you want to adjust the number of workers use the **--concurrency** option
and if you want to run tests serially use **--serial/-t**
Running with Workspaces
-----------------------
Tempest run enables you to run your tempest tests from any setup tempest
workspace it relies on you having setup a tempest workspace with either the
``tempest init`` or ``tempest workspace`` commands. Then using the
``--workspace`` CLI option you can specify which one of your workspaces you
want to run tempest from. Using this option you don't have to run Tempest
directly with you current working directory being the workspace, Tempest will
take care of managing everything to be executed from there.
Running from Anywhere
---------------------
Tempest run provides you with an option to execute tempest from anywhere on
your system. You are required to provide a config file in this case with the
``--config-file`` option. When run tempest will create a .testrepository
directory and a .testr.conf file in your current working directory. This way
you can use testr commands directly to inspect the state of the previous run.
Test Output
===========
By default tempest run's output to STDOUT will be generated using the
subunit-trace output filter. But, if you would prefer a subunit v2 stream be
output to STDOUT use the **--subunit** flag
Combining Runs
==============
There are certain situations in which you want to split a single run of tempest
across 2 executions of tempest run. (for example to run part of the tests
serially and others in parallel) To accomplish this but still treat the results
as a single run you can leverage the **--combine** option which will append
the current run's results with the previous runs.
"""
import io
import os
import sys
import tempfile
import threading
from cliff import command
from os_testr import regex_builder
from os_testr import subunit_trace
from oslo_serialization import jsonutils as json
import six
from testrepository.commands import run_argv
from tempest import clients
from tempest.cmd import cleanup_service
from tempest.cmd import init
from tempest.cmd import workspace
from tempest.common import credentials_factory as credentials
from tempest import config
CONF = config.CONF
SAVED_STATE_JSON = "saved_state.json"
| 43.043597 | 79 | 0.599734 |
f07f1c21b8f06d89cde1866e0e0a9e2404549ae4 | 10,586 | py | Python | src/python/vrprim/photosphere/conv.py | cmbruns/vr_samples | 8dee056766bccca1a602c6dd58fd0a641c5033a5 | [
"MIT"
] | 1 | 2017-01-29T21:15:23.000Z | 2017-01-29T21:15:23.000Z | src/python/vrprim/photosphere/conv.py | cmbruns/vr_samples | 8dee056766bccca1a602c6dd58fd0a641c5033a5 | [
"MIT"
] | 2 | 2017-01-29T20:34:39.000Z | 2017-01-29T23:26:05.000Z | src/python/vrprim/photosphere/conv.py | cmbruns/vr_samples | 8dee056766bccca1a602c6dd58fd0a641c5033a5 | [
"MIT"
] | null | null | null | """
Convert spherical panorama in equirectangular format into cubemap format
"""
from math import pi, log2
import numpy
from libtiff import TIFF
import png
import glfw
from OpenGL import GL
from OpenGL.GL import shaders
from OpenGL.GL.EXT.texture_filter_anisotropic import GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, GL_TEXTURE_MAX_ANISOTROPY_EXT
from PIL import Image
if __name__ == "__main__":
if True:
tif = TIFF.open('1w180.9.tiff', 'r')
arr = tif.read_image()
tif.close()
else:
jpeg = Image.open('_0010782_stitch2.jpg')
arr = numpy.array(jpeg)
cube = main(arr)
if cube.dtype == numpy.uint16:
img = png.from_array(cube, 'RGBA')
img.save('cube.png')
else:
Image.fromarray(cube).save('cube.jpg', quality=95)
| 38.919118 | 129 | 0.53344 |
f08133a0ab8681553c9936415f848d5882f36db1 | 1,150 | py | Python | src/controllers/storage.py | koddas/python-oop-consistency-lab | 8ee3124aa230359d296fdfbe0c23773602769c8c | [
"MIT"
] | null | null | null | src/controllers/storage.py | koddas/python-oop-consistency-lab | 8ee3124aa230359d296fdfbe0c23773602769c8c | [
"MIT"
] | null | null | null | src/controllers/storage.py | koddas/python-oop-consistency-lab | 8ee3124aa230359d296fdfbe0c23773602769c8c | [
"MIT"
] | null | null | null | from entities.serializable import Serializable | 28.75 | 78 | 0.578261 |
f081d74683e4da50d27ee2a254cfa3157f59305b | 924 | py | Python | tests/functional/modules/test_zos_tso_command.py | IBM/zos-core-collection-ftp | 017d2e031d64984571bd9bb330f49adaced387a6 | [
"Apache-2.0"
] | 4 | 2021-03-17T02:24:02.000Z | 2022-01-28T22:08:17.000Z | tests/functional/modules/test_zos_tso_command.py | IBM/zos-core-collection-ftp | 017d2e031d64984571bd9bb330f49adaced387a6 | [
"Apache-2.0"
] | null | null | null | tests/functional/modules/test_zos_tso_command.py | IBM/zos-core-collection-ftp | 017d2e031d64984571bd9bb330f49adaced387a6 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import sys
import warnings
import ansible.constants
import ansible.errors
import ansible.utils
import pytest
from pprint import pprint
# The positive path test
| 30.8 | 68 | 0.737013 |
f0827a9bc1fab116569d8485fa3cf7975cc20e07 | 1,859 | py | Python | Medium/78.py | Hellofafar/Leetcode | 7a459e9742958e63be8886874904e5ab2489411a | [
"CNRI-Python"
] | 6 | 2017-09-25T18:05:50.000Z | 2019-03-27T00:23:15.000Z | Medium/78.py | Hellofafar/Leetcode | 7a459e9742958e63be8886874904e5ab2489411a | [
"CNRI-Python"
] | 1 | 2017-10-29T12:04:41.000Z | 2018-08-16T18:00:37.000Z | Medium/78.py | Hellofafar/Leetcode | 7a459e9742958e63be8886874904e5ab2489411a | [
"CNRI-Python"
] | null | null | null | # ------------------------------
# 78. Subsets
#
# Description:
# Given a set of distinct integers, nums, return all possible subsets (the power set).
# Note: The solution set must not contain duplicate subsets.
#
# For example,
# If nums = [1,2,3], a solution is:
# [
# [3],
# [1],
# [2],
# [1,2,3],
# [1,3],
# [2,3],
# [1,2],
# []
# ]
#
# Version: 1.0
# 01/20/18 by Jianfa
# ------------------------------
# Used for testing
if __name__ == "__main__":
test = Solution()
nums = [1,3,5]
test.subsets(nums)
# ------------------------------
# Summary:
# Borrow the combine idea from 77.py. The major difference is here a number list is provided.
# The number list may include discontinuous integers. So the parameter "start" here means index
# rather than number itself. | 24.142857 | 96 | 0.483593 |
f0827ff350329e8456da34903e3aafb85e4c8ff7 | 10,707 | py | Python | blueprints/finance/views.py | shuxiang/MT-WMS | 38ef18baed6d9eddb88d43da2eeed55988410daf | [
"Apache-2.0"
] | 1 | 2022-03-11T05:42:25.000Z | 2022-03-11T05:42:25.000Z | blueprints/finance/views.py | shuxiang/MT-WMS | 38ef18baed6d9eddb88d43da2eeed55988410daf | [
"Apache-2.0"
] | null | null | null | blueprints/finance/views.py | shuxiang/MT-WMS | 38ef18baed6d9eddb88d43da2eeed55988410daf | [
"Apache-2.0"
] | null | null | null | #coding=utf8
import json
from sqlalchemy import func, or_
from pprint import pprint
from datetime import datetime, timedelta
from random import randint
from flask import Blueprint, g, request, jsonify
from flask_login import LoginManager, login_user, logout_user, login_required, current_user
from extensions.database import db
from extensions.permissions import admin_perm, manager_perm, normal_perm
from models.inv import Inv, Good, Category, InvRfid
from models.inv import InvMove, InvAdjust, InvCount
from models.stockin import Stockin
from models.stockout import Stockout
from models.finance import Money, MoneySummary, MoneyAccount
from models.auth import Partner
from utils.flask_tools import json_response, gen_csv
from utils.functions import gen_query, clear_empty, json2mdict, json2mdict_pop
from utils.functions import update_model_with_fields, m2dict, copy_and_update_model, common_poplist
from utils.functions import gen_query
from utils.base import Dict, DictNone
from blueprints.finance.action import FinanceAction
import settings
bp_finance = Blueprint("finance", __name__)
# -
# /
# /
# -
# -12
# - | 39.21978 | 161 | 0.631082 |
f082e2a2d09bf1830b2b1fcb472bd7a239f75622 | 716 | py | Python | layers/modules/fast_mask_iou.py | sebastian-ruiz/yolact | 0fdce34ddd38d8895445444f04c5a9d4e0384a3e | [
"MIT"
] | 1 | 2021-11-08T14:06:49.000Z | 2021-11-08T14:06:49.000Z | layers/modules/fast_mask_iou.py | sebastian-ruiz/yolact | 0fdce34ddd38d8895445444f04c5a9d4e0384a3e | [
"MIT"
] | null | null | null | layers/modules/fast_mask_iou.py | sebastian-ruiz/yolact | 0fdce34ddd38d8895445444f04c5a9d4e0384a3e | [
"MIT"
] | null | null | null | import torch
from torch import nn
import torch.nn.functional as F
#locals
from data.config import Config
from utils.functions import make_net
from utils.script_module_wrapper import ScriptModuleWrapper, script_method_wrapper
| 28.64 | 108 | 0.706704 |
b2b1db3b982c41901d0ae5c563cb502c2d0bce3e | 3,366 | py | Python | audio_pouring/utils/network.py | lianghongzhuo/MultimodalPouring | 6495c7de9afad396f39bd7ac25e1a150e74479d2 | [
"MIT"
] | 5 | 2020-03-12T16:36:32.000Z | 2021-01-28T18:23:19.000Z | audio_pouring/utils/network.py | lianghongzhuo/MultimodalPouring | 6495c7de9afad396f39bd7ac25e1a150e74479d2 | [
"MIT"
] | null | null | null | audio_pouring/utils/network.py | lianghongzhuo/MultimodalPouring | 6495c7de9afad396f39bd7ac25e1a150e74479d2 | [
"MIT"
] | 1 | 2020-03-11T17:09:28.000Z | 2020-03-11T17:09:28.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : Hongzhuo Liang
# E-mail : liang@informatik.uni-hamburg.de
# Description:
# Date : 15/10/2019: 22:13
# File Name : network
import argparse
import numpy as np
import torch
| 49.5 | 120 | 0.659834 |
b2b1ff5ef4ba336018262956f57a372c5c93879b | 4,312 | py | Python | FederatedSDNSecurity/main.py | Beaconproj/CrossCloudVNFSimulation | 97023e05b57e54503259ae866608de6189b8c9a9 | [
"MIT"
] | 1 | 2021-09-25T04:17:55.000Z | 2021-09-25T04:17:55.000Z | FederatedSDNSecurity/main.py | Beaconproj/CrossCloudVNFSimulation | 97023e05b57e54503259ae866608de6189b8c9a9 | [
"MIT"
] | null | null | null | FederatedSDNSecurity/main.py | Beaconproj/CrossCloudVNFSimulation | 97023e05b57e54503259ae866608de6189b8c9a9 | [
"MIT"
] | null | null | null |
'''
Created on 12 janv. 2016
@author: phm
'''
import FederatedSDN
import FederatedSDNSecurity
import FederatedSecurityAgent
import VNFManager
import CloudManager
import ssl, socket
inputMessage="Press enter to continue"
federationCloudManagers=[]
cloudFederationMembers=[
["cloud_man_1","vnf_manager_1", "network_segment_1", [["" ]],[""]],
["cloud_man_2","vnf_manager_2", "network_segment_2", [[""]],[""]],
["cloud_man_3","vnf_manager_3", "network_segment_3", [[""]],[""]]
]
print "-----------Initial setup of Cloud_1, cloud_2 and cloud_3 -----------"
cloudMember=""
for cloudMember in cloudFederationMembers:
# create a cloud manager
cloud_manager=CloudManager.CloudManager(cloudMember[0])
print "Cloud manager", cloud_manager.getName(), "in federation"
federationCloudManagers.append(cloud_manager)
# set the network segments
cloud_manager.setNetworkSegments(cloudMember[2])
# create a VNF manager
vnfManager=VNFManager.VNFManager(cloudMember[1])
cloud_manager.setVNFManager(vnfManager)
print "------------ start Federated SDN ---------------"
# create a federated SDN
fedSDN=FederatedSDN.FederatedSDN("fedSDN_1")
print "FederatedSDN", fedSDN.getIdentifier(), "created"
# create a Federated SDN security
fedSDNSecurity=FederatedSDNSecurity.FederatedSDNSecurity("fedSDNSec_1")
print "FederatedSDNSecurity", fedSDNSecurity.getName(), "created"
print "------------- Create a Federated Cloud Network --------------------"
# get the network segments to be federated
network_segments=["network_segment_1","network_segment_2"]
#cloud_member=""
#for cloud_member in cloudFederationMembers:
# network_segments.append(cloud_member[2])
#print "network segments:", network_segments
fedSDN.createNetworkFederation("FedCloudNetwork_1", network_segments)
print "Federated network", fedSDN.getNetworkFederationSegments("FedCloudNetwork_1"), "created"
# Associate a FederatedSecurityAgent with each network segment"
cloudManager=""
for cloudManager in federationCloudManagers:
network_segment=cloudManager.getNetworkSegments()
fedSecAg=FederatedSecurityAgent.FederatedSecurityAgent("fedSecAg_"+network_segment[0])
#print "SecAgent", fedSecAg.getName(), "created"
fedSecAg.setVNFManager(cloudManager.getVNFManager())
fedSecAg.setNetworkSegment(cloudManager.getNetworkSegments())
fedSDNSecurity.addSecurityAgent(fedSecAg)
#print "----------- Analyse existing security VNF of federation network segments ----------"
#print "------------- Adapt VNF to respect global security policy: start new VNF and re-configure existing VNF --------"
print "------------- Deploy, configure and start VNF to respect global security policy --------"
wait = raw_input(inputMessage)
fedSDNSecurity.readYAMLfile("YAML1.txt")
#fedSDNSecurity.readYAMLfileV2("Cloud1-2-Heat.yaml")
print "-------- Verify that global security policy is correctly implemented in each federation cloud network ----------"
wait = raw_input(inputMessage)
fedSDNSecurity.verifySecurityPolicy(fedSDN)
print "------------- Run the network federation --------------"
wait = raw_input(inputMessage)
print "VM_1: send packet to VM_2 with protocol HTTP"
print "VM_2: received packet from VM_1"
print " "
print "VM_1: send packet to VM_2 with protocol SKYPE"
print "DPI_1: unauthorized protocol detected: SKYPE"
print "FW_1: reconfiguring firewall on network network_segment_1 to block SKYPE protocol"
print "------------- now add a new network_segment_3 to the federation and extend the security policy--------------"
wait = raw_input(inputMessage)
# add network segment to federation
fedSDN.addNetworkSegment("network_segment_3")
print "Federated network", fedSDN.getNetworkFederationSegments("FedCloudNetwork_1"), "extended"
fedSDNSecurity.readYAMLfile("YAML2.txt")
print "-------- Verify that global security policy is implemented VNF per network Segment ----------"
wait = raw_input(inputMessage)
fedSDNSecurity.verifySecurityPolicy(fedSDN)
print "------------- Run the network federation --------------"
wait = raw_input(inputMessage)
print "VM_1: send packet to VM_3 with protocol X "
print "ENCRYPT_1: VM_3 is in untrusted cloud: encrypt packet"
print "DECRYPT_3: packet for VM_3 from VM_1 is encrypted: decrypt packet using key XXX "
| 30.8 | 120 | 0.73539 |
b2b2407d4c36f7d2b4d5556ee9ab15297445f03f | 5,121 | py | Python | WCET_stats.py | FTOD/ZExp | f7e2e1ab3ce1964022cb1c5d8c9d0b1ce1ee7b56 | [
"MIT"
] | null | null | null | WCET_stats.py | FTOD/ZExp | f7e2e1ab3ce1964022cb1c5d8c9d0b1ce1ee7b56 | [
"MIT"
] | null | null | null | WCET_stats.py | FTOD/ZExp | f7e2e1ab3ce1964022cb1c5d8c9d0b1ce1ee7b56 | [
"MIT"
] | null | null | null | import parsetools
from benchDesc import benchsDesc
import matplotlib.pyplot as plt
import matplotlib
import getopt, sys
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["arch="])
except getopt.GetoptError as err:
print(err)
sys.exit(2)
file_postfix = ""
for o,a in opts:
if o == "--arch":
if a == "simple":
file_postfix = file_postfix + "_simple"
elif a == "complex":
file_postfix = file_postfix + "_complex"
else:
print ("ERROR, the architecture must be either simple or complex")
p = parsetools.BoundedEventsCountParser()
res = p.parse_all_files("../log_2020_09/log")
res = benchsDesc.regrouping_parallel_res(res)
bounded_count = res
print("BOUNDED=", bounded_count)
p = parsetools.UnboundedEventsCountParser()
res = p.parse_all_files("../log_2020_09/log")
res = benchsDesc.regrouping_parallel_res(res)
unbounded_count = res
print("UNBOUNDED=", unbounded_count)
p = parsetools.WcetResParser()
res = p.parse_all_files("../log_2020_09/log_xddilp_15"+file_postfix)
res = benchsDesc.regrouping_parallel_res(res)
wcet_xdd = res
#add a single result
print(res)
print(len(res))
p = parsetools.WcetResParser()
res = p.parse_all_files("../log_2020_09/log_hlts_15"+file_postfix)
res = benchsDesc.regrouping_parallel_res(res)
wcet_hlts = res
print(res)
print(len(res))
p = parsetools.WcetResParser()
res = p.parse_all_files("../log_2020_09/log_WCETmax_15"+file_postfix)
res = benchsDesc.regrouping_parallel_res(res)
wcet_max = res
print(res)
print(len(res))
p = parsetools.WcetResParser()
res = p.parse_all_files("../log_2020_09/log_exhaustive_15"+file_postfix)
res = benchsDesc.regrouping_parallel_res(res)
wcet_exhau = res
print(res)
print(len(res))
x = list(range(1,len(res)+1))
print(x)
print("=======================================================")
BIGGER_SIZE = 11
BIGGER_BIGGER_SIZE=15
matplotlib.rc('font', size=BIGGER_SIZE) # controls default text sizes
matplotlib.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title
matplotlib.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels
matplotlib.rc('xtick', labelsize=BIGGER_SIZE) # fontsize of the tick labels
matplotlib.rc('ytick', labelsize=BIGGER_SIZE) # fontsize of the tick labels
matplotlib.rc('legend', fontsize=BIGGER_BIGGER_SIZE) # legend fontsize
matplotlib.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
fig = plt.figure()
#unbound_ratio = [ float(x[1]) / float(x[1]+y[1]) for x,y in zip(unbounded_count,bounded_count)]
unbound_ratio = [( x[0], float(x[1]) / float(x[1]+y[1]) ) for x,y in zip(unbounded_count,bounded_count)]
unbound_ratio.sort(key = lambda i:i[1])
print("***************************")
print(unbound_ratio)
print("***************************")
label_order = [x[0] for x in unbound_ratio]
print(label_order)
unbound_ratio = [x[1] for x in unbound_ratio]
wcet_xdd.sort(key = lambda i: label_order.index(i[0]))
wcet_hlts.sort(key = lambda i: label_order.index(i[0]))
wcet_max.sort(key = lambda i: label_order.index(i[0]))
wcet_exhau.sort(key = lambda i: label_order.index(i[0]))
wcet_xdd = [x[1] for x in wcet_xdd]
wcet_hlts = [x[1] for x in wcet_hlts]
wcet_max = [x[1] for x in wcet_max]
wcet_exhau = [x[1] for x in wcet_exhau]
wcet_xdd = [(y-x)/y for x,y in zip(wcet_xdd,wcet_max)]
wcet_hlts = [(y-x)/y for x,y in zip(wcet_hlts,wcet_max)]
## Rounding, due to imprecision of Etime
wcet_hlts = [ 0.0 if x < 0.0 else x for x in wcet_hlts ]
wcet_exhau = [(y-x)/y for x,y in zip(wcet_exhau,wcet_max)]
print("=======================================================")
print(wcet_xdd)
print(len(res))
print("=======================================================")
print(wcet_exhau)
print(len(res))
print("=======================================================")
print(wcet_hlts)
print(len(res))
ax = fig.add_subplot(111)
width = 0.2
ax.bar([y-width for y in x],wcet_xdd,label='xdd',width=width, color ="1.0" , edgecolor='black')
ax.bar([y for y in x],wcet_exhau,label='exhaustive',width=width, color = "0.7", edgecolor='black')
ax.bar([y+width for y in x],wcet_hlts,label='Etime',width=width, color = "0",edgecolor='black')
#ax.bar([y+0.2 for y in x],wcet_max,label='MAX',width=0.5,color='darkgray')
ax.set_ylabel('WCET / WCET of max partitioning',fontsize=12)
#ax.set_xlabel('benchmark',fontsize=12)
ax.set_xticks(x)
ax.set_xticklabels(label_order,rotation=80)
ax.legend(loc='upper left')
#plt.yscale('log')
plt.ylim(top=0.6)
unbound_ratio = [x for x in unbound_ratio]
ax1 = ax.twinx()
ax1.set_ylabel("percentage on unbounded events")
ax1.plot(x,unbound_ratio,'o-',color='black')
plt.subplots_adjust(bottom=0.17,top=0.70,right=0.965,left=0.042)
plt.yticks(fontsize=15)
"""
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=True, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False
) # labels along the bottom edge are off
"""
plt.show()
#ax = df.plot.scatter(x='evt',)
| 31.036364 | 104 | 0.670767 |
b2b2c142b45b87b8147bfd47d58eb146d6e75472 | 610 | py | Python | schoolport/app_core/migrations/0024_auto_20210513_1020.py | yotink522/schoolport | c6cfd0230ca05fb44f77c2f27c7e200828547bd5 | [
"MIT"
] | null | null | null | schoolport/app_core/migrations/0024_auto_20210513_1020.py | yotink522/schoolport | c6cfd0230ca05fb44f77c2f27c7e200828547bd5 | [
"MIT"
] | null | null | null | schoolport/app_core/migrations/0024_auto_20210513_1020.py | yotink522/schoolport | c6cfd0230ca05fb44f77c2f27c7e200828547bd5 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-05-13 02:20
from django.db import migrations, models
| 25.416667 | 75 | 0.603279 |
b2b4b4908763c3a4a83c42ca39a61f42cc6d7104 | 800 | py | Python | galaxy_kickstart/binary_datatypes_to_be_added_in_galaxy.py | pajanne/galaxy-kickstart | a2e8a9d5f4e610f00548deab691d71290aa3a3b7 | [
"MIT"
] | 2 | 2016-08-04T19:16:17.000Z | 2016-08-04T19:45:58.000Z | galaxy_kickstart/binary_datatypes_to_be_added_in_galaxy.py | pajanne/galaxy-kickstart | a2e8a9d5f4e610f00548deab691d71290aa3a3b7 | [
"MIT"
] | null | null | null | galaxy_kickstart/binary_datatypes_to_be_added_in_galaxy.py | pajanne/galaxy-kickstart | a2e8a9d5f4e610f00548deab691d71290aa3a3b7 | [
"MIT"
] | null | null | null |
Binary.register_unsniffable_binary_ext("fq.gz")
| 33.333333 | 107 | 0.62625 |
b2b541552dee04f9e9bcd11e4c109a74ce0c81b7 | 1,697 | py | Python | timesketch/lib/cypher/insertable_string.py | Marwolf/timesketch | 8fbbb3d0a5a50dc0214fc56a9bbec82050908103 | [
"Apache-2.0"
] | null | null | null | timesketch/lib/cypher/insertable_string.py | Marwolf/timesketch | 8fbbb3d0a5a50dc0214fc56a9bbec82050908103 | [
"Apache-2.0"
] | null | null | null | timesketch/lib/cypher/insertable_string.py | Marwolf/timesketch | 8fbbb3d0a5a50dc0214fc56a9bbec82050908103 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the InsertableString class."""
| 39.465116 | 80 | 0.696523 |
b2b6aed7dde137dbec9d46784b9eb3493640ecc8 | 1,322 | py | Python | networks/rnn.py | uchikun2493/nn_modules | ad3486b842fc543561d39227de5daaa475d3513a | [
"MIT"
] | null | null | null | networks/rnn.py | uchikun2493/nn_modules | ad3486b842fc543561d39227de5daaa475d3513a | [
"MIT"
] | null | null | null | networks/rnn.py | uchikun2493/nn_modules | ad3486b842fc543561d39227de5daaa475d3513a | [
"MIT"
] | null | null | null | # pytorch
import torch
import torch.nn as nn
import torch.nn.functional as F
# **********************************************
# rnn class
# **********************************************
| 24.481481 | 61 | 0.533283 |
b2b6b4e0d84391cba2fed8691df94512a2cc5b7a | 1,141 | py | Python | config/settings/production.py | rimphyd/Django-OPAC | d86f2e28fee7f2ec551aeeb98ec67caefc06a3fb | [
"MIT"
] | 1 | 2020-11-26T05:25:46.000Z | 2020-11-26T05:25:46.000Z | config/settings/production.py | rimphyd/Django-OPAC | d86f2e28fee7f2ec551aeeb98ec67caefc06a3fb | [
"MIT"
] | null | null | null | config/settings/production.py | rimphyd/Django-OPAC | d86f2e28fee7f2ec551aeeb98ec67caefc06a3fb | [
"MIT"
] | null | null | null | import django_heroku
from config.settings.base import *
DEBUG = False
SECRET_KEY = os.environ['SECRET_KEY']
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(asctime)s %(levelname)s %(module)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'opac': {
'handlers': ['console'],
'level': 'INFO',
'propagate': False,
},
'django': {
'handlers': ['console'],
'level': 'INFO',
'propagate': False,
},
},
}
SECURE_HSTS_PRELOAD = True
SECURE_HSTS_SECONDS = 31536000
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
X_FRAME_OPTIONS = 'DENY'
django_heroku.settings(locals())
| 20.017544 | 72 | 0.576687 |
b2b7e4ac8602126a7252025c382dc07c1f558b19 | 1,181 | py | Python | fabrikApi/views/assembly/list.py | demokratiefabrik/fabrikApi | a56bb57d59a5e7cbbeeb77889c02d82f2a04c682 | [
"MIT"
] | null | null | null | fabrikApi/views/assembly/list.py | demokratiefabrik/fabrikApi | a56bb57d59a5e7cbbeeb77889c02d82f2a04c682 | [
"MIT"
] | null | null | null | fabrikApi/views/assembly/list.py | demokratiefabrik/fabrikApi | a56bb57d59a5e7cbbeeb77889c02d82f2a04c682 | [
"MIT"
] | null | null | null | """ Assemblies List View. """
import logging
from datetime import datetime
from cornice.service import Service
from fabrikApi.models.assembly import DBAssembly
from fabrikApi.models.mixins import arrow
# from fabrikApi.util.cors import CORS_LOCATION, CORS_MAX_AGE
logger = logging.getLogger(__name__)
# SERVICES
assemblies = Service(cors_origins=('*',),
name='assemblies',
description='List Assemblies.',
path='/assemblies')
| 25.673913 | 88 | 0.702794 |
b2b8a861bf96a35529dc0c381016dc12ddf8518f | 7,217 | py | Python | layers/layers.py | yangzonglin1994/yangzl-deep-text-matching | 2beadd1c2ebf2b169558b9978e0cbc66d1d25fc6 | [
"MIT"
] | 2 | 2018-08-10T20:02:44.000Z | 2018-08-10T20:02:50.000Z | layers/layers.py | yangzonglin1994/yangzl-text-matching | 2beadd1c2ebf2b169558b9978e0cbc66d1d25fc6 | [
"MIT"
] | 1 | 2018-07-30T08:54:35.000Z | 2018-07-30T08:54:35.000Z | layers/layers.py | yangzonglin1994/yangzl-text-matching | 2beadd1c2ebf2b169558b9978e0cbc66d1d25fc6 | [
"MIT"
] | null | null | null | import tensorflow as tf
from keras import backend as K
from keras.engine.topology import Layer
from keras.initializers import Ones, Zeros
from layers import transformer
| 35.033981 | 100 | 0.620479 |