hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a124d29567d8f21e6a4afe3634fc6ada1ceff28c | 4,176 | py | Python | pade/tests/v1/script_2_revisado_local.py | AndreCrescenzo/multi-agents | 1c76595aab2fd37cc2de0d44a3b3daadcfe6f4f3 | [
"MIT"
] | 72 | 2018-10-02T21:02:36.000Z | 2022-02-19T11:24:10.000Z | pade/tests/v1/script_2_revisado_local.py | AndreCrescenzo/multi-agents | 1c76595aab2fd37cc2de0d44a3b3daadcfe6f4f3 | [
"MIT"
] | 47 | 2018-10-24T14:57:02.000Z | 2022-03-16T00:09:31.000Z | pade/tests/v1/script_2_revisado_local.py | AndreCrescenzo/multi-agents | 1c76595aab2fd37cc2de0d44a3b3daadcfe6f4f3 | [
"MIT"
] | 36 | 2018-06-03T11:29:27.000Z | 2022-03-15T23:53:19.000Z | # -*- encoding: utf-8 -*-
from utils import display_message, set_ams, start_loop, config_loop
#config_loop(gui=True)
from agent import Agent
from messages import ACLMessage
from aid import AID
from protocols import FipaContractNetProtocol
from filters import Filter
from pickle import loads, dumps
from time import sleep
#===============================================================================
# What is needed to create an agent with standardized protocols behaviours?
# First, the protocol class needs to be defined
# Second, this protocol class needs to be associated with the agent's
# behaviour
#===============================================================================
if __name__ == '__main__':
booksList_Saraiva = [{'title' : 'The Lord of the Rings', 'author' : 'J. R. R. Tolkien', 'qty' : 10, 'how much is' : 53.50},
{'title' : 'Harry Potter', 'author' : 'J. K. Roling', 'qty' : 10, 'how much is' : 33.70},
{'title' : 'Game of Thrones', 'author' : 'A. M. M. Martin', 'qty' : 10,'how much is' : 23.80}
]
bookslist_Cultura = [{'title' : 'The Lord of the Rings', 'author' : 'J. R. R. Tolkien', 'qty' : 10, 'how much is' : 43.50},
{'title' : 'Harry Potter', 'author' : 'J. K. Roling', 'qty' : 10, 'how much is' : 31.70},
{'title' : 'Game of Thrones', 'author' : 'A. M. M. Martin', 'qty' : 10, 'how much is' : 53.80}
]
bookStoresInfo = [(AID(name='Cultura'), bookslist_Cultura),
(AID(name='Saraiva'), booksList_Saraiva)]
order = {'title' : 'The Lord of the Rings', 'author' : 'J. R. R. Tolkien', 'qty' : 5}
#set_ams('localhost', 8000)
agents = []
#saraiva = BookstoreAgent(AID(name='Saraiva@192.168.0.100:2002'), booksList_Saraiva)
saraiva = BookstoreAgent(AID(name='Saraiva'), booksList_Saraiva)
saraiva.set_ams()
agents.append(saraiva)
#cultura = BookstoreAgent(AID(name='Cultura@192.168.0.100:2003'), bookslist_Cultura)
cultura = BookstoreAgent(AID(name='Cultura'), bookslist_Cultura)
cultura.set_ams()
agents.append(cultura)
start_loop(agents) | 41.76 | 127 | 0.58477 |
a1251c76fb85b2d7c8033d0baea28470e0f14346 | 9,320 | py | Python | beta/dump/pbasis.py | addschile/pymctdh | 20a93ce543526de1919757defceef16f9005f423 | [
"MIT"
] | null | null | null | beta/dump/pbasis.py | addschile/pymctdh | 20a93ce543526de1919757defceef16f9005f423 | [
"MIT"
] | null | null | null | beta/dump/pbasis.py | addschile/pymctdh | 20a93ce543526de1919757defceef16f9005f423 | [
"MIT"
] | null | null | null | from copy import deepcopy
from numba import jit,njit
import numpy as np
import pymctdh.opfactory as opfactory
from pymctdh.cy.sparsemat import CSRmat#,matvec
def matadd(nrows,op1,a,op2,b):
"""
"""
if op1 is None:
opout = deepcopy(op2)
opout.data *= b
else:
data = []
JA = []
IA = [0]
ind1 = 0
ind2 = 0
for i in range(nrows):
op1_col = op1.JA[op1.IA[i]:op1.IA[i+1]]
op2_col = op2.JA[op2.IA[i]:op2.IA[i+1]]
inds = np.union1d(op1_col,op2_col)
IA.append( IA[i]+len(inds) )
for ind in inds:
JA.append( ind )
dat = 0.0
if ind in op1_col:
dat += a*op1.data[ind1]
ind1 +=1
if ind in op2_col:
dat += b*op2.data[ind2]
ind2 +=1
data.append( dat )
data = np.array(data)
IA = np.array(IA, dtype=np.intc)
JA = np.array(JA, dtype=np.intc)
opout = CSRmat(data, IA, JA)
return opout
#@njit(fastmath=True)
def kron(nrows1,IA1,JA1,data1,nrows2,IA2,JA2,data2):
"""
"""
data = []
JA = []
IA = [0]
d_ind1 = 0
for i in range(nrows1):
ncol1 = IA1[i+1]-IA1[i]
for j in range(ncol1):
col_ind1 = JA1[d_ind1]
d_ind2 = 0
for k in range(nrows2):
ncol2 = IA2[i+1]-IA2[i]
IA.append( IA[-1] + ncol2 )
for l in range(ncol2):
data.append( data1[d_ind1]*data2[d_ind2] )
JA.append( JA1[d_ind1]*nrows2 + JA2[d_ind2] )
d_ind2 += 1
d_ind += 1
return CSRmat(np.array(data), np.array(IA, dtype=int), np.array(JA, dtype=int))
if __name__ == "__main__":
# no mode combination
pbf = PBasis(['ho',22,1.0,1.0])
pbf.make_operators(['q','KE','q^2'])
print(pbf.params['basis'])
print(pbf.params['npbf'])
print(pbf.params['mass'])
print(pbf.params['omega'])
opkeys = pbf.ops.keys()
for op in opkeys:
print(op)
print(pbf.ops[op].shape)
print('')
print('')
# mode combination
pbf = PBasis(['ho',[6,6],1.0,1.0,True])
pbf.make_operators(['(q)*(1)','(1)*(q)'])
print(pbf.params['basis'])
print(pbf.params['npbf'])
print(pbf.params['mass'])
print(pbf.params['omega'])
opkeys = pbf.ops.keys()
for op in opkeys:
print(op)
print(pbf.ops[op].shape)
print('')
print('')
# mode combination
pbf = PBasis(['ho',[6,6],[1.0,2.0],[1.0,2.0],True])
pbf.make_operators(['(q)*(1)','(1)*(q)'])
print(pbf.params['basis'])
print(pbf.params['npbf'])
print(pbf.params['mass'])
print(pbf.params['omega'])
opkeys = pbf.ops.keys()
for op in opkeys:
print(op)
print(pbf.ops[op].shape)
print('')
print('')
| 32.932862 | 83 | 0.483584 |
a12707fafb28025c41d88777cd9bef6fd6e1e539 | 13,018 | py | Python | handyrl/envs/kaggle/hungry_geese.py | HantianZheng/HandyRL | 2a109faab4745b936e4176e079da4c98dff592e8 | [
"MIT"
] | 1 | 2021-07-14T07:57:51.000Z | 2021-07-14T07:57:51.000Z | handyrl/envs/kaggle/hungry_geese.py | HantianZheng/HandyRL | 2a109faab4745b936e4176e079da4c98dff592e8 | [
"MIT"
] | null | null | null | handyrl/envs/kaggle/hungry_geese.py | HantianZheng/HandyRL | 2a109faab4745b936e4176e079da4c98dff592e8 | [
"MIT"
] | null | null | null | # Copyright (c) 2020 DeNA Co., Ltd.
# Licensed under The MIT License [see LICENSE for details]
# kaggle_environments licensed under Copyright 2020 Kaggle Inc. and the Apache License, Version 2.0
# (see https://github.com/Kaggle/kaggle-environments/blob/master/LICENSE for details)
# wrapper of Hungry Geese environment from kaggle
import random
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import handyrl.envs.kaggle.public_flood_goose as pfg
# You need to install kaggle_environments, requests
from kaggle_environments import make
from kaggle_environments.envs.hungry_geese.hungry_geese import Observation, Configuration, Action, GreedyAgent
from ...environment import BaseEnvironment
'''
class GeeseNet(nn.Module):
def __init__(self):
super().__init__()
layers, filters = 12, 32
self.conv0 = TorusConv2d(53, filters, (3, 3), True) # TBD
self.blocks = nn.ModuleList([TorusConv2d(filters, filters, (3, 3), True) for _ in range(layers)])
self.head_p = nn.Linear(filters, 4, bias=False)
self.head_v = nn.Linear(filters * 2, 1, bias=False)
def forward(self, x, _=None):
h = F.relu_(self.conv0(x))
for block in self.blocks:
h = F.relu_(h + block(h))
h_head = (h * x[:,:1]).view(h.size(0), h.size(1), -1).sum(-1)
h_avg = h.view(h.size(0), h.size(1), -1).mean(-1)
p = self.head_p(h_head)
v = torch.tanh(self.head_v(torch.cat([h_head, h_avg], 1)))
return {'policy': p, 'value': v}
'''
if __name__ == '__main__':
e = Environment()
for _ in range(100):
e.reset()
while not e.terminal():
print(e)
actions = {p: e.legal_actions(p) for p in e.turns()}
print([[e.action2str(a, p) for a in alist] for p, alist in actions.items()])
e.step({p: random.choice(alist) for p, alist in actions.items()})
print(e)
print(e.outcome())
| 38.40118 | 130 | 0.536027 |
a128f73c987352e7a2b67ff853ae7ba81f0f3c24 | 198 | py | Python | py files/normalization.py | kilarinikhil/ComputerVision | 1cb2985f9c5e45bd0763cb676028ea97fce2b27b | [
"Apache-2.0"
] | null | null | null | py files/normalization.py | kilarinikhil/ComputerVision | 1cb2985f9c5e45bd0763cb676028ea97fce2b27b | [
"Apache-2.0"
] | null | null | null | py files/normalization.py | kilarinikhil/ComputerVision | 1cb2985f9c5e45bd0763cb676028ea97fce2b27b | [
"Apache-2.0"
] | 1 | 2020-06-04T18:39:00.000Z | 2020-06-04T18:39:00.000Z | import numpy as np
| 22 | 92 | 0.752525 |
a12a09f22c4f5e88f0c6271dc4b2b3de7f615fa8 | 932 | py | Python | server/migrations/versions/4a916694f1ba_add_initial_image_table.py | brodigan-e/capstone-POV | 8ba8bf49e168a1c27a9a252d0f7af375a4e0bb5b | [
"MIT"
] | 2 | 2020-10-02T20:49:48.000Z | 2020-10-06T01:19:13.000Z | server/migrations/versions/4a916694f1ba_add_initial_image_table.py | brodigan-e/capstone-POV | 8ba8bf49e168a1c27a9a252d0f7af375a4e0bb5b | [
"MIT"
] | 15 | 2020-10-01T05:42:06.000Z | 2020-12-07T22:48:22.000Z | server/migrations/versions/4a916694f1ba_add_initial_image_table.py | brodigan-e/capstone-POV | 8ba8bf49e168a1c27a9a252d0f7af375a4e0bb5b | [
"MIT"
] | 1 | 2020-11-12T20:47:57.000Z | 2020-11-12T20:47:57.000Z | """Add Initial Image Table
Revision ID: 4a916694f1ba
Revises:
Create Date: 2020-10-16 02:24:18.479608
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '4a916694f1ba'
down_revision = None
branch_labels = None
depends_on = None
| 26.628571 | 65 | 0.683476 |
a12aedcd932c89aac78464696ed1d71cb2034b31 | 9,969 | py | Python | skyoffset/multisimplex.py | jonathansick/skyoffset | 369f54d8a237f48cd56f550e80bf1d39b355bfcd | [
"BSD-3-Clause"
] | null | null | null | skyoffset/multisimplex.py | jonathansick/skyoffset | 369f54d8a237f48cd56f550e80bf1d39b355bfcd | [
"BSD-3-Clause"
] | null | null | null | skyoffset/multisimplex.py | jonathansick/skyoffset | 369f54d8a237f48cd56f550e80bf1d39b355bfcd | [
"BSD-3-Clause"
] | null | null | null | import os
import logging
import platform
import time
import multiprocessing
import numpy
import pymongo
# Pure python/numpy
import simplex
from scalarobj import ScalarObjective
# Cython/numpy
import cyscalarobj
import cysimplex
def init_func():
print multiprocessing.current_process().name
def _simplexWorker(argsList):
"""multiprocessing worker function for doing multi-trial simplex solving.
This essentially replaces the multi_start_simplex function in simplex.py
But this exists because it implicitly specifies the target function for the
optimization; multiprocessing can't pickle a function object.
This simplex worker has the ability to restart at the site of convergence
by constructing a simplex that is randomly distributed about the best vertex.
The simplex keeps reconverging from perturbed simplex until the reconverged
minimum matches the previous minimum. That is, I believe I have a global
minimum if the simplex returns to where it started.
"""
startTime = time.clock()
sim, useCython, couplings, kwargs, restartSigma, xTol, n, nTrials, logFilePath, dbArgs = argsList
if useCython:
objf = cyscalarobj.ScalarObjective(couplings)
else:
objf = ScalarObjective(couplings)
# Choose the simplex code
if useCython:
nm_simplex = cysimplex.nm_simplex
else:
nm_simplex = simplex.nm_simplex
#print "Running simplex %i/%i"% (n,nTrials)
Ndim = sim.shape[1]
_evalObjFunc = lambda offsets, objF: objF.compute(offsets)
# These variables keep track of how the code performs
totalFCalls = 0
nRestarts = 0
# Initial simplex compute
_xOpt, _fOpt, _nIters, _nFcalls, _warnflag = nm_simplex(objf,
sim, **kwargs)
bestFOpt = _fOpt
bestXOpt = _xOpt.copy()
totalFCalls += _nFcalls
# These arrays list the running tally of restarts vs best fopt vs total f calls
restartTally = [nRestarts]
bestFOptTally = [bestFOpt]
totalFCallTally = [totalFCalls]
# initiate restarts
while True:
nRestarts += 1
sim = numpy.zeros([Ndim+1, Ndim], dtype=numpy.float64)
sim[0,:] = bestXOpt.copy() # first vertex is the best point
for i in xrange(1,Ndim+1): # rest are randomly distributed.
sim[i,:] = restartSigma*numpy.random.standard_normal(Ndim) + bestXOpt
_xOpt, _fOpt, _nIters, _nFcalls, _warnflag = nm_simplex(objf,
sim, **kwargs)
totalFCalls += _nFcalls
# Ensure that the point has converged
convergenceFrac = (_xOpt - bestXOpt) / bestXOpt
if len(numpy.where(convergenceFrac > xTol)[0]) > 0:
# do another restart of the simplex
if _fOpt < bestFOpt:
# but we did find a new minimum
bestFOpt = _fOpt
bestXOpt = _xOpt.copy()
restartTally.append(nRestarts)
bestFOptTally.append(bestFOpt)
totalFCallTally.append(totalFCalls)
else:
# we're converged
break
# Report this in the log
runtime = time.clock() - startTime
if logFilePath is not None:
logging.basicConfig(filename=logFilePath,level=logging.INFO)
logging.info("%i/%i converged to %.4e in %.2f minutes, %i local restarts" % (n, nTrials, bestFOpt, runtime/60., nRestarts))
# Dictionary stores the history of restarts, as well as teh best solution
# as a field offset dictionary (we're breaking reusability here... just
# to make things faster.)
convergenceHistory = {"total_calls": totalFCalls, "n_restarts": nRestarts,
"runtime": runtime,
"best_offsets": objf.get_best_offsets(),
"best_fopt": bestFOpt,
"restart_hist": restartTally,
"fopt_hist": bestFOptTally,
"fcall_hist": totalFCallTally}
# Connect to MongoDB and add our convergence history!
try:
connection = pymongo.Connection(dbArgs['url'], dbArgs['port'])
db = connection[dbArgs['dbname']]
collection = db[dbArgs['cname']]
collection.insert(convergenceHistory, safe=True)
except pymongo.errors.AutoReconnect:
logging.info("pymongo.errors.AutoReconnect on %i"%n)
# collection.database.connection.disconnect()
| 39.403162 | 131 | 0.634467 |
a12b99b03f4c428fc4fbd3c7f3bfcb53005d0cea | 695 | py | Python | netsuitesdk/api/custom_records.py | cart-com/netsuite-sdk-py | 9c759b631f7a194efb86c06e1935cdc2856200d3 | [
"MIT"
] | null | null | null | netsuitesdk/api/custom_records.py | cart-com/netsuite-sdk-py | 9c759b631f7a194efb86c06e1935cdc2856200d3 | [
"MIT"
] | null | null | null | netsuitesdk/api/custom_records.py | cart-com/netsuite-sdk-py | 9c759b631f7a194efb86c06e1935cdc2856200d3 | [
"MIT"
] | null | null | null | from netsuitesdk.internal.utils import PaginatedSearch
from .base import ApiBase
import logging
logger = logging.getLogger(__name__)
| 33.095238 | 117 | 0.728058 |
a12be00ef3b06e0094c89aa20c5aafe79c822021 | 343 | py | Python | Support/renameCNVNatorOutput.py | zhongmicai/SV_population | 81987865c9b67be5e358cb1b966bb69cc303abee | [
"MIT"
] | 18 | 2019-03-18T00:08:18.000Z | 2021-10-19T06:21:56.000Z | Support/renameCNVNatorOutput.py | zhongmicai/SV_population | 81987865c9b67be5e358cb1b966bb69cc303abee | [
"MIT"
] | 5 | 2018-11-06T15:18:17.000Z | 2020-07-24T09:31:08.000Z | Support/renameCNVNatorOutput.py | zhongmicai/SV_population | 81987865c9b67be5e358cb1b966bb69cc303abee | [
"MIT"
] | 2 | 2019-11-13T10:28:58.000Z | 2021-09-07T08:25:12.000Z | #!/usr/bin/env python3
import os
vcfdir='/home/matt/Plasmodium/Pf_SV/Data'
for ID in os.listdir(vcfdir):
nameID = '_'.join(ID.split('.')[0].split('_')[:-1])
coreID = nameID.split('_')[-1]
if coreID[:3] == 'ERR':
os.system('cp {0}.cnvs {1}_DEL.cnvs'.format(coreID, nameID))
os.system('cp {0}.cnvs {1}_DUP.cnvs'.format(coreID, nameID))
| 28.583333 | 62 | 0.641399 |
a12cb244767dfa01e9b581f3a545006ea34d4ac7 | 1,568 | py | Python | string_1/hello_name.py | nhutnamhcmus/coding-bat-solutions | 5f780a4027a6c3523a72961db1bad547c997fdc6 | [
"MIT"
] | 1 | 2020-09-19T18:02:13.000Z | 2020-09-19T18:02:13.000Z | string_1/hello_name.py | nhutnamhcmus/coding-bat-solutions | 5f780a4027a6c3523a72961db1bad547c997fdc6 | [
"MIT"
] | null | null | null | string_1/hello_name.py | nhutnamhcmus/coding-bat-solutions | 5f780a4027a6c3523a72961db1bad547c997fdc6 | [
"MIT"
] | null | null | null | # =======================================================================================================================================
# VNU-HCM, University of Science
# Department Computer Science, Faculty of Information Technology
# Authors: Nhut-Nam Le (Tich Phan Suy Rong)
# 2020
"""
Given a string name, e.g. "Bob", return a greeting of the form "Hello Bob!".
For example test case:
hello_name('Bob') 'Hello Bob!'
hello_name('Alice') 'Hello Alice!'
hello_name('X') 'Hello X!'
"""
import unittest
if __name__ == "__main__":
unittest.main()
| 27.508772 | 137 | 0.598852 |
a12f2dc13e43b20caf3450c97b9fa9395b547d8a | 335 | py | Python | materials/ch_04/escape_str.py | epsilonxe/RMUTT_09090016 | 863dd8a6471b560831b742da4aec27209c294df5 | [
"MIT"
] | null | null | null | materials/ch_04/escape_str.py | epsilonxe/RMUTT_09090016 | 863dd8a6471b560831b742da4aec27209c294df5 | [
"MIT"
] | null | null | null | materials/ch_04/escape_str.py | epsilonxe/RMUTT_09090016 | 863dd8a6471b560831b742da4aec27209c294df5 | [
"MIT"
] | null | null | null | text1 = '''ABCDEF
GHIJKL
MNOPQRS
TUVWXYZ
'''
text2 = 'ABCDEF\
GHIJKL\
MNOPQRS\
TUVWXYZ'
text3 = 'ABCD\'EF\'GHIJKL'
text4 = 'ABCDEF\nGHIJKL\nMNOPQRS\nTUVWXYZ'
text5 = 'ABCDEF\fGHIJKL\fMNOPQRS\fTUVWXYZ'
print(text1)
print('-' * 25)
print(text2)
print('-' * 25)
print(text3)
print('-' * 25)
print(text4)
print('-' * 25)
print(text5) | 12.884615 | 42 | 0.671642 |
a1300bc0639e795122958402aa1f3b4e0ab96874 | 823 | py | Python | pygears/cookbook/reduce2.py | Risto97/pygears | 19393e85101a16762cb3bbbf3010946ef69217f2 | [
"MIT"
] | null | null | null | pygears/cookbook/reduce2.py | Risto97/pygears | 19393e85101a16762cb3bbbf3010946ef69217f2 | [
"MIT"
] | null | null | null | pygears/cookbook/reduce2.py | Risto97/pygears | 19393e85101a16762cb3bbbf3010946ef69217f2 | [
"MIT"
] | null | null | null | from pygears import gear, Intf
from pygears.common import czip
from pygears.typing import Tuple, Uint, Union, Queue
from pygears.common import fmap, demux, decoupler, fifo, union_collapse
from pygears.cookbook import priority_mux, replicate
TCfg = Tuple[{'reduce_size': Uint['w_reduce_size'], 'init': 't_acc'}]
| 29.392857 | 74 | 0.684083 |
a1309a770978d986e457fb2177d6163ed7ae8ec0 | 313 | py | Python | atcoder/abc166D_i_hate_factorization.py | da-edra/kyopro | ad531d15bcccf6aafdaaef3cc69db850b0f7c471 | [
"BSD-3-Clause"
] | 2 | 2020-08-31T17:19:07.000Z | 2021-01-08T21:35:48.000Z | atcoder/abc166D_i_hate_factorization.py | edglaz/kyopro | b8ac4f6873418ad20ad417e46d731c35a8062c0d | [
"BSD-3-Clause"
] | null | null | null | atcoder/abc166D_i_hate_factorization.py | edglaz/kyopro | b8ac4f6873418ad20ad417e46d731c35a8062c0d | [
"BSD-3-Clause"
] | null | null | null | # unihernandez22
# https://atcoder.jp/contests/abc166/tasks/abc166_d
# math, brute force
n = int(input())
for a in range(n):
breaked = True
for b in range(-1000, 1000):
if a**5 - b**5 == n:
print(a, b)
break;
else:
breaked = False
if breaked:
break
| 20.866667 | 51 | 0.539936 |
a130aee35a17b1d7653613de1de880f9a3444608 | 305 | py | Python | packages/grid/apps/worker/src/main/core/database/groups/groups.py | exityan/PySyft | 35166c487a5be57f9ad28929ed88a8ba6bdd5aeb | [
"Apache-2.0"
] | 425 | 2019-09-22T06:14:53.000Z | 2022-03-30T02:17:34.000Z | packages/grid/apps/worker/src/main/core/database/groups/groups.py | Metrix1010/PySyft | 6477f64b63dc285059c3766deab3993653cead2e | [
"Apache-2.0"
] | 352 | 2019-09-17T15:32:51.000Z | 2022-03-12T01:07:35.000Z | packages/grid/apps/worker/src/main/core/database/groups/groups.py | Metrix1010/PySyft | 6477f64b63dc285059c3766deab3993653cead2e | [
"Apache-2.0"
] | 208 | 2019-09-18T18:32:10.000Z | 2022-03-24T01:10:11.000Z | # grid relative
from .. import BaseModel
from .. import db
| 21.785714 | 70 | 0.655738 |
a130d81a095f620365d47a00f587d3671ea0c357 | 2,416 | py | Python | libraries/urx_python/urx_scripts/demo_apple_tree.py | giacomotomasi/tennisball_demo | f71cd552e64fe21533abe47b986db6999947c3a9 | [
"Apache-2.0"
] | null | null | null | libraries/urx_python/urx_scripts/demo_apple_tree.py | giacomotomasi/tennisball_demo | f71cd552e64fe21533abe47b986db6999947c3a9 | [
"Apache-2.0"
] | null | null | null | libraries/urx_python/urx_scripts/demo_apple_tree.py | giacomotomasi/tennisball_demo | f71cd552e64fe21533abe47b986db6999947c3a9 | [
"Apache-2.0"
] | null | null | null |
import urx
import logging
import time
if __name__ == "__main__":
logging.basicConfig(level=logging.WARN)
#gripper_remove_pos = [0.0755, -0.2824, 0.3477, -0.0387, -3.0754, 0.4400] # rest position (good to place/remove gripper)
rob = urx.Robot("192.168.56.1")
#rob.set_tcp((0,0,0,0,0,0))
#rob.set_payload(0.5, (0,0,0))
home_pos = [-0.0153, -0.4213, 0.3469, 1.2430, 2.6540, -0.9590]
appro1 = [-0.0762, -0.5575, 0.3546, 0.6110, 2.7090, -1.7840]
apple1 = [-0.1042, -0.6244, 0.3209, 1.4510, 1.9160, -1.4980]
get_far1 = [-0.0510, -0.5086, 0.3215, 0.4900, 2.6510, -1.8690]
appro2 = [-0.1767, -0.4281, 0.3204, 1.8210, 2.0030, -1.5280]
apple2 = [-0.2129, -0.4926, 0.2951, 1.8210, 2.0030, -1.5280]
get_far2 = [-0.1324, -0.3790, 0.3112, 1.8210, 2.0030, -1.5280]
appro_place = [0.3571, -0.3540, 0.3563, 1.2360, 2.8850, -0.0780]
place_pos = [0.3571, -0.3540, 0.2983, 1.2360, 2.8850, -0.0780]
try:
v = 0.2
a = 0.3
rob.set_digital_out(0,0) # initialize gripper
# open gripper
rob.set_digital_out(0, 1)
time.sleep(0.5)
rob.set_digital_out(0,0)
pose = rob.getl() #gives a lists with 6 elements (x, y, z, rx, ry, rz) --> rotation vector
#print("robot tcp is at: ", pose)
# move to home position
#rob.movej(joint_pose, acc=a, vel=v) # it takes as inputs the joints goal values!
rob.movej_to_pose(home_pos, acc=a, vel=0.3)
time.sleep(0.01)
# move towards the first apple to pick (approach it, move to a suitable grabbing position, get away)
rob.movej_to_pose(appro1, acc=a, vel=v)
time.sleep(0.01)
rob.movel(apple1, acc=a, vel=v)
# close gripper
rob.set_digital_out(0, 1)
time.sleep(0.5)
rob.set_digital_out(0,0)
time.sleep(1)
rob.movel(get_far1, a, v)
#move towards the place position
rob.movej_to_pose(appro_place, a, vel=0.3)
time.sleep(0.01)
rob.movel(place_pos, a, v)
# open gripper
rob.set_digital_out(0, 1)
time.sleep(0.5)
rob.set_digital_out(0,0)
time.sleep(1)
rob.movel(appro_place, a, v)
# move to home position
rob.movej_to_pose(home_pos, a, v)
pose_final = rob.getl()
print("robot tcp is at (final): ", pose_final)
finally:
rob.close() | 32.213333 | 124 | 0.577815 |
a1311c3c3114e32c6b986776dfaae1a0d9bb6825 | 403 | py | Python | solution/data_structure2/1302/main.py | jungyoonoh/baekjoon-1 | 2b4437a4b5e06244fa47fae6c7b7be0157d0f94f | [
"MIT"
] | 2,236 | 2019-08-05T00:36:59.000Z | 2022-03-31T16:03:53.000Z | solution/data_structure2/1302/main.py | juy4556/baekjoon | bc0b0a0ebaa45a5bbd32751f84c458a9cfdd9f92 | [
"MIT"
] | 225 | 2020-12-17T10:20:45.000Z | 2022-01-05T17:44:16.000Z | solution/data_structure2/1302/main.py | juy4556/baekjoon | bc0b0a0ebaa45a5bbd32751f84c458a9cfdd9f92 | [
"MIT"
] | 602 | 2019-08-05T00:46:25.000Z | 2022-03-31T13:38:23.000Z | # Authored by : gusdn3477
# Co-authored by : -
# Link : http://boj.kr/8adc986ae26b461eadd65abdff3cfba9
import sys
N = int(input())
book = {}
for i in range(N):
name = input()
if name not in book:
book[name] = 1
else:
book[name] += 1
book = list(book.items())
book.sort(key = lambda x : (-x[1],x[0]))
print(book[0][0])
| 19.190476 | 55 | 0.600496 |
a13162f4cb62e368c73037f36a88c321b285f2d8 | 1,152 | py | Python | testflows/_core/utils/sort.py | testflows/TestFlows-Core | 0aa17247dffd2f7199465031ab16cc4f12c9cfb0 | [
"Apache-2.0"
] | 3 | 2020-06-25T19:23:19.000Z | 2021-10-20T19:29:56.000Z | testflows/_core/utils/sort.py | testflows/TestFlows-Core | 0aa17247dffd2f7199465031ab16cc4f12c9cfb0 | [
"Apache-2.0"
] | null | null | null | testflows/_core/utils/sort.py | testflows/TestFlows-Core | 0aa17247dffd2f7199465031ab16cc4f12c9cfb0 | [
"Apache-2.0"
] | 1 | 2020-02-24T12:31:45.000Z | 2020-02-24T12:31:45.000Z | # Copyright 2020 Katteli Inc.
# TestFlows.com Open-Source Software Testing Framework (http://testflows.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
def human(l, key=None):
"""Sort in human readable format.
Credit: https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/
:key: optional function to retrieve the key from the element
"""
get_key = key
if get_key is None:
get_key = lambda x: x
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', get_key(key)) ]
l.sort(key=alphanum_key)
return l
| 38.4 | 89 | 0.717882 |
a13291eccf29b835c30e820b06c59c45c1cf58bf | 3,220 | py | Python | tests/build/test_flash.py | cyliangtw/mbed-tools | 69c600c0a5ac1eb0d52b481b5ba020da8bb73d33 | [
"Apache-2.0"
] | 39 | 2020-04-03T13:52:34.000Z | 2022-03-23T13:08:22.000Z | tests/build/test_flash.py | cyliangtw/mbed-tools | 69c600c0a5ac1eb0d52b481b5ba020da8bb73d33 | [
"Apache-2.0"
] | 306 | 2020-02-06T18:08:43.000Z | 2022-03-25T14:50:18.000Z | tests/build/test_flash.py | cyliangtw/mbed-tools | 69c600c0a5ac1eb0d52b481b5ba020da8bb73d33 | [
"Apache-2.0"
] | 23 | 2020-03-17T11:42:23.000Z | 2022-01-30T02:56:18.000Z | #
# Copyright (c) 2020-2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
import pathlib
import tempfile
from unittest import TestCase, mock
from mbed_tools.build.flash import flash_binary, _build_binary_file_path, _flash_dev
from mbed_tools.build.exceptions import BinaryFileNotFoundError
from tests.build.factories import DeviceFactory
| 36.590909 | 112 | 0.675776 |
a133567cd81f4bb8edf05a69d95e9fb2d7bf451d | 2,795 | py | Python | packettotal_sdk/search_tools.py | RogerDeng/HoneyBot | 3843ec6d684786091ced053857d1718ef1fa495c | [
"MIT"
] | 67 | 2019-08-16T05:03:19.000Z | 2021-11-25T01:48:23.000Z | packettotal_sdk/search_tools.py | RogerDeng/HoneyBot | 3843ec6d684786091ced053857d1718ef1fa495c | [
"MIT"
] | 1 | 2020-09-01T02:40:31.000Z | 2020-09-01T02:40:31.000Z | packettotal_sdk/search_tools.py | RogerDeng/HoneyBot | 3843ec6d684786091ced053857d1718ef1fa495c | [
"MIT"
] | 16 | 2020-02-20T12:38:40.000Z | 2022-03-22T17:45:25.000Z | import time
import typing
import requests
from sys import stderr
from datetime import datetime
from packettotal_sdk import packettotal_api
| 39.366197 | 120 | 0.594633 |
a133fa0afcdcf42b74dd45b66f95e50ddbf7734f | 41 | py | Python | actfw_core/v4l2/__init__.py | Idein/actfw-core | 44c979bbe5d32d068eed20b7d565a6de2fb9acd3 | [
"MIT"
] | 2 | 2021-03-15T11:44:37.000Z | 2021-05-12T09:58:35.000Z | actfw_core/v4l2/__init__.py | Idein/actfw-core | 44c979bbe5d32d068eed20b7d565a6de2fb9acd3 | [
"MIT"
] | 28 | 2020-12-24T02:53:37.000Z | 2022-03-14T09:02:28.000Z | actfw_core/v4l2/__init__.py | Idein/actfw-core | 44c979bbe5d32d068eed20b7d565a6de2fb9acd3 | [
"MIT"
] | null | null | null | from . import types, video # noqa: F401
| 20.5 | 40 | 0.682927 |
a1357146c1bfe43fcbbabe34684a165daba3ef28 | 4,987 | py | Python | tests/unit/test_s3.py | tejuafonja/SDGym | 7c20c588a4c9f5940885467406e73274a5b01a8e | [
"MIT"
] | 19 | 2019-05-23T14:27:02.000Z | 2019-12-08T16:04:20.000Z | tests/unit/test_s3.py | tejuafonja/SDGym | 7c20c588a4c9f5940885467406e73274a5b01a8e | [
"MIT"
] | 11 | 2019-05-30T21:29:27.000Z | 2019-12-10T16:49:28.000Z | tests/unit/test_s3.py | tejuafonja/SDGym | 7c20c588a4c9f5940885467406e73274a5b01a8e | [
"MIT"
] | 11 | 2019-05-23T14:27:06.000Z | 2020-01-02T14:29:00.000Z | from unittest.mock import Mock, patch
import pandas as pd
from sdgym.s3 import is_s3_path, parse_s3_path, write_csv, write_file
def test_is_s3_path_with_local_dir():
"""Test the ``sdgym.s3.is_s3_path`` function with a local directory.
If the path is not an s3 path, it should return ``False``.
Input:
- path to a local directory
Output:
- False
"""
# setup
path = 'path/to/local/dir'
# run
result = is_s3_path(path)
# asserts
assert not result
def test_is_s3_path_with_s3_bucket():
"""Test the ``sdgym.s3.is_s3_path`` function with an s3 directory.
If the path is an s3 path, it should return ``True``.
Input:
- path to an s3 directory
Output:
- True
"""
# setup
path = 's3://my-bucket/my/path'
# run
result = is_s3_path(path)
# asserts
assert result
def test_parse_s3_path_bucket_only():
"""Test the ``sdgym.s3.parse_s3_path`` function with an s3 path.
If the s3 path contains only the bucket name, the returned tuple
should be ``(bucket_name, '')``.
Input:
- path to s3 bucket
Output:
- ('my-bucket', '')
"""
# setup
expected_bucket_name = 'my-bucket'
expected_key_prefix = ''
path = f's3://{expected_bucket_name}/{expected_key_prefix}'
# run
bucket_name, key_prefix = parse_s3_path(path)
# asserts
assert bucket_name == expected_bucket_name
assert key_prefix == expected_key_prefix
def test_parse_s3_path_bucket_and_dir_path():
"""Test the `sdgym.s3.parse_s3_path`` function with an s3 path.
If the s3 path contains the bucket and a sub directory, the returned
tuple should be ``(bucket_name, subdirectory)``.
Input:
- path to s3 directory
Output:
- ('my-bucket', 'path/to/dir')
"""
# setup
expected_bucket_name = 'my-bucket'
expected_key_prefix = 'path/to/dir'
path = f's3://{expected_bucket_name}/{expected_key_prefix}'
# run
bucket_name, key_prefix = parse_s3_path(path)
# asserts
assert bucket_name == expected_bucket_name
assert key_prefix == expected_key_prefix
def test_write_file(tmpdir):
"""Test the `sdgym.s3.write_file`` function with a local path.
If the path is a local path, a file with the correct
contents should be created at the specified path.
Input:
- contents of the local file
- path to the local file
- aws_key is None
- aws_secret is None
Output:
- None
Side effects:
- file creation at the specified path with the given contents
"""
# setup
content_str = 'test_content'
path = f'{tmpdir}/test.txt'
# run
write_file(content_str.encode('utf-8'), path, None, None)
# asserts
with open(path, 'r') as f:
assert f.read() == content_str
| 23.195349 | 72 | 0.645278 |
a1360dd0640d6fe332d03889c6a40e96f3ddedfb | 3,227 | py | Python | vet_care/scripts/generate_from_history.py | neerajvkn/vet_care | 14914b22e7a83265d736f9f9dc5186271ae62d66 | [
"MIT"
] | 2 | 2020-11-23T11:14:32.000Z | 2021-02-03T06:40:33.000Z | vet_care/scripts/generate_from_history.py | neerajvkn/vet_care | 14914b22e7a83265d736f9f9dc5186271ae62d66 | [
"MIT"
] | null | null | null | vet_care/scripts/generate_from_history.py | neerajvkn/vet_care | 14914b22e7a83265d736f9f9dc5186271ae62d66 | [
"MIT"
] | 7 | 2019-11-16T14:36:33.000Z | 2021-08-25T07:54:51.000Z | import csv
import datetime
import frappe
# bench execute vet_care.scripts.generate_from_history.execute --args "['./data/important_data.csv']"
# bench execute vet_care.scripts.generate_from_history.execute --args "['./data/important_data.csv', ['1010', '2920']]"
| 37.964706 | 119 | 0.634645 |
a1362909e583305f43ba83685760d08284ce8f25 | 594 | py | Python | aws_interface/cloud/auth/delete_sessions.py | hubaimaster/aws-interface | 162dd056546d58b6eb29afcae1c3c2d78e4309b2 | [
"Apache-2.0"
] | 53 | 2018-10-02T05:58:54.000Z | 2020-09-15T08:58:26.000Z | aws_interface/cloud/auth/delete_sessions.py | hubaimaster/aws-interface | 162dd056546d58b6eb29afcae1c3c2d78e4309b2 | [
"Apache-2.0"
] | 52 | 2018-09-26T05:16:09.000Z | 2022-03-11T23:51:14.000Z | aws_interface/cloud/auth/delete_sessions.py | hubaimaster/aws-interface | 162dd056546d58b6eb29afcae1c3c2d78e4309b2 | [
"Apache-2.0"
] | 10 | 2019-03-11T16:35:14.000Z | 2019-10-23T08:03:54.000Z |
from cloud.permission import Permission, NeedPermission
# Define the input output format of the function.
# This information is used when creating the *SDK*.
# Input/output contract of this cloud function; this structure is read
# when generating the client SDK bindings.
info = {
    'input_format': {
        'session_ids': ['str'],  # identifiers of the sessions to delete
    },
    'output_format': {
        'success': 'bool'  # single boolean result flag
    },
    'description': 'Delete sessions'
}
| 22.846154 | 56 | 0.6633 |
a137958aa6262c5d4af45fea5f852cfe4e0fb7c7 | 5,509 | py | Python | plugin/autoWHUT.py | PPeanutButter/MediaServer | a6a0b3f424ca3fc4ea73d78db380ec3cc882bfd2 | [
"MIT"
] | 2 | 2021-09-23T15:09:25.000Z | 2022-01-16T01:04:07.000Z | plugin/autoWHUT.py | PPeanutButter/MediaServer | a6a0b3f424ca3fc4ea73d78db380ec3cc882bfd2 | [
"MIT"
] | 1 | 2022-02-23T04:00:16.000Z | 2022-02-23T04:10:06.000Z | plugin/autoWHUT.py | PPeanutButter/MediaServer | a6a0b3f424ca3fc4ea73d78db380ec3cc882bfd2 | [
"MIT"
] | 1 | 2021-09-23T15:09:26.000Z | 2021-09-23T15:09:26.000Z | # coding=<utf-8>
import requests
import re
import socket
import base64
import psutil
import pywifi
from pywifi import const
import subprocess
import os
import time
if __name__ == '__main__':
wifiManager = WifiManager()
wifiManager.start()
| 34.43125 | 117 | 0.554547 |
a137f706cc16a7ddd946b13b277853a20e68de35 | 3,096 | py | Python | active_subspaces/gradients.py | ftalbrecht/active_subspaces | 64817a19db250e4b01bcd51055ad0f7d2a5665b8 | [
"MIT"
] | 51 | 2015-04-24T13:52:00.000Z | 2022-02-16T13:30:39.000Z | active_subspaces/gradients.py | JIMMY-KSU/active_subspaces | 64817a19db250e4b01bcd51055ad0f7d2a5665b8 | [
"MIT"
] | 10 | 2015-02-03T01:00:09.000Z | 2022-03-06T07:48:46.000Z | active_subspaces/gradients.py | JIMMY-KSU/active_subspaces | 64817a19db250e4b01bcd51055ad0f7d2a5665b8 | [
"MIT"
] | 44 | 2015-01-12T06:05:59.000Z | 2022-02-02T18:53:34.000Z | """Utilities for approximating gradients."""
import numpy as np
from utils.misc import process_inputs
from utils.simrunners import SimulationRunner
def local_linear_gradients(X, f, p=None, weights=None):
    """Estimate a collection of gradients from input/output pairs.

    Given a set of input/output pairs, choose subsets of neighboring points and
    build a local linear model for each subset. The gradients of these local
    linear models comprise estimates of sampled gradients.

    Parameters
    ----------
    X : ndarray
        M-by-m matrix that contains the m-dimensional inputs
    f : ndarray
        M-by-1 matrix that contains scalar outputs
    p : int, optional
        how many nearest neighbors to use when constructing the local linear
        model (default floor(1.7*m), capped at M)
    weights : ndarray, optional
        M-by-1 matrix that contains the weights for each observation (default
        None, which means uniform weights 1/M)

    Returns
    -------
    df : ndarray
        MM-by-m matrix that contains estimated partial derivatives approximated
        by the local linear models, where MM = min(ceil(10*m*log(m)), M-1)
    """
    X, M, m = process_inputs(X)
    if M <= m:
        raise Exception('Not enough samples for local linear models.')

    # Default neighborhood size: slightly more than the m+1 points required
    # for a full-rank affine fit, capped at the number of samples.
    if p is None:
        p = int(np.minimum(np.floor(1.7*m), M))
    elif not isinstance(p, int):
        raise TypeError('p must be an integer.')
    if p < m+1 or p > M:
        raise Exception('p must be between m+1 and M')

    if weights is None:
        weights = np.ones((M, 1)) / M

    # Number of local models (and therefore gradient estimates) to build.
    MM = np.minimum(int(np.ceil(10*m*np.log(m))), M-1)
    df = np.zeros((MM, m))
    for i in range(MM):
        # Pick a random anchor point and rank all samples by squared distance.
        ii = np.random.randint(M)
        x = X[ii,:]
        D2 = np.sum((X - x)**2, axis=1)
        ind = np.argsort(D2)
        # Exclude zero-distance points (the anchor itself) from the neighbor
        # list. BUGFIX: the previous code used `ind[D2 != 0]`, which masks the
        # *sorted* index array with a mask aligned to the *unsorted* distances
        # and so removed an arbitrary neighbor instead of the anchor point.
        ind = ind[D2[ind] != 0]
        # Fit an affine model f ~ u0 + u.T x through the p nearest neighbors;
        # the slope part of the least-squares solution estimates the gradient.
        # NOTE(review): scaling both A and b by the same scalar
        # sqrt(weights[ii]) does not change the least-squares solution;
        # presumably per-row weights were intended -- left unchanged.
        A = np.hstack((np.ones((p,1)), X[ind[:p],:])) * np.sqrt(weights[ii])
        b = f[ind[:p]] * np.sqrt(weights[ii])
        u = np.linalg.lstsq(A, b)[0]
        df[i,:] = u[1:].T
    return df
def finite_difference_gradients(X, fun, h=1e-6):
    """Compute finite difference gradients with a given interface.

    Parameters
    ----------
    X : ndarray
        M-by-m matrix that contains the points to estimate the gradients with
        finite differences
    fun : function
        function that returns the simulation's quantity of interest given inputs
    h : float, optional
        the finite difference step size (default 1e-6)

    Returns
    -------
    df : ndarray
        M-by-m matrix that contains estimated partial derivatives approximated
        by finite differences
    """
    X, M, m = process_inputs(X)

    # Stack m+1 copies of the input points: the first copy is unperturbed and
    # each of the remaining m copies perturbs one coordinate by h.
    base = np.tile(X, (m + 1, 1))
    directions = np.vstack((np.zeros((1, m)), np.eye(m)))
    XX = base + h * np.repeat(directions, M, axis=0)

    # Evaluate the quantity of interest at every (perturbed) point, wrapping
    # a bare function in a SimulationRunner when necessary.
    runner = fun if isinstance(fun, SimulationRunner) else SimulationRunner(fun)
    F = runner.run(XX)

    # Forward differences: (f(x + h*e_j) - f(x)) / h for each coordinate j.
    df = (F[M:].reshape((m, M)).transpose() - F[:M]) / h
    return df.reshape((M, m))
| 31.591837 | 80 | 0.609173 |
a1385e4aefd67a6e8363bc3fce53670aa1ea871f | 6,861 | py | Python | covidaid/tools/read_data.py | sabuj7177/CovidProject | b4b7bcfa5ace165520507f489dc74da7b695e2f0 | [
"Apache-2.0"
] | null | null | null | covidaid/tools/read_data.py | sabuj7177/CovidProject | b4b7bcfa5ace165520507f489dc74da7b695e2f0 | [
"Apache-2.0"
] | null | null | null | covidaid/tools/read_data.py | sabuj7177/CovidProject | b4b7bcfa5ace165520507f489dc74da7b695e2f0 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
"""
Read images and corresponding labels.
"""
import torch
from torch.utils.data import Dataset
from PIL import Image
import os
import random
| 36.887097 | 153 | 0.594957 |
a13861d4cfee522305c9e242f88c3b1859a889ba | 7,996 | py | Python | helper/evaluator.py | manipopopo/TC-ResNet | 7dff6f4f865f1e63ff705d8e0267cf3b9a0d70a3 | [
"Apache-2.0"
] | 185 | 2019-04-06T12:54:25.000Z | 2022-03-24T12:06:59.000Z | helper/evaluator.py | manipopopo/TC-ResNet | 7dff6f4f865f1e63ff705d8e0267cf3b9a0d70a3 | [
"Apache-2.0"
] | 23 | 2019-05-15T09:19:01.000Z | 2022-02-10T00:07:03.000Z | helper/evaluator.py | manipopopo/TC-ResNet | 7dff6f4f865f1e63ff705d8e0267cf3b9a0d70a3 | [
"Apache-2.0"
] | 61 | 2019-04-06T12:33:46.000Z | 2022-03-01T06:41:53.000Z | import csv
import sys
from pathlib import Path
from abc import abstractmethod
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import common.tf_utils as tf_utils
import metrics.manager as metric_manager
from common.model_loader import Ckpt
from common.utils import format_text
from common.utils import get_logger
from helper.base import AudioBase
from metrics.summaries import BaseSummaries
from metrics.summaries import Summaries
| 37.539906 | 119 | 0.674212 |
a139c61e93bd3d976aaa5d706da3d269f7d52385 | 7,483 | py | Python | src/sync.py | neybar/icloud-drive-docker | c7e59400c01b304c0f8ed7fd0b3ea2a623623b2e | [
"BSD-3-Clause"
] | null | null | null | src/sync.py | neybar/icloud-drive-docker | c7e59400c01b304c0f8ed7fd0b3ea2a623623b2e | [
"BSD-3-Clause"
] | null | null | null | src/sync.py | neybar/icloud-drive-docker | c7e59400c01b304c0f8ed7fd0b3ea2a623623b2e | [
"BSD-3-Clause"
] | null | null | null | __author__ = 'Mandar Patil (mandarons@pm.me)'
import datetime
import os
import re
import time
from pathlib import Path
from shutil import copyfileobj, rmtree
from pyicloud import PyiCloudService, utils, exceptions
from src import config_parser
from src import notify
| 42.276836 | 117 | 0.647735 |
a13a98235b9b2f72025d1bf03dbd61547e3c8d9f | 2,163 | py | Python | sphinx-sources/Examples/Interference/MultiSlit.py | jccmak/lightpipes | 1a296fe08bdd97fc9a0e11f92bab25c85f68e57d | [
"BSD-3-Clause"
] | null | null | null | sphinx-sources/Examples/Interference/MultiSlit.py | jccmak/lightpipes | 1a296fe08bdd97fc9a0e11f92bab25c85f68e57d | [
"BSD-3-Clause"
] | null | null | null | sphinx-sources/Examples/Interference/MultiSlit.py | jccmak/lightpipes | 1a296fe08bdd97fc9a0e11f92bab25c85f68e57d | [
"BSD-3-Clause"
] | null | null | null | #! python3
import numpy as np
import matplotlib.pyplot as plt
from LightPipes import *
"""
MultiSlit.py
Demonstrates the RowOfFields command. Two wavelengths are used to show
the principles of a grating.
cc Fred van Goor, June 2020.
"""
wavelength=1000*nm
Dlambda=150*nm
size=11*mm
N=2000
N2=int(N/2)
SlitSeparation=0.5*mm
f=30*cm
Nslits=20
SlitHeight=5*mm
SlitWidth=0.1*mm
Nheight=int(SlitHeight/size*N)
Nwidth=int(SlitWidth/size*N)
Fslit=np.ones((Nheight,Nwidth))
F1=Begin(size,wavelength,N)
F1=RowOfFields(F1,Fslit,Nslits,SlitSeparation)
Islits=Intensity(F1)
F1=Lens(F1,f)
F1=Forvard(F1,f)
F11=Interpol(F1,size,N,magnif=4)
Iscreen1=Intensity(F11)
F2=Begin(size,wavelength+Dlambda,N)
F2=RowOfFields(F2,Fslit,Nslits,SlitSeparation)
F2=Lens(F2,f)
F2=Forvard(F2,f)
F22=Interpol(F2,size,N,magnif=4)
Iscreen2=Intensity(F22)
X=np.arange(N)
X=(X/N-1/2)*size/mm
s= r'LightPipes for Python,' + '\n' +\
r'MultiSlit.py'+ '\n\n'\
r'size = {:4.2f} mm'.format(size/mm) + '\n' +\
r'$\lambda$ = {:4.2f} nm'.format(wavelength/nm) + '\n' +\
r'$\Delta\lambda$ = {:4.2f} nm'.format(Dlambda/nm) + '\n' +\
r'N = {:d}'.format(N) + '\n' +\
r'width of the slits: {:4.2f} mm'.format(SlitWidth/mm) + '\n' +\
r'height of the slits: {:4.2f} mm'.format(SlitHeight/mm) + '\n' +\
r'separation of the slits: {:4.2f} mm'.format(SlitSeparation/mm) + '\n' +\
r'number of slits: {:d}'.format(Nslits) + '\n' +\
r'focal length lens: {:4.2f} cm'.format(f/cm) + '\n\n' +\
r'${\copyright}$ Fred van Goor, May 2020'
fig=plt.figure(figsize=(10,6))
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222);#ax2.set_ylim(bottom=900,top=1100)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
ax1.imshow(Islits,cmap='gray',aspect='equal');ax1.axis('off'); ax1.set_title('Screen with slits')
ax2.imshow(Iscreen1+Iscreen2,cmap='jet',aspect='equal');ax2.axis('off'); ax2.set_title('Intensity distribution at the focus of the lens')
#ax2.margins(x=0, y=-0.45)
ax3.plot(X,(Iscreen1+Iscreen2)[N2]); ax3.set_xlabel('x [mm]'); ax3.set_ylabel('Intensity [a.u.]'); ax3.set_title('Cross section of intensity at the focus')
ax4.text(0,0,s); ax4.axis('off')
plt.show()
| 31.808824 | 155 | 0.680536 |
a13baec342fa639fe6142ecd977281a346771177 | 389 | py | Python | genshimacro/__init__.py | trac-hacks/trac-GenshiMacro | d9da1a50f6d73904fdda2e9e7cbc4c056b929267 | [
"BSD-3-Clause"
] | 1 | 2015-02-19T21:08:53.000Z | 2015-02-19T21:08:53.000Z | genshimacro/__init__.py | ejucovy/trac-GenshiMacro | d9da1a50f6d73904fdda2e9e7cbc4c056b929267 | [
"BSD-3-Clause"
] | null | null | null | genshimacro/__init__.py | ejucovy/trac-GenshiMacro | d9da1a50f6d73904fdda2e9e7cbc4c056b929267 | [
"BSD-3-Clause"
] | null | null | null | from genshi.template import MarkupTemplate
from trac.core import *
from trac.web.chrome import Chrome
from trac.wiki.macros import WikiMacroBase
| 29.923077 | 75 | 0.742931 |
a13d6b6264ad2abf3168edf6c36418b077a9e067 | 2,110 | py | Python | scripts/WIPS2015/WIPS_anydiag_time.py | eclee25/flu-SDI-exploratory-age | 2f5a4d97b84d2116e179e85fe334edf4556aa946 | [
"MIT"
] | 3 | 2018-03-29T23:02:43.000Z | 2020-08-10T12:01:50.000Z | scripts/WIPS2015/WIPS_anydiag_time.py | eclee25/flu-SDI-exploratory-age | 2f5a4d97b84d2116e179e85fe334edf4556aa946 | [
"MIT"
] | null | null | null | scripts/WIPS2015/WIPS_anydiag_time.py | eclee25/flu-SDI-exploratory-age | 2f5a4d97b84d2116e179e85fe334edf4556aa946 | [
"MIT"
] | null | null | null | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 10/11/14
###Function: Any diagnosis per 100,000 population vs. week number for flu weeks (wks 40-20). Population size is from the calendar year of the week of calculation.
###Import data: SQL_export/anydiag_outpatient_allweeks.csv
### branch from v2/Supp_anydiag_time.py
###Command Line: python WIPS_anydiag_time.py
##############################################
### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
### packages/modules ###
import csv
import matplotlib.pyplot as plt
## local modules ##
import functions_v5 as fxn
### data structures ###
### functions ###
### data files ###
anydiagin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/anydiag_allweeks_outpatient.csv','r')
anydiagin.readline() # rm header
anydiag = csv.reader(anydiagin, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons
fw = fxn.gp_fluweeks
sl = fxn.gp_seasonlabels
colvec = fxn.gp_colors
wklab = fxn.gp_weeklabels
fs = 24
fssml = 16
### program ###
# dict_wk[week] = seasonnum, dict_any[week] = visits per 100,000 in US population in calendar year of week,d_any53ls[seasonnum] = [anydiag wk 40 per 100000, anydiag wk 41 per 100000,...]
d_wk, d_any, d_any53ls = fxn.week_anydiag_processing(anydiag)
# plot values: one line per season (xrange => this script targets Python 2)
for s in ps:
    plt.plot(xrange(53), d_any53ls[s], marker = fxn.gp_marker, color = colvec[s-2], label = sl[s-2], linewidth = fxn.gp_linewidth)
# shaded vertical bands over week indexes 7-8 and 12-14
# (presumably notable calendar periods -- TODO confirm with author)
plt.fill([7, 8, 8, 7], [0, 0, 4000, 4000], facecolor='grey', alpha=0.4)
plt.fill([12, 14, 14, 12], [0, 0, 4000, 4000], facecolor='grey', alpha=0.4)
# restrict the x-axis to flu weeks and label every 5th week
plt.xlim([0, fw-1])
plt.xticks(range(53)[::5], wklab[::5])
plt.ylim([0, 4000])
plt.xlabel('Week Number', fontsize=fs)
plt.ylabel('Outpatient Visit per 100,000', fontsize=fs)
plt.legend(loc='upper right')
# write the figure to disk instead of showing it interactively
plt.savefig('/home/elee/Dropbox/Department/Presentations/2015_WIPS/Figures/anydiag_time.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
a13d78de55aa35e5195b6d00dd9af4b319aa1688 | 5,290 | py | Python | misc/src/scheduler_plugin.py | hivesolutions/colony_plugins | cfd8fb2ac58037e01002966704b8a642feb37895 | [
"Apache-1.1"
] | 1 | 2016-10-30T09:51:06.000Z | 2016-10-30T09:51:06.000Z | misc/src/scheduler_plugin.py | hivesolutions/colony_plugins | cfd8fb2ac58037e01002966704b8a642feb37895 | [
"Apache-1.1"
] | 1 | 2015-12-29T18:51:07.000Z | 2015-12-29T18:51:07.000Z | misc/src/scheduler_plugin.py | hivesolutions/colony_plugins | cfd8fb2ac58037e01002966704b8a642feb37895 | [
"Apache-1.1"
] | 1 | 2018-01-26T12:54:13.000Z | 2018-01-26T12:54:13.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Colony Framework
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Colony Framework.
#
# Hive Colony Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Colony Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Colony Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "Joo Magalhes <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import colony
def get_task_class(self):
    """
    Retrieves the class that represents a task in the
    current scope, delegating the lookup to the
    underlying system object.

    :rtype: Class
    :return: The task class for the current scope.
    """

    # delegate to the plugin's system implementation
    system = self.system
    return system.get_task_class()
| 35.986395 | 112 | 0.709074 |
a13e0be2220cebb57badaee86dd77ccad221768a | 3,458 | py | Python | source/vistas/ui/controls/gl_camera.py | VISTAS-IVES/pyvistas | 2de1541c0fb40ccbac4014af758ff329ba0677b1 | [
"BSD-3-Clause"
] | 1 | 2017-08-26T20:18:38.000Z | 2017-08-26T20:18:38.000Z | source/vistas/ui/controls/gl_camera.py | VISTAS-IVES/pyvistas | 2de1541c0fb40ccbac4014af758ff329ba0677b1 | [
"BSD-3-Clause"
] | 89 | 2017-06-10T21:03:16.000Z | 2022-03-11T23:19:56.000Z | source/vistas/ui/controls/gl_camera.py | VISTAS-IVES/pyvistas | 2de1541c0fb40ccbac4014af758ff329ba0677b1 | [
"BSD-3-Clause"
] | 1 | 2019-03-05T21:44:29.000Z | 2019-03-05T21:44:29.000Z | import os
import wx
from vistas.core.graphics.camera_interactor import *
from vistas.core.graphics.overlay import BasicOverlayButton
from vistas.core.paths import get_resources_directory
from vistas.ui.events import CameraChangedEvent
from vistas.ui.utils import get_main_window
| 34.237624 | 114 | 0.669751 |
a13f0a11b4555fcfbf9c924b7e7de9f674331ec4 | 8,678 | py | Python | src/_sever_qt4.py | Joy917/fast-transfer | dfbcf5c4239da3d550b721500dff05fb6d40b756 | [
"MIT"
] | null | null | null | src/_sever_qt4.py | Joy917/fast-transfer | dfbcf5c4239da3d550b721500dff05fb6d40b756 | [
"MIT"
] | null | null | null | src/_sever_qt4.py | Joy917/fast-transfer | dfbcf5c4239da3d550b721500dff05fb6d40b756 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\SVNzhangy\fast-transfer\src\_sever.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
try:
_encoding = QtGui.QApplication.UnicodeUTF8
except AttributeError:
| 58.635135 | 110 | 0.735999 |
a1423e6a2572b095e511d07a5f47171e04381471 | 4,579 | py | Python | aleph/tests/test_documents_api.py | gazeti/aleph | f6714c4be038471cfdc6408bfe88dc9e2ed28452 | [
"MIT"
] | 1 | 2017-07-28T12:54:09.000Z | 2017-07-28T12:54:09.000Z | aleph/tests/test_documents_api.py | gazeti/aleph | f6714c4be038471cfdc6408bfe88dc9e2ed28452 | [
"MIT"
] | 7 | 2017-08-16T12:49:23.000Z | 2018-02-16T10:22:11.000Z | aleph/tests/test_documents_api.py | gazeti/aleph | f6714c4be038471cfdc6408bfe88dc9e2ed28452 | [
"MIT"
] | 6 | 2017-07-26T12:29:53.000Z | 2017-08-18T09:35:50.000Z | import json
from aleph.tests.util import TestCase
| 37.227642 | 74 | 0.592487 |
a143abc8dbbd62332b147ee1258deecef9896d32 | 649 | py | Python | acropolis.py | andreasa13/Flask_WebApp_TripAdvisor | ea77291280676128b224da02c4938a42bbbb5200 | [
"MIT"
] | null | null | null | acropolis.py | andreasa13/Flask_WebApp_TripAdvisor | ea77291280676128b224da02c4938a42bbbb5200 | [
"MIT"
] | 1 | 2021-12-13T20:52:54.000Z | 2021-12-13T20:52:54.000Z | acropolis.py | andreasagap/Flask_WebApp_TripAdvisor | 06fd682248ea12ee440834719c113ec974635dd0 | [
"MIT"
] | 1 | 2021-06-09T18:29:33.000Z | 2021-06-09T18:29:33.000Z | import json
import pandas as pd
from geopy.geocoders import Nominatim
| 28.217391 | 82 | 0.70416 |
a146f1a5836a0723e015b88316d930723a68dc51 | 1,464 | py | Python | share/pegasus/init/split/daxgen.py | fengggli/pegasus | b68f588d90eb2b832086ed627d61414691f8ba95 | [
"Apache-2.0"
] | null | null | null | share/pegasus/init/split/daxgen.py | fengggli/pegasus | b68f588d90eb2b832086ed627d61414691f8ba95 | [
"Apache-2.0"
] | null | null | null | share/pegasus/init/split/daxgen.py | fengggli/pegasus | b68f588d90eb2b832086ed627d61414691f8ba95 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import pwd
import sys
import time
from Pegasus.DAX3 import *
# The name of the DAX file is the first (and only) command line argument
if len(sys.argv) != 2:
    sys.stderr.write("Usage: %s DAXFILE\n" % (sys.argv[0]))
    sys.exit(1)
daxfile = sys.argv[1]
# login name of the user generating the workflow
USER = pwd.getpwuid(os.getuid())[0]
# Create an abstract DAG
dax = ADAG("split")
# Add some workflow-level metadata
dax.metadata("creator", "%s@%s" % (USER, os.uname()[1]))
dax.metadata("created", time.ctime())
webpage = File("pegasus.html")
# the split job that splits the webpage into 100-line chunks
# named part.a, part.b, ... (single-letter suffixes: -a 1)
split = Job("split")
split.addArguments("-l","100","-a","1",webpage,"part.")
split.uses(webpage, link=Link.INPUT)
# associate the label with the job. all jobs with same label
# are run with PMC when doing job clustering
split.addProfile( Profile("pegasus","label","p1"))
dax.addJob(split)
# we do a parameter sweep on the first 4 chunks created:
# each chunk gets its own line-count (wc -l) job
for c in "abcd":
    part = File("part.%s" % c)
    split.uses(part, link=Link.OUTPUT, transfer=False, register=False)
    count = File("count.txt.%s" % c)
    wc = Job("wc")
    wc.addProfile( Profile("pegasus","label","p1"))
    wc.addArguments("-l",part)
    wc.setStdout(count)
    wc.uses(part, link=Link.INPUT)
    wc.uses(count, link=Link.OUTPUT, transfer=True, register=True)
    dax.addJob(wc)
    # each wc job depends on the split job that produces its chunk
    dax.depends(wc, split)
# serialize the abstract workflow to the requested DAX file (Python 2 print below)
f = open(daxfile, "w")
dax.writeXML(f)
f.close()
print "Generated dax %s" %daxfile
| 25.684211 | 70 | 0.672814 |
a147e22d5aeaabe35ccc4c56ea5539f536e24407 | 3,685 | py | Python | lbrynet/wallet/ledger.py | ttkopec/lbry | 03415415ed397730e6f691f527f51b429a834ed5 | [
"MIT"
] | null | null | null | lbrynet/wallet/ledger.py | ttkopec/lbry | 03415415ed397730e6f691f527f51b429a834ed5 | [
"MIT"
] | 110 | 2018-11-26T05:41:35.000Z | 2021-08-03T15:37:20.000Z | lbrynet/wallet/ledger.py | ttkopec/lbry | 03415415ed397730e6f691f527f51b429a834ed5 | [
"MIT"
] | 1 | 2018-09-20T22:15:59.000Z | 2018-09-20T22:15:59.000Z | import logging
from six import int2byte
from binascii import unhexlify
from twisted.internet import defer
from .resolve import Resolver
from lbryschema.error import URIParseError
from lbryschema.uri import parse_lbry_uri
from torba.baseledger import BaseLedger
from .account import Account
from .network import Network
from .database import WalletDatabase
from .transaction import Transaction
from .header import Headers, UnvalidatedHeaders
log = logging.getLogger(__name__)
| 34.12037 | 101 | 0.735414 |
a14898fc9eb718d11bd7d8fbc8f0101300add0a6 | 297 | py | Python | MATA37-ILP 2021.2/JUDE/Lista 3 e Prova 3 - Loop/lista3_D.py | jeffersonraimon/Programming-UFBA | 6a6803bfd0e6aa72f8c2b9ffa120792d73c727ca | [
"MIT"
] | 1 | 2021-12-09T12:55:56.000Z | 2021-12-09T12:55:56.000Z | MATA37-ILP 2021.2/JUDE/Lista 3 e Prova 3 - Loop/lista3_D.py | jeffersonraimon/Programming-UFBA | 6a6803bfd0e6aa72f8c2b9ffa120792d73c727ca | [
"MIT"
] | null | null | null | MATA37-ILP 2021.2/JUDE/Lista 3 e Prova 3 - Loop/lista3_D.py | jeffersonraimon/Programming-UFBA | 6a6803bfd0e6aa72f8c2b9ffa120792d73c727ca | [
"MIT"
] | 1 | 2022-02-21T12:01:53.000Z | 2022-02-21T12:01:53.000Z | T = int(input())
P = int(input())
controle = 0 #Uso para guardar o valor maior que o limite
while P != 0:
P = int(input())
if P >= T:
controle = 1 #coloquei 1 so pra ser diferente de 0
if controle == 1:
print("ALARME")
else:
print("O Havai pode dormir tranquilo") | 14.142857 | 58 | 0.592593 |
a1489d0338a6be1fe32c5e1421435901d7f812f7 | 1,387 | py | Python | dopamine/fetch_cam_train/fetch_cam/test/fetch_dis_error.py | kbehouse/dopamine | 1922481d9c23d6c3cf3ee3ec06e613c6eb87cbc1 | [
"Apache-2.0"
] | null | null | null | dopamine/fetch_cam_train/fetch_cam/test/fetch_dis_error.py | kbehouse/dopamine | 1922481d9c23d6c3cf3ee3ec06e613c6eb87cbc1 | [
"Apache-2.0"
] | null | null | null | dopamine/fetch_cam_train/fetch_cam/test/fetch_dis_error.py | kbehouse/dopamine | 1922481d9c23d6c3cf3ee3ec06e613c6eb87cbc1 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import gym
import time
from matplotlib import pyplot as plt
from fetch_cam import FetchCameraEnv
from fsm import FSM
dis_tolerance = 0.0001 # 1mm
env = FetchCameraEnv()
obs = env.reset()
done = False
want_pos = (obs['eeinfo'][0]).copy()
ori_pos = (obs['eeinfo'][0]).copy()
print('---ori_pos = ' , obs['eeinfo'][0],'----')
step = 0
robot_step = 0
s_time = time.time()
while True:
# env.render()
now_pos = obs['eeinfo'][0]
dis = np.linalg.norm(now_pos - want_pos)
print('dis = ',dis)
if dis < dis_tolerance:
x, y, z, g = 0.01, 0.01, 0.01, 0.
want_pos = obs['eeinfo'][0] + np.array([x, y, z])
print('want_pos =' , want_pos)
step +=1
if step>=11:
break
else:
x, y, z, g = 0., 0.0, 0., 0.
a = np.array([x, y, z, g])
obs, r, done, info = env.step(a)
robot_step +=1
if abs(x) > 0 or abs(y) > 0 or abs(z) > 0 :
diff_x = obs['eeinfo'][0] - want_pos
# print("pre_obs['eeinfo'][0] = ", pre_x)
print("obs['eeinfo'][0] = {}, diff_x={}".format( obs['eeinfo'][0], diff_x) )
# time.sleep(0.5)
print('---final_pos = ' , obs['eeinfo'][0],'----')
print('---pos_diff = ' , obs['eeinfo'][0] - ori_pos,'----')
print('step = {}, robot_step={}'.format(step, robot_step))
print('use time = {:.2f}'.format(time.time()-s_time)) | 24.333333 | 85 | 0.539293 |
a1490edf966fa802ac0a01963e5d3d0e3138778b | 5,091 | py | Python | pyHarvest_build_151223/pyHarvest_Analyse_Data_v1.py | bl305/pyHarvest | d4c62d443ca657f9d31245c3c3f24c741cf2ae0b | [
"CC0-1.0"
] | null | null | null | pyHarvest_build_151223/pyHarvest_Analyse_Data_v1.py | bl305/pyHarvest | d4c62d443ca657f9d31245c3c3f24c741cf2ae0b | [
"CC0-1.0"
] | null | null | null | pyHarvest_build_151223/pyHarvest_Analyse_Data_v1.py | bl305/pyHarvest | d4c62d443ca657f9d31245c3c3f24c741cf2ae0b | [
"CC0-1.0"
] | null | null | null | # coding=utf-8
from packages import *
import os
#SET PARAMETERS
myverbosity=-1
mymaxencode=5
TXT_filetypes=(
#simple text files
'txt','lst',
#config files
'ini','cfg',
#programming languages
'c','cpp',
#scripts
'vbs','py','pl')
XLS_filetypes=('xls','xlsx')
DOC_filetypes=('doc',)
DOCX_filetypes=('docx',)
PDF_filetypes=('pdf',)
#TEMPLATE FILES
myXLSpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\XLS\test.xlsx'
myTXTpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\TXT\normal.txt'
#myTXTpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\TXT\unicode.txt'
#myTXTpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\TXT\unicode_big.txt'
#myTXTpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\TXT\unicode_utf8.txt'
#myTXTpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\TXT\x.txt'
#myPDFpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\PDF\test.pdf'
#myPDFpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\PDF\xtest.pdf'
myPDFpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\PDF\ztest.pdf'
myDOCpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\DOC\xtest.doc'
myDOCXpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\DOC\xtest.docx'
mydirpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles'
#mydirpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\DataGathered'
#mypath=myTXTpath
#mypath=myXLSpath
#mypath=myPDFpath
#mypath=myDOCpath
#mypath=myDOCXpath
#PROGRAM START
#print "##########################Main Program Started##########################"
#ANALYSE A SPECIFIC FILE
#process_myfile(mypath)
#ANALYSE ALL FILES IN A SPECIFIED DIRECTORY
filesindir=process_localdir(mydirpath,1)
Analysisconn, Analysisc = db_connect(Analysis_sqlite_file)
create_host_db(Analysisconn, Analysis_create_script,print_out=False)
filecount=len(filesindir)
filecounter=1
if filecount==0:
print "No files to analyse"
for fn in range(len(filesindir)):
mytext=process_myfile(filesindir[fn])
print "Analysing file %d/%d %s"%(filecounter,filecount,filesindir[fn])
filecounter+=1
if mytext:
ftype=mytext[1]
mytextdata=mytext[0]
insert_analysis_data(Analysisc,Analysis_table_name,mytextdata,ftype,print_out=False)
db_commit(Analysisconn)
pass
db_commit(Analysisconn)
db_close(Analysisconn)
print (raw_input('Press Enter to Exit!')) | 36.891304 | 144 | 0.792772 |
a1499e6c4207a38f095d2e507e2c6116418ae733 | 2,732 | py | Python | functions/update_modeling_results.py | zheng-da/covid19-severity-prediction | 205ab5aa13a5e91a4c23ccd73e65939e4003626b | [
"MIT"
] | 2 | 2020-05-15T14:42:02.000Z | 2020-05-22T08:51:47.000Z | functions/update_modeling_results.py | rahul263-stack/covid19-severity-prediction | f581adb2fccb12d5ab3f3c59ee120f484703edf5 | [
"MIT"
] | null | null | null | functions/update_modeling_results.py | rahul263-stack/covid19-severity-prediction | f581adb2fccb12d5ab3f3c59ee120f484703edf5 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from os.path import join as oj
import os
import pygsheets
import pandas as pd
import sys
import inspect
from datetime import datetime, timedelta
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
sys.path.append(parentdir + '/modeling')
import load_data
from fit_and_predict import fit_and_predict_ensemble
from functions import merge_data
from viz import viz_interactive
import matplotlib.pyplot as plt
import plotly.express as px
import plotly
if __name__ == '__main__':
    # Load the county-level data set from the repository's data folder.
    print('loading data...')
    NUM_DAYS_LIST = [1, 2, 3, 4, 5, 6, 7]
    df_county = load_data.load_county_level(data_dir=oj(parentdir, 'data'))
    # Fit the ensemble in evaluation mode for a 3-day deaths prediction
    # (exact semantics of num_days_in_past live in fit_and_predict_ensemble).
    num_days_in_past = 3
    output_key = f'Predicted Deaths {num_days_in_past}-day'
    df_county = fit_and_predict_ensemble(df_county,
                                         outcome='deaths',
                                         mode='eval_mode',
                                         target_day=np.array([num_days_in_past]),
                                         output_key=output_key)
    # Each prediction is a length-1 sequence; unwrap it to a scalar column.
    df_county[output_key] = [v[0] for v in df_county[output_key].values]
    predictions_plot(df_county, NUM_DAYS_LIST, num_days_in_past, output_key)
a14c5c58cf2881b62cfe95e034f42cf5c934399c | 4,582 | py | Python | zun/tests/unit/common/test_rpc.py | wanghuiict/zun | 2f4a3a2ba06d7ca83002418d4003ee5dece70952 | [
"Apache-2.0"
] | 83 | 2016-09-14T22:06:26.000Z | 2022-01-27T03:49:52.000Z | zun/tests/unit/common/test_rpc.py | wanghuiict/zun | 2f4a3a2ba06d7ca83002418d4003ee5dece70952 | [
"Apache-2.0"
] | 2 | 2017-06-22T21:58:47.000Z | 2019-04-10T03:17:44.000Z | zun/tests/unit/common/test_rpc.py | wanghuiict/zun | 2f4a3a2ba06d7ca83002418d4003ee5dece70952 | [
"Apache-2.0"
] | 54 | 2016-09-29T10:16:02.000Z | 2022-01-28T19:12:49.000Z | # Copyright 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_serialization import jsonutils as json
from zun.common import context
from zun.common import rpc
from zun.tests import base
| 34.19403 | 78 | 0.654518 |
a14da1829b09a4bac353d3762281e3ef271e99d4 | 26,935 | py | Python | skidl/Pin.py | arjenroodselaar/skidl | 0bf801bd3b74e6ef94bd9aa1b68eef756b568276 | [
"MIT"
] | null | null | null | skidl/Pin.py | arjenroodselaar/skidl | 0bf801bd3b74e6ef94bd9aa1b68eef756b568276 | [
"MIT"
] | null | null | null | skidl/Pin.py | arjenroodselaar/skidl | 0bf801bd3b74e6ef94bd9aa1b68eef756b568276 | [
"MIT"
] | 1 | 2020-09-21T23:31:41.000Z | 2020-09-21T23:31:41.000Z | # -*- coding: utf-8 -*-
# MIT license
#
# Copyright (C) 2018 by XESS Corp.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Handles part pins.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import range, super
from collections import defaultdict
from copy import copy
from enum import IntEnum
from future import standard_library
from .Alias import *
from .baseobj import SkidlBaseObject
from .defines import *
from .logger import erc_logger, logger
from .utilities import *
standard_library.install_aliases()
__nonzero__ = __bool__ # Python 2 compatibility.
##############################################################################
##############################################################################
##############################################################################
# This will make all the Pin.drive members into attributes of the Pin class
# so things like Pin.INPUT will work as well as Pin.types.INPUT.
Pin.add_type()

# Create the pin conflict matrix as a defaultdict of defaultdicts which
# returns OK if the given element is not in the matrix. This would indicate
# the pin types used to index that element have no contention if connected.
conflict_matrix = defaultdict(lambda: defaultdict(lambda: [OK, ""]))

# Table of all non-OK pin-type combinations: (type_a, type_b, level, message).
# Each unordered pair is listed exactly once; the loop below installs it in
# both (a, b) and (b, a) orientations, replacing the dozens of individual
# hand-written assignments plus the separate symmetry fill-in loop.
_pin_conflicts = (
    (Pin.types.OUTPUT, Pin.types.OUTPUT, ERROR, ""),
    (Pin.types.TRISTATE, Pin.types.OUTPUT, WARNING, ""),
    (Pin.types.UNSPEC, Pin.types.INPUT, WARNING, ""),
    (Pin.types.UNSPEC, Pin.types.OUTPUT, WARNING, ""),
    (Pin.types.UNSPEC, Pin.types.BIDIR, WARNING, ""),
    (Pin.types.UNSPEC, Pin.types.TRISTATE, WARNING, ""),
    (Pin.types.UNSPEC, Pin.types.PASSIVE, WARNING, ""),
    (Pin.types.UNSPEC, Pin.types.PULLUP, WARNING, ""),
    (Pin.types.UNSPEC, Pin.types.PULLDN, WARNING, ""),
    (Pin.types.UNSPEC, Pin.types.UNSPEC, WARNING, ""),
    (Pin.types.PWRIN, Pin.types.TRISTATE, WARNING, ""),
    (Pin.types.PWRIN, Pin.types.UNSPEC, WARNING, ""),
    (Pin.types.PWROUT, Pin.types.OUTPUT, ERROR, ""),
    (Pin.types.PWROUT, Pin.types.BIDIR, WARNING, ""),
    (Pin.types.PWROUT, Pin.types.TRISTATE, ERROR, ""),
    (Pin.types.PWROUT, Pin.types.UNSPEC, WARNING, ""),
    (Pin.types.PWROUT, Pin.types.PWROUT, ERROR, ""),
    (Pin.types.OPENCOLL, Pin.types.OUTPUT, ERROR, ""),
    (Pin.types.OPENCOLL, Pin.types.TRISTATE, ERROR, ""),
    (Pin.types.OPENCOLL, Pin.types.UNSPEC, WARNING, ""),
    (Pin.types.OPENCOLL, Pin.types.PWROUT, ERROR, ""),
    (Pin.types.OPENEMIT, Pin.types.OUTPUT, ERROR, ""),
    (Pin.types.OPENEMIT, Pin.types.BIDIR, WARNING, ""),
    (Pin.types.OPENEMIT, Pin.types.TRISTATE, WARNING, ""),
    (Pin.types.OPENEMIT, Pin.types.UNSPEC, WARNING, ""),
    (Pin.types.OPENEMIT, Pin.types.PWROUT, ERROR, ""),
    # A no-connect pin must never be connected to anything at all.
    (Pin.types.NOCONNECT, Pin.types.INPUT, ERROR, ""),
    (Pin.types.NOCONNECT, Pin.types.OUTPUT, ERROR, ""),
    (Pin.types.NOCONNECT, Pin.types.BIDIR, ERROR, ""),
    (Pin.types.NOCONNECT, Pin.types.TRISTATE, ERROR, ""),
    (Pin.types.NOCONNECT, Pin.types.PASSIVE, ERROR, ""),
    (Pin.types.NOCONNECT, Pin.types.PULLUP, ERROR, ""),
    (Pin.types.NOCONNECT, Pin.types.PULLDN, ERROR, ""),
    (Pin.types.NOCONNECT, Pin.types.UNSPEC, ERROR, ""),
    (Pin.types.NOCONNECT, Pin.types.PWRIN, ERROR, ""),
    (Pin.types.NOCONNECT, Pin.types.PWROUT, ERROR, ""),
    (Pin.types.NOCONNECT, Pin.types.OPENCOLL, ERROR, ""),
    (Pin.types.NOCONNECT, Pin.types.OPENEMIT, ERROR, ""),
    (Pin.types.NOCONNECT, Pin.types.NOCONNECT, ERROR, ""),
    (Pin.types.PULLUP, Pin.types.PULLUP, WARNING, "Multiple pull-ups connected."),
    (Pin.types.PULLDN, Pin.types.PULLDN, WARNING, "Multiple pull-downs connected."),
    (Pin.types.PULLUP, Pin.types.PULLDN, ERROR, "Pull-up connected to pull-down."),
)

# Install each conflict symmetrically. Both orientations deliberately share
# a single list object, matching the aliasing that the original fill-in loop
# (conflict_matrix[r][c] = conflict_matrix[c][r]) produced.
for _type_a, _type_b, _level, _msg in _pin_conflicts:
    _entry = [_level, _msg]
    conflict_matrix[_type_a][_type_b] = _entry
    conflict_matrix[_type_b][_type_a] = _entry
| 34.57638 | 87 | 0.569742 |
a14dc76d87023f8e5ab3f4a7babd9708c41bf004 | 34,030 | py | Python | Project1/cl1_p1_wsd.py | Sanghyun-Hong/NLPProjects | 9f81fa680946648f64ac25e5ca8197e9f3386deb | [
"MIT"
] | null | null | null | Project1/cl1_p1_wsd.py | Sanghyun-Hong/NLPProjects | 9f81fa680946648f64ac25e5ca8197e9f3386deb | [
"MIT"
] | null | null | null | Project1/cl1_p1_wsd.py | Sanghyun-Hong/NLPProjects | 9f81fa680946648f64ac25e5ca8197e9f3386deb | [
"MIT"
] | null | null | null | import numpy as np
import operator
# SHHONG: custom modules imported
import json
import random
import itertools
from math import pow, log
from collections import Counter
import os
import sys
sys.stdout = open(os.devnull, 'w')
"""
CMSC723 / INST725 / LING723 -- Fall 2016
Project 1: Implementing Word Sense Disambiguation Systems
"""
"""
read one of train, dev, test subsets
subset - one of train, dev, test
output is a tuple of three lists
labels: one of the 6 possible senses <cord, division, formation, phone, product, text >
targets: the index within the text of the token to be disambiguated
texts: a list of tokenized and normalized text input (note that there can be multiple sentences)
"""
import nltk
#### added dev_manual to the subset of allowable files
"""
computes f1-score of the classification accuracy
gold_labels - is a list of the gold labels
predicted_labels - is a list of the predicted labels
output is a tuple of the micro averaged score and the macro averaged score
"""
import sklearn.metrics
#### changed method name from eval because of naming conflict with python keyword
"""
a helper method that takes a list of predictions and writes them to a file (1 prediction per line)
predictions - list of predictions (strings)
file_name - name of the output file
"""
"""
Trains a naive bayes model with bag of words features and computes the accuracy on the test set
train_texts, train_targets, train_labels are as described in read_dataset above
The same thing applies to the reset of the parameters.
"""
## extract all the distinct words from a set of texts
## return a dictionary {word:index} that maps each word to a unique index
## extract all distinct labels from a dataset
## return a dictionary {label:index} that maps each label to a unique index
## construct a bow feature matrix for a set of instances
## the returned matrix has the size NUM_INSTANCES X NUM_FEATURES
## compute the feature vector for a set of words and a given label
## the features are computed as described in Slide #19 of:
## http://www.cs.umd.edu/class/fall2016/cmsc723/slides/slides_02.pdf
## get the predicted label for a given instance
## the predicted label is the one with the highest dot product of theta*feature_vector
## return the predicted label, the dot product scores for all labels and the features computed for all labels for that instance
## train the perceptron by iterating over the entire training dataset
## the algorithm is an implementation of the pseudocode from Slide #23 of:
## http://www.cs.umd.edu/class/fall2016/cmsc723/slides/slides_03.pdf
## return the predictions of the perceptron on a test set
"""
Trains a perceptron model with bag of words features and computes the accuracy on the test set
train_texts, train_targets, train_labels are as described in read_dataset above
The same thing applies to the reset of the parameters.
"""
"""
Trains a naive bayes model with bag of words features + two additional features
and computes the accuracy on the test set
train_texts, train_targets, train_labels are as described in read_dataset above
The same thing applies to the reset of the parameters.
"""
## this feature is just a random number generated for each instance
## this feature encodes the number of distinct words in each instance
"""
Trains a perceptron model with bag of words features + two additional features
and computes the accuracy on the test set
train_texts, train_targets, train_labels are as described in read_dataset above
The same thing applies to the reset of the parameters.
"""
# Part 1.1
# Part 1.2
"""
Main (able to change the classifier to other ones)
"""
if __name__ == "__main__":
    # Read, tokenize and normalize the train/dev/test splits of the
    # word-sense-disambiguation dataset.
    train_labels, train_targets, train_texts = read_dataset('train')
    dev_labels, dev_targets, dev_texts = read_dataset('dev')
    test_labels, test_targets, test_texts = read_dataset('test')
    # Run the bag-of-words perceptron classifier end to end.
    test_scores = run_bow_perceptron_classifier(train_texts, train_targets, train_labels,
        dev_texts, dev_targets, dev_labels, test_texts, test_targets, test_labels)
    # NOTE(review): sys.stdout was redirected to os.devnull at module level,
    # so this Python-2 print statement produces no visible output.
    print test_scores
| 43.075949 | 211 | 0.614634 |
a14fb8c57a2911a94e991dd47b577ec949e53771 | 640 | py | Python | Week 7 Web pages/Task05.py | retverd/python_hse | cb9bfb092c1cf68ae0c53b9919ca24a71a8cbf88 | [
"MIT"
] | null | null | null | Week 7 Web pages/Task05.py | retverd/python_hse | cb9bfb092c1cf68ae0c53b9919ca24a71a8cbf88 | [
"MIT"
] | null | null | null | Week 7 Web pages/Task05.py | retverd/python_hse | cb9bfb092c1cf68ae0c53b9919ca24a71a8cbf88 | [
"MIT"
] | null | null | null | #
# https://stepik.org/media/attachments/lesson/209717/1.html
#
# Determine which of the two words, "Python" or "C++", occurs more often in
# the downloaded page. (Original non-ASCII comment was garbled; reconstructed.)
from urllib.request import urlopen
# Retrieve the page and report which language name appears more often in it.
response = urlopen('https://stepik.org/media/attachments/lesson/209717/1.html')
page_text = response.read().decode('utf-8')
cpp_count = page_text.count('C++')
python_count = page_text.count('Python')
print('C++' if cpp_count > python_count else 'Python')
| 30.47619 | 117 | 0.739063 |
a150c0cbc599ebc411b4f81c6fa3b0405cf1395b | 31,794 | py | Python | tests/test_bio/test_cell.py | jfaccioni/clovars | 64e24286a2dc185490384aeb08027d88eb9462c4 | [
"MIT"
] | null | null | null | tests/test_bio/test_cell.py | jfaccioni/clovars | 64e24286a2dc185490384aeb08027d88eb9462c4 | [
"MIT"
] | null | null | null | tests/test_bio/test_cell.py | jfaccioni/clovars | 64e24286a2dc185490384aeb08027d88eb9462c4 | [
"MIT"
] | null | null | null | import unittest
from unittest import mock
from unittest.mock import MagicMock
from clovars.abstract import Circle
from clovars.bio import Cell, Treatment
from clovars.scientific import ConstantCellSignal, CellSignal, GaussianCellSignal, Gaussian
from clovars.utils import SimulationError
from tests import NotEmptyTestCase
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| 53.345638 | 120 | 0.686482 |
a151ad0affbfcc7813c745ba76d87908fc3a227a | 2,959 | py | Python | nutsml/examples/pytorch_/mnist/mlp_train.py | maet3608/nuts-ml | 2551612a47bc6e9efa534eda0db5d8c5def51887 | [
"Apache-2.0"
] | 39 | 2017-02-07T03:22:41.000Z | 2021-11-24T20:27:57.000Z | nutsml/examples/pytorch_/mnist/mlp_train.py | maet3608/nuts-ml | 2551612a47bc6e9efa534eda0db5d8c5def51887 | [
"Apache-2.0"
] | 19 | 2017-02-13T22:22:30.000Z | 2019-01-31T04:13:39.000Z | nutsml/examples/pytorch_/mnist/mlp_train.py | maet3608/nuts-ml | 2551612a47bc6e9efa534eda0db5d8c5def51887 | [
"Apache-2.0"
] | 13 | 2017-06-01T13:44:54.000Z | 2020-09-08T04:51:36.000Z | """
.. module:: cnn_train
:synopsis: Example nuts-ml pipeline for training a MLP on MNIST
"""
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import nutsflow as nf
import nutsml as nm
import numpy as np
from nutsml.network import PytorchNetwork
from utils import download_mnist, load_mnist
def accuracy(y_true, y_pred):
    """Compute classification accuracy in percent.

    The per-sample prediction is the argmax of its score vector.  The
    original implementation imported sklearn inside the function just to
    call accuracy_score, which for this use case is simply the fraction of
    matching labels -- computed directly here, avoiding the heavy import.

    :param y_true: sequence of integer class labels.
    :param y_pred: sequence of score vectors (anything with .argmax()).
    :return: percentage (0..100) of samples whose argmax matches the label.
    """
    predicted = [yp.argmax() for yp in y_pred]
    correct = sum(1 for yt, yp in zip(y_true, predicted) if yt == yp)
    # Raises ZeroDivisionError on empty input, as the original would also
    # fail there.
    return 100.0 * correct / len(predicted)
def evaluate(network, x, y):
    """Evaluate network performance (here accuracy).

    :param network: nuts-ml network wrapper providing an evaluate() nut.
    :param x: iterable of input vectors.
    :param y: iterable of integer class labels.
    :return: accuracy in percent over the given samples.
    """
    metrics = [accuracy]
    # Batches of 64: column 0 is the float32 input vector, column 1 the
    # int64 target label.
    build_batch = (nm.BuildBatch(64)
                   .input(0, 'vector', 'float32')
                   .output(1, 'number', 'int64'))
    acc = zip(x, y) >> build_batch >> network.evaluate(metrics)
    return acc
def train(network, epochs=3):
    """Train network for given number of epochs.

    Downloads/loads MNIST, then performs `epochs` passes over the training
    set, printing the mean loss and the train/test accuracy after each pass.

    :param network: nuts-ml network wrapper providing a train() nut.
    :param epochs: number of passes over the training data.
    """
    print('loading data...')
    filepath = download_mnist()
    x_train, y_train, x_test, y_test = load_mnist(filepath)
    plot = nm.PlotLines(None, every_sec=0.2)  # live plot of per-batch loss
    # Batches of 128: column 0 is the float32 input vector, column 1 the
    # int64 target label.
    build_batch = (nm.BuildBatch(128)
                   .input(0, 'vector', 'float32')
                   .output(1, 'number', 'int64'))
    for epoch in range(epochs):
        print('epoch', epoch + 1)
        # Shuffle within a 1000-sample buffer, batch, train, plot each
        # batch loss, and collect all losses for the epoch summary.
        losses = (zip(x_train, y_train) >> nf.PrintProgress(x_train) >>
                  nf.Shuffle(1000) >> build_batch >>
                  network.train() >> plot >> nf.Collect())
        acc_test = evaluate(network, x_test, y_test)
        acc_train = evaluate(network, x_train, y_train)
        print('train loss : {:.6f}'.format(np.mean(losses)))
        print('train acc : {:.1f}'.format(acc_train))
        print('test acc : {:.1f}'.format(acc_test))
if __name__ == '__main__':
    print('creating model...')
    # Prefer the GPU when one is available.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = Model(device)  # NOTE(review): Model is defined elsewhere in this file.
    network = PytorchNetwork(model)
    # network.load_weights()
    network.print_layers((28 * 28,))  # MNIST images flattened to 784 inputs
    print('training network...')
    train(network, epochs=3)
| 31.147368 | 76 | 0.613045 |
a152a29b6edc8d593cb4451e6903d733b234650c | 2,317 | py | Python | get_image.py | DanielJamesEvans/spectrophotometer_code | 10957590a4b49fe91ec6a0111ef83da63cc4ee67 | [
"MIT"
] | 3 | 2019-08-31T16:43:10.000Z | 2019-10-07T20:35:13.000Z | get_image.py | DanielJamesEvans/spectrophotometer_code | 10957590a4b49fe91ec6a0111ef83da63cc4ee67 | [
"MIT"
] | null | null | null | get_image.py | DanielJamesEvans/spectrophotometer_code | 10957590a4b49fe91ec6a0111ef83da63cc4ee67 | [
"MIT"
] | 1 | 2019-08-31T19:10:40.000Z | 2019-08-31T19:10:40.000Z | """This code contains functions called by gui.py.
This software is licensed under the MIT license.
"""
import time
from picamera.array import PiRGBArray
from picamera import PiCamera
from gpiozero import LED
import numpy as np
from PIL import Image
__author__ = "Daniel James Evans"
__copyright__ = "Copyright 2019, Daniel James Evans"
__license__ = "MIT"
# Module-level camera singleton shared by the capture functions below.
camera = PiCamera()
camera.resolution = (640, 480)  # (width, height) in pixels
camera.framerate = 24
time.sleep(0.5)  # brief pause so the camera can initialize before first use
def get_color_image():
    """Take a color image using the camera. Return as a numpy array."""
    illumination = LED(4)
    illumination.on()
    # Capture straight into a pre-allocated (rows, cols, rgb) buffer.
    frame = np.empty((480, 640, 3), dtype=np.uint8)
    camera.capture(frame, "rgb")
    illumination.off()
    return frame
def get_bw_image():
    """Return a numpy array of a grayscale image from the camera.

    The camera cannot hand a grayscale frame to numpy directly, so each
    picture is saved as bw.png and read back.  Five pictures are taken
    and their pixel values averaged to reduce sensor noise.  (The original
    code repeated the capture/read sequence five times verbatim; this
    version uses a loop with identical behavior.)
    """
    led = LED(4)
    led.on()
    # Grayscale mode makes every color channel of the saved image hold
    # (nearly) identical values.
    camera.color_effects = (128, 128)
    num_shots = 5
    frames = []
    for shot in range(num_shots):
        if shot:
            # The original paused 0.1 s between consecutive shots.
            time.sleep(0.1)
        camera.capture("bw.png")
        image_pil = Image.open("bw.png")
        # Accumulate as int16 so the sum of five 8-bit frames cannot
        # overflow (max 5 * 255 = 1275 < 32767).
        frames.append(np.array(image_pil).astype(np.int16))
    # Same arithmetic as the original: int16 sum, true division by the
    # number of shots, then truncation back to uint8.
    image_arr = (sum(frames) / num_shots).astype(np.uint8)
    camera.color_effects = None
    led.off()
    # Each pixel has multiple channel values that are identical (+/- 1)
    # because of camera.color_effects, so a single channel suffices.
    return image_arr[:, :, 1]
| 25.461538 | 79 | 0.662063 |
a1536df44cebf44b8ca6b21340ed07ba5ea74a42 | 15,346 | py | Python | rave_ec/Lib/ec_mcgill.py | DanielMichelson/drqc_article | cd7df2f7290adedb557bbc6ba484d30039a23ce2 | [
"CC-BY-4.0"
] | null | null | null | rave_ec/Lib/ec_mcgill.py | DanielMichelson/drqc_article | cd7df2f7290adedb557bbc6ba484d30039a23ce2 | [
"CC-BY-4.0"
] | null | null | null | rave_ec/Lib/ec_mcgill.py | DanielMichelson/drqc_article | cd7df2f7290adedb557bbc6ba484d30039a23ce2 | [
"CC-BY-4.0"
] | null | null | null | '''
Copyright (C) 2016 The Crown (i.e. Her Majesty the Queen in Right of Canada)
This file is an add-on to RAVE.
RAVE is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
RAVE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with RAVE. If not, see <http://www.gnu.org/licenses/>.
'''
##
# McGill format reader
# McGill indices are base 1, except the bin_number!
##
# @file
# @author Daniel Michelson, Environment and Climate Change Canada
# @date 2016-01-22
import time
import _rave, _raveio
import _polarvolume, _polarscan, _polarscanparam
from Proj import dr
from numpy import *
HEADER_LENGTH = 4096
RECORD_LENGTH = 2048
SEGMENT_LENGTH = 19
SEGMENTS = 107
NRAYS = 360
SCANT = 10 # Time in seconds to acquire a sweep.
QUANTITIES = {1 : "DBZH", 4 : "VRADH", 16 : "ZDR", 17 : "PHIDP",
18 : "RHOHV", 19 : "KDP"} # Only 1 and 4 are available
# esteps are the times in seconds between tilts in the ascending scan strategy
# These are real times from an acquisition in April 2012. They are used to
# adjust the timing metadata backwards, as McGill timestamps the end of data
# acquisition. They are indicative only, but the best we can do.
esteps = (0.921875, 0.914062, 0.914062, 1.04688, 0.976562, 1.00000, 0.984375,
1.02344, 1.47656, 1.33594, 1.17188, 1.71094, 2.17188, 2.82812,
3.12500, 3.32031, 3.71875, 3.92969, 4.44531, 4.83594, 5.13281,
5.22656, 5.29688, 0.0) # Last value is a dummy
## Empty generic container, to be populated
# @param object
## Is this a McGill file?
# @param string containing the input file name
# @returns True if the file is a McGill file, otherwise False
def isMcGill(filename):
    """Return True if the named file is a McGill file, otherwise False.

    A McGill file is recognized by the magic string "mcgill" at the very
    start of the file.

    :param filename: string containing the input file name
    :returns: True if the file is a McGill file, otherwise False
    """
    # A context manager guarantees the handle is closed even if read()
    # raises; the original open()/read()/close() sequence leaked it then.
    with open(filename) as fd:
        s = fd.read(6)
    return s == "mcgill"
## Reads the contents of a McGill file, according to
# http://deneb.tor.ec.gc.ca/urpdoc/reference/science/mcgill_volume_scan.html
# Attribute naming follows this document.
# The generic container is used to represent the contents of the file as:
# mobj : top-level McGill() object
# mobj.logical_records : a list of McGill objects containing one logical record each
# mobj.logical_records[index].segments : a list of 107 McGill objects, each
# representing a segment
# @param string input file name
# @returns McGill object representing the file contents
## Takes the output of readMcGill and creates contiguous scans of data.
# This is done by pasting the contents of each McGill segment into the
# equivalent position in the corresponding contiguous scan.
# @param McGill object representing file contents
## McGill data times are the end of data acquisition. This function guestimates
# the beginning dates and times of each scan in the volume.
# @param McGill object representing file contents
## Creates a PVOL from the McGill object
# @param McGill object representing file contents
# @returns BALTRAD/ODIM PVOL object
## Each PVOL contains only one moment, so merge several of these into one.
# Assume the first PVOL contains DBZH and the second VRADH.
# @param list of (two) PVOLs
# @returns PVOL object containing (both) moments per scan.
## Reads McGill data from file and returns a BALTRAD/ODIM PVOL object for a
# single moment
# @param string of McGill file
# @returns PVOL object containing one moment for each scan.
## Reads McGill data from two files into a single BALTRAD/ODIM PVOL
# @param string of the McGill file containing reflectivity (DBZH)
# @param string of the McGill file containing radial wind velocity (VRADH)
# @returns PVOL object containing both moments per scan
if __name__=="__main__":
    pass  # library module: no behavior when run from the command line
| 37.891358 | 84 | 0.6373 |
a1537d70484481dc31d44d35ec4975bba8b264f5 | 1,038 | py | Python | product/migrations/0001_initial.py | dnetochaves/e-commerce | 97c2266934b6db883d520381520130b0472e9db4 | [
"MIT"
] | null | null | null | product/migrations/0001_initial.py | dnetochaves/e-commerce | 97c2266934b6db883d520381520130b0472e9db4 | [
"MIT"
] | null | null | null | product/migrations/0001_initial.py | dnetochaves/e-commerce | 97c2266934b6db883d520381520130b0472e9db4 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.4 on 2020-12-27 15:03
from django.db import migrations, models
| 35.793103 | 122 | 0.575145 |
a155e11f0e425a96e53ea2166d51415855a2b463 | 921 | py | Python | src/python/setup.py | Basasuya/tsne-cuda | dc518acd9fdf9109952ffe57d6cf12363e3ffd2c | [
"BSD-3-Clause"
] | 2 | 2021-04-30T16:48:47.000Z | 2021-05-21T08:49:13.000Z | src/python/setup.py | Basasuya/tsne-cuda | dc518acd9fdf9109952ffe57d6cf12363e3ffd2c | [
"BSD-3-Clause"
] | null | null | null | src/python/setup.py | Basasuya/tsne-cuda | dc518acd9fdf9109952ffe57d6cf12363e3ffd2c | [
"BSD-3-Clause"
] | 1 | 2021-04-25T23:11:05.000Z | 2021-04-25T23:11:05.000Z | from setuptools import setup
setup(
name='tsnecuda',
version='2.1.0',
author='Chan, David M., Huang, Forrest., Rao, Roshan.',
author_email='davidchan@berkeley.edu',
packages=['tsnecuda', 'tsnecuda.test'],
package_data={'tsnecuda': ['libtsnecuda.so']},
scripts=[],
url='https://github.com/CannyLab/tsne-cuda',
license='LICENSE.txt',
description='CUDA Implementation of T-SNE with Python bindings',
long_description=open('README.txt').read(),
install_requires=[
'numpy >= 1.14.1',
],
classifiers=[
'Programming Language :: Python :: 3.6',
'Operating System :: POSIX :: Linux',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Artificial Intelligence'
],
keywords=[
'TSNE',
'CUDA',
'Machine Learning',
'AI'
]
)
| 27.909091 | 68 | 0.598263 |
a15747184e94e78f55f7ab475ca0b1abe33741e3 | 107,889 | py | Python | programs/parallels.py | ETCBC/parallells | f45f6cc3c4f933dba6e649f49cdb14a40dcf333f | [
"MIT"
] | 4 | 2017-10-01T05:14:59.000Z | 2020-09-09T09:41:26.000Z | programs/parallels.py | ETCBC/parallells | f45f6cc3c4f933dba6e649f49cdb14a40dcf333f | [
"MIT"
] | null | null | null | programs/parallels.py | ETCBC/parallells | f45f6cc3c4f933dba6e649f49cdb14a40dcf333f | [
"MIT"
] | 1 | 2020-10-16T13:21:51.000Z | 2020-10-16T13:21:51.000Z | #!/usr/bin/env python
# coding: utf-8
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc" style="margin-top: 1em;"><ul class="toc-item"><li><span><a href="#0.1-Motivation" data-toc-modified-id="0.1-Motivation-1"><span class="toc-item-num">1 </span>0.1 Motivation</a></span></li><li><span><a href="#0.3-Open-Source" data-toc-modified-id="0.3-Open-Source-2"><span class="toc-item-num">2 </span>0.3 Open Source</a></span></li><li><span><a href="#0.4-What-are-parallel-passages?" data-toc-modified-id="0.4-What-are-parallel-passages?-3"><span class="toc-item-num">3 </span>0.4 What are parallel passages?</a></span></li><li><span><a href="#0.5-Authors" data-toc-modified-id="0.5-Authors-4"><span class="toc-item-num">4 </span>0.5 Authors</a></span></li><li><span><a href="#0.6-Status" data-toc-modified-id="0.6-Status-5"><span class="toc-item-num">5 </span>0.6 Status</a></span></li><li><span><a href="#2.1-Assessing-the-outcomes" data-toc-modified-id="2.1-Assessing-the-outcomes-6"><span class="toc-item-num">6 </span>2.1 Assessing the outcomes</a></span><ul class="toc-item"><li><span><a href="#2.1.1-Assessment-criteria" data-toc-modified-id="2.1.1-Assessment-criteria-6.1"><span class="toc-item-num">6.1 </span>2.1.1 Assessment criteria</a></span></li></ul></li><li><span><a href="#3.1-Similarity" data-toc-modified-id="3.1-Similarity-7"><span class="toc-item-num">7 </span>3.1 Similarity</a></span><ul class="toc-item"><li><span><a href="#3.1.1-SET" data-toc-modified-id="3.1.1-SET-7.1"><span class="toc-item-num">7.1 </span>3.1.1 SET</a></span></li><li><span><a href="#3.1.2-LCS" data-toc-modified-id="3.1.2-LCS-7.2"><span class="toc-item-num">7.2 </span>3.1.2 LCS</a></span></li></ul></li><li><span><a href="#3.2-Performance" data-toc-modified-id="3.2-Performance-8"><span class="toc-item-num">8 </span>3.2 Performance</a></span></li><li><span><a href="#4.1-Chunking" data-toc-modified-id="4.1-Chunking-9"><span class="toc-item-num">9 </span>4.1 Chunking</a></span><ul class="toc-item"><li><span><a href="#4.1.1-Fixed-chunking" 
data-toc-modified-id="4.1.1-Fixed-chunking-9.1"><span class="toc-item-num">9.1 </span>4.1.1 Fixed chunking</a></span></li><li><span><a href="#4.1.2-Object-chunking" data-toc-modified-id="4.1.2-Object-chunking-9.2"><span class="toc-item-num">9.2 </span>4.1.2 Object chunking</a></span></li></ul></li><li><span><a href="#4.2-Preparing" data-toc-modified-id="4.2-Preparing-10"><span class="toc-item-num">10 </span>4.2 Preparing</a></span></li><li><span><a href="#4.3-Cliques" data-toc-modified-id="4.3-Cliques-11"><span class="toc-item-num">11 </span>4.3 Cliques</a></span><ul class="toc-item"><li><span><a href="#4.3.1-Organizing-the-cliques" data-toc-modified-id="4.3.1-Organizing-the-cliques-11.1"><span class="toc-item-num">11.1 </span>4.3.1 Organizing the cliques</a></span></li><li><span><a href="#4.3.2-Evaluating-clique-sets" data-toc-modified-id="4.3.2-Evaluating-clique-sets-11.2"><span class="toc-item-num">11.2 </span>4.3.2 Evaluating clique sets</a></span></li></ul></li><li><span><a href="#5.1-Loading-the-feature-data" data-toc-modified-id="5.1-Loading-the-feature-data-12"><span class="toc-item-num">12 </span>5.1 Loading the feature data</a></span></li><li><span><a href="#5.2-Configuration" data-toc-modified-id="5.2-Configuration-13"><span class="toc-item-num">13 </span>5.2 Configuration</a></span></li><li><span><a href="#5.3-Experiment-settings" data-toc-modified-id="5.3-Experiment-settings-14"><span class="toc-item-num">14 </span>5.3 Experiment settings</a></span></li><li><span><a href="#5.4-Chunking" data-toc-modified-id="5.4-Chunking-15"><span class="toc-item-num">15 </span>5.4 Chunking</a></span></li><li><span><a href="#5.5-Preparing" data-toc-modified-id="5.5-Preparing-16"><span class="toc-item-num">16 </span>5.5 Preparing</a></span><ul class="toc-item"><li><span><a href="#5.5.1-Preparing-for-SET-comparison" data-toc-modified-id="5.5.1-Preparing-for-SET-comparison-16.1"><span class="toc-item-num">16.1 </span>5.5.1 Preparing for SET 
comparison</a></span></li><li><span><a href="#5.5.2-Preparing-for-LCS-comparison" data-toc-modified-id="5.5.2-Preparing-for-LCS-comparison-16.2"><span class="toc-item-num">16.2 </span>5.5.2 Preparing for LCS comparison</a></span></li></ul></li><li><span><a href="#5.6-Similarity-computation" data-toc-modified-id="5.6-Similarity-computation-17"><span class="toc-item-num">17 </span>5.6 Similarity computation</a></span><ul class="toc-item"><li><span><a href="#5.6.1-SET-similarity" data-toc-modified-id="5.6.1-SET-similarity-17.1"><span class="toc-item-num">17.1 </span>5.6.1 SET similarity</a></span></li><li><span><a href="#5.6.2-LCS-similarity" data-toc-modified-id="5.6.2-LCS-similarity-17.2"><span class="toc-item-num">17.2 </span>5.6.2 LCS similarity</a></span></li></ul></li><li><span><a href="#5.7-Cliques" data-toc-modified-id="5.7-Cliques-18"><span class="toc-item-num">18 </span>5.7 Cliques</a></span></li><li><span><a href="#5.7.1-Selecting-passages" data-toc-modified-id="5.7.1-Selecting-passages-19"><span class="toc-item-num">19 </span>5.7.1 Selecting passages</a></span></li><li><span><a href="#5.7.2-Growing-cliques" data-toc-modified-id="5.7.2-Growing-cliques-20"><span class="toc-item-num">20 </span>5.7.2 Growing cliques</a></span></li><li><span><a href="#5.8-Output" data-toc-modified-id="5.8-Output-21"><span class="toc-item-num">21 </span>5.8 Output</a></span><ul class="toc-item"><li><span><a href="#5.8.1-Format-definitions" data-toc-modified-id="5.8.1-Format-definitions-21.1"><span class="toc-item-num">21.1 </span>5.8.1 Format definitions</a></span></li><li><span><a href="#5.8.2-Formatting-clique-lists" data-toc-modified-id="5.8.2-Formatting-clique-lists-21.2"><span class="toc-item-num">21.2 </span>5.8.2 Formatting clique lists</a></span></li><li><span><a href="#5.8.3-Compiling-the-table-of-experiments" data-toc-modified-id="5.8.3-Compiling-the-table-of-experiments-21.3"><span class="toc-item-num">21.3 </span>5.8.3 Compiling the table of 
experiments</a></span></li><li><span><a href="#5.8.4-High-level-formatting-functions" data-toc-modified-id="5.8.4-High-level-formatting-functions-21.4"><span class="toc-item-num">21.4 </span>5.8.4 High level formatting functions</a></span></li></ul></li><li><span><a href="#5.9-Running-experiments" data-toc-modified-id="5.9-Running-experiments-22"><span class="toc-item-num">22 </span>5.9 Running experiments</a></span></li><li><span><a href="#Discussion" data-toc-modified-id="Discussion-23"><span class="toc-item-num">23 </span>Discussion</a></span></li></ul></div>
# <img align="right" src="images/dans-small.png"/>
# <img align="right" src="images/tf-small.png"/>
# <img align="right" src="images/etcbc.png"/>
#
#
# # Parallel Passages in the MT
#
# # 0. Introduction
#
# ## 0.1 Motivation
# We want to make a list of **all** parallel passages in the Masoretic Text (MT) of the Hebrew Bible.
#
# Here is a quote that triggered Dirk to write this notebook:
#
# > Finally, the Old Testament Parallels module in Accordance is a helpful resource that enables the researcher to examine 435 sets of parallel texts, or in some cases very similar wording in different texts, in both the MT and translation, but the large number of sets of texts in this database should not fool one to think it is complete or even nearly complete for all parallel writings in the Hebrew Bible.
#
# Robert Rezetko and Ian Young.
# Historical linguistics & Biblical Hebrew. Steps Toward an Integrated Approach.
# *Ancient Near East Monographs, Number9*. SBL Press Atlanta. 2014.
# [PDF Open access available](https://www.google.nl/url?sa=t&rct=j&q=&esrc=s&source=web&cd=2&ved=0CCgQFjAB&url=http%3A%2F%2Fwww.sbl-site.org%2Fassets%2Fpdfs%2Fpubs%2F9781628370461_OA.pdf&ei=2QSdVf-vAYSGzAPArJeYCg&usg=AFQjCNFA3TymYlsebQ0MwXq2FmJCSHNUtg&sig2=LaXuAC5k3V7fSXC6ZVx05w&bvm=bv.96952980,d.bGQ)
# <img align="right" width="50%" src="parallel.png"/>
#
# ## 0.3 Open Source
# This is an IPython notebook.
# It contains a working program to carry out the computations needed to obtain the results reported here.
#
# You can download this notebook and run it on your computer, provided you have
# [Text-Fabric](https://github.com/Dans-labs/text-fabric) installed.
#
# It is a pity that we cannot compare our results with the Accordance resource mentioned above,
# since that resource has not been published in an accessible manner.
# We also do not have the information how this resource has been constructed on the basis of the raw data.
# In contrast with that, we present our results in a completely reproducible manner.
# This notebook itself can serve as the method of replication,
# provided you have obtained the necessary resources.
# See [sources](https://github.com/ETCBC/shebanq/wiki/Sources), which are all Open Access.
#
# ## 0.4 What are parallel passages?
# The notion of *parallel passage* is not a simple, straightforward one.
# There are parallels on the basis of lexical content in the passages on the one hand,
# but on the other hand there are also correspondences in certain syntactical structures,
# or even in similarities in text structure.
#
# In this notebook we do select a straightforward notion of parallel, based on lexical content only.
# We investigate two measures of similarity, one that ignores word order completely,
# and one that takes word order into account.
#
# Two kinds of short-comings of this approach must be mentioned:
#
# 1. We will not find parallels based on non-lexical criteria (unless they are also lexical parallels)
# 1. We will find too many parallels: certain short sentences (and he said), or formula like passages (and the word of God came to Moses) occur so often that they have a more subtle bearing on whether there is a common text history.
#
# For a more full treatment of parallel passages, see
#
# **Wido Th. van Peursen and Eep Talstra**:
# Computer-Assisted Analysis of Parallel Texts in the Bible -
# The Case of 2 Kings xviii-xix and its Parallels in Isaiah and Chronicles.
# *Vetus Testamentum* 57, pp. 45-72.
# 2007, Brill, Leiden.
#
# Note that our method fails to identify any parallels with Chronica_II 32.
# Van Peursen and Talstra state about this chapter and 2 Kings 18:
#
# > These chapters differ so much, that it is sometimes impossible to establish
# which verses should be considered parallel.
#
# In this notebook we produce a set of *cliques*,
# a clique being a set of passages that are *quite* similar, based on lexical information.
#
#
# ## 0.5 Authors
# This notebook is by Dirk Roorda and owes a lot to discussions with Martijn Naaijer.
#
# Written by [Dirk Roorda](mailto:dirk.roorda@dans.knaw.nl), in discussion with
# [Martijn Naaijer](mailto:m.naaijer@vu.nl).
#
#
# ## 0.6 Status
#
# * **modified: 2017-09-28** Is now part of a pipeline for transferring data from the ETCBC to Text-Fabric.
# * **modified: 2016-03-03** Added experiments based on chapter chunks and lower similarities.
#
# 165 experiments have been carried out, of which 18 with promising results.
# All results can be easily inspected, just by clicking in your browser.
# One of the experiments has been chosen as the basis for
# [crossref](https://shebanq.ancient-data.org/hebrew/note?version=4b&id=Mnxjcm9zc3JlZg__&tp=txt_tb1&nget=v)
# annotations in SHEBANQ.
#
# # 1. Results
#
# Click in a green cell to see interesting results. The numbers in the cell indicate
#
# * the number of passages that have a variant elsewhere
# * the number of *cliques* they form (cliques are sets of similar passages)
# * the number of passages in the biggest clique
#
# Below the results is an account of the method that we used, followed by the actual code to produce these results.
# # Pipeline
# See [operation](https://github.com/ETCBC/pipeline/blob/master/README.md#operation)
# for how to run this script in the pipeline.
#
# The pipeline comes in action in Section [6a](#6a) below: TF features.
# # Caveat
#
# This notebook makes use of a new feature of text-fabric, first present in 2.3.15.
# Make sure to upgrade first.
#
# ```
# sudo -H pip3 install --upgrade text-fabric
# ```
# In[1]:
import sys
import os
import re
import collections
import pickle
import math
import difflib
import yaml
from difflib import SequenceMatcher
from IPython.display import HTML
import matplotlib.pyplot as plt
from tf.core.helpers import formatMeta
# pip3 install python-Levenshtein
# In[2]:
from Levenshtein import ratio
# In[3]:
import utils
from tf.fabric import Fabric
# In[4]:
# Notebook conveniences: auto-reload edited modules and render matplotlib
# figures inline. get_ipython() only exists inside an IPython/Jupyter
# session (hence the noqa markers for the undefined-name lint).
get_ipython().run_line_magic("load_ext", "autoreload")  # noqa F821
get_ipython().run_line_magic("autoreload", "2")  # noqa F821
get_ipython().run_line_magic("matplotlib", "inline")  # noqa F821
# In[2]:
# In[5]:
# Run-mode configuration. The ETCBC pipeline injects SCRIPT (and the other
# names below) into the module namespace before executing this file; the
# locals() probe keeps these notebook defaults from clobbering them.
if "SCRIPT" not in locals():
    # SCRIPT = False
    SCRIPT = False  # True when driven non-interactively by the pipeline
    FORCE = True  # redo the work even if up-to-date results exist
    FORCE_MATRIX = False  # recompute similarity matrices instead of loading
    # Names of the BHSA features used; they vary across data versions,
    # hence the indirection.
    LANG_FEATURE = "languageISO"
    OCC_FEATURE = "g_cons"
    LEX_FEATURE = "lex"
    TEXT_FEATURE = "g_word_utf8"
    TRAILER_FEATURE = "trailer_utf8"
    CORE_NAME = "bhsa"  # repo holding the core BHSA Text-Fabric data
    NAME = "parallels"  # this repo
    VERSION = "2021"  # data version to process
# In[6]:
# In[3]:
# In[7]:
# run this cell after all other cells
# NOTE(review): deliberately disabled via `if False` — `other_exps` is built
# only by later notebook cells (not part of this excerpt), so enabling this
# guard here would raise NameError.
if False and not SCRIPT:
    HTML(other_exps)
# # 2. Experiments
#
# We have conducted 165 experiments, all corresponding to a specific choice of parameters.
# Every experiment is an attempt to identify variants and collect them in *cliques*.
#
# The table gives an overview of the experiments conducted.
#
# Every *row* corresponds to a particular way of chunking and a method of measuring the similarity.
#
# There are *columns* for each similarity *threshold* that we have tried.
# The idea is that chunks are similar if their similarity is above the threshold.
#
# The outcomes of one experiment have been added to SHEBANQ as the note set
# [crossref](https://shebanq.ancient-data.org/hebrew/note?version=4b&id=Mnxjcm9zc3JlZg__&tp=txt_tb1&nget=v).
# The experiment chosen for this is currently
#
# * *chunking*: **object verse**
# * *similarity method*: **SET**
# * *similarity threshold*: **65**
#
#
# ## 2.1 Assessing the outcomes
#
# Not all experiments lead to useful results.
# We have indicated the value of a result by a color coding, based on objective characteristics,
# such as the number of parallel passages, the number of cliques, the size of the greatest clique, and the way of chunking.
# These numbers are shown in the cells.
#
# ### 2.1.1 Assessment criteria
#
# If the method is based on *fixed* chunks, we deprecated the method and the results.
# Because two perfectly similar verses could be missed if a 100-word wide window that shifts over the text aligns differently with both verses, which will usually be the case.
#
# Otherwise, we consider the *ll*, the length of the longest clique, and *nc*, the number of cliques.
# We set three quality parameters:
# * `REC_CLIQUE_RATIO` = 5 : recommended clique ratio
# * `DUB_CLIQUE_RATIO` = 15 : dubious clique ratio
# * `DEP_CLIQUE_RATIO` = 25 : deprecated clique ratio
#
# where the *clique ratio* is $100 (ll/nc)$,
# i.e. the length of the longest clique divided by the number of cliques as percentage.
#
# An experiment is *recommended* if its clique ratio is between the recommended and dubious clique ratios.
#
# It is *dubious* if its clique ratio is between the dubious and deprecated clique ratios.
#
# It is *deprecated* if its clique ratio is above the deprecated clique ratio.
#
# # 2.2 Inspecting results
# If you click on the hyperlink in the cell, you are taken to a page that gives you
# all the details of the results:
#
# 1. A link to a file with all *cliques* (which are the sets of similar passages)
# 1. A list of links to chapter-by-chapter diff files (for cliques with just two members), and only for
# experiments with outcomes that are labeled as *promising* or *unassessed quality* or *mixed results*.
#
# To get into the variants quickly, inspect the list (2) and click through
# to see the actual variant material in chapter context.
#
# Not all variants occur here, so continue with (1) to see the remaining cliques.
#
# Sometimes in (2) a chapter diff file does not indicate clearly the relevant common part of both chapters.
# In that case you have to consult the big list (1)
#
# All these results can be downloaded from the
# [SHEBANQ github repo](https://github.com/ETCBC/shebanq/tree/master/static/docs/tools/parallel/files)
# After downloading the whole directory, open ``experiments.html`` in your browser.
# # 3. Method
#
# Here we discuss the method we used to arrive at a list of parallel passages
# in the Masoretic Text (MT) of the Hebrew Bible.
#
# ## 3.1 Similarity
#
# We have to find passages in the MT that are *similar*.
# Therefore we *chunk* the text in some way, and then compute the similarities between pairs of chunks.
#
# There are many ways to define and compute similarity between texts.
# Here, we have tried two methods ``SET`` and ``LCS``.
# Both methods define similarity as the fraction of common material with respect to the total material.
#
# ### 3.1.1 SET
#
# The ``SET`` method reduces textual chunks to *sets* of *lexemes*.
# This method abstracts from the order and number of occurrences of words in chunks.
#
# We use as measure for the similarity of chunks $C_1$ and $C_2$ (taken as sets):
#
# $$ s_{\rm set}(C_1, C_2) = {\vert C_1 \cap C_2\vert \over \vert C_1 \cup C_2 \vert} $$
#
# where $\vert X \vert$ is the number of elements in set $X$.
#
# ### 3.1.2 LCS
#
# The ``LCS`` method is less reductive: chunks are *strings* of *lexemes*,
# so the order and number of occurrences of words is retained.
#
# We use as measure for the similarity of chunks $C_1$ and $C_2$ (taken as strings):
#
# $$ s_{\rm lcs}(C_1, C_2) = {\vert {\rm LCS}(C_1,C_2)\vert \over \vert C_1\vert + \vert C_2 \vert -
# \vert {\rm LCS}(C_1,C_2)\vert} $$
#
# where ${\rm LCS}(C_1, C_2)$ is the
# [longest common subsequence](https://en.wikipedia.org/wiki/Longest_common_subsequence_problem)
# of $C_1$ and $C_2$ and
# $\vert X\vert$ is the length of sequence $X$.
#
# It remains to be seen whether we need the extra sophistication of ``LCS``.
# The risk is that ``LCS`` could fail to spot related passages when there is a large amount of transposition going on.
# The results should have the last word.
#
# We need to compute the LCS efficiently, and for this we used the python ``Levenshtein`` module:
#
# ``pip install python-Levenshtein``
#
# whose documentation is
# [here](http://www.coli.uni-saarland.de/courses/LT1/2011/slides/Python-Levenshtein.html).
#
# ## 3.2 Performance
#
# Similarity computation is the part where the heavy lifting occurs.
# It is basically quadratic in the number of chunks, so if you have verses as chunks (~ 23,000),
# you need to do ~ 270,000,000 similarity computations, and if you use sentences (~ 64,000),
# you need to do ~ 2,000,000,000 ones!
# The computation of a single similarity should be *really* fast.
#
# Besides that, we use two ways to economize:
#
# * after having computed a matrix for a specific set of parameter values, we save the matrix to disk;
# new runs can load the matrix from disk in a matter of seconds;
# * we do not store low similarity values in the matrix, low being < ``MATRIX_THRESHOLD``.
#
# The ``LCS`` method is more complicated.
# We have tried the ``ratio`` method from the ``difflib`` package that is present in the standard python distribution.
# This is unbearably slow for our purposes.
# The ``ratio`` method in the ``Levenshtein`` package is much quicker.
#
# See the table for an indication of the amount of work to create the similarity matrix
# and the performance per similarity method.
#
# The *matrix threshold* is the lower bound of similarities that are stored in the matrix.
# If a pair of chunks has a lower similarity, no entry will be made in the matrix.
#
# The computing has been done on a Macbook Air (11", mid 2012, 1.7 GHz Intel Core i5, 8GB RAM).
#
# |chunk type |chunk size|similarity method|matrix threshold|# of comparisons|size of matrix (KB)|computing time (min)|
# |:----------|---------:|----------------:|---------------:|---------------:|------------------:|-------------------:|
# |fixed |100 |LCS |60 | 9,003,646| 7| ? |
# |fixed |100 |SET |50 | 9,003,646| 7| ? |
# |fixed |50 |LCS |60 | 36,197,286| 37| ? |
# |fixed |50 |SET |50 | 36,197,286| 18| ? |
# |fixed |20 |LCS |60 | 227,068,705| 2,400| ? |
# |fixed |20 |SET |50 | 227,068,705| 113| ? |
# |fixed |10 |LCS |60 | 909,020,841| 59,000| ? |
# |fixed |10 |SET |50 | 909,020,841| 1,800| ? |
# |object |verse |LCS |60 | 269,410,078| 2,300| 31|
# |object |verse |SET |50 | 269,410,078| 509| 14|
# |object |half_verse|LCS |60 | 1,016,396,241| 40,000| 50|
# |object |half_verse|SET |50 | 1,016,396,241| 3,600| 41|
# |object |sentence |LCS |60 | 2,055,975,750| 212,000| 68|
# |object |sentence |SET |50 | 2,055,975,750| 82,000| 63|
# # 4. Workflow
#
# ## 4.1 Chunking
#
# There are several ways to chunk the text:
#
# * fixed chunks of approximately ``CHUNK_SIZE`` words
# * by object, such as verse, sentence and even chapter
#
# After chunking, we prepare the chunks for similarity measuring.
#
# ### 4.1.1 Fixed chunking
# Fixed chunking is unnatural, but if the chunk size is small, it can yield fair results.
# The results are somewhat difficult to inspect, because they generally do not respect constituent boundaries.
# It is to be expected that fixed chunks in variant passages will be mutually *out of phase*,
# meaning that the chunks involved in these passages are not aligned with each other.
# So they will have a lower similarity than they could have if they were aligned.
# This is a source of artificial noise in the outcome and/or missed cases.
#
# If the chunking respects "natural" boundaries in the text, there is far less misalignment.
#
# ### 4.1.2 Object chunking
# We can also chunk by object, such as verse, half_verse or sentence.
#
# Chunking by *verse* is very much like chunking in fixed chunks of size 20, performance-wise.
#
# Chunking by *half_verse* is comparable to fixed chunks of size 10.
#
# Chunking by *sentence* will generate an enormous amount of
# false positives, because there are very many very short sentences (down to 1-word) in the text.
# Besides that, the performance overhead is huge.
#
# The *half_verses* seem to be a very interesting candidate.
# They are smaller than verses, but there are less *degenerate cases* compared to with sentences.
# From the table above it can be read that half verses require only half as many similarity computations as sentences.
#
#
# ## 4.2 Preparing
#
# We prepare the chunks for the application of the chosen method of similarity computation (``SET`` or ``LCS``).
#
# In both cases we reduce the text to a sequence of transliterated consonantal *lexemes* without disambiguation.
# In fact, we go one step further: we remove the consonants (aleph, wav, yod) that are often silent.
#
# For ``SET``, we represent each chunk as the set of its reduced lexemes.
#
# For ``LCS``, we represent each chunk as the string obtained by joining its reduced lexemes separated by white spaces.
#
# ## 4.3 Cliques
#
# After having computed a sufficient part of the similarity matrix, we set a value for ``SIMILARITY_THRESHOLD``.
# All pairs of chunks having at least that similarity are deemed *interesting*.
#
# We organize the members of such pairs in *cliques*, groups of chunks of which each member is
# similar (*similarity* > ``SIMILARITY_THRESHOLD``) to at least one other member.
#
# We start with no cliques and walk through the pairs whose similarity is above ``SIMILARITY_THRESHOLD``,
# and try to put each member into a clique.
#
# If there is not yet a clique, we make the member in question into a new singleton clique.
#
# If there are cliques, we find the cliques that have a member similar to the member in question.
# If we find several, we merge them all into one clique.
#
# If there is no such clique, we put the member in a new singleton clique.
#
# NB: Cliques may *drift*, meaning that they contain members that are completely different from each other.
# They are in the same clique, because there is a path of pairwise similar members leading from the one chunk to the other.
#
# ### 4.3.1 Organizing the cliques
# In order to handle cases where there are many corresponding verses in corresponding chapters, we produce
# chapter-by-chapter diffs in the following way.
#
# We make a list of all chapters that are involved in cliques.
# This yields a list of chapter cliques.
# For all *binary* chapters cliques, we generate a colorful diff rendering (as HTML) for the complete two chapters.
#
# We only do this for *promising* experiments.
#
# ### 4.3.2 Evaluating clique sets
#
# Not all clique sets are equally worth while.
# For example, if we set the ``SIMILARITY_THRESHOLD`` too low, we might get one gigantic clique, especially
# in combination with a fine-grained chunking. In other words: we suffer from *clique drifting*.
#
# We detect clique drifting by looking at the size of the largest clique.
# If that is large compared to the total number of chunks, we deem the results unsatisfactory.
#
# On the other hand, when the ``SIMILARITY_THRESHOLD`` is too high, you might miss a lot of correspondences,
# especially when chunks are large, or when we have fixed-size chunks that are out of phase.
#
# We deem the results of experiments based on a partitioning into fixed length chunks as unsatisfactory, although it
# might be interesting to inspect what exactly the damage is.
#
# At the moment, we have not yet analyzed the relative merits of the similarity methods ``SET`` and ``LCS``.
# # 5. Implementation
#
#
# The rest is code. From here we fire up the engines and start computing.
# In[8]:
PICKLE_PROTOCOL = 3
# # Setting up the context: source file and target directories
#
# The conversion is executed in an environment of directories, so that sources, temp files and
# results are in convenient places and do not have to be shifted around.
# In[5]:
# In[9]:
# Directory layout: everything lives under ~/github/etcbc — the core BHSA
# data in its own repo, this repo holding temp files and the results.
repoBase = os.path.expanduser("~/github/etcbc")
coreRepo = f"{repoBase}/{CORE_NAME}"
thisRepo = f"{repoBase}/{NAME}"

# Text-Fabric source data of the chosen version.
coreTf = f"{coreRepo}/tf/{VERSION}"

# Temp area: a version-independent root with a per-version subtree.
allTemp = f"{thisRepo}/_temp"
thisTemp = f"{thisRepo}/_temp/{VERSION}"
thisTempTf = f"{thisTemp}/tf"

# Final deliverables: the new TF features and the SHEBANQ note export.
thisTf = f"{thisRepo}/tf/{VERSION}"
thisNotes = f"{thisRepo}/shebanq/{VERSION}"
# In[6]:
# In[13]:
# Output file for the SHEBANQ crossref notes.
notesFile = "crossrefNotes.csv"
# Ensure the notes directory exists. exist_ok=True avoids the
# check-then-create race of the original os.path.exists() guard and is a
# no-op when the directory is already present.
os.makedirs(thisNotes, exist_ok=True)
# # Test
#
# Check whether this conversion is needed in the first place.
# Only when run as a script.
# In[7]:
# In[14]:
# Pipeline guard: when run as a script, skip the whole computation if the
# target TF feature file is already up to date (unless FORCE is set).
# NOTE(review): `stop` is a pipeline-injected helper, not defined in this
# file; this branch is only reachable with SCRIPT == True, i.e. inside the
# pipeline environment that provides it — confirm before running standalone.
if SCRIPT:
    (good, work) = utils.mustRun(
        None, "{}/.tf/{}.tfx".format(thisTf, "crossref"), force=FORCE
    )
    if not good:
        stop(good=False)
    if not work:
        stop(good=True)
# ## 5.1 Loading the feature data
#
# We load the features we need from the BHSA core database.
# In[8]:
# In[15]:
utils.caption(4, "Load the existing TF dataset")
TF = Fabric(locations=coreTf, modules=[""])
# In[9]:
# In[16]:
api = TF.load(
"""
otype
{} {} {}
book chapter verse number
""".format(
LEX_FEATURE,
TEXT_FEATURE,
TRAILER_FEATURE,
)
)
api.makeAvailableIn(globals())
# ## 5.2 Configuration
#
# Here are the parameters on which the results crucially depend.
#
# There are also parameters that control the reporting of the results, such as file locations.
# In[10]:
# In[17]:
# Experiment parameter space.
#
# Chunking: either fixed-size windows (CHUNK_SIZES, in words) or linguistic
# objects (CHUNK_OBJECTS). The *_LABELS / *_LBS dicts map the boolean
# "fixed?" flag to a human-readable label and a one-letter code.
CHUNK_LABELS = {False: "object", True: "fixed"}
CHUNK_LBS = {False: "O", True: "F"}
CHUNK_SIZES = (100, 50, 20, 10)
CHUNK_OBJECTS = ("chapter", "verse", "half_verse", "sentence")

# Preparing: before comparison, lexemes are stripped of the often-silent
# consonants (aleph/waw/yod) and of the homonym-disambiguation characters.
EXCLUDED_CONS = r"[>WJ=/\[]"  # weed out weak consonants
EXCLUDED_PAT = re.compile(EXCLUDED_CONS)

# Similarity: pairs scoring below MATRIX_THRESHOLD are not stored in the
# matrix; SIMILARITIES lists the thresholds tried (100 down to 30, step 5).
MATRIX_THRESHOLD = 50
SIM_METHODS = ("SET", "LCS")
SIMILARITIES = tuple(range(100, 25, -5))
# In[20]:
# Reporting thresholds: clique-ratio cut-offs (in percent) that classify an
# experiment as recommended / dubious / deprecated, plus output pagination.
DEP_CLIQUE_RATIO = 25
DUB_CLIQUE_RATIO = 15
REC_CLIQUE_RATIO = 5
LARGE_CLIQUE_SIZE = 50
CLIQUES_PER_FILE = 50

# Human-readable verdict labels used in the experiment overview table.
VALUE_LABELS = {
    "mis": "no results available",
    "rec": "promising results: recommended",
    "dep": "messy results: deprecated",
    "dub": "mixed quality: take care",
    "out": "method deprecated",
    "nor": "unassessed quality: inspection needed",
    "lr": "this experiment is the last one run",
}
# note that the TF_TABLE and LOCAL_BASE_COMP are deliberately
# located in the version independent
# part of the tempdir.
# Here the results of expensive calculations are stored,
# to be used by all versions


# In[22]:


# crossrefs for TF
# TSV listing the cross-referenced chunk pairs; this is the intermediate
# product from which the TF `crossref` feature is built.
TF_TABLE = "{}/parallelTable.tsv".format(allTemp)
# In[23]:
# The one experiment whose cliques are exported to SHEBANQ as notes:
# object-chunking by verse, SET similarity, threshold 65.
SHEBANQ_MATRIX = (False, "verse", "SET")
SHEBANQ_SIMILARITY = 65
SHEBANQ_TOOL = "parallel"
CROSSREF_STATUS = "!"
CROSSREF_KEYWORD = "crossref"

# Progress indication: report every so many comparisons / cliques.
VERBOSE = False
MEGA = 10 ** 6
KILO = 10 ** 3
SIMILARITY_PROGRESS = 5 * MEGA
CLIQUES_PROGRESS = 1 * KILO
# In[25]:
# locations and hyperlinks
# Version-independent scratch area for the expensive computations.
LOCAL_BASE_COMP = "{}/calculus".format(allTemp)
# Everything below is relative to the published `files` output directory.
LOCAL_BASE_OUTP = "files"
EXPERIMENT_DIR = "experiments"
EXPERIMENT_FILE = "experiments"
EXPERIMENT_PATH = "{}/{}.txt".format(LOCAL_BASE_OUTP, EXPERIMENT_FILE)
EXPERIMENT_HTML = "{}/{}.html".format(LOCAL_BASE_OUTP, EXPERIMENT_FILE)
NOTES_FILE = "crossref"
NOTES_PATH = "{}/{}.csv".format(LOCAL_BASE_OUTP, NOTES_FILE)
# Pickled intermediate results, keyed by the experiment parameters.
STORED_CLIQUE_DIR = "stored/cliques"
STORED_MATRIX_DIR = "stored/matrices"
STORED_CHUNK_DIR = "stored/chunks"
# Per-chapter diff renderings for binary chapter cliques.
CHAPTER_DIR = "chapters"
CROSSREF_DB_FILE = "crossrefdb.csv"
CROSSREF_DB_PATH = "{}/{}".format(LOCAL_BASE_OUTP, CROSSREF_DB_FILE)
# ## 5.3 Experiment settings
#
# For each experiment we have to adapt the configuration settings to the parameters that define the experiment.
# In[11]:
# In[26]:
# In[27]:
# In[28]:
# In[29]:
# In[30]:
reset_params()
# ## 5.4 Chunking
#
# We divide the text into chunks to be compared. The result is ``chunks``,
# which is a list of lists.
# Every chunk is a list of word nodes.
# In[12]:
# In[31]:
# ## 5.5 Preparing
#
# In order to compute similarities between chunks, we have to compile each chunk into the information that really matters for the comparison. This is dependent on the chosen method of similarity computing.
#
# ### 5.5.1 Preparing for SET comparison
#
# We reduce words to their lexemes (dictionary entries) and from them we also remove the aleph, wav, and yods.
# The lexeme feature also contains characters (`/ [ =`) to disambiguate homonyms. We also remove these.
# If we end up with something empty, we skip it.
# Eventually, we take the set of these reduced word lexemes, so that we effectively ignore order and multiplicity of words. In other words: the resulting similarity will be based on lexeme content.
#
# ### 5.5.2 Preparing for LCS comparison
#
# Again, we reduce words to their lexemes as for the SET preparation, and we do the same weeding of consonants and empty strings. But then we concatenate everything, separated by a space. So we preserve order and multiplicity.
# In[13]:
# In[32]:
# ## 5.6 Similarity computation
#
# Here we implement our two ways of similarity computation.
# Both need a massive amount of work, especially for experiments with many small chunks.
# The similarities are stored in a ``matrix``, a data structure that stores a similarity number for each pair of chunk indexes.
# Most pair of chunks will be dissimilar. In order to save space, we do not store similarities below a certain threshold.
# We store matrices for re-use.
#
# ### 5.6.1 SET similarity
# The core is an operation on the sets, associated with the chunks by the prepare step. We take the cardinality of the intersection divided by the cardinality of the union.
# Intuitively, we compute the proportion of what two chunks have in common against their total material.
#
# In case the union is empty (both chunks have yielded an empty set), we deem the chunks not to be interesting as a parallel pair, and we set the similarity to 0.
#
# ### 5.6.2 LCS similarity
# The core is the method `ratio()`, taken from the Levenshtein module.
# Remember that the preparation step yielded a space separated string of lexemes, and these strings are compared on the basis of edit distance.
# In[14]:
# In[33]:
# In[34]:
# ## 5.7 Cliques
#
# Based on the value for the ``SIMILARITY_THRESHOLD`` we use the similarity matrix to pick the *interesting*
# similar pairs out of it. From these pairs we lump together our cliques.
#
# Our list of experiments will select various values for ``SIMILARITY_THRESHOLD``, which will result
# in various types of clique behavior.
#
# We store computed cliques for re-use.
#
# ## 5.7.1 Selecting passages
#
# We take all pairs from the similarity matrix which are above the threshold, and add both members to a list of passages.
#
# ## 5.7.2 Growing cliques
# We inspect all passages in our set, and try to add them to the cliques we are growing.
# We start with an empty set of cliques.
# Each passage is added to a clique with which it has *enough familiarity*, otherwise it is added to a new clique.
# *Enough familiarity means*: the passage is similar to at least one member of the clique, and the similarity is at least ``SIMILARITY_THRESHOLD``.
# It is possible that a passage is thus added to more than one clique. In that case, those cliques are merged.
# This may lead to growing very large cliques if ``SIMILARITY_THRESHOLD`` is too low.
# In[15]:
# In[35]:
# In[36]:
# In[37]:
# In[38]:
# In[39]:
# ## 5.8 Output
#
# We deliver the output of our experiments in various ways, all in HTML.
#
# We generate chapter based diff outputs with color-highlighted differences between the chapters for every pair of chapters that merit it.
#
# For every (*good*) experiment, we produce a big list of its cliques, and for
# every such clique, we produce a diff-view of its members.
#
# Big cliques will be split into several files.
#
# Clique listings will also contain metadata: the value of the experiment parameters.
#
# ### 5.8.1 Format definitions
# Here are the definitions for formatting the (HTML) output.
# In[16]:
# In[40]:
# clique lists
css = """
td.vl {
font-family: Verdana, Arial, sans-serif;
font-size: small;
text-align: right;
color: #aaaaaa;
width: 10%;
direction: ltr;
border-left: 2px solid #aaaaaa;
border-right: 2px solid #aaaaaa;
}
td.ht {
font-family: Ezra SIL, SBL Hebrew, Verdana, sans-serif;
font-size: x-large;
line-height: 1.7;
text-align: right;
direction: rtl;
}
table.ht {
width: 100%;
direction: rtl;
border-collapse: collapse;
}
td.ht {
border-left: 2px solid #aaaaaa;
border-right: 2px solid #aaaaaa;
}
tr.ht.tb {
border-top: 2px solid #aaaaaa;
border-left: 2px solid #aaaaaa;
border-right: 2px solid #aaaaaa;
}
tr.ht.bb {
border-bottom: 2px solid #aaaaaa;
border-left: 2px solid #aaaaaa;
border-right: 2px solid #aaaaaa;
}
span.m {
background-color: #aaaaff;
}
span.f {
background-color: #ffaaaa;
}
span.x {
background-color: #ffffaa;
color: #bb0000;
}
span.delete {
background-color: #ffaaaa;
}
span.insert {
background-color: #aaffaa;
}
span.replace {
background-color: #ffff00;
}
"""
# In[41]:
# chapter diffs
diffhead = """
<head>
<meta http-equiv="Content-Type"
content="text/html; charset=UTF-8" />
<title></title>
<style type="text/css">
table.diff {
font-family: Ezra SIL, SBL Hebrew, Verdana, sans-serif;
font-size: x-large;
text-align: right;
}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}
</style>
</head>
"""
# In[42]:
# table of experiments
# CSS for the experiments overview table; the class names correspond to
# the assessment categories used in the legend below (mis/rec/dep/dub/out/nor)
ecss = """
<style type="text/css">
.mis {background-color: #cccccc;}
.rec {background-color: #aaffaa;}
.dep {background-color: #ffaaaa;}
.dub {background-color: #ffddaa;}
.out {background-color: #ffddff;}
.nor {background-color: #fcfcff;}
.ps {font-weight: normal;}
.mx {font-style: italic;}
.cl {font-weight: bold;}
.lr {font-weight: bold; background-color: #ffffaa;}
p,td {font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: small;}
td {border: 1pt solid #000000; padding: 4pt;}
table {border: 1pt solid #000000; border-collapse: collapse;}
</style>
"""
# In[43]:
# Legend table for the experiments overview; the cell texts come from
# VALUE_LABELS (defined earlier in the notebook), which is assumed to
# provide the keys mis/rec/dep/dub/out/nor -- one per assessment category.
legend = """
<table>
<tr><td class="mis">{mis}</td></tr>
<tr><td class="rec">{rec}</td></tr>
<tr><td class="dep">{dep}</td></tr>
<tr><td class="dub">{dub}</td></tr>
<tr><td class="out">{out}</td></tr>
<tr><td class="nor">{nor}</td></tr>
</table>
""".format(
    **VALUE_LABELS
)
# ### 5.8.2 Formatting clique lists
# In[17]:
# In[44]:
# In[45]:
# In[46]:
# In[47]:
# In[48]:
# In[49]:
# In[50]:
# In[51]:
# In[52]:
# In[53]:
# In[54]:
# In[55]:
# In[56]:
# In[57]:
# In[58]:
# In[59]:
# In[60]:
# In[61]:
# ### 5.8.3 Compiling the table of experiments
#
# Here we generate the table of experiments, complete with the coloring according to their assessments.
# In[18]:
# In[62]:
# generate the table of experiments
# ### 5.8.4 High level formatting functions
#
# Here everything concerning output is brought together.
# In[19]:
# In[63]:
# In[64]:
# ## 5.9 Running experiments
#
# The workflows of doing a single experiment, and then all experiments, are defined.
# In[20]:
# In[65]:
# Collector for experiment outputs; filled elsewhere in the notebook
outputs = {}
# In[66]:
# In[67]:
# In[68]:
# In[69]:
# In[70]:
# In[71]:
# In[72]:
# In[73]:
# # 6a
# # TF features
#
# Based on selected similarity matrices, we produce an
# edge features between verses, containing weighted links to parallel verses.
#
# The features to deliver are called `crossrefSET` and `crossrefLCS` and `crossref`.
#
# These are edge features; both are symmetric, and hence redundant.
# For every node, the *from* and *to* edges are identical.
#
# The `SET` variant consists of set based similarity, the `LCS` one on longest common subsequence
# similarity.
#
# The `crossref` feature takes the union of both methods, with the average confidence.
#
# The weight is the similarity as percentage integer as it comes from the similarity matrix.
#
# ## Discussion
# We only produce the results of the similarity computation (the matrix), we do not do the cliqueing.
# There are many ways to make cliques, and that can easily be done by users of the data, once the
# matrix results are in place.
# We also do not produce pretty outputs, chapter diffs and other goodies.
# Just the raw similarity data.
#
# The matrix computation is expensive.
# We use fixed settings:
# * verse chunks
# * `SET` method / `LCS` method,
# * matrix threshold 50 / 60
# * similarity threshold 75
#
# That is, we compute a matrix that contains all pairs with similarity above 50 or 60
# depending on whether we do the `SET` method or the `LCS` method.
#
# From that matrix, we only use the similarities above 75.
# This gives us room to play without recomputing the matrix.
#
# We do not want to redo this computation if it can be avoided.
#
# Verse similarity is not something that is very sensitive to change in the encoding.
# It is very likely that similar verses in one version of the data agree with similar
# verses in all other versions.
#
# However, the node numbers of verses may change from version to version, so that part
# must be done again for each version.
#
# This is how we proceed:
# * the matrix computation gives us triples (v1, v2, d), where v1 and v2 are verse nodes and d is their similarity
# * we store the result of the matrix computation in a csv file with the following fields:
# * method, v1, v1Ref, v2, v2Ref, d, where v1Ref and v2Ref are verse references,
# each containing exactly 3 fields: book, chapter, verse
# * NB: the similarity table has only one entry for each pair of similar verses per method.
# If (v1, v2) is in the table, (v2, v1) is not in the table, per method.
#
# When we run this notebook for the pipeline, we check for the presence of this file.
# If it is present, we use the vRefs in it to compute the verse nodes that are valid for the
# version we are going to produce.
# That gives us all the data we need, so we can skip the matrix computation.
#
# If the file is not present, we have to compute the matrix.
# There will be a parameter, called FORCE_MATRIX, which can enforce a re-computation of the matrix.
# We need some utility function geared to TF feature production.
# The `get_verse()` function is simpler, and we do not have to run full experiments.
# In[21]:
# In[74]:
# In[75]:
# In[76]:
# In[22]:
# In[77]:
# Announce the crossref-fetching stage in the pipeline log
utils.caption(4, "CROSSREFS: Fetching crossrefs")
# In[78]:
# Reuse the cached similarity table when possible; the matrix
# computation behind it is expensive. FORCE_MATRIX overrides the cache.
xTable = os.path.exists(TF_TABLE)
if FORCE_MATRIX:
    utils.caption(
        0,
        "\t{} requested of {}".format(
            "Recomputing" if xTable else "computing",
            TF_TABLE,
        ),
    )
else:
    if xTable:
        utils.caption(0, "\tReading existing {}".format(TF_TABLE))
    else:
        utils.caption(0, "\tComputing missing {}".format(TF_TABLE))
# In[79]:
# makeSimTable()/readSimTable() are defined earlier in the notebook;
# both yield rows shaped (method, v1, v2, similarity, ...) -- see the
# unpacking loop below
if FORCE_MATRIX or not xTable:
    similars = makeSimTable()
else:
    similars = readSimTable()
# In[23]:
# In[80]:
# Outside pipeline (script) mode, show a small sample of both methods
if not SCRIPT:
    print("\n".join(sorted(repr(sim) for sim in similars if sim[0] == "LCS")[0:10]))
    print("\n".join(sorted(repr(sim) for sim in similars if sim[0] == "SET")[0:10]))
# In[81]:
# crossrefData[method][v1][v2] = similarity, stored symmetrically in v1/v2.
# The "" method holds the union of both methods with averaged similarity.
crossrefData = {}
otherMethod = dict(LCS="SET", SET="LCS")
# In[82]:
for (method, v1, v2, sim, *x) in similars:
    crossrefData.setdefault(method, {}).setdefault(v1, {})[v2] = sim
    crossrefData.setdefault(method, {}).setdefault(v2, {})[v1] = sim
    # If the other method already delivered this pair, overwrite the
    # combined ("") entry with the average of both similarities;
    # otherwise store this method's similarity for now
    omethod = otherMethod[method]
    otherSim = crossrefData.get(omethod, {}).get(v1, {}).get(v2, None)
    thisSim = sim if otherSim is None else int(round((otherSim + sim) / 2))
    crossrefData.setdefault("", {}).setdefault(v1, {})[v2] = thisSim
    crossrefData.setdefault("", {}).setdefault(v2, {})[v1] = thisSim
# # Generating parallels module for Text-Fabric
#
# We generate the feature `crossref`.
# It is an edge feature between verse nodes, with the similarity as weight.
# In[89]:
utils.caption(4, "Writing TF parallel features")
# In[90]:
# The three edge features to deliver: combined, SET-based and LCS-based
newFeatureStr = "crossref crossrefSET crossrefLCS"
newFeatures = newFeatureStr.strip().split()
# In[91]:
# Assemble the TF metadata: generic repo metadata plus the
# parallels-specific feature metadata, keyed by feature name
genericMetaPath = f"{thisRepo}/yaml/generic.yaml"
parallelsMetaPath = f"{thisRepo}/yaml/parallels.yaml"
with open(genericMetaPath) as fh:
    genericMeta = yaml.load(fh, Loader=yaml.FullLoader)
    genericMeta["version"] = VERSION
with open(parallelsMetaPath) as fh:
    parallelsMeta = formatMeta(yaml.load(fh, Loader=yaml.FullLoader))
metaData = {"": genericMeta, **parallelsMeta}
# In[92]:
# Only edge features are produced; the "" key of crossrefData maps to
# the combined "crossref" feature, "SET"/"LCS" to the method variants
nodeFeatures = dict()
edgeFeatures = dict()
for method in [""] + list(otherMethod):
    edgeFeatures["crossref{}".format(method)] = crossrefData[method]
# In[93]:
# All three features carry integer edge values (the similarity percentage)
for newFeature in newFeatures:
    metaData[newFeature]["valueType"] = "int"
    metaData[newFeature]["edgeValues"] = True
# In[94]:
# Save to the temporary TF location; delivery to the final location happens later
TF = Fabric(locations=thisTempTf, silent=True)
TF.save(nodeFeatures=nodeFeatures, edgeFeatures=edgeFeatures, metaData=metaData)
# # Generating simple crossref notes for SHEBANQ
# We base them on the average of both methods, we supply the confidence.
# In[33]:
# In[ ]:
# Maximum number of cross references packed into a single SHEBANQ note
MAX_REFS = 10
# In[ ]:
# In[ ]:
# Use the combined (averaged) crossref data for the notes
crossrefBase = crossrefData[""]
# In[ ]:
# Group each verse's crossref targets into batches of at most MAX_REFS,
# so no single note becomes too long
refsGrouped = []
nCrossrefs = 0
for (x, refs) in crossrefBase.items():
    vys = sorted(refs.keys())
    nCrossrefs += len(vys)
    currefs = []
    for vy in vys:
        nr = len(currefs)
        if nr == MAX_REFS:
            # batch full: flush it and start a new one
            refsGrouped.append((x, tuple(currefs)))
            currefs = []
        currefs.append(vy)
    if len(currefs):
        # flush the final, possibly partial, batch
        refsGrouped.append((x, tuple(currefs)))
# In[33]:
# Turn every batch into the note text: per target a "label(Book C:V)"
# reference including the similarity percentage; condenseX (defined
# earlier in the notebook) produces the display labels
refsCompiled = []
for (x, vys) in refsGrouped:
    vysd = [
        (*T.sectionFromNode(vy, lang="la"), " ~{}%".format(crossrefBase[x][vy]))
        for vy in vys
    ]
    vysl = condenseX(vysd)
    these_refs = []
    for (i, vy) in enumerate(vysd):
        link_text = vysl[i]
        link_target = "{} {}:{}".format(vy[0], vy[1], vy[2])
        these_refs.append("{}({})".format(link_text, link_target))
    refsCompiled.append((x, " ".join(these_refs)))
utils.caption(
    0,
    "Compiled {} cross references into {} notes".format(nCrossrefs, len(refsCompiled)),
)
# In[34]:
# In[ ]:
# Column layout of the SHEBANQ notes file (tab separated, one note per row)
sfields = """
version
book
chapter
verse
clause_atom
is_shared
is_published
status
keywords
ntext
""".strip().split()
# In[ ]:
# One "{}\t" placeholder per field, newline-terminated
sfields_fmt = ("{}\t" * (len(sfields) - 1)) + "{}\n"
# In[ ]:
# thisNotes/notesFile are set earlier in the pipeline configuration
ofs = open("{}/{}".format(thisNotes, notesFile), "w")
ofs.write("{}\n".format("\t".join(sfields)))
# In[ ]:
# Each note is anchored to the first clause atom of the verse it belongs to
for (v, refs) in refsCompiled:
    firstWord = L.d(v, otype="word")[0]
    ca = F.number.v(L.u(firstWord, otype="clause_atom")[0])
    (bk, ch, vs) = T.sectionFromNode(v, lang="la")
    ofs.write(
        sfields_fmt.format(
            VERSION,
            bk,
            ch,
            vs,
            ca,
            "T",  # is_shared
            "",  # is_published
            CROSSREF_STATUS,
            CROSSREF_KEYWORD,
            refs,
        )
    )
# In[34]:
utils.caption(0, "Generated {} notes".format(len(refsCompiled)))
ofs.close()
# # Diffs
#
# Check differences with previous versions.
# In[35]:
# In[35]:
# Report differences of the new features against the previous version
utils.checkDiffs(thisTempTf, thisTf, only=set(newFeatures))
# # Deliver
#
# Copy the new TF feature from the temporary location where it has been created to its final destination.
# In[36]:
# In[36]:
utils.deliverDataset(thisTempTf, thisTf)
# # Compile TF
# In[38]:
# In[ ]:
utils.caption(4, "Load and compile the new TF features")
# In[38]:
# Load core data plus the freshly delivered features and expose the
# TF API names (F, E, L, T, ...) as globals for the examples below
TF = Fabric(locations=[coreTf, thisTf], modules=[""])
api = TF.load(newFeatureStr)
api.makeAvailableIn(globals())
# # Examples
# We list all the crossrefs that the verses of Genesis 10 are involved in.
# In[39]:
# In[ ]:
# Sanity check: list every crossref that the verses of Genesis 10
# participate in, for the combined feature and for both methods.
utils.caption(4, "Test: crossrefs of Genesis 10")
# In[ ]:
chapter = ("Genesis", 10)
chapterNode = T.nodeFromSection(chapter)
# NOTE(review): startVerses is shared across the three methods below, so each
# method's report also includes the verses collected for the previous methods.
# This looks unintentional, but the listing is only a diagnostic -- confirm
# before moving the initialization inside the loop.
startVerses = {}
# In[39]:
for method in ["", "SET", "LCS"]:
    utils.caption(0, "\tMethod {}".format(method))
    for verseNode in L.d(chapterNode, otype="verse"):
        # Outgoing crossref edges of this verse for the method ("" = combined)
        crossrefs = Es("crossref{}".format(method)).f(verseNode)
        if crossrefs:
            startVerses[T.sectionFromNode(verseNode)] = crossrefs
    utils.caption(0, "\t\t{} start verses".format(len(startVerses)))
    for (start, crossrefs) in sorted(startVerses.items()):
        utils.caption(0, "\t\t{} {}:{}".format(*start), continuation=True)
        for (target, confidence) in crossrefs:
            utils.caption(
                0,
                # fixed typo in the report line: "confidende" -> "confidence"
                "\t\t{:>20} {:<20} confidence {:>3}%".format(
                    "-" * 10 + ">",
                    "{} {}:{}".format(*T.sectionFromNode(target)),
                    confidence,
                ),
            )
# In[29]:
# In[29]:
# In pipeline (script) mode the notebook ends here with a success status;
# the remainder is retained legacy code (see the explanation below)
if SCRIPT:
    stop(good=True)
# # 6b. SHEBANQ annotations
#
# The code below generates extensive crossref notes for `4b`, including clique overviews and chapter diffs.
# But since the pipeline in October 2017, we generate much simpler notes.
# That code is above.
#
# We retain this code here, in case we want to expand the crossref functionality in the future again.
#
# Based on selected similarity matrices, we produce a SHEBANQ note set of cross references for similar passages.
# In[30]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# Retained legacy code for the extended SHEBANQ crossref notes (see the
# explanation above); superseded by the simpler note generation earlier.
# Maximum number of cross references per note (re-declared here)
MAX_REFS = 10
# In[ ]:
# In[ ]:
# Column layout of the legacy crossref database dump
dfields = """
book1
chapter1
verse1
book2
chapter2
verse2
similarity
""".strip().split()
# In[ ]:
dfields_fmt = ("{}\t" * (len(dfields) - 1)) + "{}\n"
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# Column layout of the legacy SHEBANQ notes file (duplicates the earlier
# sfields definition on purpose -- this section is self-contained legacy code)
sfields = """
version
book
chapter
verse
clause_atom
is_shared
is_published
status
keywords
ntext
""".strip().split()
# In[ ]:
sfields_fmt = ("{}\t" * (len(sfields) - 1)) + "{}\n"
# In[ ]:
# In[30]:
# # 7. Main
#
# In the cell below you can select the experiments you want to carry out.
#
# The previous cells contain just definitions and parameters.
# The next cell will do work.
#
# If none of the matrices and cliques have been computed before on the system where this runs, doing all experiments might take multiple hours (4-8).
# In[ ]:
# In[ ]:
# Restore defaults, then run the full batch of experiments. The commented
# calls are alternative entry points kept for interactive use.
reset_params()
# do_experiment(False, 'sentence', 'LCS', 60, False)
# In[ ]:
do_all_experiments()
# do_all_experiments(no_fixed=True, only_object='chapter')
# crossrefs2shebanq()
# show_all_experiments()
# get_specific_crossrefs(False, 'verse', 'LCS', 60, 'crossrefs_lcs_db.txt')
# do_all_chunks()
# In[ ]:
# In[ ]:
# Inject the experiments-table CSS into the notebook display
HTML(ecss)
# # 8. Overview of the similarities
#
# Here are the plots of two similarity matrices
# * with verses as chunks and SET as similarity method
# * with verses as chunks and LCS as similarity method
#
# Horizontally you see the degree of similarity from 0 to 100%, vertically the number of pairs that have that (rounded) similarity. This axis is logarithmic.
# In[ ]:
# In[ ]:
def _plot_similarity_distribution(method):
    """Run the verse-level, threshold-60 experiment for *method* ("SET" or
    "LCS") and plot the distribution of pairwise similarities: similarity
    percentage on the x-axis, log of the number of pairs on the y-axis.

    Relies on do_experiment() refreshing the global chunk_dist mapping of
    chunk pairs to similarity values -- TODO confirm against its definition.
    """
    do_experiment(False, "verse", method, 60, False)
    counts = collections.Counter()
    for (pair, sim) in chunk_dist.items():
        counts[int(round(sim))] += 1
    sims = range(MATRIX_THRESHOLD, 101)
    plt.figure(figsize=[15, 4])
    # max(..., 1) keeps log() defined for empty similarity bins
    plt.plot(sims, [math.log(max((1, counts[s]))) for s in sims], "b-")
    plt.axis([MATRIX_THRESHOLD, 101, 0, 15])
    plt.xlabel("similarity as %")
    plt.ylabel("log # similarities")
    plt.xticks(sims, sims, rotation="vertical")
    plt.margins(0.2)
    plt.subplots_adjust(bottom=0.15)
    plt.title("distances")


# In[ ]:
# The SET- and LCS-based matrices, plotted with identical settings
# (previously two copy-pasted cells; now deduplicated via the helper above)
_plot_similarity_distribution("SET")
# In[ ]:
_plot_similarity_distribution("LCS")
# In[ ]:
| 30.896048 | 6,888 | 0.579503 |
a157d32f7b13b416fb6bf59f5d4cfdbbe25ce080 | 4,870 | py | Python | src/python/pants/goal/initialize_reporting.py | WamBamBoozle/pants | 98cadfa1a5d337146903eb66548cfe955f2627b3 | [
"Apache-2.0"
] | null | null | null | src/python/pants/goal/initialize_reporting.py | WamBamBoozle/pants | 98cadfa1a5d337146903eb66548cfe955f2627b3 | [
"Apache-2.0"
] | null | null | null | src/python/pants/goal/initialize_reporting.py | WamBamBoozle/pants | 98cadfa1a5d337146903eb66548cfe955f2627b3 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import errno
import os
import sys
from six import StringIO
from pants.reporting.html_reporter import HtmlReporter
from pants.reporting.plaintext_reporter import PlainTextReporter
from pants.reporting.quiet_reporter import QuietReporter
from pants.reporting.report import Report, ReportingError
from pants.reporting.reporting_server import ReportingServerManager
from pants.util.dirutil import safe_mkdir, safe_rmtree
def initial_reporting(config, run_tracker):
  """Sets up the initial reporting configuration.

  Will be changed after we parse cmd-line flags.

  Builds a Report with two reporters:
  - 'capturing': a plain-text reporter writing to an in-memory buffer, so output
    emitted before flag parsing isn't lost (update_reporting replays it later).
  - 'html': always-on HTML reporting under <pants_workdir>/reports/<run_id>/html.

  :param config: Pants config; supplies pants_workdir and the report template dir.
  :param run_tracker: The current run tracker; must already carry a run id.
  :returns: The initialized Report.
  :raises ReportingError: if the run tracker has no run id set.
  """
  reports_dir = os.path.join(config.getdefault('pants_workdir'), 'reports')
  link_to_latest = os.path.join(reports_dir, 'latest')
  run_id = run_tracker.run_info.get_info('id')
  if run_id is None:
    raise ReportingError('No run_id set')
  # Start from a clean per-run report directory named after the run id.
  run_dir = os.path.join(reports_dir, run_id)
  safe_rmtree(run_dir)
  html_dir = os.path.join(run_dir, 'html')
  safe_mkdir(html_dir)
  # Re-point the 'latest' symlink at this run's directory.
  try:
    if os.path.lexists(link_to_latest):
      os.unlink(link_to_latest)
    os.symlink(run_dir, link_to_latest)
  except OSError as e:
    # Another run may beat us to deletion or creation.
    if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT):
      raise
  report = Report()
  # Capture initial console reporting into a buffer. We'll do something with it once
  # we know what the cmd-line flag settings are.
  outfile = StringIO()
  capturing_reporter_settings = PlainTextReporter.Settings(outfile=outfile, log_level=Report.INFO,
                                                           color=False, indent=True, timing=False,
                                                           cache_stats=False)
  capturing_reporter = PlainTextReporter(run_tracker, capturing_reporter_settings)
  report.add_reporter('capturing', capturing_reporter)
  # Set up HTML reporting. We always want that.
  template_dir = config.get('reporting', 'reports_template_dir')
  html_reporter_settings = HtmlReporter.Settings(log_level=Report.INFO,
                                                 html_dir=html_dir,
                                                 template_dir=template_dir)
  html_reporter = HtmlReporter(run_tracker, html_reporter_settings)
  report.add_reporter('html', html_reporter)
  # Add some useful RunInfo.
  run_tracker.run_info.add_info('default_report', html_reporter.report_path())
  (_, port) = ReportingServerManager.get_current_server_pid_and_port()
  if port:
    # A reporting server is running: record a browsable URL for this run.
    run_tracker.run_info.add_info('report_url', 'http://localhost:{}/run/{}'.format(port, run_id))
  return report
def update_reporting(options, is_quiet_task, run_tracker):
  """Updates reporting config once we've parsed cmd-line flags.

  Swaps the buffering 'capturing' reporter (installed by initial_reporting)
  for a real console reporter -- quiet or plain-text depending on flags --
  and replays the buffered output into it. Optionally also attaches a
  plaintext 'logfile' reporter when --logdir is set.

  :param options: Parsed options (level, colors, time, quiet, logdir, ...).
  :param is_quiet_task: True if the current task requests quiet output.
  :param run_tracker: The current run tracker whose report gets reconfigured.
  """
  # Get any output silently buffered in the old console reporter, and remove it.
  old_outfile = run_tracker.report.remove_reporter('capturing').settings.outfile
  old_outfile.flush()
  buffered_output = old_outfile.getvalue()
  old_outfile.close()
  log_level = Report.log_level_from_string(options.level or 'info')
  # Ideally, we'd use terminfo or somesuch to discover whether a
  # terminal truly supports color, but most that don't set TERM=dumb.
  color = (options.colors) and (os.getenv('TERM') != 'dumb')
  timing = options.time
  cache_stats = options.time  # TODO: Separate flag for this?
  if options.quiet or is_quiet_task:
    console_reporter = QuietReporter(run_tracker,
                                     QuietReporter.Settings(log_level=log_level, color=color))
  else:
    # Set up the new console reporter.
    settings = PlainTextReporter.Settings(log_level=log_level, outfile=sys.stdout, color=color,
                                          indent=True, timing=timing, cache_stats=cache_stats)
    console_reporter = PlainTextReporter(run_tracker, settings)
    # Replay what was captured before flags were known.
    console_reporter.emit(buffered_output)
    console_reporter.flush()
  run_tracker.report.add_reporter('console', console_reporter)
  if options.logdir:
    # Also write plaintext logs to a file. This is completely separate from the html reports.
    safe_mkdir(options.logdir)
    run_id = run_tracker.run_info.get_info('id')
    outfile = open(os.path.join(options.logdir, '{}.log'.format(run_id)), 'w')
    settings = PlainTextReporter.Settings(log_level=log_level, outfile=outfile, color=False,
                                          indent=True, timing=True, cache_stats=True)
    logfile_reporter = PlainTextReporter(run_tracker, settings)
    logfile_reporter.emit(buffered_output)
    logfile_reporter.flush()
    run_tracker.report.add_reporter('logfile', logfile_reporter)
| 42.719298 | 98 | 0.716222 |
a1586b7c08a86b032589e3a797f710af94eef3ed | 4,947 | py | Python | ResolvePageSwitcher.py | IgorRidanovic/DaVinciResolve-PageSwitcher | 5a771d8fa319454dbcf986b8921e5fa0c665baa9 | [
"MIT"
] | 17 | 2018-06-01T07:30:33.000Z | 2021-12-22T21:05:29.000Z | ResolvePageSwitcher.py | IgorRidanovic/DaVinciResolve-PageSwitcher | 5a771d8fa319454dbcf986b8921e5fa0c665baa9 | [
"MIT"
] | 2 | 2018-10-23T17:32:45.000Z | 2020-12-09T07:48:06.000Z | ResolvePageSwitcher.py | IgorRidanovic/DaVinciResolve-PageSwitcher | 5a771d8fa319454dbcf986b8921e5fa0c665baa9 | [
"MIT"
] | 5 | 2018-09-06T02:11:56.000Z | 2020-10-25T11:25:22.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# DaVinci Resolve scripting proof of concept. Resolve page external switcher.
# Local or TCP/IP control mode.
# Refer to Resolve V15 public beta 2 scripting API documentation for host setup.
# Copyright 2018 Igor Ridanovic, www.hdhead.com
from PyQt4 import QtCore, QtGui
import sys
import socket
# If API module not found assume we're working as a remote control
try:
import DaVinciResolveScript
#Instantiate Resolve object
resolve = DaVinciResolveScript.scriptapp('Resolve')
checkboxState = False
except ImportError:
print 'Resolve API not found.'
checkboxState = True
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
try:
_encoding = QtGui.QApplication.UnicodeUTF8
except AttributeError:
if __name__ == '__main__':
# Assign server parameters
server = '192.168.1.1'
port = 7779
app = QtGui.QApplication(sys.argv)
Form = QtGui.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
| 36.91791 | 80 | 0.761472 |
a15a0aec2c8adfc46228db42100cded4658cf98f | 14,022 | py | Python | Make Data Files.py | micitz/Dune_Aspect_Ratio_XB_Paper | 25395219886facb3a7e68835e8aae406dbff0b4d | [
"MIT"
] | null | null | null | Make Data Files.py | micitz/Dune_Aspect_Ratio_XB_Paper | 25395219886facb3a7e68835e8aae406dbff0b4d | [
"MIT"
] | null | null | null | Make Data Files.py | micitz/Dune_Aspect_Ratio_XB_Paper | 25395219886facb3a7e68835e8aae406dbff0b4d | [
"MIT"
] | null | null | null | """
All the data sources are scattered around the D drive, this script
organizes it and consolidates it into the "Data" subfolder in the
"Chapter 2 Dune Aspect Ratio" folder.
Michael Itzkin, 5/6/2020
"""
import shutil as sh
import pandas as pd
import numpy as np
import os
# Directory the consolidated data files are written into
DATA_DIR = os.path.join('..', 'Data')
# Directory holding most of the XBeach simulation data
XB_DIR = os.path.join('..', '..', 'XBeach Modelling', 'Dune Complexity Experiments')
def bogue_lidar_data():
    """
    Load all Bogue Banks morphometrics from 1997-2016
    and save a .csv of natural dune volumes and dune
    aspect ratios (crest height above toe / heel-to-toe width)
    """

    # Lidar survey years available for Bogue Banks
    years = [1997, 1998, 1999, 2000, 2004, 2005, 2010, 2011, 2014, 2016]

    # Load each year's morphometrics and tag the rows with the year.
    # Collect the frames in a list and concatenate once: concatenating
    # inside the loop re-copies the accumulated frame every iteration
    frames = []
    for year in years:
        path = os.path.join('..', '..', 'Chapter 1 Sand Fences', 'Data', f'Morphometrics for Bogue {year}.csv')
        temp = pd.read_csv(path, delimiter=',', header=0)
        temp['Year'] = year
        frames.append(temp)
    morpho = pd.concat(frames)

    # Make a new dataframe with just aspect ratios and volumes
    data = pd.DataFrame()
    data['Year'] = morpho['Year']
    data['Ratio'] = (morpho['y_crest'] - morpho['y_toe']) / (morpho['x_heel'] - morpho['x_toe'])
    data['Volume'] = morpho['Natural Dune Volume']

    # Save the Dataframe to the data folder
    save_name = os.path.join(DATA_DIR, 'Bogue Banks Volumes and Aspect Ratios.csv')
    data.to_csv(save_name, index=False)
    print(f'File Saved: {save_name}')
def initial_profiles():
    """
    Take all the initial profiles and place them
    into a Dataframe to save as a .csv

    Make a column for the experiment names, a column for
    the X-grids, and one profile column per stretch value
    """

    # The initial profiles are the same regardless of the surge level,
    # so just take them from the half surge simulations
    experiments = ['Toes Joined', 'Crests Joined', 'Heels Joined', 'Fenced']

    # Aspect ratio stretches (percent), in the column order of the output file
    stretches = [1, 20, 40, 60, -20, -40, -60]

    # Build one DataFrame per experiment, then concatenate them once
    frames = []
    for experiment in experiments:

        # Set a path to the profiles
        profile_dir = os.path.join(XB_DIR, f'{experiment} Half Surge')

        # Load the x-grid (shared by all of this experiment's profiles)
        x_grid = np.loadtxt(os.path.join(profile_dir, 'Dune Complexity 1 1', 'x.grd'))

        # Load every stretched dune into a dict keyed '<stretch> pct'
        # (previously seven copy-pasted np.loadtxt calls)
        dune_dict = {
            'Experiment': experiment.replace('Joined', 'Aligned'),
            'X': x_grid,
        }
        for stretch in stretches:
            bed_file = os.path.join(profile_dir, f'Dune Complexity {stretch} 1', 'bed.dep')
            dune_dict[f'{stretch} pct'] = np.loadtxt(fname=bed_file)
        frames.append(pd.DataFrame(data=dune_dict))
    profiles = pd.concat(frames)

    # Save the Dataframe to the data folder
    save_name = os.path.join(DATA_DIR, 'Initial Profiles.csv')
    profiles.to_csv(save_name, index=False)
    print(f'File Saved: {save_name}')
def initial_ratios():
    """
    Make a .csv file with the initial dune aspect ratios and
    dune volumes for the profiles used in the simulations
    """

    # The initial profiles are the same regardless of the surge level,
    # so just take the values from the half surge simulations
    experiments = ['Toes Joined', 'Crests Joined', 'Heels Joined', 'Fenced']

    # Load each experiment's initial dune ratios and tag the rows with the
    # experiment name; concatenate once at the end instead of per-iteration
    frames = []
    for experiment in experiments:
        init_ratio_fname = os.path.join(XB_DIR, f'{experiment} Half Surge', 'Setup Data', 'Initial Dune Ratios.csv')
        init_ratios = pd.read_csv(init_ratio_fname, delimiter=',', header=None, names=['Stretch', 'Ratio', 'Volume'])
        init_ratios['Experiment'] = experiment.replace('Joined', 'Aligned')
        frames.append(init_ratios)
    ratios = pd.concat(frames)

    # Save the Dataframe to the data folder
    save_name = os.path.join(DATA_DIR, 'Initial Dune Ratios.csv')
    ratios.to_csv(save_name, index=False)
    print(f'File Saved: {save_name}')
def joaquin_and_florence():
    """
    Load the storm surge time series' from
    Tropical Storm Joaquin and Hurricane
    Florence, put them in a .csv file
    """

    # The two source files use slightly different column layouts
    for storm in ['Joaquin', 'Florence']:

        # Load the tide predictions and observations as a Pandas dataframe,
        # parsing the separate date/time columns into a single timestamp
        filename = os.path.join(XB_DIR, 'Setup Data', f'{storm}.csv')
        if storm == 'Joaquin':
            parse_dates_cols = ['Date', 'Time']
            data_columns = ['Time', 'Predicted', 'Observed']
        else:
            parse_dates_cols = ['Date', 'Time (GMT)']
            data_columns = ['Time', 'Predicted', 'Preliminary', 'Observed']
        data = pd.read_csv(filename, delimiter=',', parse_dates=[parse_dates_cols], header=0)
        data.columns = data_columns

        # Calculate the non-tidal residual (observed minus predicted level)
        data['NTR'] = data['Observed'] - data['Predicted']

        # Add a string representation of the timestamps (year-month-day hour)
        times = data['Time'].tolist()
        data['String Times'] = [t.strftime('%Y-%m-%d %H') for t in times]

        # Save the DataFrame as a .csv and confirm it, consistent with the
        # other file builders in this script (this one was silent before)
        save_name = os.path.join(DATA_DIR, f'{storm}.csv')
        data.to_csv(save_name, index=False)
        print(f'File Saved: {save_name}')
def move_csv_output():
    """
    Copy the per-simulation morphometrics .csv files into the "Data"
    folder, organized by experiment (dune configuration x surge level).
    Skips copies that already exist and flags missing sources.
    """

    # Dune configurations, storm surge modifications, storm duration
    # increases, and dune aspect ratio stretches used by the experiments
    dunes = ['Toes Joined', 'Crests Joined', 'Heels Joined', 'Fenced']
    surges = ['Half', 'Normal', 'One Half']
    durations = [1, 12, 18, 24, 36, 48]
    stretches = [-60, -40, -20, 1, 20, 40, 60]

    # Loop through the dunes and surges
    for dune in dunes:
        for surge in surges:

            # Set the experiment folder name
            experiment_name = f'{dune} {surge} Surge'
            experiment_folder = os.path.join(XB_DIR, experiment_name)

            # Make a target folder to move the runs into. makedirs (vs the
            # previous mkdir) also creates the intermediate "XBeach
            # Morphometrics" level if needed and tolerates races
            save_folder = os.path.join(DATA_DIR, 'XBeach Morphometrics', experiment_name)
            os.makedirs(save_folder, exist_ok=True)

            # Loop through the dunes and durations within the experiment
            for stretch in stretches:
                for duration in durations:

                    # Set the simulation folder
                    run_name = f'Dune Complexity {stretch} {duration}'
                    simulation_folder = os.path.join(experiment_folder, run_name)

                    # Set the morphometrics file as the source. Set the
                    # destination name. Then copy the file over
                    source = os.path.join(simulation_folder, f'{run_name} Morphometrics.csv')
                    if os.path.exists(source):
                        destination = os.path.join(save_folder, f'{run_name} Morphometrics.csv')
                        if not os.path.exists(destination):
                            sh.copy(source, destination)
                            print(f'File Successfully Copied: {destination}')
                        else:
                            print(f'File already exists: {destination}')
                    else:
                        print(f'FILE DOES NOT EXIST: {source}')
def move_field_data():
    """
    Copy the 2017 and 2018 Bogue Banks field data morphometrics
    .csv files into the Data folder
    """
    # Field survey years with morphometrics available
    survey_years = [2017, 2018]
    # Field data location relative to this script
    field_dir = os.path.join('..', '..', 'Bogue Banks Field Data')
    for survey_year in survey_years:
        # Source .csv inside the year's field data folder
        source = os.path.join(field_dir, str(survey_year), f'Morphometrics for Bogue Banks {survey_year}.csv')
        # Matching destination inside the Data folder
        destination = os.path.join(DATA_DIR, f'Morphometrics for Bogue Banks {survey_year}.csv')
        sh.copy(source, destination)
def move_netcdf_output():
    """
    Copy each run's "xboutput.nc" netCDF file out of the XBeach
    experiment folders and into the "Data" folder, renaming the
    copy after the simulation
    """
    # Dune configurations, storm surge modifiers, storm duration
    # increases, and dune aspect ratio stretches covered by the runs
    dune_configs = ['Toes Joined', 'Crests Joined', 'Heels Joined', 'Fenced']
    surge_levels = ['Half', 'Normal', 'One Half']
    duration_increases = [1, 12, 18, 24, 36, 48]
    aspect_stretches = [-60, -40, -20, 1, 20, 40, 60]
    for dune_config in dune_configs:
        for surge_level in surge_levels:
            # Locate the experiment folder for this dune/surge pair
            experiment_name = f'{dune_config} {surge_level} Surge'
            experiment_folder = os.path.join(XB_DIR, experiment_name)
            # Make sure a matching destination folder exists under Data
            save_folder = os.path.join(DATA_DIR, 'XBeach Output', experiment_name)
            if not os.path.exists(save_folder):
                os.mkdir(save_folder)
            # Walk every stretch/duration run inside the experiment
            for aspect_stretch in aspect_stretches:
                for duration_increase in duration_increases:
                    run_name = f'Dune Complexity {aspect_stretch} {duration_increase}'
                    simulation_folder = os.path.join(experiment_folder, run_name)
                    # Copy xboutput.nc as "<run name>.nc" when present and
                    # not already copied; report each outcome
                    source = os.path.join(simulation_folder, 'xboutput.nc')
                    if not os.path.exists(source):
                        print(f'FILE DOES NOT EXIST: {source}')
                        continue
                    destination = os.path.join(save_folder, f'{run_name}.nc')
                    if os.path.exists(destination):
                        print(f'File already exists: {destination}')
                    else:
                        sh.copy(source, destination)
                        print(f'File Successfully Copied: {destination}')
def surge_time_series():
    """
    Collect the storm surge time series for every model run into a
    single .csv file that can be loaded as a DataFrame
    """
    # Storm surge modifiers (with their folder labels) and storm
    # duration increases used by the runs
    surge_factors, surge_names = [0.5, 1.0, 1.5], ['Half', 'Normal', 'One Half']
    duration_increases = [1, 12, 18, 24, 36, 48]
    # Accumulate one column per surge/duration pair
    surge_df = pd.DataFrame()
    for surge_factor, surge_name in zip(surge_factors, surge_names):
        for duration_increase in duration_increases:
            # Columns must all share the same length, so pad every
            # series into a preset 125 "hour" long NaN array
            padded_series = np.full((1, 125), fill_value=np.nan)[0]
            # Load the run's residual series into the padded array
            filename = os.path.join(XB_DIR, f'Toes Joined {surge_name} Surge', f'Dune Complexity 1 {duration_increase}', 'ntr.txt')
            ntr = np.genfromtxt(filename, dtype=np.float32)
            padded_series[:len(ntr)] = ntr
            surge_df[f'{surge_name} {duration_increase}'] = padded_series
    # Write the assembled DataFrame to disk
    save_name = os.path.join(DATA_DIR, 'Storm Surge Time Series.csv')
    surge_df.to_csv(save_name, index=False)
def main():
    """
    Consolidate all the data sources. Steps that have already been
    run are left disabled below; uncomment one to regenerate it.
    """
    # Disabled one-off consolidation steps:
    # initial_profiles()       # .csv of the initial profiles used
    # initial_ratios()         # .csv of the initial dune ratios
    # bogue_lidar_data()       # natural dune volumes/ratios from Bogue Banks LiDAR
    # surge_time_series()      # storm surge time series for all model runs
    # joaquin_and_florence()   # surge data for TS Joaquin and Hurricane Florence
    # move_csv_output()        # run morphometrics .csv files -> Data folder
    # move_netcdf_output()     # netCDF output files -> Data folder
    # Move the Bogue Banks field data morphometrics from 2017 and
    # 2018 into the data folder
    move_field_data()
# Script entry point: run the consolidation steps selected in main()
if __name__ == '__main__':
    main()
| 36.80315 | 118 | 0.597276 |
a15ae079911483a5e3b82012f76254443eb7a059 | 339 | py | Python | counter-test-applications/lr100000/linear-regr-100k.py | EsperLiu/vPython | f1005f011d6d9fd079cf72e8f78bab6d95a9f993 | [
"0BSD"
] | 1 | 2021-11-21T03:31:32.000Z | 2021-11-21T03:31:32.000Z | counter-test-applications/lr100000/linear-regr-100k.py | EsperLiu/vPython | f1005f011d6d9fd079cf72e8f78bab6d95a9f993 | [
"0BSD"
] | null | null | null | counter-test-applications/lr100000/linear-regr-100k.py | EsperLiu/vPython | f1005f011d6d9fd079cf72e8f78bab6d95a9f993 | [
"0BSD"
] | 1 | 2021-11-28T05:57:55.000Z | 2021-11-28T05:57:55.000Z |
import numpy as np
from sklearn.linear_model import LinearRegression
# Marker call: end of the import phase. Not defined in this file --
# presumably injected by the measurement harness; TODO confirm.
end_of_import()
# 100,000 samples arranged as a single feature column
X = np.array(range(0,100000)).reshape(-1, 1)
# y = 2x + 3
y = np.dot(X, 2) + 3
# Marker call: end of data initialisation (harness hook, see above)
end_of_init()
# Fit ordinary least squares on the synthetic data
reg = LinearRegression().fit(X, y)
end_of_computing() | 16.95 | 49 | 0.696165 |
a15b3e54d6303597b66c9ac9aa7e5fefcc34013d | 262 | py | Python | python/bitcoin/ch04/04_08.py | gangserver/py_test | 869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4 | [
"Apache-2.0"
] | null | null | null | python/bitcoin/ch04/04_08.py | gangserver/py_test | 869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4 | [
"Apache-2.0"
] | null | null | null | python/bitcoin/ch04/04_08.py | gangserver/py_test | 869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4 | [
"Apache-2.0"
] | null | null | null | import requests
# Korbit public ticker endpoint for the BTC/KRW currency pair
url = "https://api.korbit.co.kr/v1/ticker/detailed?currency_pair=btc_krw"
r = requests.get(url)
# Decode the JSON response body into a dict
bitcoin = r.json()
print(bitcoin)
print(type(bitcoin))
# Individual ticker fields -- presumably last trade price, best
# bid/ask, and 24h volume per the Korbit API; confirm against its docs
print(bitcoin['last'])
print(bitcoin['bid'])
print(bitcoin['ask'])
print(bitcoin['volume'])
| 18.714286 | 73 | 0.725191 |
a15b9e2b4f9954059a9f62e3b0c43fda6866814f | 3,938 | py | Python | jackselect/indicator.py | SpotlightKid/jack-select | acb6cfa5a48846fa7640373d4976d4df1ab0bbd7 | [
"MIT"
] | 12 | 2016-03-30T18:32:35.000Z | 2022-01-18T21:12:51.000Z | jackselect/indicator.py | SpotlightKid/jack-select | acb6cfa5a48846fa7640373d4976d4df1ab0bbd7 | [
"MIT"
] | 8 | 2018-09-03T15:26:51.000Z | 2020-04-20T14:44:00.000Z | jackselect/indicator.py | SpotlightKid/jack-select | acb6cfa5a48846fa7640373d4976d4df1ab0bbd7 | [
"MIT"
] | null | null | null | """A convenience class for a GTK 3 system tray indicator."""
from pkg_resources import resource_filename
import gi
gi.require_version('Gtk', '3.0') # noqa
from gi.repository import Gtk
from gi.repository.GdkPixbuf import Pixbuf
| 32.278689 | 94 | 0.61935 |
a15c583b91868493579d97f1c0cb3471ef7cba0e | 442 | py | Python | myaxf/migrations/0011_minebtns_is_used.py | Pyrans/test1806 | 1afc62e09bbebf74521b4b6fdafde8eeaa260ed9 | [
"Apache-2.0"
] | null | null | null | myaxf/migrations/0011_minebtns_is_used.py | Pyrans/test1806 | 1afc62e09bbebf74521b4b6fdafde8eeaa260ed9 | [
"Apache-2.0"
] | null | null | null | myaxf/migrations/0011_minebtns_is_used.py | Pyrans/test1806 | 1afc62e09bbebf74521b4b6fdafde8eeaa260ed9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-11-06 01:54
from __future__ import unicode_literals
from django.db import migrations, models
| 21.047619 | 52 | 0.608597 |
a15d304cf1b066b2781b604c9736d8b3d3f4ed26 | 3,342 | py | Python | components/PyTorch/pytorch-kfp-components/setup.py | nostro-im/pipelines | 39f5b6b74040abbf4b764cbd5b422d7548723d9e | [
"Apache-2.0"
] | 2,860 | 2018-05-24T04:55:01.000Z | 2022-03-31T13:49:56.000Z | components/PyTorch/pytorch-kfp-components/setup.py | nostro-im/pipelines | 39f5b6b74040abbf4b764cbd5b422d7548723d9e | [
"Apache-2.0"
] | 7,331 | 2018-05-16T09:03:26.000Z | 2022-03-31T23:22:04.000Z | components/PyTorch/pytorch-kfp-components/setup.py | nostro-im/pipelines | 39f5b6b74040abbf4b764cbd5b422d7548723d9e | [
"Apache-2.0"
] | 1,359 | 2018-05-15T11:05:41.000Z | 2022-03-31T09:42:09.000Z | #!/usr/bin/env/python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script."""
import importlib
import os
import types
from setuptools import setup, find_packages
if __name__ == "__main__":
    # Resolve this file's directory relative to the current working
    # directory so package_dir/packages paths stay relative
    relative_directory = os.path.relpath(
        os.path.dirname(os.path.abspath(__file__)))
    version = detect_version(relative_directory)
    setup(
        name="pytorch-kfp-components",
        version=version,
        description="PyTorch Kubeflow Pipeline",
        url="https://github.com/kubeflow/pipelines/tree/master/components/PyTorch/pytorch-kfp-components/",
        author="The PyTorch Kubeflow Pipeline Components authors",
        author_email="pytorch-kfp-components@fb.com",
        license="Apache License 2.0",
        # BUG FIX: the setuptools keyword is "extras_require"; the
        # original "extra_requires" was silently ignored, so the
        # "tests" extra was never registered
        extras_require={"tests": make_required_test_packages()},
        include_package_data=True,
        python_requires=">=3.6",
        install_requires=make_required_install_packages(),
        dependency_links=make_dependency_links(),
        keywords=[
            "Kubeflow Pipelines",
            "KFP",
            "ML workflow",
            "PyTorch",
        ],
        classifiers=[
            "Development Status :: 4 - Beta",
            "Operating System :: Unix",
            "Operating System :: MacOS",
            "Intended Audience :: Developers",
            "Intended Audience :: Education",
            "Intended Audience :: Science/Research",
            "License :: OSI Approved :: Apache Software License",
            "Programming Language :: Python :: 3 :: Only",
            "Topic :: Scientific/Engineering",
            "Topic :: Scientific/Engineering :: Artificial Intelligence",
            "Topic :: Software Development",
            "Topic :: Software Development :: Libraries",
            "Topic :: Software Development :: Libraries :: Python Modules",
        ],
        package_dir={
            "pytorch_kfp_components":
                os.path.join(relative_directory, "pytorch_kfp_components")
        },
        packages=find_packages(where=relative_directory),
    )
| 31.528302 | 107 | 0.635548 |
a15d6cd6a92c370d9583f2a5012f9737df67a02a | 10,453 | py | Python | generate_pipelines.py | phorne-uncharted/d3m-primitives | 77d900b9dd6ab4b2b330f4e969dabcdc419c73e1 | [
"MIT"
] | null | null | null | generate_pipelines.py | phorne-uncharted/d3m-primitives | 77d900b9dd6ab4b2b330f4e969dabcdc419c73e1 | [
"MIT"
] | null | null | null | generate_pipelines.py | phorne-uncharted/d3m-primitives | 77d900b9dd6ab4b2b330f4e969dabcdc419c73e1 | [
"MIT"
] | null | null | null | """
Utility to get generate all submission pipelines for all primitives.
This script assumes that `generate_annotations.py` has already been run.
"""
import os
import subprocess
import shutil
import fire
from kf_d3m_primitives.data_preprocessing.data_cleaning.data_cleaning_pipeline import DataCleaningPipeline
from kf_d3m_primitives.data_preprocessing.text_summarization.duke_pipeline import DukePipeline
from kf_d3m_primitives.data_preprocessing.geocoding_forward.goat_forward_pipeline import GoatForwardPipeline
from kf_d3m_primitives.data_preprocessing.geocoding_reverse.goat_reverse_pipeline import GoatReversePipeline
from kf_d3m_primitives.data_preprocessing.data_typing.simon_pipeline import SimonPipeline
from kf_d3m_primitives.clustering.spectral_clustering.spectral_clustering_pipeline import SpectralClusteringPipeline
from kf_d3m_primitives.clustering.k_means.storc_pipeline import StorcPipeline
from kf_d3m_primitives.clustering.hdbscan.hdbscan_pipeline import HdbscanPipeline
from kf_d3m_primitives.dimensionality_reduction.tsne.tsne_pipeline import TsnePipeline
from kf_d3m_primitives.feature_selection.pca_features.pca_features_pipeline import PcaFeaturesPipeline
from kf_d3m_primitives.feature_selection.rf_features.rf_features_pipeline import RfFeaturesPipeline
from kf_d3m_primitives.natural_language_processing.sent2vec.sent2vec_pipeline import Sent2VecPipeline
from kf_d3m_primitives.object_detection.retinanet.object_detection_retinanet_pipeline import ObjectDetectionRNPipeline
from kf_d3m_primitives.image_classification.imagenet_transfer_learning.gator_pipeline import GatorPipeline
from kf_d3m_primitives.ts_classification.knn.kanine_pipeline import KaninePipeline
from kf_d3m_primitives.ts_classification.lstm_fcn.lstm_fcn_pipeline import LstmFcnPipeline
from kf_d3m_primitives.ts_forecasting.vector_autoregression.var_pipeline import VarPipeline
from kf_d3m_primitives.ts_forecasting.deep_ar.deepar_pipeline import DeepARPipeline
from kf_d3m_primitives.ts_forecasting.nbeats.nbeats_pipeline import NBEATSPipeline
from kf_d3m_primitives.remote_sensing.classifier.mlp_classifier_pipeline import MlpClassifierPipeline
# CLI entry point: expose generate_pipelines (defined elsewhere in
# this file) through the python-fire command-line wrapper
if __name__ == '__main__':
    fire.Fire(generate_pipelines)
a16015f7fdd109191a18e2ce3c5cc5cd31b338c6 | 210 | py | Python | gorynych/ontologies/gch/edges/basic/__init__.py | vurmux/gorynych | d721e8cdb61f7c7ee6bc4bd31026605df15f2d9d | [
"Apache-2.0"
] | null | null | null | gorynych/ontologies/gch/edges/basic/__init__.py | vurmux/gorynych | d721e8cdb61f7c7ee6bc4bd31026605df15f2d9d | [
"Apache-2.0"
] | null | null | null | gorynych/ontologies/gch/edges/basic/__init__.py | vurmux/gorynych | d721e8cdb61f7c7ee6bc4bd31026605df15f2d9d | [
"Apache-2.0"
] | null | null | null | __all__ = [
"aggregation",
"association",
"composition",
"connection",
"containment",
"dependency",
"includes",
"membership",
"ownership",
"responsibility",
"usage"
] | 16.153846 | 21 | 0.557143 |
a162116929e58d2ceb5db3d4712dce3ef830f40a | 3,851 | py | Python | square.py | chriswilson1982/black-and-white | e275e6f534aa51f12f4545730b627ce280aae8c3 | [
"MIT"
] | null | null | null | square.py | chriswilson1982/black-and-white | e275e6f534aa51f12f4545730b627ce280aae8c3 | [
"MIT"
] | null | null | null | square.py | chriswilson1982/black-and-white | e275e6f534aa51f12f4545730b627ce280aae8c3 | [
"MIT"
] | 2 | 2020-06-05T04:37:08.000Z | 2020-09-30T06:15:22.000Z | # coding: utf-8
"""Square module.
Represents the squares on the game grid.
"""
from scene import *
from common import *
import sound
| 33.780702 | 243 | 0.578811 |
a1635f7424a1cd00dce9eb1d4e2acface083e3bd | 1,128 | py | Python | coocurrence_loader.py | miselico/KGlove | 2bcbce3d14ed5173a319d80bfff95be6486b41e2 | [
"MIT"
] | 2 | 2021-11-05T09:27:57.000Z | 2022-02-25T12:33:14.000Z | coocurrence_loader.py | miselico/KGlove | 2bcbce3d14ed5173a319d80bfff95be6486b41e2 | [
"MIT"
] | null | null | null | coocurrence_loader.py | miselico/KGlove | 2bcbce3d14ed5173a319d80bfff95be6486b41e2 | [
"MIT"
] | 1 | 2022-02-25T12:37:47.000Z | 2022-02-25T12:37:47.000Z | import pathlib
from struct import unpack
from typing import BinaryIO, List, Optional, Tuple, cast
import numpy as np
import scipy.sparse
# Script entry point: load the binary co-occurrence dump and print
# it converted to CSC sparse form
if __name__ == "__main__":
    p = pathlib.Path("output/cooccurrence_file.bin")
    with open(p, 'rb') as file:  # NOTE(review): `file` shadows a legacy builtin name
        m = load(file)
        print(m.tocsc())
| 28.2 | 77 | 0.639184 |
a163e601ea9b0587f0a7996da2ea54d7b047cc87 | 597 | py | Python | api_app/migrations/0001_initial.py | DurkinDevelopment/coinbase_api | 0cea72234d481d09ff906f7bc064cfe16111c785 | [
"MIT"
] | null | null | null | api_app/migrations/0001_initial.py | DurkinDevelopment/coinbase_api | 0cea72234d481d09ff906f7bc064cfe16111c785 | [
"MIT"
] | null | null | null | api_app/migrations/0001_initial.py | DurkinDevelopment/coinbase_api | 0cea72234d481d09ff906f7bc064cfe16111c785 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.12 on 2022-02-15 02:57
from django.db import migrations, models
| 24.875 | 117 | 0.562814 |
a163f9dace925925161f417c4fc2f6f13d99f9d2 | 924 | py | Python | Kalender/views.py | RamonvdW/nhb-apps | 5a9f840bfe066cd964174515c06b806a7b170c69 | [
"BSD-3-Clause-Clear"
] | 1 | 2021-12-22T13:11:12.000Z | 2021-12-22T13:11:12.000Z | Kalender/views.py | RamonvdW/nhb-apps | 5a9f840bfe066cd964174515c06b806a7b170c69 | [
"BSD-3-Clause-Clear"
] | 9 | 2020-10-28T07:07:05.000Z | 2021-06-28T20:05:37.000Z | Kalender/views.py | RamonvdW/nhb-apps | 5a9f840bfe066cd964174515c06b806a7b170c69 | [
"BSD-3-Clause-Clear"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2021 Ramon van der Winkel.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
from django.views.generic import View
from django.urls import reverse
from django.http import HttpResponseRedirect
from Functie.rol import Rollen, rol_get_huidige
from .view_maand import get_url_huidige_maand
# end of file
| 26.4 | 79 | 0.683983 |
a166142b9f7a87deb268c549d8183c79b3298038 | 9,511 | py | Python | profile.py | giswqs/Depression-filling-1D | 3c0ed86bbbe6f0b8573212a3efd59375dc7be45e | [
"MIT"
] | 1 | 2022-02-27T14:40:00.000Z | 2022-02-27T14:40:00.000Z | profile.py | giswqs/Depression-filling-1D | 3c0ed86bbbe6f0b8573212a3efd59375dc7be45e | [
"MIT"
] | null | null | null | profile.py | giswqs/Depression-filling-1D | 3c0ed86bbbe6f0b8573212a3efd59375dc7be45e | [
"MIT"
] | null | null | null | import os
import numpy as np
# class for depression
# read profile values from CSV
def read_csv(in_csv, header = True, col_index = 1):
    """Read one column of profile values from a CSV file.

    Args:
        in_csv: path to the CSV file.
        header: when True, skip the first line of the file.
        col_index: 1-based index of the column to read.

    Returns:
        The selected column's values as a list of floats.
    """
    with open(in_csv) as csv_file:
        rows = csv_file.readlines()
    # Drop the header row when present
    start_row = 1 if header else 0
    # Pull the requested (1-based) column out of each remaining row
    return [
        float(row.strip().split(",")[col_index - 1])
        for row in rows[start_row:]
    ]
def write_csv(in_csv, out_csv, col_name, in_values):
    """Append a new column to a CSV file and write the result.

    Args:
        in_csv: path of the source CSV (first line is the header).
        out_csv: path of the CSV to write.
        col_name: name of the appended column.
        in_values: one value per data row, appended in file order.
    """
    with open(in_csv) as src:
        rows = src.readlines()
    # Extend the header line with the new column name
    new_header = rows[0].strip() + "," + col_name + '\n'
    # Extend every data row with its corresponding value
    new_rows = []
    for row_number, row in enumerate(rows[1:]):
        new_rows.append(row.strip() + ',' + str(in_values[row_number]) + '\n')
    with open(out_csv, 'w') as dst:
        dst.write(new_header)
        dst.writelines(new_rows)
# check the depression type of a point based on its neighbors
# find forward ascending neighbors
# find forward descending neighbors
# find backward descending neighbors
# find all points associated with a depression based on one point
# remove acending edge and descending edge
# get depression width, height, and area
# find all depressions recursively
# Script entry point: demo run over data/profile1.csv
if __name__ == '__main__':
    # ************************ change the following parameters if needed ******************************** #
    width = 0
    height = 0
    area = 0
    work_dir = os.path.dirname(__file__)
    in_csv = os.path.join(work_dir, 'data/profile1.csv')
    # Output file written next to the input, tagged as level-1 filled
    out_csv = in_csv.replace('.csv', '_level1.csv')
    # Profile values come from the 4th CSV column
    values = read_csv(in_csv, header=True, col_index=4)
    size = len(values)
    print("Total number of rows: {}".format(size))
    # Depression type of the point at index 557 (see check_dep_type)
    dep_type = check_dep_type(values, 557)
    # print(dep_type)
    # All points of the depression containing index 1087
    dep_pts = find_single_depression(values, index = 1087)
    # print(dep_pts)
    # Find all depressions, then fill them -- in_width/in_depth are
    # presumably minimum-width/depth thresholds; confirm against
    # find_depressions
    dep_list = find_depressions(values, in_width = 3, in_depth = 0)
    out_values = fill_depressions(values, dep_list)
    # print(out_values)
    # Append the filled profile as a new "LEVEL-1" column
    write_csv(in_csv, out_csv, "LEVEL-1", out_values)
    # print(get_width_depth_area(values, dep_pts))
# ************************************************************************************************** # | 33.255245 | 133 | 0.588161 |
a166258a27d4639c261790d1e5d9c74ab19c0e5f | 4,544 | py | Python | data/make_joint_comp_inc_data.py | gcunhase/StackedDeBERT | 82777114fd99cafc6e2a3d760e774f007c563245 | [
"MIT"
] | 32 | 2020-01-03T09:53:03.000Z | 2021-09-07T07:23:26.000Z | data/make_joint_comp_inc_data.py | gcunhase/StackedDeBERT | 82777114fd99cafc6e2a3d760e774f007c563245 | [
"MIT"
] | null | null | null | data/make_joint_comp_inc_data.py | gcunhase/StackedDeBERT | 82777114fd99cafc6e2a3d760e774f007c563245 | [
"MIT"
] | 6 | 2020-01-21T06:50:21.000Z | 2021-01-22T08:04:00.000Z | import argparse
import os
import csv
import random
from utils import ensure_dir, get_project_path
from collections import defaultdict
# POS-tag for irrelevant tag selection
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
__author__ = "Gwena Cunha"
def make_dataset(root_data_dir, complete_data_dir, incomplete_data_dir, results_dir):
"""
:param root_data_dir: directory to save data
:param complete_data_dir: subdirectory with complete data
:param incomplete_data_dir: subdirectory with incomplete data
:param results_dir: subdirectory with incomplete data
:return:
"""
print("Making incomplete intention classification dataset...")
complete_data_dir_path = root_data_dir + '/' + complete_data_dir
incomplete_data_dir_path = root_data_dir + '/' + incomplete_data_dir
results_dir_path = root_data_dir + '/' + results_dir
ensure_dir(results_dir_path)
# Traverse all sub-directories
files_dictionary = defaultdict(lambda: [])
for sub_dir in os.walk(complete_data_dir_path):
if len(sub_dir[1]) == 0:
data_name = sub_dir[0].split('/')[-1]
files_dictionary[data_name] = sub_dir[2]
# Open train and test tsv files
for k, v in files_dictionary.items():
save_path = results_dir_path + '/' + k
ensure_dir(save_path)
for comp_v_i, inc_v_i in zip(['test.tsv', 'train.tsv'], ['test_withMissingWords.tsv', 'train_withMissingWords.tsv']):
complete_tsv_file = open(complete_data_dir_path + '/' + k + '/' + comp_v_i, 'r')
incomplete_tsv_file = open(incomplete_data_dir_path + '/' + k + '/' + inc_v_i, 'r')
reader_complete = csv.reader(complete_tsv_file, delimiter='\t')
reader_incomplete = csv.reader(incomplete_tsv_file, delimiter='\t')
sentences, labels, missing_words_arr, targets = [], [], [], []
row_count = 0
for row_comp, row_inc in zip(reader_complete, reader_incomplete):
if row_count != 0:
# Incomplete
sentences.append(row_inc[0])
labels.append(row_inc[1])
missing_words_arr.append(row_inc[2])
targets.append(row_comp[0])
if 'train' in comp_v_i:
# Complete
sentences.append(row_comp[0])
labels.append(row_comp[1])
missing_words_arr.append('')
targets.append(row_comp[0])
row_count += 1
# Shuffle
if 'train' in comp_v_i:
c = list(zip(sentences, labels, missing_words_arr, targets))
random.shuffle(c)
sentences, labels, missing_words_arr, targets = zip(*c)
# Save train, test, val in files in the format (sentence, label)
keys = ['sentence', 'label', 'missing', 'target']
data_dict = {'sentence': sentences, 'label': labels, 'missing': missing_words_arr, 'target': targets}
write_tsv(save_path, comp_v_i, keys, data_dict)
print("Complete + Incomplete intention classification dataset completed")
# Script entry point: parse CLI arguments (init_args is defined
# elsewhere in this file) and build the joint dataset
if __name__ == '__main__':
    args = init_args()
    make_dataset(args.root_data_dir, args.complete_data_dir, args.incomplete_data_dir, args.results_dir)
| 42.867925 | 125 | 0.645026 |
a16669ec079300a0633ffd694b38772760885089 | 4,989 | py | Python | recipes/models.py | JakubKoralewski/django-recipes | 3794c6a96fb0765e2e3cebfc3968dae88e4f084c | [
"MIT"
] | null | null | null | recipes/models.py | JakubKoralewski/django-recipes | 3794c6a96fb0765e2e3cebfc3968dae88e4f084c | [
"MIT"
] | 5 | 2021-03-19T03:49:52.000Z | 2021-06-10T19:16:05.000Z | recipes/models.py | JakubKoralewski/django-recipes | 3794c6a96fb0765e2e3cebfc3968dae88e4f084c | [
"MIT"
] | null | null | null | from typing import List, Dict, Union
from django.db import models
# Create your models here.
# https://en.wikipedia.org/wiki/Cooking_weights_and_measures
| 28.83815 | 100 | 0.71798 |
a166f12db4d713441e75c22cdaa77f074c8a2431 | 835 | py | Python | zoneh/conf.py | RaminAT/zoneh | 73c8e66d76cbd0aa51551e21740d88ff439158a9 | [
"MIT"
] | 8 | 2019-05-27T07:21:51.000Z | 2021-09-14T21:26:53.000Z | zoneh/conf.py | RaminAT/zoneh | 73c8e66d76cbd0aa51551e21740d88ff439158a9 | [
"MIT"
] | 5 | 2020-04-08T12:10:44.000Z | 2021-02-11T01:51:41.000Z | zoneh/conf.py | RaminAT/zoneh | 73c8e66d76cbd0aa51551e21740d88ff439158a9 | [
"MIT"
] | 5 | 2020-03-29T17:04:05.000Z | 2021-09-14T21:26:58.000Z | """Config module."""
import json
import logging
import os
from zoneh.exceptions import ConfigError
_log = logging.getLogger(__name__)
_CONFIG_FILE = 'config.json'
def _load_config():
    """Read the JSON configuration file and return it as a dict.

    Raises:
        ConfigError: if the file is missing or contains invalid JSON.
    """
    if not os.path.isfile(_CONFIG_FILE):
        err_msg = f'Cannot find {_CONFIG_FILE} configuration file'
        _log.error(err_msg)
        raise ConfigError(err_msg)
    with open(_CONFIG_FILE, 'r') as config_file:
        raw_config = config_file.read()
    try:
        return json.loads(raw_config)
    except json.decoder.JSONDecodeError:
        err_msg = f'Malformed JSON in {_CONFIG_FILE} configuration file'
        _log.error(err_msg)
        raise ConfigError(err_msg)
# Loaded once at import time; every get_config() caller shares this dict
_CONF = _load_config()
def get_config():
    """Return config as singleton (the module-level _CONF dict)."""
    return _CONF
| 22.567568 | 72 | 0.68024 |
a1676b1833d7b48b6064b056da63a6fba24af86a | 3,629 | py | Python | mlogger.py | morris178/mqtt-data-logger | 75e0fbbe0311ecaba8c905df356d6f7d8a0e3615 | [
"MIT"
] | null | null | null | mlogger.py | morris178/mqtt-data-logger | 75e0fbbe0311ecaba8c905df356d6f7d8a0e3615 | [
"MIT"
] | null | null | null | mlogger.py | morris178/mqtt-data-logger | 75e0fbbe0311ecaba8c905df356d6f7d8a0e3615 | [
"MIT"
] | null | null | null | ###demo code provided by Steve Cope at www.steves-internet-guide.com
##email steve@steves-internet-guide.com
###Free to use for any purpose
"""
implements data logging class
"""
import time, os, json, logging
###############
| 36.656566 | 76 | 0.572058 |
a16793db9e30c478f5f315f915ced2b2053b7849 | 6,299 | py | Python | ptools/lipytools/little_methods.py | piteren/ptools_module | 5117d06d7dea4716b573b93d5feb10137966c373 | [
"MIT"
] | null | null | null | ptools/lipytools/little_methods.py | piteren/ptools_module | 5117d06d7dea4716b573b93d5feb10137966c373 | [
"MIT"
] | null | null | null | ptools/lipytools/little_methods.py | piteren/ptools_module | 5117d06d7dea4716b573b93d5feb10137966c373 | [
"MIT"
] | null | null | null | """
2018 (c) piteren
some little methods (but frequently used) for Python
"""
from collections import OrderedDict
import csv
import inspect
import json
import os
import pickle
import random
import shutil
import string
import time
from typing import List, Callable, Any, Optional
# prepares function parameters dictionary
# short(compressed) scientific notation for floats
# returns sting from float, always of given width
# *********************************************************************************************** file readers / writers
# ********************************************* for raise_exception=False each reader will return None if file not found
# returns timestamp string
# returns nice string of given list
# prints nested dict
# prepares folder, creates or flushes
# random <0;1> probability function
# terminal progress bar | 30.877451 | 120 | 0.582156 |
a16884524638226d0ba06be614706d7a5f91b5dc | 2,135 | py | Python | tests/test.py | zephenryus/botw-grass | 31adaebd69b56c4177bcdaf8e933fee5e8bc8433 | [
"MIT"
] | 1 | 2020-10-11T07:07:31.000Z | 2020-10-11T07:07:31.000Z | tests/test.py | zephenryus/botw-grass | 31adaebd69b56c4177bcdaf8e933fee5e8bc8433 | [
"MIT"
] | null | null | null | tests/test.py | zephenryus/botw-grass | 31adaebd69b56c4177bcdaf8e933fee5e8bc8433 | [
"MIT"
] | 1 | 2020-10-11T07:07:33.000Z | 2020-10-11T07:07:33.000Z | import filecmp
import hashlib
import json
import grass
def grass_to_json():
    """
    Reads a grass file and dumps the parsed entries to a .json file
    """
    entries = grass.read_grass("assets/5000000000.grass.extm")
    print("Saving file output/5000000000.grass.extm.json...")
    with open("output/5000000000.grass.extm.json", "w+") as outfile:
        # Serialise each entry via its attribute dict
        serialisable = [entry.__dict__ for entry in entries]
        outfile.write(json.dumps(serialisable, indent=4, separators=(',', ': ')))
def grass_to_binary_string():
    """
    Recompile the parsed grass data and check that its MD5 digest
    matches the digest of the original file
    """
    parsed = grass.read_grass("assets/5000000000.grass.extm")
    recompiled = grass.compile_grass(parsed)
    # Digest the original file in 4 KiB chunks
    file_digest = hashlib.md5()
    with open("assets/5000000000.grass.extm", "rb") as infile:
        while True:
            chunk = infile.read(4096)
            if not chunk:
                break
            file_digest.update(chunk)
    file_hash = file_digest.hexdigest()
    # Digest the recompiled binary string in matching 4 KiB slices
    string_digest = hashlib.md5()
    offset = 0
    while True:
        chunk = recompiled[offset:offset + 4096]
        if not chunk:
            break
        offset += 4096
        string_digest.update(chunk)
    string_hash = string_digest.hexdigest()
    print("The file and binary string are the same: {0}".format(file_hash == string_hash))
def grass_to_binary_file():
    """
    Round-trips a grass file through read/write and compares the
    written file with the original
    """
    parsed = grass.read_grass("assets/5000000000.grass.extm")
    grass.write_grass(parsed, "output/5000000000.grass.extm")
    identical = filecmp.cmp("assets/5000000000.grass.extm", "output/5000000000.grass.extm")
    print("The files are the same: {0}".format(identical))
def grass_to_image():
    """
    Reads a grass file and renders its height and color map images
    """
    parsed = grass.read_grass("assets/5000000000.grass.extm")
    grass.generate_height_map(parsed, 'output/5000000000.grass.extm.height.tiff')
    grass.generate_color_map(parsed, 'output/5000000000.grass.extm.color.tiff')
# Script entry point; main() is not visible in this chunk --
# presumably defined elsewhere in this module (confirm)
if __name__ == "__main__":
    main()
| 28.092105 | 90 | 0.6726 |
a16aa7de0e511402c80303f34d1d2b678e7b0256 | 446 | py | Python | tests/LayoutTest.py | lakhman/restructuredWeb | a8aff9f96c63415fdefe6832f923a6d395d4ebdd | [
"MIT"
] | 2 | 2021-05-19T15:43:26.000Z | 2021-05-19T16:07:00.000Z | tests/LayoutTest.py | lakhman/restructuredWeb | a8aff9f96c63415fdefe6832f923a6d395d4ebdd | [
"MIT"
] | null | null | null | tests/LayoutTest.py | lakhman/restructuredWeb | a8aff9f96c63415fdefe6832f923a6d395d4ebdd | [
"MIT"
] | 1 | 2021-05-19T15:43:44.000Z | 2021-05-19T15:43:44.000Z | # -*- coding: utf-8 -*-
from .BaseTest import BaseTest
| 26.235294 | 83 | 0.650224 |
a16aadbd9d67147c97cce0ae81ac212da4c01e1c | 2,472 | py | Python | .leetcode/16.3-sum-closest.2.py | KuiyuanFu/PythonLeetCode | 8962df2fa838eb7ae48fa59de272ba55a89756d8 | [
"MIT"
] | null | null | null | .leetcode/16.3-sum-closest.2.py | KuiyuanFu/PythonLeetCode | 8962df2fa838eb7ae48fa59de272ba55a89756d8 | [
"MIT"
] | null | null | null | .leetcode/16.3-sum-closest.2.py | KuiyuanFu/PythonLeetCode | 8962df2fa838eb7ae48fa59de272ba55a89756d8 | [
"MIT"
] | null | null | null | # @lc app=leetcode id=16 lang=python3
#
# [16] 3Sum Closest
#
# https://leetcode.com/problems/3sum-closest/description/
#
# algorithms
# Medium (46.33%)
# Likes: 3080
# Dislikes: 169
# Total Accepted: 570.4K
# Total Submissions: 1.2M
# Testcase Example: '[-1,2,1,-4]\n1'
#
# Given an array nums of n integers and an integer target, find three integers
# in numssuch that the sum is closest totarget. Return the sum of the three
# integers. You may assume that each input would have exactly one solution.
#
#
# Example 1:
#
#
# Input: nums = [-1,2,1,-4], target = 1
# Output: 2
# Explanation: The sum that is closest to the target is 2. (-1 + 2 + 1 =
# 2).
#
#
#
# Constraints:
#
#
# 3 <= nums.length <= 10^3
# -10^3<= nums[i]<= 10^3
# -10^4<= target<= 10^4
#
#
#
# @lc tags=array;two-pointers
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
#
#
#
# @lc idea=end
# @lc group=two-pointers
# @lc rank=10
# @lc code=start
# @lc code=end
# @lc main=start
# Manual check harness: prints Example 1's input, the computed
# output, and the expected answer (2) for visual comparison
if __name__ == '__main__':
    print('Example 1:')
    print('Input : ')
    print('nums = [-1,2,1,-4], target = 1')
    print('Output :')
    # Solution is expected to be defined above (body not visible here)
    print(str(Solution().threeSumClosest([-1, 2, 1, -4], 1)))
    print('Exception :')
    print('2')
    print()
    pass
# @lc main=end | 22.071429 | 95 | 0.506068 |
a16be12b3f57a68c02b41dfe786a31910f86a92e | 2,142 | py | Python | test/test_functions/test_michalewicz.py | carefree0910/botorch | c0b252baba8f16a4ea2eb3f99c266fba47418b1f | [
"MIT"
] | null | null | null | test/test_functions/test_michalewicz.py | carefree0910/botorch | c0b252baba8f16a4ea2eb3f99c266fba47418b1f | [
"MIT"
] | null | null | null | test/test_functions/test_michalewicz.py | carefree0910/botorch | c0b252baba8f16a4ea2eb3f99c266fba47418b1f | [
"MIT"
] | 1 | 2019-05-07T23:53:08.000Z | 2019-05-07T23:53:08.000Z | #! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
from botorch.test_functions.michalewicz import (
GLOBAL_MAXIMIZER,
GLOBAL_MAXIMUM,
neg_michalewicz,
)
| 38.25 | 80 | 0.661531 |
a16cdf1f2057c870dd857dd5ffd7ccfb349decff | 1,122 | py | Python | example_scripts/write_mztab_result.py | gavswe/pyqms | 299cd4d96b78611ebbe43e0ac625909c6a8d8fd9 | [
"MIT"
] | 23 | 2017-06-28T07:53:42.000Z | 2022-02-20T02:46:37.000Z | example_scripts/write_mztab_result.py | gavswe/pyqms | 299cd4d96b78611ebbe43e0ac625909c6a8d8fd9 | [
"MIT"
] | 23 | 2019-05-15T18:05:18.000Z | 2022-01-21T13:27:11.000Z | example_scripts/write_mztab_result.py | gavswe/pyqms | 299cd4d96b78611ebbe43e0ac625909c6a8d8fd9 | [
"MIT"
] | 11 | 2017-06-26T13:22:57.000Z | 2022-03-31T23:35:14.000Z | #!/usr/bin/env python3
# encoding: utf-8
"""
pyQms
-----
Python module for fast and accurate mass spectrometry data quantification
:license: MIT, see LICENSE.txt for more details
Authors:
* Leufken, J.
* Niehues, A.
* Sarin, L.P.
* Hippler, M.
* Leidel, S.A.
* Fufezan, C.
"""
import pickle
import sys
def main(result_pkl=None):
    """
    usage:
        ./write_mztab_results.py <Path2ResultPkl>

    Will write all results of a result pkl into a .mztab file. Please refer to
    Documentation of :doc:`results` for further information.

    Note:

        Please note that the ouput in mzTab format is still in beta stage.
        Since pyQms is a raw quantification tool, some meta data has to be
        passed/set manually by the user.

    """
    # NOTE: the docstring above is printed verbatim as the CLI usage text, so
    # its wording is deliberately left untouched.
    # Use a context manager so the pickle file handle is closed deterministically
    # (the original passed open() inline and leaked the handle to the GC).
    with open(result_pkl, "rb") as pkl_file:
        results_class = pickle.load(pkl_file)
    # Derive the output name from the input path: <input>_results.mztab
    results_class.write_result_mztab(
        output_file_name="{0}_results.mztab".format(result_pkl)
    )
if __name__ == "__main__":
if len(sys.argv) < 2:
print(main.__doc__)
else:
main(result_pkl=sys.argv[1])
| 21.169811 | 78 | 0.622995 |
a16d009cfff8e6fc878e82ac94cf0ba2221a05c0 | 5,516 | py | Python | dbservice/dbprovider/MessageDAO.py | artyomche9/digest_bot | 480e9038ac1f42a10a9a333a72b9e38fa9fe8385 | [
"MIT"
] | 11 | 2019-10-25T12:42:03.000Z | 2020-04-03T09:43:49.000Z | dbservice/dbprovider/MessageDAO.py | maybe-hello-world/digestbot | 480e9038ac1f42a10a9a333a72b9e38fa9fe8385 | [
"MIT"
] | 13 | 2020-12-12T12:33:55.000Z | 2021-09-09T15:00:57.000Z | dbservice/dbprovider/MessageDAO.py | artyomche9/digest_bot | 480e9038ac1f42a10a9a333a72b9e38fa9fe8385 | [
"MIT"
] | 5 | 2019-10-06T09:55:24.000Z | 2019-10-21T16:36:56.000Z | from decimal import Decimal
from typing import List, Any
from common.Enums import SortingType
from models import Message
from .engine import db_engine, DBEngine
message_dao = MessageDAO(db_engine)
| 36.773333 | 99 | 0.603336 |
a16f85e6fac2fb3f5423a543ab4b85436a1f1301 | 196 | py | Python | Chapter09/fuzzing.py | firebitsbr/Penetration-Testing-with-Shellcode | 2d756bccace6b727e050b2010ebf23e08d221fdc | [
"MIT"
] | 30 | 2018-05-15T21:45:09.000Z | 2022-03-23T20:04:25.000Z | Chapter09/fuzzing.py | naveenselvan/Penetration-Testing-with-Shellcode | 2d756bccace6b727e050b2010ebf23e08d221fdc | [
"MIT"
] | 1 | 2020-10-19T13:03:32.000Z | 2020-11-24T05:50:17.000Z | Chapter09/fuzzing.py | naveenselvan/Penetration-Testing-with-Shellcode | 2d756bccace6b727e050b2010ebf23e08d221fdc | [
"MIT"
] | 18 | 2018-02-20T21:21:23.000Z | 2022-01-26T04:19:28.000Z | #!/usr/bin/python
import socket
import sys
# Payload: 500 'A' characters, intended to overflow the target FTP server's
# USER-command buffer.
junk = 'A'*500
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# connect() returns None, so the original's `connect = ...` binding was dead
# weight and has been dropped.
s.connect(('192.168.129.128', 21))
# Consume the FTP banner before sending the command.
s.recv(1024)
# Encode to bytes so the script also runs under Python 3 (socket.send rejects
# str there); under Python 2, .encode() yields the same ASCII byte string.
s.send(('USER ' + junk + '\r\n').encode())
| 17.818182 | 50 | 0.69898 |
a17081dce9dfbf674f07300258797fe7e68a0847 | 1,746 | py | Python | 017. Letter Combinations of a Phone Number.py | youhusky/Facebook_Prepare | 4045bcb652537711b3680b2aa17204ae73c6bde8 | [
"MIT"
] | 6 | 2017-10-30T05:35:46.000Z | 2020-12-15T06:51:52.000Z | 017. Letter Combinations of a Phone Number.py | youhusky/Facebook_Prepare | 4045bcb652537711b3680b2aa17204ae73c6bde8 | [
"MIT"
] | 1 | 2017-10-30T04:11:31.000Z | 2017-10-30T05:46:24.000Z | 017. Letter Combinations of a Phone Number.py | youhusky/Facebook_Prepare | 4045bcb652537711b3680b2aa17204ae73c6bde8 | [
"MIT"
] | 2 | 2020-09-03T07:14:02.000Z | 2021-05-21T19:19:57.000Z | # Given a digit string, return all possible letter combinations that the number could represent.
# A mapping of digit to letters (just like on the telephone buttons) is given below.
# Input:Digit string "23"
# Output: ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"].
# DFS backtracking
m = Solution2()
print m.letterCombinations('abc')
| 27.28125 | 96 | 0.4874 |
a172ea5b14e8133a222d02986a593e89323cad7c | 847 | py | Python | FreeBSD/bsd_netstats_poller.py | failedrequest/telegraf-plugins | 9cda0612a912f219fa84724f12af1f428483a37a | [
"BSD-2-Clause"
] | null | null | null | FreeBSD/bsd_netstats_poller.py | failedrequest/telegraf-plugins | 9cda0612a912f219fa84724f12af1f428483a37a | [
"BSD-2-Clause"
] | null | null | null | FreeBSD/bsd_netstats_poller.py | failedrequest/telegraf-plugins | 9cda0612a912f219fa84724f12af1f428483a37a | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
# 3/21/2021
# Updated for python3
# A Simple sysctl to telegraf plugin for freebsd's netstats ip info
from freebsd_sysctl import Sysctl as sysctl
import subprocess as sp
import re
import json
import sys
import pprint as pp
# Host name is read via sysctl; presumably used for tagging by the
# downstream points_to_influx helper -- TODO confirm against that function.
hostname = sysctl("kern.hostname").value

netstat_data = {}
points_netstat = {}

# Ask netstat for IP statistics in libxo JSON form, then flatten every
# per-category counter mapping into a single flat dict of points.
raw_output = sp.check_output(
    ["netstat", "-s", "-p", "ip", "--libxo", "json", "/dev/null"],
    universal_newlines=True,
)
netstat_data = json.loads(raw_output)
for category in netstat_data["statistics"].values():
    points_netstat.update(category)

points_to_influx(points_netstat)
| 22.289474 | 119 | 0.709563 |
a1730ed2d00a6babe52f239de2d480281d939967 | 13,395 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/packetinlist_10d8adb40e4e05f4b37904f2c6428ca9.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 20 | 2019-05-07T01:59:14.000Z | 2022-02-11T05:24:47.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/packetinlist_10d8adb40e4e05f4b37904f2c6428ca9.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 60 | 2019-04-03T18:59:35.000Z | 2022-02-22T12:05:05.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/packetinlist_10d8adb40e4e05f4b37904f2c6428ca9.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 13 | 2019-05-20T10:48:31.000Z | 2021-10-06T07:45:44.000Z | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
    def add(self, Name=None):
        # type: (str) -> PacketInList
        """Adds a new packetInList resource on the json, only valid with config assistant

        Args
        ----
        - Name (str): Name of NGPF element, guaranteed to be unique in Scenario

        Returns
        -------
        - self: This instance with all currently retrieved packetInList resources using find and the newly added packetInList resources available through an iterator or index

        Raises
        ------
        - Exception: if this function is not being used with config assistance
        """
        # NOTE: locals() is passed deliberately -- the SDM mapper pulls the
        # caller-supplied keyword arguments straight out of this frame, so no
        # intermediate local variables may be introduced before this call.
        return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))
    def find(self, Count=None, DescriptiveName=None, Name=None, SwitchName=None):
        # type: (int, str, str, str) -> PacketInList
        """Finds and retrieves packetInList resources from the server.

        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve packetInList resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all packetInList resources from the server.

        Args
        ----
        - Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
        - DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
        - Name (str): Name of NGPF element, guaranteed to be unique in Scenario
        - SwitchName (str): Parent Switch Name

        Returns
        -------
        - self: This instance with matching packetInList resources retrieved from the server available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # NOTE: locals() must be the argument mapping here; adding any local
        # variable before this statement would change the selection filter.
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of packetInList data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the packetInList resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def SendPause(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the sendPause operation on the server.
Pause Sending PacketIn
sendPause(Arg2=list, async_operation=bool)list
----------------------------------------------
- Arg2 (list(number)): List of PacketIn.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('sendPause', payload=payload, response_object=None)
def SendStart(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the sendStart operation on the server.
Start Sending PacketIn
sendStart(Arg2=list, async_operation=bool)list
----------------------------------------------
- Arg2 (list(number)): List of PacketIn.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('sendStart', payload=payload, response_object=None)
def SendStop(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the sendStop operation on the server.
Stop Sending PacketIn
sendStop(Arg2=list, async_operation=bool)list
---------------------------------------------
- Arg2 (list(number)): List of PacketIn.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('sendStop', payload=payload, response_object=None)
    def get_device_ids(self, PortNames=None, AuxiliaryId=None, FlowTable=None, InPort=None, PacketInName=None, PhysicalInPort=None, SendPacketIn=None):
        """Base class infrastructure that gets a list of packetInList device ids encapsulated by this object.

        Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.

        Args
        ----
        - PortNames (str): optional regex of port names
        - AuxiliaryId (str): optional regex of auxiliaryId
        - FlowTable (str): optional regex of flowTable
        - InPort (str): optional regex of inPort
        - PacketInName (str): optional regex of packetInName
        - PhysicalInPort (str): optional regex of physicalInPort
        - SendPacketIn (str): optional regex of sendPacketIn

        Returns
        -------
        - list(int): A list of device ids that meets the regex criteria provided in the method parameters

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # NOTE: locals() is handed over as the complete filter set; do not add
        # local variables to this method or they will leak into the filter.
        return self._get_ngpf_device_ids(locals())
| 40.468278 | 193 | 0.642255 |
a173546fb4be8c1b52e29b792d62de5b919bbc8f | 97 | py | Python | Python/Phani.py | baroood/Hacktoberfest-2k17 | 87383df4bf705358866a5a4120dd678a3f2acd3e | [
"MIT"
] | 28 | 2017-10-04T19:42:26.000Z | 2021-03-26T04:00:48.000Z | Python/Phani.py | baroood/Hacktoberfest-2k17 | 87383df4bf705358866a5a4120dd678a3f2acd3e | [
"MIT"
] | 375 | 2017-09-28T02:58:37.000Z | 2019-10-31T09:10:38.000Z | Python/Phani.py | baroood/Hacktoberfest-2k17 | 87383df4bf705358866a5a4120dd678a3f2acd3e | [
"MIT"
] | 519 | 2017-09-28T02:40:29.000Z | 2021-02-15T08:29:17.000Z | a = input("Enter the first number")
b = input("Enter the second number")
print('the sum is',a+b)
| 24.25 | 36 | 0.680412 |
a1735e027f0563b68478c5ef69b57c79d02303e9 | 1,108 | py | Python | servicecatalog_factory/constants_test.py | micwha/aws-service-catalog-factory | c50a922d64e3d47fd56dbe261d841d81f872f0fb | [
"Apache-2.0"
] | null | null | null | servicecatalog_factory/constants_test.py | micwha/aws-service-catalog-factory | c50a922d64e3d47fd56dbe261d841d81f872f0fb | [
"Apache-2.0"
] | null | null | null | servicecatalog_factory/constants_test.py | micwha/aws-service-catalog-factory | c50a922d64e3d47fd56dbe261d841d81f872f0fb | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from pytest import fixture
| 22.612245 | 73 | 0.712094 |
a173f091bd6a84a9640f8e5bfa3ab824665803fb | 1,038 | py | Python | django/contrib/contenttypes/tests/models.py | benjaoming/django | 6dbe979b4d9396e1b307c7d27388c97c13beb21c | [
"BSD-3-Clause"
] | 2 | 2015-01-21T15:45:07.000Z | 2015-02-21T02:38:13.000Z | env/lib/python2.7/site-packages/django/contrib/contenttypes/tests/models.py | luiscarlosgph/nas | e5acee61e8bbf12c34785fe971ce7df8dee775d4 | [
"MIT"
] | 10 | 2019-12-26T17:31:31.000Z | 2022-03-21T22:17:33.000Z | env/lib/python2.7/site-packages/django/contrib/contenttypes/tests/models.py | luiscarlosgph/nas | e5acee61e8bbf12c34785fe971ce7df8dee775d4 | [
"MIT"
] | 1 | 2020-05-25T08:55:19.000Z | 2020-05-25T08:55:19.000Z | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.http import urlquote
| 23.590909 | 73 | 0.716763 |
a174909b1f9a6d386413fccc83ffd4e52629d864 | 75,049 | py | Python | tests/unit/utils/test_docker.py | springborland/salt | bee85e477d57e9a171884e54fefb9a59d0835ed0 | [
"Apache-2.0"
] | 1 | 2020-04-09T03:25:10.000Z | 2020-04-09T03:25:10.000Z | tests/unit/utils/test_docker.py | springborland/salt | bee85e477d57e9a171884e54fefb9a59d0835ed0 | [
"Apache-2.0"
] | null | null | null | tests/unit/utils/test_docker.py | springborland/salt | bee85e477d57e9a171884e54fefb9a59d0835ed0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
tests.unit.utils.test_docker
============================
Test the funcs in salt.utils.docker and salt.utils.docker.translate
"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import copy
import functools
import logging
import os
# Import salt libs
import salt.config
import salt.loader
import salt.utils.docker.translate.container
import salt.utils.docker.translate.network
import salt.utils.platform
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
from salt.utils.docker.translate import helpers as translate_helpers
# Import Salt Testing Libs
from tests.support.unit import TestCase
log = logging.getLogger(__name__)
    def test_blkio_weight_device(self):
        """
        Should translate a list of PATH:WEIGHT pairs to a list of dictionaries
        with the following format: {'Path': PATH, 'Weight': WEIGHT}
        """
        # NOTE(review): ``val`` is never referenced in the loop body -- the
        # call below hard-codes the string form, so the list variant in this
        # tuple is not actually exercised.
        for val in ("/dev/sda:100,/dev/sdb:200", ["/dev/sda:100", "/dev/sdb:200"]):
            self.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, blkio_weight_device="/dev/sda:100,/dev/sdb:200"
                ),
                {
                    "blkio_weight_device": [
                        {"Path": "/dev/sda", "Weight": 100},
                        {"Path": "/dev/sdb", "Weight": 200},
                    ]
                },
            )
        # Error cases
        # Too few colon-separated components.
        with self.assertRaisesRegex(
            CommandExecutionError, r"'foo' contains 1 value\(s\) \(expected 2\)"
        ):
            salt.utils.docker.translate_input(
                self.translator, blkio_weight_device="foo"
            )
        # Too many colon-separated components.
        with self.assertRaisesRegex(
            CommandExecutionError, r"'foo:bar:baz' contains 3 value\(s\) \(expected 2\)"
        ):
            salt.utils.docker.translate_input(
                self.translator, blkio_weight_device="foo:bar:baz"
            )
        # Weight component must be an integer.
        with self.assertRaisesRegex(
            CommandExecutionError, r"Weight 'foo' for path '/dev/sdb' is not an integer"
        ):
            salt.utils.docker.translate_input(
                self.translator, blkio_weight_device=["/dev/sda:100", "/dev/sdb:foo"]
            )
    def test_dns(self):
        """
        While this is a stringlist, it also supports IP address validation, so
        it can't use the test_stringlist decorator because we need to test both
        with and without validation, and it isn't necessary to make all other
        stringlist tests also do that same kind of testing.
        """
        # Valid addresses, both comma-separated string and list forms.
        for val in ("8.8.8.8,8.8.4.4", ["8.8.8.8", "8.8.4.4"]):
            self.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, dns=val, validate_ip_addrs=True,
                ),
                {"dns": ["8.8.8.8", "8.8.4.4"]},
            )
        # Error case: invalid IP address caught by validation
        for val in ("8.8.8.888,8.8.4.4", ["8.8.8.888", "8.8.4.4"]):
            with self.assertRaisesRegex(
                CommandExecutionError, r"'8.8.8.888' is not a valid IP address"
            ):
                salt.utils.docker.translate_input(
                    self.translator, dns=val, validate_ip_addrs=True,
                )
        # This is not valid input but it will test whether or not IP address
        # validation happened.
        for val in ("foo,bar", ["foo", "bar"]):
            self.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, dns=val, validate_ip_addrs=False,
                ),
                {"dns": ["foo", "bar"]},
            )
    def test_extra_hosts(self):
        """
        Can be passed as a list of key:value pairs but can't be simply tested
        using @assert_key_colon_value since we need to test both with and without
        IP address validation.
        """
        # Valid host:IP pairs, both comma-separated string and list forms.
        for val in ("web1:10.9.8.7,web2:10.9.8.8", ["web1:10.9.8.7", "web2:10.9.8.8"]):
            self.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, extra_hosts=val, validate_ip_addrs=True,
                ),
                {"extra_hosts": {"web1": "10.9.8.7", "web2": "10.9.8.8"}},
            )
        # Error case: invalid IP address caught by validation
        for val in (
            "web1:10.9.8.299,web2:10.9.8.8",
            ["web1:10.9.8.299", "web2:10.9.8.8"],
        ):
            with self.assertRaisesRegex(
                CommandExecutionError, r"'10.9.8.299' is not a valid IP address"
            ):
                salt.utils.docker.translate_input(
                    self.translator, extra_hosts=val, validate_ip_addrs=True,
                )
        # This is not valid input but it will test whether or not IP address
        # validation happened.
        for val in ("foo:bar,baz:qux", ["foo:bar", "baz:qux"]):
            self.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, extra_hosts=val, validate_ip_addrs=False,
                ),
                {"extra_hosts": {"foo": "bar", "baz": "qux"}},
            )
    def test_log_config(self):
        """
        This is a mixture of log_driver and log_opt, which get combined into a
        dictionary.

        log_driver is a simple string, but log_opt can be passed in several
        ways, so we need to test them all.
        """
        # NOTE(review): ``expected`` is assigned but never used below.
        expected = (
            {"log_config": {"Type": "foo", "Config": {"foo": "bar", "baz": "qux"}}},
            {},
            [],
        )
        # NOTE(review): ``val`` is never referenced in the loop body -- the
        # call hard-codes the comma-separated string form, so only that
        # variant is actually exercised.
        for val in (
            "foo=bar,baz=qux",
            ["foo=bar", "baz=qux"],
            [{"foo": "bar"}, {"baz": "qux"}],
            {"foo": "bar", "baz": "qux"},
        ):
            self.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, log_driver="foo", log_opt="foo=bar,baz=qux"
                ),
                {"log_config": {"Type": "foo", "Config": {"foo": "bar", "baz": "qux"}}},
            )
        # Ensure passing either `log_driver` or `log_opt` alone works
        self.assertEqual(
            salt.utils.docker.translate_input(self.translator, log_driver="foo"),
            {"log_config": {"Type": "foo", "Config": {}}},
        )
        self.assertEqual(
            salt.utils.docker.translate_input(
                self.translator, log_opt={"foo": "bar", "baz": "qux"}
            ),
            {"log_config": {"Type": "none", "Config": {"foo": "bar", "baz": "qux"}}},
        )
    def test_port_bindings(self):
        """
        This has several potential formats and can include port ranges. It
        needs its own test.

        Each Docker-style binding string is tested both as one comma-separated
        string and as a Python list; ``self.normalize_ports`` canonicalizes
        the translated result before comparison.
        """
        # ip:hostPort:containerPort - Bind a specific IP and port on the host
        # to a specific port within the container.
        bindings = (
            "10.1.2.3:8080:80,10.1.2.3:8888:80,10.4.5.6:3333:3333,"
            "10.7.8.9:14505-14506:4505-4506,10.1.2.3:8080:81/udp,"
            "10.1.2.3:8888:81/udp,10.4.5.6:3334:3334/udp,"
            "10.7.8.9:15505-15506:5505-5506/udp"
        )
        for val in (bindings, bindings.split(",")):
            self.assertEqual(
                self.normalize_ports(
                    salt.utils.docker.translate_input(
                        self.translator, port_bindings=val,
                    )
                ),
                {
                    "port_bindings": {
                        80: [("10.1.2.3", 8080), ("10.1.2.3", 8888)],
                        3333: ("10.4.5.6", 3333),
                        4505: ("10.7.8.9", 14505),
                        4506: ("10.7.8.9", 14506),
                        "81/udp": [("10.1.2.3", 8080), ("10.1.2.3", 8888)],
                        "3334/udp": ("10.4.5.6", 3334),
                        "5505/udp": ("10.7.8.9", 15505),
                        "5506/udp": ("10.7.8.9", 15506),
                    },
                    "ports": [
                        80,
                        3333,
                        4505,
                        4506,
                        (81, "udp"),
                        (3334, "udp"),
                        (5505, "udp"),
                        (5506, "udp"),
                    ],
                },
            )
        # ip::containerPort - Bind a specific IP and an ephemeral port to a
        # specific port within the container.
        bindings = (
            "10.1.2.3::80,10.1.2.3::80,10.4.5.6::3333,10.7.8.9::4505-4506,"
            "10.1.2.3::81/udp,10.1.2.3::81/udp,10.4.5.6::3334/udp,"
            "10.7.8.9::5505-5506/udp"
        )
        for val in (bindings, bindings.split(",")):
            self.assertEqual(
                self.normalize_ports(
                    salt.utils.docker.translate_input(
                        self.translator, port_bindings=val,
                    )
                ),
                {
                    "port_bindings": {
                        80: [("10.1.2.3",), ("10.1.2.3",)],
                        3333: ("10.4.5.6",),
                        4505: ("10.7.8.9",),
                        4506: ("10.7.8.9",),
                        "81/udp": [("10.1.2.3",), ("10.1.2.3",)],
                        "3334/udp": ("10.4.5.6",),
                        "5505/udp": ("10.7.8.9",),
                        "5506/udp": ("10.7.8.9",),
                    },
                    "ports": [
                        80,
                        3333,
                        4505,
                        4506,
                        (81, "udp"),
                        (3334, "udp"),
                        (5505, "udp"),
                        (5506, "udp"),
                    ],
                },
            )
        # hostPort:containerPort - Bind a specific port on all of the host's
        # interfaces to a specific port within the container.
        bindings = (
            "8080:80,8888:80,3333:3333,14505-14506:4505-4506,8080:81/udp,"
            "8888:81/udp,3334:3334/udp,15505-15506:5505-5506/udp"
        )
        for val in (bindings, bindings.split(",")):
            self.assertEqual(
                self.normalize_ports(
                    salt.utils.docker.translate_input(
                        self.translator, port_bindings=val,
                    )
                ),
                {
                    "port_bindings": {
                        80: [8080, 8888],
                        3333: 3333,
                        4505: 14505,
                        4506: 14506,
                        "81/udp": [8080, 8888],
                        "3334/udp": 3334,
                        "5505/udp": 15505,
                        "5506/udp": 15506,
                    },
                    "ports": [
                        80,
                        3333,
                        4505,
                        4506,
                        (81, "udp"),
                        (3334, "udp"),
                        (5505, "udp"),
                        (5506, "udp"),
                    ],
                },
            )
        # containerPort - Bind an ephemeral port on all of the host's
        # interfaces to a specific port within the container.
        bindings = "80,3333,4505-4506,81/udp,3334/udp,5505-5506/udp"
        for val in (bindings, bindings.split(",")):
            self.assertEqual(
                self.normalize_ports(
                    salt.utils.docker.translate_input(
                        self.translator, port_bindings=val,
                    )
                ),
                {
                    "port_bindings": {
                        80: None,
                        3333: None,
                        4505: None,
                        4506: None,
                        "81/udp": None,
                        "3334/udp": None,
                        "5505/udp": None,
                        "5506/udp": None,
                    },
                    "ports": [
                        80,
                        3333,
                        4505,
                        4506,
                        (81, "udp"),
                        (3334, "udp"),
                        (5505, "udp"),
                        (5506, "udp"),
                    ],
                },
            )
        # Test a mixture of different types of input
        bindings = (
            "10.1.2.3:8080:80,10.4.5.6::3333,14505-14506:4505-4506,"
            "9999-10001,10.1.2.3:8080:81/udp,10.4.5.6::3334/udp,"
            "15505-15506:5505-5506/udp,19999-20001/udp"
        )
        for val in (bindings, bindings.split(",")):
            self.assertEqual(
                self.normalize_ports(
                    salt.utils.docker.translate_input(
                        self.translator, port_bindings=val,
                    )
                ),
                {
                    "port_bindings": {
                        80: ("10.1.2.3", 8080),
                        3333: ("10.4.5.6",),
                        4505: 14505,
                        4506: 14506,
                        9999: None,
                        10000: None,
                        10001: None,
                        "81/udp": ("10.1.2.3", 8080),
                        "3334/udp": ("10.4.5.6",),
                        "5505/udp": 15505,
                        "5506/udp": 15506,
                        "19999/udp": None,
                        "20000/udp": None,
                        "20001/udp": None,
                    },
                    "ports": [
                        80,
                        3333,
                        4505,
                        4506,
                        9999,
                        10000,
                        10001,
                        (81, "udp"),
                        (3334, "udp"),
                        (5505, "udp"),
                        (5506, "udp"),
                        (19999, "udp"),
                        (20000, "udp"),
                        (20001, "udp"),
                    ],
                },
            )
        # Error case: too many items (max 3)
        with self.assertRaisesRegex(
            CommandExecutionError,
            r"'10.1.2.3:8080:80:123' is an invalid port binding "
            r"definition \(at most 3 components are allowed, found 4\)",
        ):
            salt.utils.docker.translate_input(
                self.translator, port_bindings="10.1.2.3:8080:80:123"
            )
        # Error case: port range start is greater than end
        with self.assertRaisesRegex(
            CommandExecutionError,
            r"Start of port range \(5555\) cannot be greater than end "
            r"of port range \(5554\)",
        ):
            salt.utils.docker.translate_input(
                self.translator, port_bindings=val,
            )
        # Error case: non-numeric port range
        for val in (
            "10.1.2.3:foo:1111-1112",
            "10.1.2.3:1111-1112:foo",
            "10.1.2.3::foo",
            "foo:1111-1112",
            "1111-1112:foo",
            "foo",
        ):
            with self.assertRaisesRegex(
                CommandExecutionError, "'foo' is non-numeric or an invalid port range"
            ):
                salt.utils.docker.translate_input(
                    self.translator, port_bindings=val,
                )
        # Error case: misatched port range
        for val in ("10.1.2.3:1111-1113:1111-1112", "1111-1113:1111-1112"):
            with self.assertRaisesRegex(
                CommandExecutionError,
                r"Host port range \(1111-1113\) does not have the same "
                r"number of ports as the container port range \(1111-1112\)",
            ):
                salt.utils.docker.translate_input(self.translator, port_bindings=val)
        for val in ("10.1.2.3:1111-1112:1111-1113", "1111-1112:1111-1113"):
            with self.assertRaisesRegex(
                CommandExecutionError,
                r"Host port range \(1111-1112\) does not have the same "
                r"number of ports as the container port range \(1111-1113\)",
            ):
                salt.utils.docker.translate_input(
                    self.translator, port_bindings=val,
                )
        # Error case: empty host port or container port
        with self.assertRaisesRegex(
            CommandExecutionError, "Empty host port in port binding definition ':1111'"
        ):
            salt.utils.docker.translate_input(self.translator, port_bindings=":1111")
        with self.assertRaisesRegex(
            CommandExecutionError,
            "Empty container port in port binding definition '1111:'",
        ):
            salt.utils.docker.translate_input(self.translator, port_bindings="1111:")
        with self.assertRaisesRegex(
            CommandExecutionError, "Empty port binding definition found"
        ):
            salt.utils.docker.translate_input(self.translator, port_bindings="")
    def test_ports(self):
        """
        Ports can be passed as a comma-separated or Python list of port
        numbers, with '/tcp' being optional for TCP ports. They must ultimately
        be a list of port definitions, in which an integer denotes a TCP port,
        and a tuple in the format (port_num, 'udp') denotes a UDP port. Also,
        the port numbers must end up as integers. None of the decorators will
        suffice so this one must be tested specially.
        """
        # Valid input in string, mixed-type list, and all-string list forms.
        for val in (
            "1111,2222/tcp,3333/udp,4505-4506",
            [1111, "2222/tcp", "3333/udp", "4505-4506"],
            ["1111", "2222/tcp", "3333/udp", "4505-4506"],
        ):
            self.assertEqual(
                self.normalize_ports(
                    salt.utils.docker.translate_input(self.translator, ports=val,)
                ),
                {"ports": [1111, 2222, 4505, 4506, (3333, "udp")]},
            )
        # Error case: non-integer and non/string value
        for val in (1.0, [1.0]):
            with self.assertRaisesRegex(
                CommandExecutionError, "'1.0' is not a valid port definition"
            ):
                salt.utils.docker.translate_input(
                    self.translator, ports=val,
                )
        # Error case: port range start is greater than end
        with self.assertRaisesRegex(
            CommandExecutionError,
            r"Start of port range \(5555\) cannot be greater than end of "
            r"port range \(5554\)",
        ):
            salt.utils.docker.translate_input(
                self.translator, ports="5555-5554",
            )
    def test_restart_policy(self):
        """
        Input is in the format "name[:retry_count]", but the API wants it
        in the format {'Name': name, 'MaximumRetryCount': retry_count}
        """
        name = "restart_policy"
        alias = "restart"
        # Both the canonical name and its alias must translate identically.
        for item in (name, alias):
            # Test with retry count
            self.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, **{item: "on-failure:5"}
                ),
                {name: {"Name": "on-failure", "MaximumRetryCount": 5}},
            )
            # Test without retry count
            self.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, **{item: "on-failure"}
                ),
                {name: {"Name": "on-failure", "MaximumRetryCount": 0}},
            )
            # Error case: more than one policy passed
            with self.assertRaisesRegex(
                CommandExecutionError, "Only one policy is permitted"
            ):
                salt.utils.docker.translate_input(
                    self.translator, **{item: "on-failure,always"}
                )
        # Test collision
        # When both the name and its alias are passed, the canonical name
        # wins with ignore_collisions=True and an error is raised otherwise.
        test_kwargs = {name: "on-failure:5", alias: "always"}
        self.assertEqual(
            salt.utils.docker.translate_input(
                self.translator, ignore_collisions=True, **test_kwargs
            ),
            {name: {"Name": "on-failure", "MaximumRetryCount": 5}},
        )
        with self.assertRaisesRegex(
            CommandExecutionError, "'restart' is an alias for 'restart_policy'"
        ):
            salt.utils.docker.translate_input(
                self.translator, ignore_collisions=False, **test_kwargs
            )
    def test_ulimits(self):
        """
        Input is in the format "name=soft_limit[:hard_limit]", but the API
        wants it in the format
        {'Name': name, 'Soft': soft_limit, 'Hard': hard_limit}
        """
        # Test with and without hard limit
        # (when the hard limit is omitted, it defaults to the soft limit).
        ulimits = "nofile=1024:2048,nproc=50"
        for val in (ulimits, ulimits.split(",")):
            self.assertEqual(
                salt.utils.docker.translate_input(self.translator, ulimits=val,),
                {
                    "ulimits": [
                        {"Name": "nofile", "Soft": 1024, "Hard": 2048},
                        {"Name": "nproc", "Soft": 50, "Hard": 50},
                    ]
                },
            )
        # Error case: Invalid format
        with self.assertRaisesRegex(
            CommandExecutionError,
            r"Ulimit definition 'nofile:1024:2048' is not in the format "
            r"type=soft_limit\[:hard_limit\]",
        ):
            salt.utils.docker.translate_input(
                self.translator, ulimits="nofile:1024:2048"
            )
        # Error case: Invalid format
        with self.assertRaisesRegex(
            CommandExecutionError,
            r"Limit 'nofile=foo:2048' contains non-numeric value\(s\)",
        ):
            salt.utils.docker.translate_input(
                self.translator, ulimits="nofile=foo:2048"
            )
def test_user(self):
"""
Must be either username (string) or uid (int). An int passed as a
string (e.g. '0') should be converted to an int.
"""
# Username passed as string
self.assertEqual(
salt.utils.docker.translate_input(self.translator, user="foo"),
{"user": "foo"},
)
for val in (0, "0"):
self.assertEqual(
salt.utils.docker.translate_input(self.translator, user=val),
{"user": 0},
)
# Error case: non string/int passed
with self.assertRaisesRegex(
CommandExecutionError, "Value must be a username or uid"
):
salt.utils.docker.translate_input(self.translator, user=["foo"])
# Error case: negative int passed
with self.assertRaisesRegex(CommandExecutionError, "'-1' is an invalid uid"):
salt.utils.docker.translate_input(self.translator, user=-1)
def test_gateway(self):
"""
Must be an IPv4 or IPv6 address
"""
for val in self.ip_addrs[True]:
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, validate_ip_addrs=True, gateway=val,
),
self.apply_defaults({"gateway": val}),
)
for val in self.ip_addrs[False]:
with self.assertRaisesRegex(
CommandExecutionError, "'{0}' is not a valid IP address".format(val)
):
salt.utils.docker.translate_input(
self.translator, validate_ip_addrs=True, gateway=val,
)
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, validate_ip_addrs=False, gateway=val,
),
self.apply_defaults(
{
"gateway": val
if isinstance(val, six.string_types)
else six.text_type(val)
}
),
)
| 37.134587 | 88 | 0.514371 |
a174c86a4c793d497c49fdd9127b5aea515b4346 | 400 | py | Python | utils/middleware.py | DavidRoldan523/elenas_test | 8b520fae68a275654a42ad761713c9c932d17a76 | [
"MIT"
] | null | null | null | utils/middleware.py | DavidRoldan523/elenas_test | 8b520fae68a275654a42ad761713c9c932d17a76 | [
"MIT"
] | null | null | null | utils/middleware.py | DavidRoldan523/elenas_test | 8b520fae68a275654a42ad761713c9c932d17a76 | [
"MIT"
] | null | null | null | from django.http import HttpResponse
from django.utils.deprecation import MiddlewareMixin
| 28.571429 | 57 | 0.7 |
a175602faa0357ee58584137efdc7c85d289bf89 | 3,317 | py | Python | bot/exts/evergreen/catify.py | chincholikarsalil/sir-lancebot | 05ba3de5c99b30a8eba393da1460fae255373457 | [
"MIT"
] | null | null | null | bot/exts/evergreen/catify.py | chincholikarsalil/sir-lancebot | 05ba3de5c99b30a8eba393da1460fae255373457 | [
"MIT"
] | null | null | null | bot/exts/evergreen/catify.py | chincholikarsalil/sir-lancebot | 05ba3de5c99b30a8eba393da1460fae255373457 | [
"MIT"
] | null | null | null | import random
from contextlib import suppress
from typing import Optional
from discord import AllowedMentions, Embed, Forbidden
from discord.ext import commands
from bot.constants import Cats, Colours, NEGATIVE_REPLIES
from bot.utils import helpers
def setup(bot: commands.Bot) -> None:
    """Load the Catify cog into the bot (discord.py extension entry point)."""
    cog = Catify(bot)
    bot.add_cog(cog)
| 37.269663 | 119 | 0.545674 |
a1773cd4561ed64fe6472e04a837e283a5378aa9 | 1,763 | py | Python | data/ebmnlp/stream.py | bepnye/tf_ner | c68b9f489e56e0ec8cfb02b7115d2b07d721ac6f | [
"Apache-2.0"
] | null | null | null | data/ebmnlp/stream.py | bepnye/tf_ner | c68b9f489e56e0ec8cfb02b7115d2b07d721ac6f | [
"Apache-2.0"
] | null | null | null | data/ebmnlp/stream.py | bepnye/tf_ner | c68b9f489e56e0ec8cfb02b7115d2b07d721ac6f | [
"Apache-2.0"
] | null | null | null | import os
import data_utils
from pathlib import Path
top_path = Path(os.path.dirname(os.path.abspath(__file__)))
EBM_NLP = Path('/Users/ben/Desktop/ebm_nlp/repo/ebm_nlp_2_00/')
NO_LABEL = '0'
TRAIN_TAG_D = get_tags(Path('train/'))
TRAIN_PMIDS = sorted(TRAIN_TAG_D.keys())
TRAIN_WORD_D = get_words(TRAIN_PMIDS)
TRAIN_WORDS, TRAIN_TAGS = get_seqs(TRAIN_TAG_D, TRAIN_WORD_D, TRAIN_PMIDS)
TEST_TAG_D = get_tags(Path('test/gold/'))
TEST_PMIDS = sorted(TEST_TAG_D.keys())
TEST_WORD_D = get_words(TEST_PMIDS)
TEST_WORDS, TEST_TAGS = get_seqs(TEST_TAG_D, TEST_WORD_D, TEST_PMIDS)
| 28.435484 | 109 | 0.683494 |
a178917c391e8c7d6cc84a889a8b3efdf43b8cd9 | 16,753 | py | Python | Kernels/Research/FFT/config/fft.py | WoodData/EndpointAI | 8e4d145ff45cf5559ab009eba4f423e944dc6975 | [
"Apache-2.0"
] | 190 | 2020-09-22T02:14:29.000Z | 2022-03-28T02:35:57.000Z | Kernels/Research/FFT/config/fft.py | chuancqc/EndpointAI | ab67cefeae3c06f1c93f66812bcf988c14e72ff1 | [
"Apache-2.0"
] | 2 | 2021-08-30T10:06:22.000Z | 2021-11-05T20:37:58.000Z | Kernels/Research/FFT/config/fft.py | chuancqc/EndpointAI | ab67cefeae3c06f1c93f66812bcf988c14e72ff1 | [
"Apache-2.0"
] | 80 | 2020-09-13T17:48:56.000Z | 2022-03-19T10:45:05.000Z | #
#
# Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sympy.ntheory import factorint
import numpy as np
from sympy.combinatorics import Permutation
import io
import math
from config.strtools import *
import itertools
import struct
import config.formats
# Conversion of double to fixed point values
#
# - 8000 gives 8000 in C (int16)
# So when it is multiplied it will give the wrong sign for the result
# of the multiplication except if DSPE instructions with saturation are used
# to compute the negate (and we should get 7FFF).
#
# So for cortex-m without DSP extension, we should try to use 8001
# It is done but not yet tested.
Q7=1
Q15=2
Q31=3
F16=4
F32=5
F64=6
# In the final C++ code, we have a loop for a given radix.
# The input list here has not grouped the factors.
# The list need to be transformed into a list of pair.
# The pair being (radix,exponent)
# Compute the grouped factors for the the FFT length originaln
# where the only possible radix are in primitiveFactors list.
# Apply the radix decomposition to compute the input -> output permutation
# computed by the FFT.
# CFFT Twiddle table
# RFFT twiddle for the merge and split steps.
# Compute the twiddle tables
NB_ELEMS_PER_LINE=3
# Generate C array content for a given datatype
# Print a C array
# Using the type, dpse mode, name
# (dpse mode is for knowing if 0x8000 must be generated as 8000 or 8001
# to avoid sign issues when multiplying with the twiddles)
# Convert a float value to a given datatype.
# Print UINT arrays for permutations.
# Configuration structures for CFFT and RFFT
cfftconfig = """cfftconfig<%s> config%d={
.normalization=%s,
.nbPerms=%s,
.perms=perm%d,
.nbTwiddle=%s,
.twiddle=twiddle%d,
.nbGroupedFactors=%d,
.nbFactors=%d,
.factors=factors%d,
.lengths=lengths%d,
.format=%d,
.reversalVectorizable=%d
};"""
rfftconfig = """rfftconfig<%s> config%d={
.nbTwiddle=%s,
.twiddle=twiddle%d
};"""
fftconfigHeader = """extern %sconfig<%s> config%d;"""
fftFactorArray = """const uint16_t factors%d[%d]=%s;\n"""
fftLengthArray = """const uint16_t lengths%d[%d]=%s;\n"""
# Descriptino of a permutation
class Twiddle:
TwiddleId = 0
    def writeTwidHeader(self,h):
        """Write the declaration for this twiddle table to the header stream h.

        The array length is 2*nbTwiddles because the table stores complex
        values as interleaved real/imaginary components.
        NOTE(review): self.datatype / self.twiddleID / self.nbTwiddles are set
        elsewhere in this class (not visible here) — confirm before relying on them.
        """
        ctype=getCtype(self.datatype)
        # Twiddle is a complex array so 2*nbSamples must be used
        printArrayHeader(h,ctype,"twiddle%d" % self.twiddleID,2*self.nbTwiddles)
    def writeTwidCode(self,c):
        """Write the C array definition with the twiddle sample values to stream c.

        NOTE(review): self.samples and self._mode are initialized elsewhere in
        this class (not visible here); _mode presumably selects the 0x8000 vs
        0x8001 fixed-point handling described at the top of the file — confirm.
        """
        ctype=getCtype(self.datatype)
        printArray(c,ctype,self._mode,"twiddle%d" % self.twiddleID,self.samples)
class Config:
ConfigID = 0
    def writeConfigHeader(self,c):
        """Emit the extern declaration for this transform's config struct to stream c.

        fftconfigHeader expands to e.g. 'extern cfftconfig<float32_t> config0;'
        using the lowercased transform name ('cfft' or 'rfft') as prefix.
        """
        ctype=getCtype(self.twiddle.datatype)
        print(fftconfigHeader % (self.transform.lower(),ctype,self.configID),file=c)
    def writeConfigCode(self,c):
        """Emit the C initializer for this transform's configuration struct.

        For an RFFT only the twiddle table is referenced; for a CFFT the
        struct additionally records the normalization factor, the permutation
        table, the radix factorization and, for fixed-point datatypes, the
        accumulated output format shift.
        NOTE(review): self.transform / self.twiddle / self.perms / self._coreMode
        are set elsewhere in this class (not visible here).
        """
        ctype=getCtype(self.twiddle.datatype)
        # Symbolic length macro, e.g. "NB_TWIDDLE0", matching the header output
        twiddleLen = "NB_" + ("twiddle%d"% self.twiddle.twiddleID).upper()
        if self.transform == "RFFT":
           print(rfftconfig % (ctype,self.configID,twiddleLen,self.twiddle.twiddleID),file=c)
        else:
            # 1/N normalization, converted to the target datatype's literal form
            normfactor = 1.0 / self.twiddle.nbSamples
            normFactorStr = convertToDatatype(normfactor,ctype,self._coreMode)
            permsLen = "NB_" + ("perm%d"% self.perms.permID).upper()
            outputFormat = 0
            #print(self.twiddle.datatype)
            #print(self.twiddle.nbSamples)
            #print(self.perms.factors)
            # For fixed point, each stage will change the output format.
            # We need to compute the final format of the FFT
            # and record it in the initialization structure
            # so that the user can easily know how to recover the
            # input format (q31, q15). It is encoded as a shift value.
            # The shift to apply to recover the input format
            # But applying this shift will saturate the result in general.
            if self.twiddle.datatype == "q15" or self.twiddle.datatype == "q31":
                for f in self.perms.factors:
                    #print(f,self.twiddle.datatype,self._coreMode)
                    # The file "formats.py" is describing the format of each radix
                    # and is used to compute the format of the FFT based
                    # on the decomposition of its length.
                    #
                    # Currently (since there is no vector version for fixed point)
                    # this is not taking into account the format change that may
                    # be implied by the vectorization in case it may be different
                    # from the scalar version.
                    formatForSize = config.formats.formats[f][self._coreMode]
                    outputFormat += formatForSize[self.twiddle.datatype]
            # Flag recorded as int (0/1) in the generated C struct
            vectorizable=0
            if self.perms.isVectorizable:
                vectorizable = 1
            print(cfftconfig % (ctype,self.configID,normFactorStr,permsLen,self.perms.permID,
                twiddleLen,self.twiddle.twiddleID,self.perms.nbGroupedFactors,self.perms.nbFactors,
                self.perms.permID,self.perms.permID,outputFormat,vectorizable
                ),file=c)
| 27.463934 | 98 | 0.595595 |
a179d95ca52452ffb3320f8150fc8f1ca9d9de24 | 1,275 | py | Python | classification/resnetOnnx_inference_dynamicInput.py | SahilChachra/Onnx-Deposit | 6cdf03903639166a43e0c809b67621a1aa2449dd | [
"BSD-3-Clause"
] | null | null | null | classification/resnetOnnx_inference_dynamicInput.py | SahilChachra/Onnx-Deposit | 6cdf03903639166a43e0c809b67621a1aa2449dd | [
"BSD-3-Clause"
] | null | null | null | classification/resnetOnnx_inference_dynamicInput.py | SahilChachra/Onnx-Deposit | 6cdf03903639166a43e0c809b67621a1aa2449dd | [
"BSD-3-Clause"
] | null | null | null | '''
This inference script takes in images of dynamic size
Runs inference in batch
** In this images have been resized but not need for this script
'''
import onnx
import onnxruntime as ort
import numpy as np
import cv2
from imagenet_classlist import get_class
import os
model_path = 'resnet18.onnx'
model = onnx.load(model_path)
image_path = "../sample_images"
try:
print("Checking model...")
onnx.checker.check_model(model)
onnx.helper.printable_graph(model.graph)
print("Model checked...")
print("Running inference...")
ort_session = ort.InferenceSession(model_path)
img_list = []
for image in os.listdir(image_path):
img = cv2.imread(os.path.join(image_path, image), cv2.IMREAD_COLOR)
img = cv2.resize(img, ((224, 224)))
img = np.moveaxis(img, -1, 0) # (Batch_size, channels, width, heigth)
img_list.append(img/255.0) # Normalize the image
outputs = ort_session.run(None, {"input":img_list})
out = np.array(outputs)
for image_num, image_name in zip(range(out.shape[1]), os.listdir(image_path)):
index = out[0][image_num]
print("Image : {0}, Class : {1}".format(image_name, get_class(np.argmax(index))))
except Exception as e:
print("Exception occured : ", e) | 28.977273 | 89 | 0.680784 |
a17a4e7f440bd9450eae4bfedcba472184cfe212 | 3,857 | py | Python | demo/Master/TaskMaker.py | build2last/JOCC | 8eedaa923c6444a32e53e03fdd2a85a8031c46f5 | [
"MIT"
] | null | null | null | demo/Master/TaskMaker.py | build2last/JOCC | 8eedaa923c6444a32e53e03fdd2a85a8031c46f5 | [
"MIT"
] | null | null | null | demo/Master/TaskMaker.py | build2last/JOCC | 8eedaa923c6444a32e53e03fdd2a85a8031c46f5 | [
"MIT"
] | null | null | null | # coding:utf-8
import time
import MySQLdb
import conf
import Server
# Another way to load data to MySQL:
# load data infile "C://ProgramData/MySQL/MySQL Server 5.7/Uploads/track_info_url_0_part0.txt" ignore into table develop.task(mid, url);
# doing: load data infile "C://ProgramData/MySQL/MySQL Server 5.7/Uploads/track_info_url_1_part1.txt" ignore into table develop.task(mid, url);
if __name__ == '__main__':
tick = time.time()
main()
tock = time.time()
print("Cost %d s"%(tock - tick)) | 38.959596 | 221 | 0.577392 |
a17ebf74350b134333915aa09bd51888d3742c03 | 770 | py | Python | Inclass/python/sort_order_testing/sort_order.py | chenchuw/EC602-Design-by-Software | c233c9d08a67abc47235282fedd866d67ccaf4ce | [
"MIT"
] | null | null | null | Inclass/python/sort_order_testing/sort_order.py | chenchuw/EC602-Design-by-Software | c233c9d08a67abc47235282fedd866d67ccaf4ce | [
"MIT"
] | null | null | null | Inclass/python/sort_order_testing/sort_order.py | chenchuw/EC602-Design-by-Software | c233c9d08a67abc47235282fedd866d67ccaf4ce | [
"MIT"
] | 1 | 2022-01-11T20:23:47.000Z | 2022-01-11T20:23:47.000Z | #!/Users/francischen/opt/anaconda3/bin/python
#pythons sorts are STABLE: order is the same as original in tie.
# sort: key, reverse
q = ['two','twelve','One','3']
#sort q, result being a modified list. nothing is returned
q.sort()
print(q)
q = ['two','twelve','One','3',"this has lots of t's"]
q.sort(reverse=True)
print(q)
q.sort(key = f)
print(q)
q = ['twelve','two','One','3',"this has lots of t's"]
q.sort(key=f)
print(q)
#Multiple sorts
q = ['twelve','two','One','3',"this has lots of t's"]
q.sort()
q.sort(key=f)
# sort based on 1,2,and then 3
# sort 3, then sort 2, then sort 1
print(q)
q = ['two','otw','wot','Z','t','tt','longer t']
q.sort(key=complicated)
print(q) | 18.333333 | 64 | 0.62987 |
a17ec4639df7fdbb530566bb66941b664210b137 | 96 | py | Python | bhinneka/utils.py | kangfend/scrapy-bhinneka | a4a6e4ae5295e8bf83b213c1dace9c7de70f128c | [
"MIT"
] | 1 | 2016-10-04T10:10:05.000Z | 2016-10-04T10:10:05.000Z | bhinneka/utils.py | kangfend/scrapy-bhinneka | a4a6e4ae5295e8bf83b213c1dace9c7de70f128c | [
"MIT"
] | null | null | null | bhinneka/utils.py | kangfend/scrapy-bhinneka | a4a6e4ae5295e8bf83b213c1dace9c7de70f128c | [
"MIT"
] | null | null | null | from bhinneka.settings import BASE_URL
| 16 | 38 | 0.78125 |
a17ef045f77adc98f9fc666a8b89d72884c7ebf6 | 287 | py | Python | tests/test_vsan/vars.py | wardy3/mdssdk | 393102fab146917a3893b6aa2bd6a0449ad491c5 | [
"Apache-2.0"
] | 4 | 2020-12-13T20:02:43.000Z | 2022-02-27T23:36:58.000Z | tests/test_vsan/vars.py | wardy3/mdssdk | 393102fab146917a3893b6aa2bd6a0449ad491c5 | [
"Apache-2.0"
] | 13 | 2020-09-23T07:30:15.000Z | 2022-03-30T01:12:25.000Z | tests/test_vsan/vars.py | wardy3/mdssdk | 393102fab146917a3893b6aa2bd6a0449ad491c5 | [
"Apache-2.0"
] | 12 | 2020-05-11T09:33:21.000Z | 2022-03-18T11:11:28.000Z | import logging
import random
log = logging.getLogger(__name__)
reserved_id = [4079, 4094]
boundary_id = [0, 4095]
# No need to have end=4094 as there are some inbetween vsans reserved for fport-channel-trunk
| 22.076923 | 93 | 0.756098 |
a17f452cabac62c273c6e040b99703605a01fbfa | 1,403 | py | Python | testing_ideas/try_pymed_package/try_pymed_and_ss_api.py | hschilling/data-collection-and-prep | b70ab54fd887592bad05d5748f492fc2f9ef0f6f | [
"Unlicense"
] | null | null | null | testing_ideas/try_pymed_package/try_pymed_and_ss_api.py | hschilling/data-collection-and-prep | b70ab54fd887592bad05d5748f492fc2f9ef0f6f | [
"Unlicense"
] | 41 | 2021-01-01T14:01:30.000Z | 2021-01-27T20:17:21.000Z | testing_ideas/try_pymed_package/try_pymed_and_ss_api.py | hschilling/data-collection-and-prep | b70ab54fd887592bad05d5748f492fc2f9ef0f6f | [
"Unlicense"
] | 5 | 2021-02-08T14:19:35.000Z | 2021-10-19T12:10:55.000Z | # Use the pymed package to call the PubMed API to get lots of papers from, in this case, JEB
from pymed import PubMed
import pandas as pd
import requests
_REQUESTS_TIMEOUT = 3.0
df_jeb = pd.DataFrame(columns=['title', 'abstract'])
df_jeb = df_jeb.convert_dtypes()
pubmed = PubMed(tool="MyTool", email="my@email.address")
# query = '("The Journal of experimental biology"[Journal]) AND (("2002/01/01"[Date - Publication] : "3000"[Date - Publication]))'
query = '("The Journal of experimental biology"[Journal]) AND (("2002/01/01"[Date - Publication] : "2018/10/10"[Date - Publication]))'
# results = pubmed.query(query, max_results=10000)
results = pubmed.query(query, max_results=100)
for r in results:
doi = "http://dx.doi.org/" + r.doi if r.doi else ''
df_jeb = df_jeb.append(
{'title': r.title,
'abstract': r.abstract,
'doi': doi,
'pmid': f"https://pubmed.ncbi.nlm.nih.gov/{r.pubmed_id}/",
},
ignore_index=True)
ss_api_url = f'https://api.semanticscholar.org/v1/paper/{r.doi}'
response = requests.get(ss_api_url, timeout=_REQUESTS_TIMEOUT)
ss_api_results = response.json()
print('is open access', ss_api_results['is_open_access'])
if r.title.startswith("Bumb"):
print(response)
print('is open access', ss_api_results['is_open_access'])
df_jeb.to_csv("pubmed_titles_abstracts_doi_pmid_100_only.csv")
| 40.085714 | 134 | 0.68211 |
a17f65f1db1e9d6fc0255b219c8e7f7acd085081 | 287 | py | Python | simple_functions/__init__.py | JihaoXin/ci_acse1 | 6ba30368cc2000bb13aab0dc213837d530753612 | [
"MIT"
] | null | null | null | simple_functions/__init__.py | JihaoXin/ci_acse1 | 6ba30368cc2000bb13aab0dc213837d530753612 | [
"MIT"
] | null | null | null | simple_functions/__init__.py | JihaoXin/ci_acse1 | 6ba30368cc2000bb13aab0dc213837d530753612 | [
"MIT"
] | null | null | null | from .functions1 import my_sum, factorial
from .constants import pi
from .print import myprint
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
pass
| 28.7 | 64 | 0.811847 |
a17f75ddc89a6583319e9dcd13c17dded131aa22 | 1,259 | bzl | Python | tools/build_defs/native_tools/tool_access.bzl | slsyy/rules_foreign_cc | 34ab7f86a3ab1b2381cb4820d08a1c892f55bf54 | [
"Apache-2.0"
] | null | null | null | tools/build_defs/native_tools/tool_access.bzl | slsyy/rules_foreign_cc | 34ab7f86a3ab1b2381cb4820d08a1c892f55bf54 | [
"Apache-2.0"
] | null | null | null | tools/build_defs/native_tools/tool_access.bzl | slsyy/rules_foreign_cc | 34ab7f86a3ab1b2381cb4820d08a1c892f55bf54 | [
"Apache-2.0"
] | null | null | null | # buildifier: disable=module-docstring
load(":native_tools_toolchain.bzl", "access_tool")
| 38.151515 | 111 | 0.669579 |
a1813bf8f98dea1f19c9411401522d50224116bd | 5,805 | py | Python | tests/test_model.py | jakehadar/py-snake | 3c19d572afb275768c504c66d331b5727515fd71 | [
"MIT"
] | null | null | null | tests/test_model.py | jakehadar/py-snake | 3c19d572afb275768c504c66d331b5727515fd71 | [
"MIT"
] | null | null | null | tests/test_model.py | jakehadar/py-snake | 3c19d572afb275768c504c66d331b5727515fd71 | [
"MIT"
] | 1 | 2021-11-30T10:14:32.000Z | 2021-11-30T10:14:32.000Z | # -*- coding: utf-8 -*-
import sys
import pytest
from snake.common import Frame, Point, BoundaryCollision, SelfCollision
from snake.config import GameConfig
from snake.model import SnakeModel
class TestSnakeModelInitialState:
def test_length(self, model):
assert len(model) == 1
| 28.880597 | 102 | 0.64186 |
a1823c37136cd59bed9a94266ef25fc93fb40d71 | 255 | py | Python | gallery/photo/urls.py | andyjohn23/django-photo | e65ee3ab6fdad3a9d836d32b7f1026efcc728a41 | [
"MIT"
] | null | null | null | gallery/photo/urls.py | andyjohn23/django-photo | e65ee3ab6fdad3a9d836d32b7f1026efcc728a41 | [
"MIT"
] | null | null | null | gallery/photo/urls.py | andyjohn23/django-photo | e65ee3ab6fdad3a9d836d32b7f1026efcc728a41 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name="index"),
path('category/<category>/', views.CategoryListView.as_view(), name="category"),
path('search/', views.image_search, name='image-search'),
] | 31.875 | 84 | 0.686275 |