Dataset schema (one record per source file; ⌀ indicates columns that can contain null):

hexsha: string (length 40)
size: int64 (5 to 2.06M)
ext: string (10 classes)
lang: string (1 class)
max_stars_repo_path: string (length 3 to 248)
max_stars_repo_name: string (length 5 to 125)
max_stars_repo_head_hexsha: string (length 40 to 78)
max_stars_repo_licenses: list (length 1 to 10)
max_stars_count: int64 (1 to 191k) ⌀
max_stars_repo_stars_event_min_datetime: string (length 24) ⌀
max_stars_repo_stars_event_max_datetime: string (length 24) ⌀
max_issues_repo_path: string (length 3 to 248)
max_issues_repo_name: string (length 5 to 125)
max_issues_repo_head_hexsha: string (length 40 to 78)
max_issues_repo_licenses: list (length 1 to 10)
max_issues_count: int64 (1 to 67k) ⌀
max_issues_repo_issues_event_min_datetime: string (length 24) ⌀
max_issues_repo_issues_event_max_datetime: string (length 24) ⌀
max_forks_repo_path: string (length 3 to 248)
max_forks_repo_name: string (length 5 to 125)
max_forks_repo_head_hexsha: string (length 40 to 78)
max_forks_repo_licenses: list (length 1 to 10)
max_forks_count: int64 (1 to 105k) ⌀
max_forks_repo_forks_event_min_datetime: string (length 24) ⌀
max_forks_repo_forks_event_max_datetime: string (length 24) ⌀
content: string (length 5 to 2.06M)
avg_line_length: float64 (1 to 1.02M)
max_line_length: int64 (3 to 1.03M)
alphanum_fraction: float64 (0 to 1)
count_classes: int64 (0 to 1.6M)
score_classes: float64 (0 to 1)
count_generators: int64 (0 to 651k)
score_generators: float64 (0 to 1)
count_decorators: int64 (0 to 990k)
score_decorators: float64 (0 to 1)
count_async_functions: int64 (0 to 235k)
score_async_functions: float64 (0 to 1)
count_documentation: int64 (0 to 1.04M)
score_documentation: float64 (0 to 1)
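As a minimal, hypothetical sketch of how records with this schema can be inspected (the two rows below are transcribed from the records shown further down; the DataFrame construction simply stands in for however the full dataset is actually loaded), a pandas filter over the score columns looks like this:

import pandas as pd

# Hypothetical example: two rows copied from the records on this page, restricted
# to a handful of the 39 columns, just to illustrate the schema in use.
rows = pd.DataFrame(
    {
        "hexsha": [
            "a12a09f22c4f5e88f0c6271dc4b2b3de7f615fa8",
            "a12aedcd932c89aac78464696ed1d71cb2034b31",
        ],
        "size": [932, 9969],
        "ext": ["py", "py"],
        "max_stars_count": [2, None],  # null counts become NaN
        "avg_line_length": [26.628571, 39.403162],
        "score_documentation": [0.453863, 0.367038],
    }
)

# Keep files whose documentation score is at least 0.4 and report basic stats.
well_documented = rows[rows["score_documentation"] >= 0.4]
print(well_documented[["hexsha", "size", "avg_line_length"]])
print("mean size:", well_documented["size"].mean())

Running this keeps only the first row (score_documentation 0.453863); this is the kind of per-file filtering the score_* and count_* columns make possible.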
hexsha: a12a09f22c4f5e88f0c6271dc4b2b3de7f615fa8 | size: 932 | ext: py | lang: Python
max_stars_repo: brodigan-e/capstone-POV @ 8ba8bf49e168a1c27a9a252d0f7af375a4e0bb5b | path: server/migrations/versions/4a916694f1ba_add_initial_image_table.py | licenses: ["MIT"] | stars: 2 (2020-10-02T20:49:48.000Z to 2020-10-06T01:19:13.000Z)
max_issues_repo: brodigan-e/capstone-POV @ 8ba8bf49e168a1c27a9a252d0f7af375a4e0bb5b | path: server/migrations/versions/4a916694f1ba_add_initial_image_table.py | licenses: ["MIT"] | issues: 15 (2020-10-01T05:42:06.000Z to 2020-12-07T22:48:22.000Z)
max_forks_repo: brodigan-e/capstone-POV @ 8ba8bf49e168a1c27a9a252d0f7af375a4e0bb5b | path: server/migrations/versions/4a916694f1ba_add_initial_image_table.py | licenses: ["MIT"] | forks: 1 (2020-11-12T20:47:57.000Z to 2020-11-12T20:47:57.000Z)
content:
"""Add Initial Image Table
Revision ID: 4a916694f1ba
Revises:
Create Date: 2020-10-16 02:24:18.479608
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '4a916694f1ba'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('image_upload',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=128), nullable=False),
sa.Column('path_uuid', sa.String(length=32), nullable=False),
sa.Column('uploadedAt', sa.DateTime(), nullable=False),
sa.Column('isProcessed', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('image_upload')
# ### end Alembic commands ###

avg_line_length: 26.628571 | max_line_length: 65 | alphanum_fraction: 0.683476
counts/scores: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 423/0.453863

hexsha: a12aedcd932c89aac78464696ed1d71cb2034b31 | size: 9,969 | ext: py | lang: Python
max_stars_repo: jonathansick/skyoffset @ 369f54d8a237f48cd56f550e80bf1d39b355bfcd | path: skyoffset/multisimplex.py | licenses: ["BSD-3-Clause"] | stars: null
max_issues_repo: jonathansick/skyoffset @ 369f54d8a237f48cd56f550e80bf1d39b355bfcd | path: skyoffset/multisimplex.py | licenses: ["BSD-3-Clause"] | issues: null
max_forks_repo: jonathansick/skyoffset @ 369f54d8a237f48cd56f550e80bf1d39b355bfcd | path: skyoffset/multisimplex.py | licenses: ["BSD-3-Clause"] | forks: null
content:
import os
import logging
import platform
import time
import multiprocessing
import numpy
import pymongo
# Pure python/numpy
import simplex
from scalarobj import ScalarObjective
# Cython/numpy
import cyscalarobj
import cysimplex
class MultiStartSimplex(object):
"""Baseclass for multi-start recongerging simplex solvers."""
def __init__(self, dbname, cname, url, port):
#super(MultiStartSimplex, self).__init__()
self.dbname, cname, url, port = dbname, cname, url, port
self.dbname = dbname
self.cname = cname
self.url = url
self.port = port
connection = pymongo.Connection(self.url, self.port)
self.db = connection[self.dbname]
self.collection = self.db[self.cname]
def resetdb(self):
"""Delete existing entries in the mongodb collection for this
multi simplex optimization."""
# Drop the collection, then recreate it
self.db.drop_collection(self.cname)
self.collection = self.db[self.cname]
def _prep_log_file(self):
self.startTime = time.clock() # for timing with close_log_file()
logDir = os.path.dirname(self.logPath)
if os.path.exists(logDir) is False: os.makedirs(logDir)
logging.basicConfig(filename=self.logPath, level=logging.INFO)
logging.info("STARTING NEW SIMPLEX OPTIMIZATION ====================")
hostname = platform.node()
now = time.localtime(time.time())
timeStamp = time.strftime("%y/%m/%d %H:%M:%S %Z", now)
logging.info("MultiStartSimplex started on %s at %s"
% (hostname, timeStamp))
def _close_log_file(self):
endTime = time.clock()
duration = (endTime - self.startTime) / 3600.
logging.info("ENDING SIMPLEX OPTIMIZATION. Duration: %.2f hours"
% duration)
class SimplexScalarOffsetSolver(MultiStartSimplex):
"""Uses a Multi-Start and Reconverging algorithm for converging on the
the set of scalar sky offsets that minimize coupled image differences.
The optimization is persisted in real-time to MongoDB. This means
that multiple computers could be running threads and adding results
to the same pool. While optimization is running, it is possible to
query for the best-to-date offset solution.
"""
def __init__(self, dbname="m31", cname="simplexscalar",
url="localhost", port=27017):
super(SimplexScalarOffsetSolver, self).__init__(dbname,
cname, url, port)
def multi_start(self, couplings, nTrials, logPath, initSigma=6e-10,
restartSigma=1e-11, mp=True, cython=True, log_xtol=-6.,
log_ftol=-5.):
"""Start processing using the Multi-Start Reconverging algorithm.
Parameters
----------
nTrials : int
Number of times a simplex is started.
initSigma : float
Dispersion of offsets
restartSigma : float
Dispersion of offsets about a converged point when making a
restart simplex.
mp : bool
If True, run simplexes in parallel with `multiprocessing`.
cython : bool
True to use the cython version of simplex.
"""
self.logPath = logPath
self._prep_log_file()
self.couplings = couplings
if cython:
self.objf = cyscalarobj.ScalarObjective(self.couplings)
else:
self.objf = ScalarObjective(self.couplings)
ndim = self.objf.get_ndim()
xtol = 10. ** log_xtol # frac error in offsets acceptable for conv
ftol = 10. ** log_ftol # frac error in objective function acceptable
maxiter = 100000 * ndim
maxEvals = 100000 * ndim
simplexArgs = {'xtol': xtol, 'ftol': ftol, 'maxiter': maxiter,
'maxfun': maxEvals, 'full_output': True, 'disp': True,
'retall': False, 'callback': None}
dbArgs = {'dbname': self.dbname, 'cname': self.cname, 'url': self.url,
'port': self.port}
# Create initial simplexes
argsQueue = []
for n in xrange(nTrials):
sim = numpy.zeros([ndim + 1, ndim], dtype=numpy.float64)
for i in xrange(ndim + 1):
sim[i, :] = initSigma * numpy.random.standard_normal(ndim)
args = [sim, cython, self.couplings, simplexArgs, restartSigma,
xtol, n, nTrials, self.logPath, dbArgs]
argsQueue.append(args)
# Run the queue
pool = None
if mp:
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count(),
maxtasksperchild=None)
pool.map(_simplexWorker, argsQueue)
pool.close()
pool.join()
pool.terminate()
else:
map(_simplexWorker, argsQueue)
self._close_log_file()
def find_best_offsets(self):
"""Queries the mongodb collection of simplex runs to find the
optimal result. Returns a dictionary of scalar offsets, keyed
by the field name.
"""
bestEnergy = 1e99 # running tally of best optimization result
bestOffsets = {}
recs = self.collection.find({}, ['best_fopt', 'best_offsets'])
for rec in recs:
if rec['best_fopt'] < bestEnergy:
bestEnergy = rec['best_fopt']
bestOffsets = rec['best_offsets']
# Normalize these offsets so that the net offset is zero
netOffset = 0.
fieldCount = 0
for field, offset in bestOffsets.iteritems():
netOffset += offset
fieldCount += 1
print "Net offset %.2e" % netOffset
netOffset = netOffset / fieldCount
for field, offset in bestOffsets.iteritems():
bestOffsets[field] = offset - netOffset
return bestOffsets
def init_func():
print multiprocessing.current_process().name
def _simplexWorker(argsList):
"""multiprocessing worker function for doing multi-trial simplex solving.
This essentially replaces the multi_start_simplex function in simplex.py
But this exists because it implicitly specifies the target function for the
optimization; multiprocessing can't pickle a function object.
This simplex worker has the ability to restart at the site of convergence
by constructing a simplex that is randomly distributed about the best vertex.
The simplex keeps reconverging from perturbed simplex until the reconverged
minimum matches the previous minimum. That is, I believe I have a global
minimum if the simplex returns to where it started.
"""
startTime = time.clock()
sim, useCython, couplings, kwargs, restartSigma, xTol, n, nTrials, logFilePath, dbArgs = argsList
if useCython:
objf = cyscalarobj.ScalarObjective(couplings)
else:
objf = ScalarObjective(couplings)
# Choose the simplex code
if useCython:
nm_simplex = cysimplex.nm_simplex
else:
nm_simplex = simplex.nm_simplex
#print "Running simplex %i/%i"% (n,nTrials)
Ndim = sim.shape[1]
_evalObjFunc = lambda offsets, objF: objF.compute(offsets)
# These variables keep track of how the code performs
totalFCalls = 0
nRestarts = 0
# Initial simplex compute
_xOpt, _fOpt, _nIters, _nFcalls, _warnflag = nm_simplex(objf,
sim, **kwargs)
bestFOpt = _fOpt
bestXOpt = _xOpt.copy()
totalFCalls += _nFcalls
# These arrays list the running tally of restarts vs best fopt vs total f calls
restartTally = [nRestarts]
bestFOptTally = [bestFOpt]
totalFCallTally = [totalFCalls]
# initiate restarts
while True:
nRestarts += 1
sim = numpy.zeros([Ndim+1, Ndim], dtype=numpy.float64)
sim[0,:] = bestXOpt.copy() # first vertex is the best point
for i in xrange(1,Ndim+1): # rest are randomly distributed.
sim[i,:] = restartSigma*numpy.random.standard_normal(Ndim) + bestXOpt
_xOpt, _fOpt, _nIters, _nFcalls, _warnflag = nm_simplex(objf,
sim, **kwargs)
totalFCalls += _nFcalls
# Ensure that the point has converged
convergenceFrac = (_xOpt - bestXOpt) / bestXOpt
if len(numpy.where(convergenceFrac > xTol)[0]) > 0:
# do another restart of the simplex
if _fOpt < bestFOpt:
# but we did find a new minimum
bestFOpt = _fOpt
bestXOpt = _xOpt.copy()
restartTally.append(nRestarts)
bestFOptTally.append(bestFOpt)
totalFCallTally.append(totalFCalls)
else:
# we're converged
break
# Report this in the log
runtime = time.clock() - startTime
if logFilePath is not None:
logging.basicConfig(filename=logFilePath,level=logging.INFO)
logging.info("%i/%i converged to %.4e in %.2f minutes, %i local restarts" % (n, nTrials, bestFOpt, runtime/60., nRestarts))
    # Dictionary stores the history of restarts, as well as the best solution
# as a field offset dictionary (we're breaking reusability here... just
# to make things faster.)
convergenceHistory = {"total_calls": totalFCalls, "n_restarts": nRestarts,
"runtime": runtime,
"best_offsets": objf.get_best_offsets(),
"best_fopt": bestFOpt,
"restart_hist": restartTally,
"fopt_hist": bestFOptTally,
"fcall_hist": totalFCallTally}
# Connect to MongoDB and add our convergence history!
try:
connection = pymongo.Connection(dbArgs['url'], dbArgs['port'])
db = connection[dbArgs['dbname']]
collection = db[dbArgs['cname']]
collection.insert(convergenceHistory, safe=True)
except pymongo.errors.AutoReconnect:
logging.info("pymongo.errors.AutoReconnect on %i"%n)
# collection.database.connection.disconnect()

avg_line_length: 39.403162 | max_line_length: 131 | alphanum_fraction: 0.634467
counts/scores: classes 5,667/0.568462 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 3,659/0.367038

hexsha: a12b99b03f4c428fc4fbd3c7f3bfcb53005d0cea | size: 695 | ext: py | lang: Python
max_stars_repo: cart-com/netsuite-sdk-py @ 9c759b631f7a194efb86c06e1935cdc2856200d3 | path: netsuitesdk/api/custom_records.py | licenses: ["MIT"] | stars: null
max_issues_repo: cart-com/netsuite-sdk-py @ 9c759b631f7a194efb86c06e1935cdc2856200d3 | path: netsuitesdk/api/custom_records.py | licenses: ["MIT"] | issues: null
max_forks_repo: cart-com/netsuite-sdk-py @ 9c759b631f7a194efb86c06e1935cdc2856200d3 | path: netsuitesdk/api/custom_records.py | licenses: ["MIT"] | forks: null
content:
from netsuitesdk.internal.utils import PaginatedSearch
from .base import ApiBase
import logging
logger = logging.getLogger(__name__)
class CustomRecords(ApiBase):
def __init__(self, ns_client):
ApiBase.__init__(self, ns_client=ns_client, type_name='CustomRecordType')
def get_all_by_id(self, internalId):
cr_type = self.ns_client.CustomRecordSearchBasic(
recType=self.ns_client.CustomRecordType(
internalId=internalId
)
)
ps = PaginatedSearch(client=self.ns_client, type_name='CustomRecordType', search_record=cr_type, pageSize=20)
return list(self._paginated_search_to_generator(paginated_search=ps))

avg_line_length: 33.095238 | max_line_length: 117 | alphanum_fraction: 0.728058
counts/scores: classes 557/0.801439 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 36/0.051799

hexsha: a12be00ef3b06e0094c89aa20c5aafe79c822021 | size: 343 | ext: py | lang: Python
max_stars_repo: zhongmicai/SV_population @ 81987865c9b67be5e358cb1b966bb69cc303abee | path: Support/renameCNVNatorOutput.py | licenses: ["MIT"] | stars: 18 (2019-03-18T00:08:18.000Z to 2021-10-19T06:21:56.000Z)
max_issues_repo: zhongmicai/SV_population @ 81987865c9b67be5e358cb1b966bb69cc303abee | path: Support/renameCNVNatorOutput.py | licenses: ["MIT"] | issues: 5 (2018-11-06T15:18:17.000Z to 2020-07-24T09:31:08.000Z)
max_forks_repo: zhongmicai/SV_population @ 81987865c9b67be5e358cb1b966bb69cc303abee | path: Support/renameCNVNatorOutput.py | licenses: ["MIT"] | forks: 2 (2019-11-13T10:28:58.000Z to 2021-09-07T08:25:12.000Z)
content:
#!/usr/bin/env python3
import os
vcfdir='/home/matt/Plasmodium/Pf_SV/Data'
for ID in os.listdir(vcfdir):
nameID = '_'.join(ID.split('.')[0].split('_')[:-1])
coreID = nameID.split('_')[-1]
if coreID[:3] == 'ERR':
os.system('cp {0}.cnvs {1}_DEL.cnvs'.format(coreID, nameID))
os.system('cp {0}.cnvs {1}_DUP.cnvs'.format(coreID, nameID))

avg_line_length: 28.583333 | max_line_length: 62 | alphanum_fraction: 0.641399
counts/scores: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 125/0.364431

hexsha: a12cb244767dfa01e9b581f3a545006ea34d4ac7 | size: 1,568 | ext: py | lang: Python
max_stars_repo: nhutnamhcmus/coding-bat-solutions @ 5f780a4027a6c3523a72961db1bad547c997fdc6 | path: string_1/hello_name.py | licenses: ["MIT"] | stars: 1 (2020-09-19T18:02:13.000Z to 2020-09-19T18:02:13.000Z)
max_issues_repo: nhutnamhcmus/coding-bat-solutions @ 5f780a4027a6c3523a72961db1bad547c997fdc6 | path: string_1/hello_name.py | licenses: ["MIT"] | issues: null
max_forks_repo: nhutnamhcmus/coding-bat-solutions @ 5f780a4027a6c3523a72961db1bad547c997fdc6 | path: string_1/hello_name.py | licenses: ["MIT"] | forks: null
content:
# =======================================================================================================================================
# VNU-HCM, University of Science
# Department Computer Science, Faculty of Information Technology
# Authors: Nhut-Nam Le (Tich Phan Suy Rong)
# © 2020
"""
Given a string name, e.g. "Bob", return a greeting of the form "Hello Bob!".
For example test case:
hello_name('Bob') → 'Hello Bob!'
hello_name('Alice') → 'Hello Alice!'
hello_name('X') → 'Hello X!'
"""
import unittest
def hello_name(name):
return "Hello " + name + "!"
class TestHelloName(unittest.TestCase):
def test_case_00(self):
self.assertEqual(hello_name('Bob'), 'Hello Bob!')
def test_case_01(self):
self.assertEqual(hello_name('Alice'), 'Hello Alice!')
def test_case_02(self):
self.assertEqual(hello_name('X'), 'Hello X!')
def test_case_03(self):
self.assertEqual(hello_name('Dolly'), 'Hello Dolly!')
def test_case_04(self):
self.assertEqual(hello_name('Alpha'), 'Hello Alpha!')
def test_case_05(self):
self.assertEqual(hello_name('Omega'), 'Hello Omega!')
def test_case_06(self):
self.assertEqual(hello_name('Goodbye'), 'Hello Goodbye!')
def test_case_07(self):
self.assertEqual(hello_name('ho ho ho'), 'Hello ho ho ho!')
def test_case_08(self):
self.assertEqual(hello_name('xyz!'), 'Hello xyz!!')
def test_case_09(self):
self.assertEqual(hello_name('Hello'), 'Hello Hello!')
if __name__ == "__main__":
unittest.main()

avg_line_length: 27.508772 | max_line_length: 137 | alphanum_fraction: 0.598852
counts/scores: classes 944/0.599365 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 725/0.460317

hexsha: a12f2dc13e43b20caf3450c97b9fa9395b547d8a | size: 335 | ext: py | lang: Python
max_stars_repo: epsilonxe/RMUTT_09090016 @ 863dd8a6471b560831b742da4aec27209c294df5 | path: materials/ch_04/escape_str.py | licenses: ["MIT"] | stars: null
max_issues_repo: epsilonxe/RMUTT_09090016 @ 863dd8a6471b560831b742da4aec27209c294df5 | path: materials/ch_04/escape_str.py | licenses: ["MIT"] | issues: null
max_forks_repo: epsilonxe/RMUTT_09090016 @ 863dd8a6471b560831b742da4aec27209c294df5 | path: materials/ch_04/escape_str.py | licenses: ["MIT"] | forks: null
content:
text1 = '''ABCDEF
GHIJKL
MNOPQRS
TUVWXYZ
'''
text2 = 'ABCDEF\
GHIJKL\
MNOPQRS\
TUVWXYZ'
text3 = 'ABCD\'EF\'GHIJKL'
text4 = 'ABCDEF\nGHIJKL\nMNOPQRS\nTUVWXYZ'
text5 = 'ABCDEF\fGHIJKL\fMNOPQRS\fTUVWXYZ'
print(text1)
print('-' * 25)
print(text2)
print('-' * 25)
print(text3)
print('-' * 25)
print(text4)
print('-' * 25)
print(text5)

avg_line_length: 12.884615 | max_line_length: 42 | alphanum_fraction: 0.671642
counts/scores: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 168/0.501493

hexsha: a1300bc0639e795122958402aa1f3b4e0ab96874 | size: 823 | ext: py | lang: Python
max_stars_repo: Risto97/pygears @ 19393e85101a16762cb3bbbf3010946ef69217f2 | path: pygears/cookbook/reduce2.py | licenses: ["MIT"] | stars: null
max_issues_repo: Risto97/pygears @ 19393e85101a16762cb3bbbf3010946ef69217f2 | path: pygears/cookbook/reduce2.py | licenses: ["MIT"] | issues: null
max_forks_repo: Risto97/pygears @ 19393e85101a16762cb3bbbf3010946ef69217f2 | path: pygears/cookbook/reduce2.py | licenses: ["MIT"] | forks: null
content:
from pygears import gear, Intf
from pygears.common import czip
from pygears.typing import Tuple, Uint, Union, Queue
from pygears.common import fmap, demux, decoupler, fifo, union_collapse
from pygears.cookbook import priority_mux, replicate
TCfg = Tuple[{'reduce_size': Uint['w_reduce_size'], 'init': 't_acc'}]
@gear
def reduce2(din, cfg: TCfg, *, f, max_size):
acctype = cfg.dtype['init']
qtype = Queue[acctype, din.dtype.lvl - 1]
temp_res = Intf(dtype=qtype)
cfg_rep = cfg | replicate
sec_opnd = (cfg_rep, temp_res) \
| priority_mux \
| fmap(f=union_collapse, fcat=czip, lvl=1)
result = czip(din, sec_opnd) | decoupler | fmap(f=f, fcat=czip, lvl=2)
acc, fin_res = result | Union[qtype, qtype] | demux
acc | fifo(intfs=[temp_res], depth=max_size)
return fin_res

avg_line_length: 29.392857 | max_line_length: 74 | alphanum_fraction: 0.684083
counts/scores: classes 0/0 | generators 0/0 | decorators 508/0.617254 | async_functions 0/0 | documentation 47/0.057108

hexsha: a1309a770978d986e457fb2177d6163ed7ae8ec0 | size: 313 | ext: py | lang: Python
max_stars_repo: da-edra/kyopro @ ad531d15bcccf6aafdaaef3cc69db850b0f7c471 | path: atcoder/abc166D_i_hate_factorization.py | licenses: ["BSD-3-Clause"] | stars: 2 (2020-08-31T17:19:07.000Z to 2021-01-08T21:35:48.000Z)
max_issues_repo: edglaz/kyopro @ b8ac4f6873418ad20ad417e46d731c35a8062c0d | path: atcoder/abc166D_i_hate_factorization.py | licenses: ["BSD-3-Clause"] | issues: null
max_forks_repo: edglaz/kyopro @ b8ac4f6873418ad20ad417e46d731c35a8062c0d | path: atcoder/abc166D_i_hate_factorization.py | licenses: ["BSD-3-Clause"] | forks: null
content:
# unihernandez22
# https://atcoder.jp/contests/abc166/tasks/abc166_d
# math, brute force
n = int(input())
for a in range(n):
breaked = True
for b in range(-1000, 1000):
if a**5 - b**5 == n:
print(a, b)
break;
else:
breaked = False
if breaked:
break

avg_line_length: 20.866667 | max_line_length: 51 | alphanum_fraction: 0.539936
counts/scores: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 86/0.27476

hexsha: a130aee35a17b1d7653613de1de880f9a3444608 | size: 305 | ext: py | lang: Python
max_stars_repo: exityan/PySyft @ 35166c487a5be57f9ad28929ed88a8ba6bdd5aeb | path: packages/grid/apps/worker/src/main/core/database/groups/groups.py | licenses: ["Apache-2.0"] | stars: 425 (2019-09-22T06:14:53.000Z to 2022-03-30T02:17:34.000Z)
max_issues_repo: Metrix1010/PySyft @ 6477f64b63dc285059c3766deab3993653cead2e | path: packages/grid/apps/worker/src/main/core/database/groups/groups.py | licenses: ["Apache-2.0"] | issues: 352 (2019-09-17T15:32:51.000Z to 2022-03-12T01:07:35.000Z)
max_forks_repo: Metrix1010/PySyft @ 6477f64b63dc285059c3766deab3993653cead2e | path: packages/grid/apps/worker/src/main/core/database/groups/groups.py | licenses: ["Apache-2.0"] | forks: 208 (2019-09-18T18:32:10.000Z to 2022-03-24T01:10:11.000Z)
content:
# grid relative
from .. import BaseModel
from .. import db
class Group(BaseModel):
__tablename__ = "group"
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
name = db.Column(db.String(255))
def __str__(self):
return f"<Group id: {self.id}, name: {self.name}>"

avg_line_length: 21.785714 | max_line_length: 70 | alphanum_fraction: 0.655738
counts/scores: classes 243/0.796721 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 65/0.213115

hexsha: a130d81a095f620365d47a00f587d3671ea0c357 | size: 2,416 | ext: py | lang: Python
max_stars_repo: giacomotomasi/tennisball_demo @ f71cd552e64fe21533abe47b986db6999947c3a9 | path: libraries/urx_python/urx_scripts/demo_apple_tree.py | licenses: ["Apache-2.0"] | stars: null
max_issues_repo: giacomotomasi/tennisball_demo @ f71cd552e64fe21533abe47b986db6999947c3a9 | path: libraries/urx_python/urx_scripts/demo_apple_tree.py | licenses: ["Apache-2.0"] | issues: null
max_forks_repo: giacomotomasi/tennisball_demo @ f71cd552e64fe21533abe47b986db6999947c3a9 | path: libraries/urx_python/urx_scripts/demo_apple_tree.py | licenses: ["Apache-2.0"] | forks: null
content:
import urx
import logging
import time
if __name__ == "__main__":
logging.basicConfig(level=logging.WARN)
#gripper_remove_pos = [0.0755, -0.2824, 0.3477, -0.0387, -3.0754, 0.4400] # rest position (good to place/remove gripper)
rob = urx.Robot("192.168.56.1")
#rob.set_tcp((0,0,0,0,0,0))
#rob.set_payload(0.5, (0,0,0))
home_pos = [-0.0153, -0.4213, 0.3469, 1.2430, 2.6540, -0.9590]
appro1 = [-0.0762, -0.5575, 0.3546, 0.6110, 2.7090, -1.7840]
apple1 = [-0.1042, -0.6244, 0.3209, 1.4510, 1.9160, -1.4980]
get_far1 = [-0.0510, -0.5086, 0.3215, 0.4900, 2.6510, -1.8690]
appro2 = [-0.1767, -0.4281, 0.3204, 1.8210, 2.0030, -1.5280]
apple2 = [-0.2129, -0.4926, 0.2951, 1.8210, 2.0030, -1.5280]
get_far2 = [-0.1324, -0.3790, 0.3112, 1.8210, 2.0030, -1.5280]
appro_place = [0.3571, -0.3540, 0.3563, 1.2360, 2.8850, -0.0780]
place_pos = [0.3571, -0.3540, 0.2983, 1.2360, 2.8850, -0.0780]
try:
v = 0.2
a = 0.3
rob.set_digital_out(0,0) # initialize gripper
# open gripper
rob.set_digital_out(0, 1)
time.sleep(0.5)
rob.set_digital_out(0,0)
pose = rob.getl() #gives a lists with 6 elements (x, y, z, rx, ry, rz) --> rotation vector
#print("robot tcp is at: ", pose)
# move to home position
#rob.movej(joint_pose, acc=a, vel=v) # it takes as inputs the joints goal values!
rob.movej_to_pose(home_pos, acc=a, vel=0.3)
time.sleep(0.01)
# move towards the first apple to pick (approach it, move to a suitable grabbing position, get away)
rob.movej_to_pose(appro1, acc=a, vel=v)
time.sleep(0.01)
rob.movel(apple1, acc=a, vel=v)
# close gripper
rob.set_digital_out(0, 1)
time.sleep(0.5)
rob.set_digital_out(0,0)
time.sleep(1)
rob.movel(get_far1, a, v)
#move towards the place position
rob.movej_to_pose(appro_place, a, vel=0.3)
time.sleep(0.01)
rob.movel(place_pos, a, v)
# open gripper
rob.set_digital_out(0, 1)
time.sleep(0.5)
rob.set_digital_out(0,0)
time.sleep(1)
rob.movel(appro_place, a, v)
# move to home position
rob.movej_to_pose(home_pos, a, v)
pose_final = rob.getl()
print("robot tcp is at (final): ", pose_final)
finally:
rob.close()

avg_line_length: 32.213333 | max_line_length: 124 | alphanum_fraction: 0.577815
counts/scores: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 655/0.271109

hexsha: a1311c3c3114e32c6b986776dfaae1a0d9bb6825 | size: 403 | ext: py | lang: Python
max_stars_repo: jungyoonoh/baekjoon-1 @ 2b4437a4b5e06244fa47fae6c7b7be0157d0f94f | path: solution/data_structure2/1302/main.py | licenses: ["MIT"] | stars: 2,236 (2019-08-05T00:36:59.000Z to 2022-03-31T16:03:53.000Z)
max_issues_repo: juy4556/baekjoon @ bc0b0a0ebaa45a5bbd32751f84c458a9cfdd9f92 | path: solution/data_structure2/1302/main.py | licenses: ["MIT"] | issues: 225 (2020-12-17T10:20:45.000Z to 2022-01-05T17:44:16.000Z)
max_forks_repo: juy4556/baekjoon @ bc0b0a0ebaa45a5bbd32751f84c458a9cfdd9f92 | path: solution/data_structure2/1302/main.py | licenses: ["MIT"] | forks: 602 (2019-08-05T00:46:25.000Z to 2022-03-31T13:38:23.000Z)
content:
# Authored by : gusdn3477
# Co-authored by : -
# Link : http://boj.kr/8adc986ae26b461eadd65abdff3cfba9
import sys
def input():
return sys.stdin.readline().rstrip()
N = int(input())
book = {}
for i in range(N):
name = input()
if name not in book:
book[name] = 1
else:
book[name] += 1
book = list(book.items())
book.sort(key = lambda x : (-x[1],x[0]))
print(book[0][0])

avg_line_length: 19.190476 | max_line_length: 55 | alphanum_fraction: 0.600496
counts/scores: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 100/0.248139

hexsha: a13162f4cb62e368c73037f36a88c321b285f2d8 | size: 1,152 | ext: py | lang: Python
max_stars_repo: testflows/TestFlows-Core @ 0aa17247dffd2f7199465031ab16cc4f12c9cfb0 | path: testflows/_core/utils/sort.py | licenses: ["Apache-2.0"] | stars: 3 (2020-06-25T19:23:19.000Z to 2021-10-20T19:29:56.000Z)
max_issues_repo: testflows/TestFlows-Core @ 0aa17247dffd2f7199465031ab16cc4f12c9cfb0 | path: testflows/_core/utils/sort.py | licenses: ["Apache-2.0"] | issues: null
max_forks_repo: testflows/TestFlows-Core @ 0aa17247dffd2f7199465031ab16cc4f12c9cfb0 | path: testflows/_core/utils/sort.py | licenses: ["Apache-2.0"] | forks: 1 (2020-02-24T12:31:45.000Z to 2020-02-24T12:31:45.000Z)
content:
# Copyright 2020 Katteli Inc.
# TestFlows.com Open-Source Software Testing Framework (http://testflows.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
def human(l, key=None):
"""Sort in human readable format.
Credit: https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/
:key: optional function to retrieve the key from the element
"""
get_key = key
if get_key is None:
get_key = lambda x: x
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', get_key(key)) ]
l.sort(key=alphanum_key)
return l

avg_line_length: 38.4 | max_line_length: 89 | alphanum_fraction: 0.717882
counts/scores: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 839/0.728299

hexsha: a13291eccf29b835c30e820b06c59c45c1cf58bf | size: 3,220 | ext: py | lang: Python
max_stars_repo: cyliangtw/mbed-tools @ 69c600c0a5ac1eb0d52b481b5ba020da8bb73d33 | path: tests/build/test_flash.py | licenses: ["Apache-2.0"] | stars: 39 (2020-04-03T13:52:34.000Z to 2022-03-23T13:08:22.000Z)
max_issues_repo: cyliangtw/mbed-tools @ 69c600c0a5ac1eb0d52b481b5ba020da8bb73d33 | path: tests/build/test_flash.py | licenses: ["Apache-2.0"] | issues: 306 (2020-02-06T18:08:43.000Z to 2022-03-25T14:50:18.000Z)
max_forks_repo: cyliangtw/mbed-tools @ 69c600c0a5ac1eb0d52b481b5ba020da8bb73d33 | path: tests/build/test_flash.py | licenses: ["Apache-2.0"] | forks: 23 (2020-03-17T11:42:23.000Z to 2022-01-30T02:56:18.000Z)
content:
#
# Copyright (c) 2020-2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
import pathlib
import tempfile
from unittest import TestCase, mock
from mbed_tools.build.flash import flash_binary, _build_binary_file_path, _flash_dev
from mbed_tools.build.exceptions import BinaryFileNotFoundError
from tests.build.factories import DeviceFactory
@mock.patch("mbed_tools.build.flash._build_binary_file_path")
@mock.patch("mbed_tools.build.flash._flash_dev")
class TestFlashBinary(TestCase):
def test_check_flashing(self, _flash_dev, _build_binary_file_path):
test_device = DeviceFactory()
_flash_dev.return_value = True
with tempfile.TemporaryDirectory() as tmpDir:
base_dir = pathlib.Path(tmpDir)
build_dir = base_dir / "cmake_build"
build_dir.mkdir()
bin_file = base_dir.name + ".bin"
bin_file = build_dir / bin_file
bin_file.touch()
_build_binary_file_path.return_value = bin_file
flash_binary(test_device.mount_points[0].resolve(), base_dir, build_dir, "TEST", False)
_build_binary_file_path.assert_called_once_with(base_dir, build_dir, False)
_flash_dev.assert_called_once_with(test_device.mount_points[0].resolve(), bin_file)
class TestBuildBinFilePath(TestCase):
def test_build_bin_file_path(self):
with tempfile.TemporaryDirectory() as tmpDir:
base_dir = pathlib.Path(tmpDir)
build_dir = base_dir / "cmake_build"
build_dir.mkdir()
bin_file = base_dir.name + ".bin"
bin_file = build_dir / bin_file
bin_file.touch()
self.assertEqual(_build_binary_file_path(base_dir, build_dir, False), bin_file)
def test_build_hex_file_path(self):
with tempfile.TemporaryDirectory() as tmpDir:
base_dir = pathlib.Path(tmpDir)
build_dir = base_dir / "cmake_build"
build_dir.mkdir()
bin_file = base_dir.name + ".hex"
bin_file = build_dir / bin_file
bin_file.touch()
self.assertEqual(_build_binary_file_path(base_dir, build_dir, True), bin_file)
def test_missing_binary_file(self):
with tempfile.TemporaryDirectory() as tmpDir:
base_dir = pathlib.Path(tmpDir)
build_dir = base_dir / "cmake_build"
build_dir.mkdir()
with self.assertRaises(BinaryFileNotFoundError):
_build_binary_file_path(base_dir, build_dir, False)
@mock.patch("mbed_tools.build.flash.shutil.copy")
class TestCopyToDevice(TestCase):
def test_copy_to_device(self, copy):
test_device = DeviceFactory()
with tempfile.TemporaryDirectory() as tmpDir:
base_dir = pathlib.Path(tmpDir)
build_dir = base_dir / "cmake_build"
build_dir.mkdir()
bin_file = base_dir.name + ".bin"
bin_file = build_dir / bin_file
bin_file.touch()
_flash_dev(test_device.mount_points[0].resolve(), bin_file)
copy.assert_called_once_with(bin_file, test_device.mount_points[0].resolve(), follow_symlinks=False)

avg_line_length: 36.590909 | max_line_length: 112 | alphanum_fraction: 0.675776
counts/scores: classes 2,663/0.827019 | generators 0/0 | decorators 1,581/0.490994 | async_functions 0/0 | documentation 329/0.102174

hexsha: a133567cd81f4bb8edf05a69d95e9fb2d7bf451d | size: 2,795 | ext: py | lang: Python
max_stars_repo: RogerDeng/HoneyBot @ 3843ec6d684786091ced053857d1718ef1fa495c | path: packettotal_sdk/search_tools.py | licenses: ["MIT"] | stars: 67 (2019-08-16T05:03:19.000Z to 2021-11-25T01:48:23.000Z)
max_issues_repo: RogerDeng/HoneyBot @ 3843ec6d684786091ced053857d1718ef1fa495c | path: packettotal_sdk/search_tools.py | licenses: ["MIT"] | issues: 1 (2020-09-01T02:40:31.000Z to 2020-09-01T02:40:31.000Z)
max_forks_repo: RogerDeng/HoneyBot @ 3843ec6d684786091ced053857d1718ef1fa495c | path: packettotal_sdk/search_tools.py | licenses: ["MIT"] | forks: 16 (2020-02-20T12:38:40.000Z to 2022-03-22T17:45:25.000Z)
content:
import time
import typing
import requests
from sys import stderr
from datetime import datetime
from packettotal_sdk import packettotal_api
class SearchTools(packettotal_api.PacketTotalApi):
def __init__(self, api_key: str):
"""
:param api_key: An API authentication token
"""
super().__init__(api_key)
def search_by_pcap(self, pcap_file_obj: typing.BinaryIO) -> requests.Response:
"""
        Search by a pcap/pcapng file, get a list of similar packet captures
:param pcap_file_obj: A file like object that provides a .read() interface (E.G open('path_to_pcap.pcap, 'rb') )
:return: A request.Response instance, containing a graph of similar pcaps with matched terms
"""
response = super().analyze(pcap_file_obj)
if response.status_code == 200:
sim_response = super().pcap_similar(response.json()['pcap_metadata']['md5'])
elif response.status_code == 202:
pcap_id = response.json()['id']
info_response = super().pcap_info(pcap_id)
while info_response.status_code == 404:
print('[{}] Waiting for {} to finish analyzing.'.format(datetime.utcnow(), pcap_id))
info_response = super().pcap_info(response.json()['id'])
time.sleep(10)
print('[{}] Fetching results for {}.'.format(datetime.utcnow(), pcap_id))
time.sleep(5)
sim_response = super().pcap_similar(response.json()['id'])
else:
return response
return sim_response
def search_by_iocs(self, ioc_file: typing.TextIO) -> requests.Response:
"""
Search up to 100 IOC terms at once, and get matching packet captures
:param ioc_file: A file like object that provides a .read() interface (E.G open('path_to_iocs.txt, 'r')
contents are line delim
:return: A request.Response instance containing the search results containing at least one matching IOC
"""
text = ioc_file.read()
delim = '\n'
if '\r\n' in text[0:2048]:
delim = '\r\n'
elif '\r' in text[0:2048]:
delim = '\r'
elif ',' in text[0:2048]:
delim = ','
elif '\t' in text[0:2048]:
delim = '\t'
text_delimd = text.split(delim)
search_str = ''
for i, ioc in enumerate(text_delimd[0: -2]):
search_str += '"{}" OR '.format(ioc.strip())
if i > 100:
print('Warning searching only the first 100 IOC terms of {}.'.format(len(text_delimd)), file=stderr)
break
search_str += '"{}"'.format(text_delimd[-1].strip())
response = super().search(search_str)
return response

avg_line_length: 39.366197 | max_line_length: 120 | alphanum_fraction: 0.594633
counts/scores: classes 2,651/0.948479 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 966/0.345617

hexsha: a133fa0afcdcf42b74dd45b66f95e50ddbf7734f | size: 41 | ext: py | lang: Python
max_stars_repo: Idein/actfw-core @ 44c979bbe5d32d068eed20b7d565a6de2fb9acd3 | path: actfw_core/v4l2/__init__.py | licenses: ["MIT"] | stars: 2 (2021-03-15T11:44:37.000Z to 2021-05-12T09:58:35.000Z)
max_issues_repo: Idein/actfw-core @ 44c979bbe5d32d068eed20b7d565a6de2fb9acd3 | path: actfw_core/v4l2/__init__.py | licenses: ["MIT"] | issues: 28 (2020-12-24T02:53:37.000Z to 2022-03-14T09:02:28.000Z)
max_forks_repo: Idein/actfw-core @ 44c979bbe5d32d068eed20b7d565a6de2fb9acd3 | path: actfw_core/v4l2/__init__.py | licenses: ["MIT"] | forks: null
content:
from . import types, video # noqa: F401

avg_line_length: 20.5 | max_line_length: 40 | alphanum_fraction: 0.682927
counts/scores: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 12/0.292683

hexsha: a13428de836fe2ca966877503cf126c867ad3cd6 | size: 531 | ext: py | lang: Python
max_stars_repo: xmaruto/mcord @ 3678a3d10c3703c2b73f396c293faebf0c82a4f4 | path: xos/synchronizers/openstack/model_policies/model_policy_Sliver.py | licenses: ["Apache-2.0"] | stars: null
max_issues_repo: xmaruto/mcord @ 3678a3d10c3703c2b73f396c293faebf0c82a4f4 | path: xos/synchronizers/openstack/model_policies/model_policy_Sliver.py | licenses: ["Apache-2.0"] | issues: null
max_forks_repo: xmaruto/mcord @ 3678a3d10c3703c2b73f396c293faebf0c82a4f4 | path: xos/synchronizers/openstack/model_policies/model_policy_Sliver.py | licenses: ["Apache-2.0"] | forks: null
content:
def handle(instance):
from core.models import Controller, ControllerSlice, ControllerNetwork, NetworkSlice
networks = [ns.network for ns in NetworkSlice.objects.filter(slice=instance.slice)]
controller_networks = ControllerNetwork.objects.filter(network__in=networks,
controller=instance.node.site_deployment.controller)
for cn in controller_networks:
if (cn.lazy_blocked):
cn.lazy_blocked=False
cn.backend_register = '{}'
cn.save()

avg_line_length: 37.928571 | max_line_length: 116 | alphanum_fraction: 0.6742
counts/scores: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 4/0.007533

hexsha: a1357146c1bfe43fcbbabe34684a165daba3ef28 | size: 4,987 | ext: py | lang: Python
max_stars_repo: tejuafonja/SDGym @ 7c20c588a4c9f5940885467406e73274a5b01a8e | path: tests/unit/test_s3.py | licenses: ["MIT"] | stars: 19 (2019-05-23T14:27:02.000Z to 2019-12-08T16:04:20.000Z)
max_issues_repo: tejuafonja/SDGym @ 7c20c588a4c9f5940885467406e73274a5b01a8e | path: tests/unit/test_s3.py | licenses: ["MIT"] | issues: 11 (2019-05-30T21:29:27.000Z to 2019-12-10T16:49:28.000Z)
max_forks_repo: tejuafonja/SDGym @ 7c20c588a4c9f5940885467406e73274a5b01a8e | path: tests/unit/test_s3.py | licenses: ["MIT"] | forks: 11 (2019-05-23T14:27:06.000Z to 2020-01-02T14:29:00.000Z)
content:
from unittest.mock import Mock, patch
import pandas as pd
from sdgym.s3 import is_s3_path, parse_s3_path, write_csv, write_file
def test_is_s3_path_with_local_dir():
"""Test the ``sdgym.s3.is_s3_path`` function with a local directory.
If the path is not an s3 path, it should return ``False``.
Input:
- path to a local directory
Output:
- False
"""
# setup
path = 'path/to/local/dir'
# run
result = is_s3_path(path)
# asserts
assert not result
def test_is_s3_path_with_s3_bucket():
"""Test the ``sdgym.s3.is_s3_path`` function with an s3 directory.
If the path is an s3 path, it should return ``True``.
Input:
- path to an s3 directory
Output:
- True
"""
# setup
path = 's3://my-bucket/my/path'
# run
result = is_s3_path(path)
# asserts
assert result
def test_parse_s3_path_bucket_only():
"""Test the ``sdgym.s3.parse_s3_path`` function with an s3 path.
If the s3 path contains only the bucket name, the returned tuple
should be ``(bucket_name, '')``.
Input:
- path to s3 bucket
Output:
- ('my-bucket', '')
"""
# setup
expected_bucket_name = 'my-bucket'
expected_key_prefix = ''
path = f's3://{expected_bucket_name}/{expected_key_prefix}'
# run
bucket_name, key_prefix = parse_s3_path(path)
# asserts
assert bucket_name == expected_bucket_name
assert key_prefix == expected_key_prefix
def test_parse_s3_path_bucket_and_dir_path():
"""Test the `sdgym.s3.parse_s3_path`` function with an s3 path.
If the s3 path contains the bucket and a sub directory, the returned
tuple should be ``(bucket_name, subdirectory)``.
Input:
- path to s3 directory
Output:
- ('my-bucket', 'path/to/dir')
"""
# setup
expected_bucket_name = 'my-bucket'
expected_key_prefix = 'path/to/dir'
path = f's3://{expected_bucket_name}/{expected_key_prefix}'
# run
bucket_name, key_prefix = parse_s3_path(path)
# asserts
assert bucket_name == expected_bucket_name
assert key_prefix == expected_key_prefix
def test_write_file(tmpdir):
"""Test the `sdgym.s3.write_file`` function with a local path.
If the path is a local path, a file with the correct
contents should be created at the specified path.
Input:
- contents of the local file
- path to the local file
- aws_key is None
- aws_secret is None
Output:
- None
Side effects:
- file creation at the specified path with the given contents
"""
# setup
content_str = 'test_content'
path = f'{tmpdir}/test.txt'
# run
write_file(content_str.encode('utf-8'), path, None, None)
# asserts
with open(path, 'r') as f:
assert f.read() == content_str
@patch('sdgym.s3.boto3')
def test_write_file_s3(boto3_mock):
"""Test the `sdgym.s3.write_file`` function with an s3 path.
If the path is an s3 path, a file with the given contents
should be created at the specified s3 path.
Input:
- contents of the s3 file
- path to the s3 file location
- aws_key for aws authentication
- aws_secret for aws authentication
Output:
- None
Side effects:
- s3 client creation with aws credentials (aws_key, aws_secret)
- s3 method call to create a file in the given bucket with the
given contents
"""
# setup
content_str = 'test_content'
bucket_name = 'my-bucket'
key = 'test.txt'
path = f's3://{bucket_name}/{key}'
aws_key = 'my-key'
aws_secret = 'my-secret'
s3_mock = Mock()
boto3_mock.client.return_value = s3_mock
# run
write_file(content_str.encode('utf-8'), path, aws_key, aws_secret)
# asserts
boto3_mock.client.assert_called_once_with(
's3',
aws_access_key_id=aws_key,
aws_secret_access_key=aws_secret
)
s3_mock.put_object.assert_called_once_with(
Bucket=bucket_name,
Key=key,
Body=content_str.encode('utf-8'),
ContentEncoding='',
)
@patch('sdgym.s3.write_file')
def test_write_csv(write_file_mock):
"""Test the ``sdgym.s3.write_csv`` function.
If ``write_csv`` is called with a DataFrame,
``write_file`` should be called with the expected DataFrame
contents.
Input:
- data to be written to the csv file
- path of the desired csv file
- aws_key is None
- aws_secret is None
Output:
- None
Side effects:
- call to write_file with the correct contents and path
"""
# setup
data = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
path = 'tmp/path'
# run
write_csv(data, path, None, None)
# asserts
input_data = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
expected_content = input_data.to_csv(index=False).encode('utf-8')
write_file_mock.assert_called_once_with(
expected_content,
path,
None,
None
)

avg_line_length: 23.195349 | max_line_length: 72 | alphanum_fraction: 0.645278
counts/scores: classes 0/0 | generators 0/0 | decorators 2,157/0.432525 | async_functions 0/0 | documentation 2,861/0.573692

hexsha: a1360dd0640d6fe332d03889c6a40e96f3ddedfb | size: 3,227 | ext: py | lang: Python
max_stars_repo: neerajvkn/vet_care @ 14914b22e7a83265d736f9f9dc5186271ae62d66 | path: vet_care/scripts/generate_from_history.py | licenses: ["MIT"] | stars: 2 (2020-11-23T11:14:32.000Z to 2021-02-03T06:40:33.000Z)
max_issues_repo: neerajvkn/vet_care @ 14914b22e7a83265d736f9f9dc5186271ae62d66 | path: vet_care/scripts/generate_from_history.py | licenses: ["MIT"] | issues: null
max_forks_repo: neerajvkn/vet_care @ 14914b22e7a83265d736f9f9dc5186271ae62d66 | path: vet_care/scripts/generate_from_history.py | licenses: ["MIT"] | forks: 7 (2019-11-16T14:36:33.000Z to 2021-08-25T07:54:51.000Z)
content:
import csv
import datetime
import frappe
# bench execute vet_care.scripts.generate_from_history.execute --args "['./data/important_data.csv']"
def execute(filename):
patient_activities = []
not_created = []
with open(filename, 'r') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
timestamp = int(row.get('Date'))
cirrusvet_id = row.get('AnimalID')
description = row.get('Notes')
date = datetime.datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d')
patient = _get_patient_via_cirrusvet_id(cirrusvet_id)
if patient:
patient_activity = _pick_or_new_patient_activity(patient_activities, patient, date)
patient_activity.append('items', {'description': description})
patient_activities.append(patient_activity)
else:
not_created.append(cirrusvet_id)
created = 0
total = len(patient_activities)
for patient_activity in patient_activities:
patient_activity.save()
created = created + 1
print(f'Created ${created}/${total} patient activities')
print(not_created)
# bench execute vet_care.scripts.generate_from_history.execute --args "['./data/important_data.csv', ['1010', '2920']]"
def execute_with_filter(filename, missing_animals):
patient_activities = []
not_created = []
with open(filename, 'r') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
timestamp = int(row.get('Date'))
cirrusvet_id = row.get('AnimalID')
description = row.get('Notes')
if cirrusvet_id in missing_animals:
date = datetime.datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d')
patient = _get_patient_via_cirrusvet_id(cirrusvet_id)
if patient:
patient_activity = _pick_or_new_patient_activity(patient_activities, patient, date)
patient_activity.append('items', {'description': description})
patient_activities.append(patient_activity)
else:
not_created.append(cirrusvet_id)
created = 0
total = len(patient_activities)
for patient_activity in patient_activities:
patient_activity.save()
created = created + 1
print(f'Created ${created}/${total} patient activities')
print(not_created)
def _pick_or_new_patient_activity(patient_activities, patient, date):
def filter_activity(activity):
return activity.patient == patient and activity.posting_date == date
existing = list(filter(filter_activity, patient_activities))
if existing:
return existing[0]
return frappe.get_doc({
'doctype': 'Patient Activity',
'patient': patient,
'posting_date': date
})
def _get_patient_via_cirrusvet_id(cirrusvet_id):
patient_data = frappe.db.sql(
"""SELECT name FROM `tabPatient` WHERE vc_cirrusvet=%s""",
cirrusvet_id,
as_dict=True
)
if patient_data:
return patient_data[0].get('name')
return None

avg_line_length: 37.964706 | max_line_length: 119 | alphanum_fraction: 0.634645
counts/scores: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 543/0.168268

hexsha: a1362909e583305f43ba83685760d08284ce8f25 | size: 594 | ext: py | lang: Python
max_stars_repo: hubaimaster/aws-interface @ 162dd056546d58b6eb29afcae1c3c2d78e4309b2 | path: aws_interface/cloud/auth/delete_sessions.py | licenses: ["Apache-2.0"] | stars: 53 (2018-10-02T05:58:54.000Z to 2020-09-15T08:58:26.000Z)
max_issues_repo: hubaimaster/aws-interface @ 162dd056546d58b6eb29afcae1c3c2d78e4309b2 | path: aws_interface/cloud/auth/delete_sessions.py | licenses: ["Apache-2.0"] | issues: 52 (2018-09-26T05:16:09.000Z to 2022-03-11T23:51:14.000Z)
max_forks_repo: hubaimaster/aws-interface @ 162dd056546d58b6eb29afcae1c3c2d78e4309b2 | path: aws_interface/cloud/auth/delete_sessions.py | licenses: ["Apache-2.0"] | forks: 10 (2019-03-11T16:35:14.000Z to 2019-10-23T08:03:54.000Z)
content:
from cloud.permission import Permission, NeedPermission
# Define the input output format of the function.
# This information is used when creating the *SDK*.
info = {
'input_format': {
'session_ids': ['str'],
},
'output_format': {
'success': 'bool'
},
'description': 'Delete sessions'
}
@NeedPermission(Permission.Run.Auth.delete_sessions)
def do(data, resource):
body = {}
params = data['params']
session_ids = params.get('session_ids')
success = resource.db_delete_item_batch(session_ids)
body['success'] = success
return body

avg_line_length: 22.846154 | max_line_length: 56 | alphanum_fraction: 0.6633
counts/scores: classes 0/0 | generators 0/0 | decorators 266/0.447811 | async_functions 0/0 | documentation 222/0.373737

hexsha: a137958aa6262c5d4af45fea5f852cfe4e0fb7c7 | size: 5,509 | ext: py | lang: Python
max_stars_repo: PPeanutButter/MediaServer @ a6a0b3f424ca3fc4ea73d78db380ec3cc882bfd2 | path: plugin/autoWHUT.py | licenses: ["MIT"] | stars: 2 (2021-09-23T15:09:25.000Z to 2022-01-16T01:04:07.000Z)
max_issues_repo: PPeanutButter/MediaServer @ a6a0b3f424ca3fc4ea73d78db380ec3cc882bfd2 | path: plugin/autoWHUT.py | licenses: ["MIT"] | issues: 1 (2022-02-23T04:00:16.000Z to 2022-02-23T04:10:06.000Z)
max_forks_repo: PPeanutButter/MediaServer @ a6a0b3f424ca3fc4ea73d78db380ec3cc882bfd2 | path: plugin/autoWHUT.py | licenses: ["MIT"] | forks: 1 (2021-09-23T15:09:26.000Z to 2021-09-23T15:09:26.000Z)
content:
# coding=<utf-8>
import requests
import re
import socket
import base64
import psutil
import pywifi
from pywifi import const
import subprocess
import os
import time
def get_host_ip():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
finally:
s.close()
return ip
def encrypt(password):
password = base64.b64encode(password.encode('utf-8'))
return password.decode('utf-8')
def getNetIfAddr():
dic = psutil.net_if_addrs()
mac = ''
for adapter in dic:
print(adapter)
if adapter != 'wls1':
continue
snicList = dic[adapter]
mac = ''
ipv4 = ''
ipv6 = ''
for snic in snicList:
if snic.family.name in {'AF_LINK', 'AF_PACKET'}:
mac = snic.address
elif snic.family.name == 'AF_INET':
ipv4 = snic.address
elif snic.family.name == 'AF_INET6':
ipv6 = snic.address
print('%s, %s, %s, %s' % (adapter, mac, ipv4, ipv6))
return mac
def get_mac_address():
return getNetIfAddr().lower()
class AutoWHUT:
def get_param(self, username: str, password: str, cookies: str):
header = {
'Origin': 'http://172.30.16.34',
'Referer': 'http://172.30.16.34/srun_portal_pc.php?ac_id=1&cmd=login&switchip=172.30.14.104&mac=84:ef:18'
':91:e5:5b&ip=' + get_host_ip() +
'&essid=WHUT-WLAN6&apname=JB-JH-J4-0901-E&apgroup=WHUT-WLAN-Dual&url=http://www.gstatic.com'
'/generate_204',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362',
'Accept': '*/*',
'Accept-Language': 'zh-CN',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With': 'XMLHttpRequest',
'Accept-Encoding': 'gzip, deflate',
'Host': '172.30.16.34',
'Connection': 'Keep-Alive',
'Pragma': 'no-cache',
'Cookie': cookies
}
data = 'action=login&username=&password=&ac_id=64&user_ip=&nas_ip=&user_mac=&save_me=1&ajax=1'
data = re.sub("username=.*?&", "username=" + username + '&', data)
data = re.sub("password=.*?&", "password={B}" + encrypt(password) + '&', data)
data = re.sub("user_ip=.*?&", "user_ip=" + get_host_ip() + '&', data)
data = re.sub("user_mac=.*?&", "user_mac=" + get_mac_address() + '&', data)
return header, data
def sign_in(self):
try:
username = ''
password = ''
cookies = 'login=bQ0pOyR6IXU7PJaQQqRAcBPxGAvxAcrvEe0UJsVvdkTHxMBomR2HUS3oxriFtDiSt7XrDS' \
'%2BmurcIcGKHmgRZbb8fUGzw%2FUGvJFIjk0nAVIEwPGYVt7br7b5u1t4sMp' \
'%2BAfr4VZ5VcKPDr8eaBrOt2YRrH9Bdy6bogpY89dPj' \
'%2BzwrVuc4xmFUoWD8peECGHshewZRrIVvucbx652F2TRxF3VtHNL9H0fs5GjjmJjQMtecd; ' \
'NSC_tsvo_4l_TH=ffffffffaf160e3a45525d5f4f58455e445a4a423660; ' \
'login=bQ0pOyR6IXU7PJaQQqRAcBPxGAvxAcrvEe0UJsVvdkTHxMBomR2HUS3oxriFtDiSt7XrDS' \
'%2BmurcIcGKHmgRZbb8fUGzw%2FUGvJFIjk0nAVIEwPGYVt7br7b5u1t4sMp' \
'%2BAfr4VZ5VcKPDr8eaBrOt2YRrH9Bdy6bogpY89dPj' \
'%2BzwrVuc4xmFUoWD8peECGHshewZRrIVvucbx652F2TRxF3VtHNL9H0fs5GjjmJjQMtecd '
header, data = self.get_param(username, password, cookies)
print(data)
result = requests.post('http://172.30.16.34/include/auth_action.php', headers=header, data=data)
print(result.text, '\n{}\n'.format('*' * 79), result.encoding)
except BaseException as arg:
print(arg)
class WifiManager:
def __init__(self):
self.wifi = pywifi.PyWiFi()
self.ifaces = self.wifi.interfaces()[1]
self.autoWHUT = AutoWHUT()
self.sleepTime = 1
def is_connected_wifi(self):
return self.ifaces.status() == const.IFACE_CONNECTED
def get_current_wifi(self):
cmd = 'netsh wlan show interfaces'
p = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
ret = p.stdout.read()
ret = ret.decode('gbk')
index = ret.find("SSID")
if index > 0:
return ret[index:].split(':')[1].split('\r\n')[0].strip()
else:
return None
def check_net(self):
try:
result = requests.post('http://www.baidu.com')
return result.text.find("?cmd=redirect") == -1
except Exception:
return False
def auto_check(self):
if self.is_connected_wifi():
if not self.check_net():
self.autoWHUT.sign_in()
print("2s")
self.sleepTime = 2
else:
self.sleepTime = 60
print("60s")
else:
self.sleepTime = 4
print("no wifi")
def start(self):
while True:
self.auto_check()
time.sleep(self.sleepTime)
if __name__ == '__main__':
wifiManager = WifiManager()
wifiManager.start()

avg_line_length: 34.43125 | max_line_length: 117 | alphanum_fraction: 0.554547
counts/scores: classes 4,252/0.771828 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 1,703/0.309131

hexsha: a137f706cc16a7ddd946b13b277853a20e68de35 | size: 3,096 | ext: py | lang: Python
max_stars_repo: ftalbrecht/active_subspaces @ 64817a19db250e4b01bcd51055ad0f7d2a5665b8 | path: active_subspaces/gradients.py | licenses: ["MIT"] | stars: 51 (2015-04-24T13:52:00.000Z to 2022-02-16T13:30:39.000Z)
max_issues_repo: JIMMY-KSU/active_subspaces @ 64817a19db250e4b01bcd51055ad0f7d2a5665b8 | path: active_subspaces/gradients.py | licenses: ["MIT"] | issues: 10 (2015-02-03T01:00:09.000Z to 2022-03-06T07:48:46.000Z)
max_forks_repo: JIMMY-KSU/active_subspaces @ 64817a19db250e4b01bcd51055ad0f7d2a5665b8 | path: active_subspaces/gradients.py | licenses: ["MIT"] | forks: 44 (2015-01-12T06:05:59.000Z to 2022-02-02T18:53:34.000Z)
content:
"""Utilities for approximating gradients."""
import numpy as np
from utils.misc import process_inputs
from utils.simrunners import SimulationRunner
def local_linear_gradients(X, f, p=None, weights=None):
"""Estimate a collection of gradients from input/output pairs.
Given a set of input/output pairs, choose subsets of neighboring points and
build a local linear model for each subset. The gradients of these local
linear models comprise estimates of sampled gradients.
Parameters
----------
X : ndarray
M-by-m matrix that contains the m-dimensional inputs
f : ndarray
M-by-1 matrix that contains scalar outputs
p : int, optional
how many nearest neighbors to use when constructing the local linear
model (default 1)
weights : ndarray, optional
M-by-1 matrix that contains the weights for each observation (default
None)
Returns
-------
df : ndarray
M-by-m matrix that contains estimated partial derivatives approximated
by the local linear models
Notes
-----
If `p` is not specified, the default value is floor(1.7*m).
"""
X, M, m = process_inputs(X)
if M<=m: raise Exception('Not enough samples for local linear models.')
if p is None:
p = int(np.minimum(np.floor(1.7*m), M))
elif not isinstance(p, int):
raise TypeError('p must be an integer.')
if p < m+1 or p > M:
raise Exception('p must be between m+1 and M')
if weights is None:
weights = np.ones((M, 1)) / M
MM = np.minimum(int(np.ceil(10*m*np.log(m))), M-1)
df = np.zeros((MM, m))
for i in range(MM):
ii = np.random.randint(M)
x = X[ii,:]
D2 = np.sum((X - x)**2, axis=1)
ind = np.argsort(D2)
ind = ind[D2 != 0]
A = np.hstack((np.ones((p,1)), X[ind[:p],:])) * np.sqrt(weights[ii])
b = f[ind[:p]] * np.sqrt(weights[ii])
u = np.linalg.lstsq(A, b)[0]
df[i,:] = u[1:].T
return df
def finite_difference_gradients(X, fun, h=1e-6):
"""Compute finite difference gradients with a given interface.
Parameters
----------
X : ndarray
M-by-m matrix that contains the points to estimate the gradients with
finite differences
fun : function
function that returns the simulation's quantity of interest given inputs
h : float, optional
the finite difference step size (default 1e-6)
Returns
-------
df : ndarray
M-by-m matrix that contains estimated partial derivatives approximated
by finite differences
"""
X, M, m = process_inputs(X)
# points to run simulations including the perturbed inputs
XX = np.kron(np.ones((m+1, 1)),X) + \
h*np.kron(np.vstack((np.zeros((1, m)), np.eye(m))), np.ones((M, 1)))
# run the simulation
if isinstance(fun, SimulationRunner):
F = fun.run(XX)
else:
F = SimulationRunner(fun).run(XX)
df = (F[M:].reshape((m, M)).transpose() - F[:M]) / h
return df.reshape((M,m))

avg_line_length: 31.591837 | max_line_length: 80 | alphanum_fraction: 0.609173
counts/scores: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 1,737/0.561047

hexsha: a1385e4aefd67a6e8363bc3fce53670aa1ea871f | size: 6,861 | ext: py | lang: Python
max_stars_repo: sabuj7177/CovidProject @ b4b7bcfa5ace165520507f489dc74da7b695e2f0 | path: covidaid/tools/read_data.py | licenses: ["Apache-2.0"] | stars: null
max_issues_repo: sabuj7177/CovidProject @ b4b7bcfa5ace165520507f489dc74da7b695e2f0 | path: covidaid/tools/read_data.py | licenses: ["Apache-2.0"] | issues: null
max_forks_repo: sabuj7177/CovidProject @ b4b7bcfa5ace165520507f489dc74da7b695e2f0 | path: covidaid/tools/read_data.py | licenses: ["Apache-2.0"] | forks: null
content:
# encoding: utf-8
"""
Read images and corresponding labels.
"""
import torch
from torch.utils.data import Dataset
from PIL import Image
import os
import random
class ChestXrayDataSetTest(Dataset):
def __init__(self, image_list_file, transform=None, combine_pneumonia=False):
"""
Create the Data Loader.
        Since class 3 (Covid) has limited covidaid_data, dataset size will be adjusted accordingly at train time.
Code is written in generic form to assume last class as the rare class
Args:
image_list_file: path to the file containing images
with corresponding labels.
transform: optional transform to be applied on a sample.
            combine_pneumonia: True for combining Bacterial and Viral Pneumonias into one class
"""
self.NUM_CLASSES = 3 if combine_pneumonia else 4
# Set of images for each class
image_names = []
with open(image_list_file, "r") as f:
for line in f:
items = line.split()
image_name = items[0]
label = int(items[1])
image_names.append((image_name, label))
self.image_names = image_names
self.transform = transform
def __getitem__(self, index):
"""
Args:
index: the index of item
Returns:
image and its labels
"""
def __one_hot_encode(l):
v = [0] * self.NUM_CLASSES
v[l] = 1
return v
image_name, label = self.image_names[index]
label = __one_hot_encode(label)
image = Image.open(image_name).convert('RGB')
if self.transform is not None:
image = self.transform(image)
return image, torch.FloatTensor(label)
def __len__(self):
return len(self.image_names)
class ChestXrayDataSet(Dataset):
def __init__(self, image_list_file, transform=None, combine_pneumonia=False):
"""
Create the Data Loader.
        Since class 3 (Covid) has limited covidaid_data, dataset size will be adjusted accordingly at train time.
Code is written in generic form to assume last class as the rare class
Args:
image_list_file: path to the file containing images
with corresponding labels.
transform: optional transform to be applied on a sample.
            combine_pneumonia: True for combining Bacterial and Viral Pneumonias into one class
"""
self.NUM_CLASSES = 3 if combine_pneumonia else 4
# Set of images for each class
image_names = [[] for _ in range(self.NUM_CLASSES)]
with open(image_list_file, "r") as f:
for line in f:
items = line.split()
image_name = items[0]
label = int(items[1])
image_names[label].append(image_name)
self.image_names = image_names
self.transform = transform
label_dist = [len(cnames) for cnames in image_names]
# Number of images of each class desired
self.num_covid = int(label_dist[-1])
if combine_pneumonia:
covid_factor = 7.0
self.num_normal = int(self.num_covid * covid_factor)
self.num_pneumonia = int(self.num_covid * covid_factor)
self.total = self.num_covid + self.num_pneumonia + self.num_normal
self.loss_weight_minus = torch.FloatTensor([self.num_normal, self.num_pneumonia, self.num_covid]).unsqueeze(0).cuda() / self.total
self.loss_weight_plus = 1.0 - self.loss_weight_minus
else:
covid_factor = 5.0
self.num_normal = int(self.num_covid * covid_factor)
self.num_viral = int(self.num_covid * covid_factor)
self.num_bact = int(self.num_covid * covid_factor)
self.total = self.num_covid + self.num_viral + self.num_bact + self.num_normal
self.loss_weight_minus = torch.FloatTensor([self.num_normal, self.num_bact, self.num_viral, self.num_covid]).unsqueeze(0).cuda() / self.total
self.loss_weight_plus = 1.0 - self.loss_weight_minus
# print (self.loss_weight_plus, self.loss_weight_minus)
if combine_pneumonia:
self.partitions = [self.num_covid,
self.num_covid + self.num_normal,
self.num_covid + self.num_normal + self.num_pneumonia]
else:
self.partitions = [self.num_covid,
self.num_covid + self.num_normal,
self.num_covid + self.num_normal + self.num_bact,
self.num_covid + self.num_normal + self.num_bact + self.num_viral]
assert len(self.partitions) == self.NUM_CLASSES
def __getitem__(self, index):
"""
Args:
index: the index of item
Returns:
image and its labels
"""
def __one_hot_encode(l):
v = [0] * self.NUM_CLASSES
v[l] = 1
return v
image_name = None
# print (index, self.partitions, len(self), sum([len(cnames) for cnames in self.image_names]))
if index < self.partitions[0]:
# Return a covid image
data_idx = index
image_name = self.image_names[self.NUM_CLASSES - 1][data_idx]
label = __one_hot_encode(self.NUM_CLASSES - 1)
else:
# Return non-covid image
for l in range(1, self.NUM_CLASSES):
if index < self.partitions[l]:
class_idx = l - 1
label = __one_hot_encode(class_idx)
# Return a random image
image_name = random.choice(self.image_names[class_idx])
break
assert image_name is not None
image = Image.open(image_name).convert('RGB')
if self.transform is not None:
image = self.transform(image)
return image, torch.FloatTensor(label)
def __len__(self):
return self.partitions[-1]
def loss(self, output, target):
"""
Binary weighted cross-entropy loss for each class
"""
weight_plus = torch.autograd.Variable(self.loss_weight_plus.repeat(1, target.size(0)).view(-1, self.loss_weight_plus.size(1)).cuda())
weight_neg = torch.autograd.Variable(self.loss_weight_minus.repeat(1, target.size(0)).view(-1, self.loss_weight_minus.size(1)).cuda())
loss = output
pmask = (target >= 0.5).data
nmask = (target < 0.5).data
epsilon = 1e-15
loss[pmask] = (loss[pmask] + epsilon).log() * weight_plus[pmask]  # positive labels are scaled by the positive-class weights
loss[nmask] = (1 - loss[nmask] + epsilon).log() * weight_neg[nmask]  # negative labels are scaled by the negative-class weights
loss = -loss.sum()
return loss
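# --- Usage sketch (illustrative; not part of the original file). The enclosing
# torch Dataset class is referred to here as CovidDataset; its real name is
# defined above this excerpt, and 'train_list.txt' is a hypothetical
# "<image_path> <label>" file. Instantiation requires CUDA, because the loss
# weights are placed on the GPU in __init__.
#
# import torchvision.transforms as transforms
# from torch.utils.data import DataLoader
#
# dataset = CovidDataset(image_list_file='train_list.txt',
#                        transform=transforms.Compose([
#                            transforms.Resize((224, 224)),
#                            transforms.ToTensor(),
#                        ]),
#                        combine_pneumonia=True)
# loader = DataLoader(dataset, batch_size=16, shuffle=True)
# images, targets = next(iter(loader))    # targets are one-hot FloatTensors
# # dataset.loss(model(images), targets) applies the class-balanced BCE defined above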
| 36.887097
| 153
| 0.594957
| 6,694
| 0.97566
| 0
| 0
| 0
| 0
| 0
| 0
| 1,737
| 0.25317
|
a13861d4cfee522305c9e242f88c3b1859a889ba
| 7,996
|
py
|
Python
|
helper/evaluator.py
|
manipopopo/TC-ResNet
|
7dff6f4f865f1e63ff705d8e0267cf3b9a0d70a3
|
[
"Apache-2.0"
] | 185
|
2019-04-06T12:54:25.000Z
|
2022-03-24T12:06:59.000Z
|
helper/evaluator.py
|
manipopopo/TC-ResNet
|
7dff6f4f865f1e63ff705d8e0267cf3b9a0d70a3
|
[
"Apache-2.0"
] | 23
|
2019-05-15T09:19:01.000Z
|
2022-02-10T00:07:03.000Z
|
helper/evaluator.py
|
manipopopo/TC-ResNet
|
7dff6f4f865f1e63ff705d8e0267cf3b9a0d70a3
|
[
"Apache-2.0"
] | 61
|
2019-04-06T12:33:46.000Z
|
2022-03-01T06:41:53.000Z
|
import csv
import sys
from pathlib import Path
from abc import abstractmethod
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import common.tf_utils as tf_utils
import metrics.manager as metric_manager
from common.model_loader import Ckpt
from common.utils import format_text
from common.utils import get_logger
from helper.base import AudioBase
from metrics.summaries import BaseSummaries
from metrics.summaries import Summaries
class Evaluator(object):
def __init__(self, model, session, args, dataset, dataset_name, name):
self.log = get_logger(name)
self.model = model
self.session = session
self.args = args
self.dataset = dataset
self.dataset_name = dataset_name
if Path(self.args.checkpoint_path).is_dir():
latest_checkpoint = tf.train.latest_checkpoint(self.args.checkpoint_path)
if latest_checkpoint is not None:
self.args.checkpoint_path = latest_checkpoint
self.log.info(f"Get latest checkpoint and update to it: {self.args.checkpoint_path}")
self.watch_path = self._build_watch_path()
self.session.run(tf.global_variables_initializer())
self.session.run(tf.local_variables_initializer())
self.ckpt_loader = Ckpt(
session=session,
include_scopes=args.checkpoint_include_scopes,
exclude_scopes=args.checkpoint_exclude_scopes,
ignore_missing_vars=args.ignore_missing_vars,
use_ema=self.args.use_ema,
ema_decay=self.args.ema_decay,
)
@abstractmethod
def setup_metric_manager(self):
raise NotImplementedError
@abstractmethod
def setup_metric_ops(self):
raise NotImplementedError
@abstractmethod
def build_non_tensor_data_from_eval_dict(self, eval_dict, **kwargs):
raise NotImplementedError
@abstractmethod
def setup_dataset_iterator(self):
raise NotImplementedError
def _build_watch_path(self):
if Path(self.args.checkpoint_path).is_dir():
return Path(self.args.checkpoint_path)
else:
return Path(self.args.checkpoint_path).parent
def build_evaluation_step(self, checkpoint_path):
if "-" in checkpoint_path and checkpoint_path.split("-")[-1].isdigit():
return int(checkpoint_path.split("-")[-1])
else:
return 0
def build_checkpoint_paths(self, checkpoint_path):
checkpoint_glob = Path(checkpoint_path + "*")
checkpoint_path = Path(checkpoint_path)
return checkpoint_glob, checkpoint_path
def build_miscellaneous_path(self, name):
target_dir = self.watch_path / "miscellaneous" / self.dataset_name / name
if not target_dir.exists():
target_dir.mkdir(parents=True)
return target_dir
def setup_best_keeper(self):
metric_with_modes = self.metric_manager.get_best_keep_metric_with_modes()
self.log.debug(metric_with_modes)
self.best_keeper = tf_utils.BestKeeper(
metric_with_modes,
self.dataset_name,
self.watch_path,
self.log,
)
def evaluate_once(self, checkpoint_path):
self.log.info("Evaluation started")
self.setup_dataset_iterator()
self.ckpt_loader.load(checkpoint_path)
step = self.build_evaluation_step(checkpoint_path)
checkpoint_glob, checkpoint_path = self.build_checkpoint_paths(checkpoint_path)
self.session.run(tf.local_variables_initializer())
eval_metric_dict = self.run_evaluation(step, is_training=False)
best_keep_metric_dict = self.metric_manager.filter_best_keep_metric(eval_metric_dict)
is_keep, metrics_keep = self.best_keeper.monitor(self.dataset_name, best_keep_metric_dict)
if self.args.save_best_keeper:
meta_info = {
"step": step,
"model_size": self.model.total_params,
}
self.best_keeper.remove_old_best(self.dataset_name, metrics_keep)
self.best_keeper.save_best(self.dataset_name, metrics_keep, checkpoint_glob)
self.best_keeper.remove_temp_dir()
self.best_keeper.save_scores(self.dataset_name, metrics_keep, best_keep_metric_dict, meta_info)
self.metric_manager.write_evaluation_summaries(step=step,
collection_keys=[BaseSummaries.KEY_TYPES.DEFAULT])
self.metric_manager.log_metrics(step=step)
self.log.info("Evaluation finished")
if step >= self.args.max_step_from_restore:
self.log.info("Evaluation stopped")
sys.exit()
def build_train_directory(self):
if Path(self.args.checkpoint_path).is_dir():
return str(self.args.checkpoint_path)
else:
return str(Path(self.args.checkpoint_path).parent)
@staticmethod
def add_arguments(parser):
g = parser.add_argument_group("(Evaluator) arguments")
g.add_argument("--valid_type", default="loop", type=str, choices=["loop", "once"])
g.add_argument("--max_outputs", default=5, type=int)
g.add_argument("--maximum_num_labels_for_metric", default=10, type=int,
help="Maximum number of labels for using class-specific metrics(e.g. precision/recall/f1score)")
g.add_argument("--no-save_best_keeper", dest="save_best_keeper", action="store_false")
g.add_argument("--save_best_keeper", dest="save_best_keeper", action="store_true")
g.set_defaults(save_best_keeper=True)
g.add_argument("--no-flatten_output", dest="flatten_output", action="store_false")
g.add_argument("--flatten_output", dest="flatten_output", action="store_true")
g.set_defaults(flatten_output=False)
g.add_argument("--max_step_from_restore", default=1e20, type=int)
class SingleLabelAudioEvaluator(Evaluator, AudioBase):
def __init__(self, model, session, args, dataset, dataset_name):
super().__init__(model, session, args, dataset, dataset_name, "SingleLabelAudioEvaluator")
self.setup_dataset_related_attr()
self.setup_metric_manager()
self.setup_metric_ops()
self.setup_best_keeper()
def setup_dataset_related_attr(self):
assert len(self.dataset.label_names) == self.args.num_classes
self.use_class_metrics = len(self.dataset.label_names) < self.args.maximum_num_labels_for_metric
def setup_metric_manager(self):
self.metric_manager = metric_manager.AudioMetricManager(
is_training=False,
use_class_metrics=self.use_class_metrics,
exclude_metric_names=self.args.exclude_metric_names,
summary=Summaries(
session=self.session,
train_dir=self.build_train_directory(),
is_training=False,
base_name=self.dataset.dataset_split_name,
max_summary_outputs=self.args.max_summary_outputs,
),
)
def setup_metric_ops(self):
losses = self.build_basic_loss_ops()
self.metric_tf_op = self.metric_manager.build_metric_ops({
"dataset_split_name": self.dataset_name,
"label_names": self.dataset.label_names,
"losses": losses,
"learning_rate": None,
"wavs": self.model.audio_original,
})
def build_non_tensor_data_from_eval_dict(self, eval_dict, **kwargs):
return {
"dataset_split_name": self.dataset.dataset_split_name,
"label_names": self.dataset.label_names,
"predictions_onehot": eval_dict["predictions_onehot"],
"labels_onehot": eval_dict["labels_onehot"],
}
def setup_dataset_iterator(self):
self.dataset.setup_iterator(
self.session,
self.dataset.placeholders,
self.dataset.data,
)
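# --- Usage sketch (illustrative; not part of the original file). `model`,
# `session`, `args` and `dataset` are assumed to be constructed by the
# repository's evaluation entry point; only the calls shown in the class above
# are used here.
#
# evaluator = SingleLabelAudioEvaluator(model, session, args, dataset, dataset_name="valid")
# if args.valid_type == "once":
#     evaluator.evaluate_once(args.checkpoint_path)  # loads the checkpoint and runs one evaluation pass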
| 37.539906
| 119
| 0.674212
| 7,541
| 0.943097
| 0
| 0
| 1,368
| 0.171086
| 0
| 0
| 786
| 0.098299
|
a139c61e93bd3d976aaa5d706da3d269f7d52385
| 7,483
|
py
|
Python
|
src/sync.py
|
neybar/icloud-drive-docker
|
c7e59400c01b304c0f8ed7fd0b3ea2a623623b2e
|
[
"BSD-3-Clause"
] | null | null | null |
src/sync.py
|
neybar/icloud-drive-docker
|
c7e59400c01b304c0f8ed7fd0b3ea2a623623b2e
|
[
"BSD-3-Clause"
] | null | null | null |
src/sync.py
|
neybar/icloud-drive-docker
|
c7e59400c01b304c0f8ed7fd0b3ea2a623623b2e
|
[
"BSD-3-Clause"
] | null | null | null |
__author__ = 'Mandar Patil (mandarons@pm.me)'
import datetime
import os
import re
import time
from pathlib import Path
from shutil import copyfileobj, rmtree
from pyicloud import PyiCloudService, utils, exceptions
from src import config_parser
from src import notify
def wanted_file(filters, file_path, verbose=False):
if not file_path:
return False
if not filters or len(filters) == 0:
return True
for file_extension in filters:
if re.search(f'{file_extension}$', file_path, re.IGNORECASE):
return True
if verbose:
print(f'Skipping the unwanted file {file_path}')
return False
def wanted_folder(filters, root, folder_path, verbose=False):
if not filters or not folder_path or not root or len(filters) == 0:
# Nothing to filter, return True
return True
# Something to filter
folder_path = Path(folder_path)
for folder in filters:
child_path = Path(os.path.join(os.path.abspath(root), folder.removeprefix('/').removesuffix('/')))
if folder_path in child_path.parents or child_path in folder_path.parents or folder_path == child_path:
return True
return False
def wanted_parent_folder(filters, root, folder_path, verbose=False):
if not filters or not folder_path or not root or len(filters) == 0:
return True
folder_path = Path(folder_path)
for folder in filters:
child_path = Path(os.path.join(os.path.abspath(root), folder.removeprefix('/').removesuffix('/')))
if child_path in folder_path.parents or folder_path == child_path:
return True
return False
def process_folder(item, destination_path, filters, root, verbose=False):
if not (item and destination_path and filters and root):
return None
new_directory = os.path.join(destination_path, item.name)
if not wanted_folder(filters=filters, folder_path=new_directory, root=root, verbose=verbose):
if verbose:
print(f'Skipping the unwanted folder {new_directory}...')
return None
os.makedirs(new_directory, exist_ok=True)
return new_directory
def file_exists(item, local_file, verbose=False):
if item and local_file and os.path.isfile(local_file):
local_file_modified_time = int(os.path.getmtime(local_file))
remote_file_modified_time = int(item.date_modified.timestamp())
local_file_size = os.path.getsize(local_file)
remote_file_size = item.size
if local_file_modified_time == remote_file_modified_time and local_file_size == remote_file_size:
if verbose:
print(f'No changes detected. Skipping the file {local_file}')
return True
return False
def download_file(item, local_file, verbose=False):
if not (item and local_file):
return False
if verbose:
print(f'Downloading {local_file} ...')
try:
with item.open(stream=True) as response:
with open(local_file, 'wb') as file_out:
copyfileobj(response.raw, file_out)
item_modified_time = time.mktime(item.date_modified.timetuple())
os.utime(local_file, (item_modified_time, item_modified_time))
except (exceptions.PyiCloudAPIResponseException, FileNotFoundError, Exception) as e:
print(f'Failed to download {local_file}: {str(e)}')
return False
return True
def process_file(item, destination_path, filters, files, verbose=False):
if not (item and destination_path and files is not None):
return False
local_file = os.path.join(destination_path, item.name)
if not wanted_file(filters=filters, file_path=local_file, verbose=verbose):
return False
files.add(local_file)
if file_exists(item=item, local_file=local_file, verbose=verbose):
return False
download_file(item=item, local_file=local_file, verbose=verbose)
return True
def remove_obsolete(destination_path, files, verbose=False):
removed_paths = set()
if not (destination_path and files is not None):
return removed_paths
for path in Path(destination_path).rglob('*'):
local_file = str(path.absolute())
if local_file not in files:
if verbose:
print(f'Removing {local_file}')
if path.is_file():
path.unlink(missing_ok=True)
removed_paths.add(local_file)
elif path.is_dir():
rmtree(local_file)
removed_paths.add(local_file)
return removed_paths
def sync_directory(drive, destination_path, items, root, top=True, filters=None, remove=False, verbose=False):
files = set()
if drive and destination_path and items and root:
for i in items:
item = drive[i]
if item.type == 'folder':
new_folder = process_folder(item=item, destination_path=destination_path,
filters=filters['folders'] if 'folders' in filters else None, root=root,
verbose=verbose)
if not new_folder:
continue
files.add(new_folder)
files.update(sync_directory(drive=item, destination_path=new_folder, items=item.dir(), root=root,
top=False, filters=filters, verbose=verbose))
elif item.type == 'file':
if wanted_parent_folder(filters=filters['folders'], root=root, folder_path=destination_path,
verbose=verbose):
process_file(item=item, destination_path=destination_path,
filters=filters['file_extensions'] if 'file_extensions' in filters else None,
files=files, verbose=verbose)
if top and remove:
remove_obsolete(destination_path=destination_path, files=files, verbose=verbose)
return files
def sync_drive():
last_send = None
while True:
config = config_parser.read_config()
verbose = config_parser.get_verbose(config=config)
username = config_parser.get_username(config=config)
destination_path = config_parser.prepare_destination(config=config)
if username and destination_path:
try:
api = PyiCloudService(apple_id=username, password=utils.get_password_from_keyring(username=username))
if not api.requires_2sa:
sync_directory(drive=api.drive, destination_path=destination_path, root=destination_path,
items=api.drive.dir(), top=True, filters=config['filters'],
remove=config_parser.get_remove_obsolete(config=config), verbose=verbose)
else:
print('Error: 2FA is required. Please log in.')
last_send = notify.send(config, last_send)
except exceptions.PyiCloudNoStoredPasswordAvailableException:
print('password is not stored in keyring. Please save the password in keyring.')
sleep_for = config_parser.get_sync_interval(config=config)
next_sync = (datetime.datetime.now() +
datetime.timedelta(minutes=sleep_for)).strftime('%l:%M%p %Z on %b %d, %Y')
print(f'Resyncing at {next_sync} ...')
if sleep_for < 0:
break
time.sleep(sleep_for)
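# --- Usage sketch (illustrative; not part of the original file). The folder and
# extension names below are hypothetical; only the dictionary keys ('folders',
# 'file_extensions') and the sync_directory() signature are taken from the code
# above. `api` is assumed to be an authenticated PyiCloudService instance.
#
# filters = {
#     'folders': ['Documents', 'Photos/2021'],
#     'file_extensions': ['pdf', 'docx'],
# }
# kept_files = sync_directory(drive=api.drive, destination_path='/icloud', root='/icloud',
#                             items=api.drive.dir(), top=True, filters=filters,
#                             remove=True, verbose=True)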
| 42.276836
| 117
| 0.647735
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 621
| 0.082988
|
a13a98235b9b2f72025d1bf03dbd61547e3c8d9f
| 2,163
|
py
|
Python
|
sphinx-sources/Examples/Interference/MultiSlit.py
|
jccmak/lightpipes
|
1a296fe08bdd97fc9a0e11f92bab25c85f68e57d
|
[
"BSD-3-Clause"
] | null | null | null |
sphinx-sources/Examples/Interference/MultiSlit.py
|
jccmak/lightpipes
|
1a296fe08bdd97fc9a0e11f92bab25c85f68e57d
|
[
"BSD-3-Clause"
] | null | null | null |
sphinx-sources/Examples/Interference/MultiSlit.py
|
jccmak/lightpipes
|
1a296fe08bdd97fc9a0e11f92bab25c85f68e57d
|
[
"BSD-3-Clause"
] | null | null | null |
#! python3
import numpy as np
import matplotlib.pyplot as plt
from LightPipes import *
"""
MultiSlit.py
Demonstrates the RowOfFields command. Two wavelengths are used to show
the principles of a grating.
cc Fred van Goor, June 2020.
"""
wavelength=1000*nm
Dlambda=150*nm
size=11*mm
N=2000
N2=int(N/2)
SlitSeparation=0.5*mm
f=30*cm
Nslits=20
SlitHeight=5*mm
SlitWidth=0.1*mm
Nheight=int(SlitHeight/size*N)
Nwidth=int(SlitWidth/size*N)
Fslit=np.ones((Nheight,Nwidth))
F1=Begin(size,wavelength,N)
F1=RowOfFields(F1,Fslit,Nslits,SlitSeparation)
Islits=Intensity(F1)
F1=Lens(F1,f)
F1=Forvard(F1,f)
F11=Interpol(F1,size,N,magnif=4)
Iscreen1=Intensity(F11)
F2=Begin(size,wavelength+Dlambda,N)
F2=RowOfFields(F2,Fslit,Nslits,SlitSeparation)
F2=Lens(F2,f)
F2=Forvard(F2,f)
F22=Interpol(F2,size,N,magnif=4)
Iscreen2=Intensity(F22)
X=np.arange(N)
X=(X/N-1/2)*size/mm
s= r'LightPipes for Python,' + '\n' +\
r'MultiSlit.py'+ '\n\n'\
r'size = {:4.2f} mm'.format(size/mm) + '\n' +\
r'$\lambda$ = {:4.2f} nm'.format(wavelength/nm) + '\n' +\
r'$\Delta\lambda$ = {:4.2f} nm'.format(Dlambda/nm) + '\n' +\
r'N = {:d}'.format(N) + '\n' +\
r'width of the slits: {:4.2f} mm'.format(SlitWidth/mm) + '\n' +\
r'height of the slits: {:4.2f} mm'.format(SlitHeight/mm) + '\n' +\
r'separation of the slits: {:4.2f} mm'.format(SlitSeparation/mm) + '\n' +\
r'number of slits: {:d}'.format(Nslits) + '\n' +\
r'focal length lens: {:4.2f} cm'.format(f/cm) + '\n\n' +\
r'${\copyright}$ Fred van Goor, May 2020'
fig=plt.figure(figsize=(10,6))
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222);#ax2.set_ylim(bottom=900,top=1100)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
ax1.imshow(Islits,cmap='gray',aspect='equal');ax1.axis('off'); ax1.set_title('Screen with slits')
ax2.imshow(Iscreen1+Iscreen2,cmap='jet',aspect='equal');ax2.axis('off'); ax2.set_title('Intensity distribution at the focus of the lens')
#ax2.margins(x=0, y=-0.45)
ax3.plot(X,(Iscreen1+Iscreen2)[N2]); ax3.set_xlabel('x [mm]'); ax3.set_ylabel('Intensity [a.u.]'); ax3.set_title('Cross section of intensity at the focus')
ax4.text(0,0,s); ax4.axis('off')
plt.show()
| 31.808824
| 155
| 0.680536
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 793
| 0.36662
|
a13baec342fa639fe6142ecd977281a346771177
| 389
|
py
|
Python
|
genshimacro/__init__.py
|
trac-hacks/trac-GenshiMacro
|
d9da1a50f6d73904fdda2e9e7cbc4c056b929267
|
[
"BSD-3-Clause"
] | 1
|
2015-02-19T21:08:53.000Z
|
2015-02-19T21:08:53.000Z
|
genshimacro/__init__.py
|
ejucovy/trac-GenshiMacro
|
d9da1a50f6d73904fdda2e9e7cbc4c056b929267
|
[
"BSD-3-Clause"
] | null | null | null |
genshimacro/__init__.py
|
ejucovy/trac-GenshiMacro
|
d9da1a50f6d73904fdda2e9e7cbc4c056b929267
|
[
"BSD-3-Clause"
] | null | null | null |
from genshi.template import MarkupTemplate
from trac.core import *
from trac.web.chrome import Chrome
from trac.wiki.macros import WikiMacroBase
class GenshiMacro(WikiMacroBase):
def expand_macro(self, formatter, name, text, args):
template = MarkupTemplate(text)
chrome = Chrome(self.env)
return template.generate(**chrome.populate_data(formatter.req, {}))
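# --- Usage sketch (illustrative; not part of the original file). The macro body
# is parsed as a Genshi markup template and rendered with the Chrome template
# data. WikiMacroBase derives the processor name from the class name (a trailing
# 'Macro' is normally stripped), so a wiki page would typically embed it roughly
# like this (exact name may differ):
#
#   {{{#!Genshi
#   <div xmlns:py="http://genshi.edgewall.org/">
#     <py:for each="i in range(3)">item $i </py:for>
#   </div>
#   }}}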
| 29.923077
| 75
| 0.742931
| 241
| 0.619537
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a13d6b6264ad2abf3168edf6c36418b077a9e067
| 2,110
|
py
|
Python
|
scripts/WIPS2015/WIPS_anydiag_time.py
|
eclee25/flu-SDI-exploratory-age
|
2f5a4d97b84d2116e179e85fe334edf4556aa946
|
[
"MIT"
] | 3
|
2018-03-29T23:02:43.000Z
|
2020-08-10T12:01:50.000Z
|
scripts/WIPS2015/WIPS_anydiag_time.py
|
eclee25/flu-SDI-exploratory-age
|
2f5a4d97b84d2116e179e85fe334edf4556aa946
|
[
"MIT"
] | null | null | null |
scripts/WIPS2015/WIPS_anydiag_time.py
|
eclee25/flu-SDI-exploratory-age
|
2f5a4d97b84d2116e179e85fe334edf4556aa946
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 10/11/14
###Function: Any diagnosis per 100,000 population vs. week number for flu weeks (wks 40-20). Population size is from the calendar year of the week of calculation.
###Import data: SQL_export/anydiag_outpatient_allweeks.csv
### branch from v2/Supp_anydiag_time.py
###Command Line: python WIPS_anydiag_time.py
##############################################
### notes ###
# Incidence per 100,000 is normalized by the total population of the second calendar year of the flu season
### packages/modules ###
import csv
import matplotlib.pyplot as plt
## local modules ##
import functions_v5 as fxn
### data structures ###
### functions ###
### data files ###
anydiagin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/anydiag_allweeks_outpatient.csv','r')
anydiagin.readline() # rm header
anydiag = csv.reader(anydiagin, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons
fw = fxn.gp_fluweeks
sl = fxn.gp_seasonlabels
colvec = fxn.gp_colors
wklab = fxn.gp_weeklabels
fs = 24
fssml = 16
### program ###
# dict_wk[week] = seasonnum, dict_any[week] = visits per 100,000 in US population in calendar year of week,d_any53ls[seasonnum] = [anydiag wk 40 per 100000, anydiag wk 41 per 100000,...]
d_wk, d_any, d_any53ls = fxn.week_anydiag_processing(anydiag)
# plot values
for s in ps:
plt.plot(xrange(53), d_any53ls[s], marker = fxn.gp_marker, color = colvec[s-2], label = sl[s-2], linewidth = fxn.gp_linewidth)
plt.fill([7, 8, 8, 7], [0, 0, 4000, 4000], facecolor='grey', alpha=0.4)
plt.fill([12, 14, 14, 12], [0, 0, 4000, 4000], facecolor='grey', alpha=0.4)
plt.xlim([0, fw-1])
plt.xticks(range(53)[::5], wklab[::5])
plt.ylim([0, 4000])
plt.xlabel('Week Number', fontsize=fs)
plt.ylabel('Outpatient Visit per 100,000', fontsize=fs)
plt.legend(loc='upper right')
plt.savefig('/home/elee/Dropbox/Department/Presentations/2015_WIPS/Figures/anydiag_time.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
| 32.461538
| 186
| 0.691469
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,223
| 0.579621
|
a13d78de55aa35e5195b6d00dd9af4b319aa1688
| 5,290
|
py
|
Python
|
misc/src/scheduler_plugin.py
|
hivesolutions/colony_plugins
|
cfd8fb2ac58037e01002966704b8a642feb37895
|
[
"Apache-1.1"
] | 1
|
2016-10-30T09:51:06.000Z
|
2016-10-30T09:51:06.000Z
|
misc/src/scheduler_plugin.py
|
hivesolutions/colony_plugins
|
cfd8fb2ac58037e01002966704b8a642feb37895
|
[
"Apache-1.1"
] | 1
|
2015-12-29T18:51:07.000Z
|
2015-12-29T18:51:07.000Z
|
misc/src/scheduler_plugin.py
|
hivesolutions/colony_plugins
|
cfd8fb2ac58037e01002966704b8a642feb37895
|
[
"Apache-1.1"
] | 1
|
2018-01-26T12:54:13.000Z
|
2018-01-26T12:54:13.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Colony Framework
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Colony Framework.
#
# Hive Colony Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Colony Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Colony Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import colony
class SchedulerPlugin(colony.Plugin):
"""
The main class for the Scheduler plugin.
"""
id = "pt.hive.colony.plugins.misc.scheduler"
name = "Scheduler"
description = "A plugin to manage the scheduling of tasks"
version = "1.0.0"
author = "Hive Solutions Lda. <development@hive.pt>"
platforms = [
colony.CPYTHON_ENVIRONMENT
]
capabilities = [
"main",
"scheduler",
"console_command_extension"
]
dependencies = [
colony.PluginDependency("pt.hive.colony.plugins.misc.guid"),
colony.PluginDependency("pt.hive.colony.plugins.console")
]
main_modules = [
"scheduler_c"
]
def load_plugin(self):
colony.Plugin.load_plugin(self)
import scheduler_c
self.system = scheduler_c.Scheduler(self)
self.console = scheduler_c.ConsoleScheduler(self)
self.release_ready_semaphore()
def end_load_plugin(self):
colony.Plugin.end_load_plugin(self)
self.system.load_scheduler()
def unload_plugin(self):
colony.Plugin.unload_plugin(self)
self.system.unload_scheduler()
self.release_ready_semaphore()
def end_unload_plugin(self):
colony.Plugin.end_unload_plugin(self)
self.release_ready_semaphore()
@colony.set_configuration_property
def set_configuration_property(self, property_name, property):
colony.Plugin.set_configuration_property(self, property_name, property)
@colony.unset_configuration_property
def unset_configuration_property(self, property_name):
colony.Plugin.unset_configuration_property(self, property_name)
def get_console_extension_name(self):
return self.console.get_console_extension_name()
def get_commands_map(self):
return self.console.get_commands_map()
def register_task(self, task, time):
return self.system.register_task(task, time)
def register_task_absolute(self, task, absolute_time):
return self.system.register_task_absolute(task, absolute_time)
def register_task_date_time(self, task, date_time):
return self.system.register_task_date_time(task, date_time)
def register_task_date_time_absolute(self, task, absolute_date_time):
return self.system.register_task_date_time_absolute(task, absolute_date_time)
def register_task_recursive(self, task, time, recursion_list):
return self.system.register_task_recursive(task, time, recursion_list)
def register_task_absolute_recursive(self, task, absolute_time, recursion_list):
return self.system.register_task_absolute_recursive(task, absolute_time, recursion_list)
def register_task_date_time_recursive(self, task, date_time, recursion_list):
return self.system.register_task_date_time_recursive(task, date_time, recursion_list)
def register_task_date_time_absolute_recursive(self, task, absolute_date_time, recursion_list):
return self.system.register_task_date_time_absolute_recursive(task, absolute_date_time, recursion_list)
def unregister_task(self, task):
return self.system.unregister_task(task)
def get_task_class(self):
"""
Retrieves the class that represents
a task in the current scope.
:rtype: Class
:return: The task class for the current scope.
"""
return self.system.get_task_class()
@colony.set_configuration_property_method("startup_configuration")
def startup_configuration_set_configuration_property(self, property_name, property):
self.system.set_startup_configuration_property(property)
@colony.unset_configuration_property_method("startup_configuration")
def startup_configuration_unset_configuration_property(self, property_name):
self.system.unset_startup_configuration_property()
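# --- Usage sketch (illustrative; not part of the original file). How a task
# object is constructed is defined in scheduler_c, so only the plugin-level
# calls shown above are used here; `scheduler_plugin` stands for a loaded
# SchedulerPlugin instance obtained from the colony plugin manager.
#
# task_class = scheduler_plugin.get_task_class()
# task = task_class(...)                         # constructor arguments live in scheduler_c
# scheduler_plugin.register_task_date_time(task, some_datetime)
# scheduler_plugin.unregister_task(task)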
| 35.986395
| 112
| 0.709074
| 3,969
| 0.75
| 0
| 0
| 784
| 0.148148
| 0
| 0
| 1,713
| 0.323696
|
a13e0be2220cebb57badaee86dd77ccad221768a
| 3,458
|
py
|
Python
|
source/vistas/ui/controls/gl_camera.py
|
VISTAS-IVES/pyvistas
|
2de1541c0fb40ccbac4014af758ff329ba0677b1
|
[
"BSD-3-Clause"
] | 1
|
2017-08-26T20:18:38.000Z
|
2017-08-26T20:18:38.000Z
|
source/vistas/ui/controls/gl_camera.py
|
VISTAS-IVES/pyvistas
|
2de1541c0fb40ccbac4014af758ff329ba0677b1
|
[
"BSD-3-Clause"
] | 89
|
2017-06-10T21:03:16.000Z
|
2022-03-11T23:19:56.000Z
|
source/vistas/ui/controls/gl_camera.py
|
VISTAS-IVES/pyvistas
|
2de1541c0fb40ccbac4014af758ff329ba0677b1
|
[
"BSD-3-Clause"
] | 1
|
2019-03-05T21:44:29.000Z
|
2019-03-05T21:44:29.000Z
|
import os
import wx
from vistas.core.graphics.camera_interactor import *
from vistas.core.graphics.overlay import BasicOverlayButton
from vistas.core.paths import get_resources_directory
from vistas.ui.events import CameraChangedEvent
from vistas.ui.utils import get_main_window
class GLCameraControls(wx.EvtHandler):
"""
Event handler for controlling the camera interaction for a GLCanvas. Allows a user to switch between different
camera interaction modes.
"""
SPHERE = 0
FREELOOK = 1
PAN = 2
def __init__(self, gl_canvas, camera):
super().__init__()
self.camera = camera
self.canvas = gl_canvas
self.visible = False
self.sphere_button = BasicOverlayButton(
os.path.join(get_resources_directory(), 'images', 'glyphicons-372-global.png'), (0, 0)
)
self.sphere_button.opaque = True
self.freelook_button = BasicOverlayButton(
os.path.join(get_resources_directory(), 'images', 'glyphicons-52-eye-open.png'), (0, 0)
)
self.pan_button = BasicOverlayButton(
os.path.join(get_resources_directory(), 'images', 'glyphicons-187-move.png'), (0, 0)
)
self.camera_interactor = SphereInteractor(camera=self.camera)
self.reposition()
self.show()
self.canvas.Bind(wx.EVT_SIZE, lambda event: self.reposition())
self.sphere_button.Bind(wx.EVT_BUTTON, lambda event: self.set_type(self.SPHERE))
self.freelook_button.Bind(wx.EVT_BUTTON, lambda event: self.set_type(self.FREELOOK))
self.pan_button.Bind(wx.EVT_BUTTON, lambda event: self.set_type(self.PAN))
def reset(self):
self.set_type(self.camera_interactor.camera_type, False)
def reposition(self):
width = self.canvas.GetSize().width
y_offset = 10
for button in (self.sphere_button, self.freelook_button, self.pan_button):
button.position = (width - button.size[0], y_offset)
y_offset += 5 + button.size[1]
self.canvas.Refresh()
def show(self):
if not self.visible:
self.canvas.overlay.add_button(self.sphere_button)
self.canvas.overlay.add_button(self.freelook_button)
self.canvas.overlay.add_button(self.pan_button)
self.visible = True
def hide(self):
if self.visible:
self.canvas.overlay.remove_button(self.sphere_button)
self.canvas.overlay.remove_button(self.freelook_button)
self.canvas.overlay.remove_button(self.pan_button)
self.visible = False
def set_type(self, interactor, send_event=True):
self.sphere_button.opaque = False
self.freelook_button.opaque = False
self.pan_button.opaque = False
if interactor in (self.SPHERE, CameraInteractor.SPHERE):
self.sphere_button.opaque = True
self.camera_interactor = SphereInteractor(self.camera, False)
elif interactor in (self.FREELOOK, CameraInteractor.FREELOOK):
self.freelook_button.opaque = True
self.camera_interactor = FreelookInteractor(self.camera, False)
elif interactor in (self.PAN, CameraInteractor.PAN):
self.pan_button.opaque = True
self.camera_interactor = PanInteractor(self.camera, False)
self.canvas.Refresh()
if send_event:
wx.PostEvent(get_main_window(), CameraChangedEvent())
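# --- Usage sketch (illustrative; not part of the original file). `canvas` is
# assumed to be an existing VISTAS GLCanvas and `camera` its Camera object.
#
# controls = GLCameraControls(canvas, camera)
# controls.set_type(GLCameraControls.PAN)    # switch to the pan interactor
# controls.camera_interactor                 # now a PanInteractor driving `camera`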
| 34.237624
| 114
| 0.669751
| 3,174
| 0.917872
| 0
| 0
| 0
| 0
| 0
| 0
| 260
| 0.075188
|
a13f0a11b4555fcfbf9c924b7e7de9f674331ec4
| 8,678
|
py
|
Python
|
src/_sever_qt4.py
|
Joy917/fast-transfer
|
dfbcf5c4239da3d550b721500dff05fb6d40b756
|
[
"MIT"
] | null | null | null |
src/_sever_qt4.py
|
Joy917/fast-transfer
|
dfbcf5c4239da3d550b721500dff05fb6d40b756
|
[
"MIT"
] | null | null | null |
src/_sever_qt4.py
|
Joy917/fast-transfer
|
dfbcf5c4239da3d550b721500dff05fb6d40b756
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\SVNzhangy\fast-transfer\src\_sever.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(798, 732)
self.gridLayout = QtGui.QGridLayout(Form)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.groupBox_2 = QtGui.QGroupBox(Form)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox_2)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
spacerItem = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.checkBox_time = QtGui.QCheckBox(self.groupBox_2)
self.checkBox_time.setObjectName(_fromUtf8("checkBox_time"))
self.horizontalLayout.addWidget(self.checkBox_time)
self.dateTimeEdit_start = QtGui.QDateTimeEdit(self.groupBox_2)
self.dateTimeEdit_start.setDateTime(QtCore.QDateTime(QtCore.QDate(2017, 1, 1), QtCore.QTime(0, 0, 0)))
self.dateTimeEdit_start.setCalendarPopup(True)
self.dateTimeEdit_start.setObjectName(_fromUtf8("dateTimeEdit_start"))
self.horizontalLayout.addWidget(self.dateTimeEdit_start)
self.label_2 = QtGui.QLabel(self.groupBox_2)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout.addWidget(self.label_2)
self.dateTimeEdit_end = QtGui.QDateTimeEdit(self.groupBox_2)
self.dateTimeEdit_end.setDateTime(QtCore.QDateTime(QtCore.QDate(2018, 1, 1), QtCore.QTime(0, 0, 0)))
self.dateTimeEdit_end.setCalendarPopup(True)
self.dateTimeEdit_end.setObjectName(_fromUtf8("dateTimeEdit_end"))
self.horizontalLayout.addWidget(self.dateTimeEdit_end)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
spacerItem2 = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem2)
self.checkBox_ip = QtGui.QCheckBox(self.groupBox_2)
self.checkBox_ip.setObjectName(_fromUtf8("checkBox_ip"))
self.horizontalLayout_3.addWidget(self.checkBox_ip)
self.lineEdit_ip = QtGui.QLineEdit(self.groupBox_2)
self.lineEdit_ip.setObjectName(_fromUtf8("lineEdit_ip"))
self.horizontalLayout_3.addWidget(self.lineEdit_ip)
spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem3)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
spacerItem4 = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem4)
self.checkBox_fuzzy = QtGui.QCheckBox(self.groupBox_2)
self.checkBox_fuzzy.setObjectName(_fromUtf8("checkBox_fuzzy"))
self.horizontalLayout_4.addWidget(self.checkBox_fuzzy)
self.lineEdit_fuzzysearch = QtGui.QLineEdit(self.groupBox_2)
self.lineEdit_fuzzysearch.setObjectName(_fromUtf8("lineEdit_fuzzysearch"))
self.horizontalLayout_4.addWidget(self.lineEdit_fuzzysearch)
spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem5)
self.verticalLayout_2.addLayout(self.horizontalLayout_4)
self.gridLayout.addWidget(self.groupBox_2, 1, 0, 1, 2)
self.groupBox = QtGui.QGroupBox(Form)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.verticalLayout = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.textBrowser_log = QtGui.QTextBrowser(self.groupBox)
self.textBrowser_log.viewport().setProperty("cursor", QtGui.QCursor(QtCore.Qt.IBeamCursor))
self.textBrowser_log.setMouseTracking(True)
self.textBrowser_log.setObjectName(_fromUtf8("textBrowser_log"))
self.verticalLayout.addWidget(self.textBrowser_log)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.lineEdit_pagenumStart = QtGui.QLineEdit(self.groupBox)
self.lineEdit_pagenumStart.setMaximumSize(QtCore.QSize(50, 16777215))
self.lineEdit_pagenumStart.setObjectName(_fromUtf8("lineEdit_pagenumStart"))
self.horizontalLayout_2.addWidget(self.lineEdit_pagenumStart)
self.label_3 = QtGui.QLabel(self.groupBox)
self.label_3.setMaximumSize(QtCore.QSize(20, 16777215))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout_2.addWidget(self.label_3)
self.lineEdit_pagenumEnd = QtGui.QLineEdit(self.groupBox)
self.lineEdit_pagenumEnd.setMaximumSize(QtCore.QSize(50, 16777215))
self.lineEdit_pagenumEnd.setObjectName(_fromUtf8("lineEdit_pagenumEnd"))
self.horizontalLayout_2.addWidget(self.lineEdit_pagenumEnd)
spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem6)
self.pushButton_pageup = QtGui.QPushButton(self.groupBox)
self.pushButton_pageup.setObjectName(_fromUtf8("pushButton_pageup"))
self.horizontalLayout_2.addWidget(self.pushButton_pageup)
self.pushButton_pagedown = QtGui.QPushButton(self.groupBox)
self.pushButton_pagedown.setObjectName(_fromUtf8("pushButton_pagedown"))
self.horizontalLayout_2.addWidget(self.pushButton_pagedown)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.gridLayout.addWidget(self.groupBox, 0, 0, 1, 2)
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.label_notice = QtGui.QLabel(Form)
self.label_notice.setMinimumSize(QtCore.QSize(600, 0))
self.label_notice.setObjectName(_fromUtf8("label_notice"))
self.horizontalLayout_5.addWidget(self.label_notice)
spacerItem7 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem7)
self.pushButton_check = QtGui.QPushButton(Form)
self.pushButton_check.setObjectName(_fromUtf8("pushButton_check"))
self.horizontalLayout_5.addWidget(self.pushButton_check)
self.gridLayout.addLayout(self.horizontalLayout_5, 2, 0, 1, 2)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "LogManager", None))
self.groupBox_2.setTitle(_translate("Form", "Search Setting", None))
self.checkBox_time.setText(_translate("Form", "time:", None))
self.label_2.setText(_translate("Form", "-----", None))
self.checkBox_ip.setText(_translate("Form", "IP: ", None))
self.checkBox_fuzzy.setText(_translate("Form", "fuzzy:", None))
self.groupBox.setTitle(_translate("Form", "Log Display", None))
self.label_3.setText(_translate("Form", "---", None))
self.pushButton_pageup.setText(_translate("Form", "page up ", None))
self.pushButton_pagedown.setText(_translate("Form", "page down", None))
self.label_notice.setText(_translate("Form", "Notice:", None))
self.pushButton_check.setText(_translate("Form", "Check", None))
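# Minimal preview sketch (not emitted by pyuic for this file; added for
# illustration). It applies the generated Ui_Form to a plain QWidget, which is
# the usual way these generated classes are used.
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    Form = QtGui.QWidget()
    ui = Ui_Form()
    ui.setupUi(Form)
    Form.show()
    sys.exit(app.exec_())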
| 58.635135
| 110
| 0.735999
| 7,999
| 0.921119
| 0
| 0
| 0
| 0
| 0
| 0
| 857
| 0.098687
|
a1423e6a2572b095e511d07a5f47171e04381471
| 4,579
|
py
|
Python
|
aleph/tests/test_documents_api.py
|
gazeti/aleph
|
f6714c4be038471cfdc6408bfe88dc9e2ed28452
|
[
"MIT"
] | 1
|
2017-07-28T12:54:09.000Z
|
2017-07-28T12:54:09.000Z
|
aleph/tests/test_documents_api.py
|
gazeti/aleph
|
f6714c4be038471cfdc6408bfe88dc9e2ed28452
|
[
"MIT"
] | 7
|
2017-08-16T12:49:23.000Z
|
2018-02-16T10:22:11.000Z
|
aleph/tests/test_documents_api.py
|
gazeti/aleph
|
f6714c4be038471cfdc6408bfe88dc9e2ed28452
|
[
"MIT"
] | 6
|
2017-07-26T12:29:53.000Z
|
2017-08-18T09:35:50.000Z
|
import json
from aleph.tests.util import TestCase
class DocumentsApiTestCase(TestCase):
def setUp(self):
super(DocumentsApiTestCase, self).setUp()
self.load_fixtures('docs.yaml')
def test_index(self):
res = self.client.get('/api/1/documents')
assert res.status_code == 200, res
self.login(is_admin=True)
res = self.client.get('/api/1/documents')
assert res.status_code == 200, res
assert res.json['total'] == 4, res.json
fix = '720badc9cfa9a80fc455239f86c56273dc5c8291'
res = self.client.get('/api/1/documents?content_hash=%s' % fix)
assert res.status_code == 200, res
assert res.json['total'] == 1, res.json
assert res.json['results'][0]['content_hash'] == fix, res.json
def test_view(self):
doc_id = 1000
res = self.client.get('/api/1/documents/%s' % doc_id)
assert res.status_code == 200, res
assert res.json['foreign_id'] == 'test1', res
res = self.client.get('/api/1/documents/328984')
assert res.status_code == 404, res
def test_view_tables(self):
doc_id = 1003
res = self.client.get('/api/1/documents/%s/tables/0' % doc_id)
assert res.status_code == 200, res
assert 'sheet_name' in res.json, res.json
res = self.client.get('/api/1/documents/%s/tables/444' % doc_id)
assert res.status_code == 404, res
def test_view_records(self):
res = self.client.get('/api/1/documents/1003/records')
assert res.status_code == 200, res
assert 'results' in res.json, res.json
assert len(res.json['results']) == 10, res.json
def test_view_record_by_id(self):
doc_id = 1000
res = self.client.get('/api/1/documents/%s/records/1' % doc_id)
assert res.status_code == 200, res
assert 'banana' in res.json['text'], res
assert 'total' not in res.json['text'], res
res = self.client.get('/api/1/documents/%s/records/2' % doc_id)
assert 'total' in res.json['text'], res
res = self.client.get('/api/1/documents/%s/records/2000' % doc_id)
assert res.status_code == 404, res
def test_records_search(self):
res = self.client.get('/api/1/documents/1003/records?q=kwazulu')
assert res.status_code == 200, res
assert res.json['total'] == 1, res.json
def test_view_pdf(self):
res = self.client.get('/api/1/documents/1003/pdf')
assert res.status_code == 400, res
res = self.client.get('/api/1/documents/1000/pdf')
assert res.status_code == 404, res
def test_view_references(self):
doc_id = 1001
res = self.client.get('/api/1/documents/%s/references' % doc_id)
assert res.status_code == 403, res
self.login(is_admin=True)
res = self.client.get('/api/1/documents/%s/references' % doc_id)
assert res.status_code == 200, res
assert 'results' in res.json, res.json
# assert len(res.json['results']) == 2, res.json
def test_update_simple(self):
url = '/api/1/documents/1000'
res = self.client.get(url)
assert res.status_code == 200, res
data = res.json
res = self.client.post(url, data=json.dumps(data),
content_type='application/json')
assert res.status_code == 403, res.json
data['title'] = 'Eaten by a pumpkin'
self.login(is_admin=True)
res = self.client.post(url, data=json.dumps(data),
content_type='application/json')
assert res.status_code == 200, res.json
assert res.json['title'] == data['title'], res.json
def test_update_invalid(self):
url = '/api/1/documents/1000'
ores = self.client.get(url)
self.login(is_admin=True)
data = ores.json.copy()
data['countries'] = ['xz']
res = self.client.post(url, data=json.dumps(data),
content_type='application/json')
assert res.status_code == 400, res.json
data = ores.json.copy()
data['urls'] = ['lalala']
res = self.client.post(url, data=json.dumps(data),
content_type='application/json')
assert res.status_code == 400, res.json
data = ores.json.copy()
data['dates'] = ['2011-XX-XX']
res = self.client.post(url, data=json.dumps(data),
content_type='application/json')
assert res.status_code == 400, res.json
| 37.227642
| 74
| 0.592487
| 4,525
| 0.988207
| 0
| 0
| 0
| 0
| 0
| 0
| 932
| 0.203538
|
a143abc8dbbd62332b147ee1258deecef9896d32
| 649
|
py
|
Python
|
acropolis.py
|
andreasa13/Flask_WebApp_TripAdvisor
|
ea77291280676128b224da02c4938a42bbbb5200
|
[
"MIT"
] | null | null | null |
acropolis.py
|
andreasa13/Flask_WebApp_TripAdvisor
|
ea77291280676128b224da02c4938a42bbbb5200
|
[
"MIT"
] | 1
|
2021-12-13T20:52:54.000Z
|
2021-12-13T20:52:54.000Z
|
acropolis.py
|
andreasagap/Flask_WebApp_TripAdvisor
|
06fd682248ea12ee440834719c113ec974635dd0
|
[
"MIT"
] | 1
|
2021-06-09T18:29:33.000Z
|
2021-06-09T18:29:33.000Z
|
import json
import pandas as pd
from geopy.geocoders import Nominatim
def getAcropolisStatistics():
data = pd.read_csv("Analytics/demographics_old.csv")
ratings_acropolis = len(data)
gender = data.gender.str.lower().value_counts()
ages = data.age_group.value_counts()
return ratings_acropolis, gender["man"], gender["woman"], ages
def getAcropolisMap():
data = pd.read_csv("Analytics/demographics.csv")
dictionary = {}
for i, v in data.geocode.value_counts().items():
dictionary[i] = v
#a = pd.read_json(data.geocode.value_counts(), typ='series', orient='records')
return json.dumps(dictionary)
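# Usage sketch (illustrative; not part of the original file). Assumes the
# Analytics/*.csv files referenced above are present relative to the working
# directory, as the functions themselves already require.
if __name__ == '__main__':
    ratings, men, women, ages = getAcropolisStatistics()
    print(f'{ratings} ratings: {men} men, {women} women')
    print(ages)                    # value_counts Series of age groups
    print(getAcropolisMap())       # JSON mapping of geocode -> rating count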
| 28.217391
| 82
| 0.70416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 150
| 0.231125
|
a146f1a5836a0723e015b88316d930723a68dc51
| 1,464
|
py
|
Python
|
share/pegasus/init/split/daxgen.py
|
fengggli/pegasus
|
b68f588d90eb2b832086ed627d61414691f8ba95
|
[
"Apache-2.0"
] | null | null | null |
share/pegasus/init/split/daxgen.py
|
fengggli/pegasus
|
b68f588d90eb2b832086ed627d61414691f8ba95
|
[
"Apache-2.0"
] | null | null | null |
share/pegasus/init/split/daxgen.py
|
fengggli/pegasus
|
b68f588d90eb2b832086ed627d61414691f8ba95
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import os
import pwd
import sys
import time
from Pegasus.DAX3 import *
# The name of the DAX file is the first argument
if len(sys.argv) != 2:
sys.stderr.write("Usage: %s DAXFILE\n" % (sys.argv[0]))
sys.exit(1)
daxfile = sys.argv[1]
USER = pwd.getpwuid(os.getuid())[0]
# Create a abstract dag
dax = ADAG("split")
# Add some workflow-level metadata
dax.metadata("creator", "%s@%s" % (USER, os.uname()[1]))
dax.metadata("created", time.ctime())
webpage = File("pegasus.html")
# the split job that splits the webpage into smaller chunks
split = Job("split")
split.addArguments("-l","100","-a","1",webpage,"part.")
split.uses(webpage, link=Link.INPUT)
# associate the label with the job. all jobs with same label
# are run with PMC when doing job clustering
split.addProfile( Profile("pegasus","label","p1"))
dax.addJob(split)
# we do a parameter sweep on the first 4 chunks created
for c in "abcd":
part = File("part.%s" % c)
split.uses(part, link=Link.OUTPUT, transfer=False, register=False)
count = File("count.txt.%s" % c)
wc = Job("wc")
wc.addProfile( Profile("pegasus","label","p1"))
wc.addArguments("-l",part)
wc.setStdout(count)
wc.uses(part, link=Link.INPUT)
wc.uses(count, link=Link.OUTPUT, transfer=True, register=True)
dax.addJob(wc)
#adding dependency
dax.depends(wc, split)
f = open(daxfile, "w")
dax.writeXML(f)
f.close()
print "Generated dax %s" %daxfile
| 25.684211
| 70
| 0.672814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 556
| 0.379781
|
a147e22d5aeaabe35ccc4c56ea5539f536e24407
| 3,685
|
py
|
Python
|
lbrynet/wallet/ledger.py
|
ttkopec/lbry
|
03415415ed397730e6f691f527f51b429a834ed5
|
[
"MIT"
] | null | null | null |
lbrynet/wallet/ledger.py
|
ttkopec/lbry
|
03415415ed397730e6f691f527f51b429a834ed5
|
[
"MIT"
] | 110
|
2018-11-26T05:41:35.000Z
|
2021-08-03T15:37:20.000Z
|
lbrynet/wallet/ledger.py
|
ttkopec/lbry
|
03415415ed397730e6f691f527f51b429a834ed5
|
[
"MIT"
] | 1
|
2018-09-20T22:15:59.000Z
|
2018-09-20T22:15:59.000Z
|
import logging
from six import int2byte
from binascii import unhexlify
from twisted.internet import defer
from .resolve import Resolver
from lbryschema.error import URIParseError
from lbryschema.uri import parse_lbry_uri
from torba.baseledger import BaseLedger
from .account import Account
from .network import Network
from .database import WalletDatabase
from .transaction import Transaction
from .header import Headers, UnvalidatedHeaders
log = logging.getLogger(__name__)
class MainNetLedger(BaseLedger):
name = 'LBRY Credits'
symbol = 'LBC'
network_name = 'mainnet'
account_class = Account
database_class = WalletDatabase
headers_class = Headers
network_class = Network
transaction_class = Transaction
secret_prefix = int2byte(0x1c)
pubkey_address_prefix = int2byte(0x55)
script_address_prefix = int2byte(0x7a)
extended_public_key_prefix = unhexlify('0488b21e')
extended_private_key_prefix = unhexlify('0488ade4')
max_target = 0x0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
genesis_hash = '9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f463'
genesis_bits = 0x1f00ffff
target_timespan = 150
default_fee_per_byte = 50
default_fee_per_name_char = 200000
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fee_per_name_char = self.config.get('fee_per_name_char', self.default_fee_per_name_char)
@property
def resolver(self):
return Resolver(self.headers.claim_trie_root, self.headers.height, self.transaction_class,
hash160_to_address=self.hash160_to_address, network=self.network)
@defer.inlineCallbacks
def resolve(self, page, page_size, *uris):
for uri in uris:
try:
parse_lbry_uri(uri)
except URIParseError as err:
defer.returnValue({'error': err.message})
resolutions = yield self.network.get_values_for_uris(self.headers.hash().decode(), *uris)
return (yield self.resolver._handle_resolutions(resolutions, uris, page, page_size))
@defer.inlineCallbacks
def get_claim_by_claim_id(self, claim_id):
result = (yield self.network.get_claims_by_ids(claim_id)).pop(claim_id, {})
return (yield self.resolver.get_certificate_and_validate_result(result))
@defer.inlineCallbacks
def get_claim_by_outpoint(self, txid, nout):
claims = (yield self.network.get_claims_in_tx(txid)) or []
for claim in claims:
if claim['nout'] == nout:
return (yield self.resolver.get_certificate_and_validate_result(claim))
return 'claim not found'
@defer.inlineCallbacks
def start(self):
yield super().start()
yield defer.DeferredList([
a.maybe_migrate_certificates() for a in self.accounts
])
class TestNetLedger(MainNetLedger):
network_name = 'testnet'
pubkey_address_prefix = int2byte(111)
script_address_prefix = int2byte(196)
extended_public_key_prefix = unhexlify('043587cf')
extended_private_key_prefix = unhexlify('04358394')
class RegTestLedger(MainNetLedger):
network_name = 'regtest'
headers_class = UnvalidatedHeaders
pubkey_address_prefix = int2byte(111)
script_address_prefix = int2byte(196)
extended_public_key_prefix = unhexlify('043587cf')
extended_private_key_prefix = unhexlify('04358394')
max_target = 0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
genesis_hash = '6e3fcf1299d4ec5d79c3a4c91d624a4acf9e2e173d95a1a0504f677669687556'
genesis_bits = 0x207fffff
target_timespan = 1
| 34.12037
| 101
| 0.735414
| 3,195
| 0.867028
| 1,074
| 0.291452
| 1,404
| 0.381004
| 0
| 0
| 287
| 0.077883
|
a14898fc9eb718d11bd7d8fbc8f0101300add0a6
| 297
|
py
|
Python
|
MATA37-ILP 2021.2/JUDE/Lista 3 e Prova 3 - Loop/lista3_D.py
|
jeffersonraimon/Programming-UFBA
|
6a6803bfd0e6aa72f8c2b9ffa120792d73c727ca
|
[
"MIT"
] | 1
|
2021-12-09T12:55:56.000Z
|
2021-12-09T12:55:56.000Z
|
MATA37-ILP 2021.2/JUDE/Lista 3 e Prova 3 - Loop/lista3_D.py
|
jeffersonraimon/Programming-UFBA
|
6a6803bfd0e6aa72f8c2b9ffa120792d73c727ca
|
[
"MIT"
] | null | null | null |
MATA37-ILP 2021.2/JUDE/Lista 3 e Prova 3 - Loop/lista3_D.py
|
jeffersonraimon/Programming-UFBA
|
6a6803bfd0e6aa72f8c2b9ffa120792d73c727ca
|
[
"MIT"
] | 1
|
2022-02-21T12:01:53.000Z
|
2022-02-21T12:01:53.000Z
|
T = int(input())
P = int(input())
controle = 0 # used to record that a reading exceeded the limit
while P != 0:
P = int(input())
if P >= T:
controle = 1 # set to 1 just so it differs from 0
if controle == 1:
print("ALARME")
else:
print("O Havai pode dormir tranquilo")
| 14.142857
| 58
| 0.592593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 120
| 0.40404
|
a1489d0338a6be1fe32c5e1421435901d7f812f7
| 1,387
|
py
|
Python
|
dopamine/fetch_cam_train/fetch_cam/test/fetch_dis_error.py
|
kbehouse/dopamine
|
1922481d9c23d6c3cf3ee3ec06e613c6eb87cbc1
|
[
"Apache-2.0"
] | null | null | null |
dopamine/fetch_cam_train/fetch_cam/test/fetch_dis_error.py
|
kbehouse/dopamine
|
1922481d9c23d6c3cf3ee3ec06e613c6eb87cbc1
|
[
"Apache-2.0"
] | null | null | null |
dopamine/fetch_cam_train/fetch_cam/test/fetch_dis_error.py
|
kbehouse/dopamine
|
1922481d9c23d6c3cf3ee3ec06e613c6eb87cbc1
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import gym
import time
from matplotlib import pyplot as plt
from fetch_cam import FetchCameraEnv
from fsm import FSM
dis_tolerance = 0.0001 # 1mm
env = FetchCameraEnv()
obs = env.reset()
done = False
want_pos = (obs['eeinfo'][0]).copy()
ori_pos = (obs['eeinfo'][0]).copy()
print('---ori_pos = ' , obs['eeinfo'][0],'----')
step = 0
robot_step = 0
s_time = time.time()
while True:
# env.render()
now_pos = obs['eeinfo'][0]
dis = np.linalg.norm(now_pos - want_pos)
print('dis = ',dis)
if dis < dis_tolerance:
x, y, z, g = 0.01, 0.01, 0.01, 0.
want_pos = obs['eeinfo'][0] + np.array([x, y, z])
print('want_pos =' , want_pos)
step +=1
if step>=11:
break
else:
x, y, z, g = 0., 0.0, 0., 0.
a = np.array([x, y, z, g])
obs, r, done, info = env.step(a)
robot_step +=1
if abs(x) > 0 or abs(y) > 0 or abs(z) > 0 :
diff_x = obs['eeinfo'][0] - want_pos
# print("pre_obs['eeinfo'][0] = ", pre_x)
print("obs['eeinfo'][0] = {}, diff_x={}".format( obs['eeinfo'][0], diff_x) )
# time.sleep(0.5)
print('---final_pos = ' , obs['eeinfo'][0],'----')
print('---pos_diff = ' , obs['eeinfo'][0] - ori_pos,'----')
print('step = {}, robot_step={}'.format(step, robot_step))
print('use time = {:.2f}'.format(time.time()-s_time))
| 24.333333
| 85
| 0.539293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 314
| 0.226388
|
a1490edf966fa802ac0a01963e5d3d0e3138778b
| 5,091
|
py
|
Python
|
pyHarvest_build_151223/pyHarvest_Analyse_Data_v1.py
|
bl305/pyHarvest
|
d4c62d443ca657f9d31245c3c3f24c741cf2ae0b
|
[
"CC0-1.0"
] | null | null | null |
pyHarvest_build_151223/pyHarvest_Analyse_Data_v1.py
|
bl305/pyHarvest
|
d4c62d443ca657f9d31245c3c3f24c741cf2ae0b
|
[
"CC0-1.0"
] | null | null | null |
pyHarvest_build_151223/pyHarvest_Analyse_Data_v1.py
|
bl305/pyHarvest
|
d4c62d443ca657f9d31245c3c3f24c741cf2ae0b
|
[
"CC0-1.0"
] | null | null | null |
# coding=utf-8
from packages import *
import os
#SET PARAMETERS
myverbosity=-1
mymaxencode=5
TXT_filetypes=(
#simple text files
'txt','lst',
#config files
'ini','cfg',
#programming languages
'c','cpp',
#scripts
'vbs','py','pl')
XLS_filetypes=('xls','xlsx')
DOC_filetypes=('doc',)
DOCX_filetypes=('docx',)
PDF_filetypes=('pdf',)
#TEMPLATE FILES
myXLSpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\XLS\test.xlsx'
myTXTpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\TXT\normal.txt'
#myTXTpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\TXT\unicode.txt'
#myTXTpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\TXT\unicode_big.txt'
#myTXTpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\TXT\unicode_utf8.txt'
#myTXTpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\TXT\x.txt'
#myPDFpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\PDF\test.pdf'
#myPDFpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\PDF\xtest.pdf'
myPDFpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\PDF\ztest.pdf'
myDOCpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\DOC\xtest.doc'
myDOCXpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles\DOC\xtest.docx'
mydirpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\AllTestFiles'
#mydirpath=r'c:\LENBAL\Trainings\Securitytube_Python_Expert_PRIVATE\My_Network_Discovery_Project\Main_Program\DataGathered'
#mypath=myTXTpath
#mypath=myXLSpath
#mypath=myPDFpath
#mypath=myDOCpath
#mypath=myDOCXpath
#PROGRAM START
def process_myfile(thepath,verbosity=0):
#Select file type
fileextension=""
result=()
if '.' in thepath:
fileextension = thepath.rsplit('.', 1)[1]
if fileextension in DOC_filetypes:
doc_match=doc_full_search_tuple(thepath,myverbosity)
if doc_match:
result+=(doc_match,'doc')
if verbosity>1:
print doc_match
elif fileextension in DOCX_filetypes:
docx_match=docx_full_search_tuple(thepath,myverbosity)
if docx_match:
result+=(docx_match,'docx')
if verbosity>1:
print docx_match
elif fileextension in XLS_filetypes:
#PROCESS XLS
#xls_match=xls_full_search_tuple(thepath,verbosity=myverbosity)
xls_match=xls_full_search_tuple(thepath,myverbosity)
if xls_match:
result+=(xls_match,'xlsx')
if verbosity>1:
print xls_match
#print xls_match[-1]
elif fileextension in PDF_filetypes:
pdf_match=pdf_full_search_tuple(thepath,myverbosity)
if pdf_match:
result+=(pdf_match,'pdf')
if verbosity>1:
print pdf_match
#print pdf_match[-1]
elif fileextension in TXT_filetypes:
#PROCESS TXT
#txt_match=txt_full_search_tuple(thepath,maxencode=mymaxencode,verbosity=myverbosity)
txt_match=txt_full_search_tuple(thepath,mymaxencode,myverbosity)
if txt_match:
result+=(txt_match,'txt')
if verbosity>1:
print txt_match
#print txt_match[-1]
else:
print "[-] UNKNOWN filetype",thepath
return result
def process_localdir(localdir,recursive=0):
results=()
if recursive==0:
#files = [ f for f in os.listdir(localdir) if os.path.isfile(os.path.join(localdir,f)) ]
for files in os.listdir(localdir):
if os.path.isfile(os.path.join(localdir,files)):
abspath=os.path.join(localdir,files)
abspath = os.path.normpath(abspath).replace('//','/')
#print abspath
results+=(abspath,)
else:
for subdir, dirs, files in os.walk(localdir):
for file in files:
abspath=os.path.join(subdir,file)
abspath = os.path.normpath(abspath).replace('//','/')
#print abspath
results+=(abspath,)
return results
#print "##########################Main Program Started##########################"
#ANALYSE A SPECIFIC FILE
#process_myfile(mypath)
#ANALYSE ALL FILES IN A SPECIFIED DIRECTORY
filesindir=process_localdir(mydirpath,1)
Analysisconn, Analysisc = db_connect(Analysis_sqlite_file)
create_host_db(Analysisconn, Analysis_create_script,print_out=False)
filecount=len(filesindir)
filecounter=1
if filecount==0:
print "No files to analyse"
for fn in range(len(filesindir)):
mytext=process_myfile(filesindir[fn])
print "Analysing file %d/%d %s"%(filecounter,filecount,filesindir[fn])
filecounter+=1
if mytext:
ftype=mytext[1]
mytextdata=mytext[0]
insert_analysis_data(Analysisc,Analysis_table_name,mytextdata,ftype,print_out=False)
db_commit(Analysisconn)
pass
db_commit(Analysisconn)
db_close(Analysisconn)
print (raw_input('Press Enter to Exit!'))
| 36.891304
| 144
| 0.792772
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,651
| 0.520723
|
a1499e6c4207a38f095d2e507e2c6116418ae733
| 2,732
|
py
|
Python
|
functions/update_modeling_results.py
|
zheng-da/covid19-severity-prediction
|
205ab5aa13a5e91a4c23ccd73e65939e4003626b
|
[
"MIT"
] | 2
|
2020-05-15T14:42:02.000Z
|
2020-05-22T08:51:47.000Z
|
functions/update_modeling_results.py
|
rahul263-stack/covid19-severity-prediction
|
f581adb2fccb12d5ab3f3c59ee120f484703edf5
|
[
"MIT"
] | null | null | null |
functions/update_modeling_results.py
|
rahul263-stack/covid19-severity-prediction
|
f581adb2fccb12d5ab3f3c59ee120f484703edf5
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from os.path import join as oj
import os
import pygsheets
import pandas as pd
import sys
import inspect
from datetime import datetime, timedelta
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
sys.path.append(parentdir + '/modeling')
import load_data
from fit_and_predict import fit_and_predict_ensemble
from functions import merge_data
from viz import viz_interactive
import matplotlib.pyplot as plt
import plotly.express as px
import plotly
def predictions_plot(df_county, NUM_DAYS_LIST, num_days_in_past, output_key):
today = datetime.today().strftime("%B %d")
day_past = (datetime.now() - timedelta(days=num_days_in_past)).strftime("%B %d")
pred_key = f'Predicted deaths by {today}\n(predicted on {day_past})'
deaths_key = f'Actual deaths by {today}'
d = df_county.rename(columns={
output_key: pred_key,
'tot_deaths': deaths_key,
})
minn = min(min(d[pred_key]), min(d[deaths_key])) + 1
maxx = max(max(d[pred_key]), max(d[deaths_key]))
px.colors.DEFAULT_PLOTLY_COLORS[:3] = ['rgb(239,138,98)','rgb(247,247,247)','rgb(103,169,207)']
fig = px.scatter(d,
x=deaths_key,
y=pred_key,
size='PopulationEstimate2018',
hover_name="CountyName",
hover_data=["CountyName", 'StateName'],
log_x=True, log_y=True)
fig.update_layout(shapes=[
dict(
type= 'line',
yref= 'y', y0=minn, y1=maxx,
xref= 'x', x0=minn, x1=maxx,
opacity=0.2
)
])
fig.update_layout(
paper_bgcolor='rgba(0,0,0,255)',
plot_bgcolor='rgba(0,0,0,255)',
template='plotly_dark',
title='County-level predictions'
)
plotly.offline.plot(fig, filename=oj(parentdir, 'results', 'predictions.html'), auto_open=False)
if __name__ == '__main__':
print('loading data...')
NUM_DAYS_LIST = [1, 2, 3, 4, 5, 6, 7]
df_county = load_data.load_county_level(data_dir=oj(parentdir, 'data'))
num_days_in_past = 3
output_key = f'Predicted Deaths {num_days_in_past}-day'
df_county = fit_and_predict_ensemble(df_county,
outcome='deaths',
mode='eval_mode',
target_day=np.array([num_days_in_past]),
output_key=output_key)
df_county[output_key] = [v[0] for v in df_county[output_key].values]
predictions_plot(df_county, NUM_DAYS_LIST, num_days_in_past, output_key)
| 35.947368
| 100
| 0.625915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 439
| 0.160688
|
a14c5c58cf2881b62cfe95e034f42cf5c934399c
| 4,582
|
py
|
Python
|
zun/tests/unit/common/test_rpc.py
|
wanghuiict/zun
|
2f4a3a2ba06d7ca83002418d4003ee5dece70952
|
[
"Apache-2.0"
] | 83
|
2016-09-14T22:06:26.000Z
|
2022-01-27T03:49:52.000Z
|
zun/tests/unit/common/test_rpc.py
|
wanghuiict/zun
|
2f4a3a2ba06d7ca83002418d4003ee5dece70952
|
[
"Apache-2.0"
] | 2
|
2017-06-22T21:58:47.000Z
|
2019-04-10T03:17:44.000Z
|
zun/tests/unit/common/test_rpc.py
|
wanghuiict/zun
|
2f4a3a2ba06d7ca83002418d4003ee5dece70952
|
[
"Apache-2.0"
] | 54
|
2016-09-29T10:16:02.000Z
|
2022-01-28T19:12:49.000Z
|
# Copyright 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_serialization import jsonutils as json
from zun.common import context
from zun.common import rpc
from zun.tests import base
class TestRpc(base.TestCase):
def test_add_extra_exmods(self):
rpc.EXTRA_EXMODS = []
rpc.add_extra_exmods('foo', 'bar')
self.assertEqual(['foo', 'bar'], rpc.EXTRA_EXMODS)
def test_clear_extra_exmods(self):
rpc.EXTRA_EXMODS = ['foo', 'bar']
rpc.clear_extra_exmods()
self.assertEqual(0, len(rpc.EXTRA_EXMODS))
def test_serialize_entity(self):
with mock.patch.object(json, 'to_primitive') as mock_prim:
rpc.JsonPayloadSerializer.serialize_entity('context', 'entity')
mock_prim.assert_called_once_with('entity', convert_instances=True)
class TestRequestContextSerializer(base.TestCase):
def setUp(self):
super(TestRequestContextSerializer, self).setUp()
self.mock_base = mock.Mock()
self.ser = rpc.RequestContextSerializer(self.mock_base)
self.ser_null = rpc.RequestContextSerializer(None)
def test_serialize_entity(self):
self.mock_base.serialize_entity.return_value = 'foo'
ser_ent = self.ser.serialize_entity('context', 'entity')
self.mock_base.serialize_entity.assert_called_once_with('context',
'entity')
self.assertEqual('foo', ser_ent)
def test_serialize_entity_null_base(self):
ser_ent = self.ser_null.serialize_entity('context', 'entity')
self.assertEqual('entity', ser_ent)
def test_deserialize_entity(self):
self.mock_base.deserialize_entity.return_value = 'foo'
deser_ent = self.ser.deserialize_entity('context', 'entity')
self.mock_base.deserialize_entity.assert_called_once_with('context',
'entity')
self.assertEqual('foo', deser_ent)
def test_deserialize_entity_null_base(self):
deser_ent = self.ser_null.deserialize_entity('context', 'entity')
self.assertEqual('entity', deser_ent)
def test_serialize_context(self):
context = mock.Mock()
self.ser.serialize_context(context)
context.to_dict.assert_called_once_with()
@mock.patch.object(context, 'RequestContext')
def test_deserialize_context(self, mock_req):
self.ser.deserialize_context('context')
mock_req.from_dict.assert_called_once_with('context')
class TestProfilerRequestContextSerializer(base.TestCase):
def setUp(self):
super(TestProfilerRequestContextSerializer, self).setUp()
self.ser = rpc.ProfilerRequestContextSerializer(mock.Mock())
@mock.patch('zun.common.rpc.profiler')
def test_serialize_context(self, mock_profiler):
prof = mock_profiler.get.return_value
prof.hmac_key = 'swordfish'
prof.get_base_id.return_value = 'baseid'
prof.get_id.return_value = 'parentid'
context = mock.Mock()
context.to_dict.return_value = {'project_id': 'test'}
self.assertEqual({
'project_id': 'test',
'trace_info': {
'hmac_key': 'swordfish',
'base_id': 'baseid',
'parent_id': 'parentid'
}
}, self.ser.serialize_context(context))
@mock.patch('zun.common.rpc.profiler')
def test_deserialize_context(self, mock_profiler):
serialized = {'project_id': 'test',
'trace_info': {
'hmac_key': 'swordfish',
'base_id': 'baseid',
'parent_id': 'parentid'}}
context = self.ser.deserialize_context(serialized)
self.assertEqual('test', context.project_id)
mock_profiler.init.assert_called_once_with(
hmac_key='swordfish', base_id='baseid', parent_id='parentid')
| 34.19403
| 78
| 0.654518
| 3,775
| 0.823876
| 0
| 0
| 1,402
| 0.30598
| 0
| 0
| 1,172
| 0.255784
|
a14da1829b09a4bac353d3762281e3ef271e99d4
| 26,935
|
py
|
Python
|
skidl/Pin.py
|
arjenroodselaar/skidl
|
0bf801bd3b74e6ef94bd9aa1b68eef756b568276
|
[
"MIT"
] | null | null | null |
skidl/Pin.py
|
arjenroodselaar/skidl
|
0bf801bd3b74e6ef94bd9aa1b68eef756b568276
|
[
"MIT"
] | null | null | null |
skidl/Pin.py
|
arjenroodselaar/skidl
|
0bf801bd3b74e6ef94bd9aa1b68eef756b568276
|
[
"MIT"
] | 1
|
2020-09-21T23:31:41.000Z
|
2020-09-21T23:31:41.000Z
|
# -*- coding: utf-8 -*-
# MIT license
#
# Copyright (C) 2018 by XESS Corp.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Handles part pins.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import range, super
from collections import defaultdict
from copy import copy
from enum import IntEnum
from future import standard_library
from .Alias import *
from .baseobj import SkidlBaseObject
from .defines import *
from .logger import erc_logger, logger
from .utilities import *
standard_library.install_aliases()
class Pin(SkidlBaseObject):
"""
A class for storing data about pins for a part.
Args:
attribs: Key/value pairs of attributes to add to the library.
Attributes:
nets: The electrical nets this pin is connected to (can be >1).
part: Link to the Part object this pin belongs to.
        func: Pin function such as Pin.types.INPUT.
do_erc: When false, the pin is not checked for ERC violations.
"""
# Various types of pins.
types = IntEnum(
"types",
(
"INPUT",
"OUTPUT",
"BIDIR",
"TRISTATE",
"PASSIVE",
"UNSPEC",
"PWRIN",
"PWROUT",
"OPENCOLL",
"OPENEMIT",
"PULLUP",
"PULLDN",
"NOCONNECT",
),
)
@classmethod
def add_type(cls, *pin_types):
"""
Add new pin type identifiers to the list of pin types.
Args:
pin_types: Strings identifying zero or more pin types.
"""
cls.types = IntEnum("types", [m.name for m in cls.types] + list(pin_types))
# Also add the pin types as attributes of the Pin class so
# existing SKiDL part libs will still work (e.g. Pin.INPUT
# still works as well as the newer Pin.types.INPUT).
for m in cls.types:
setattr(cls, m.name, m)
# Various drive levels a pin can output.
# The order of these is important! The first entry has the weakest
# drive and the drive increases for each successive entry.
drives = IntEnum(
"drives",
(
"NOCONNECT", # NC pin drive.
"NONE", # No drive capability (like an input pin).
"PASSIVE", # Small drive capability, but less than a pull-up or pull-down.
"PULLUPDN", # Pull-up or pull-down capability.
"ONESIDE", # Can pull high (open-emitter) or low (open-collector).
"TRISTATE", # Can pull high/low and be in high-impedance state.
"PUSHPULL", # Can actively drive high or low.
"POWER", # A power supply or ground line.
),
)
# Information about the various types of pins:
# function: A string describing the pin's function.
# drive: The drive capability of the pin.
# rcv_min: The minimum amount of drive the pin must receive to function.
# rcv_max: The maximum amount of drive the pin can receive and still function.
pin_info = {
types.INPUT: {
"function": "INPUT",
"func_str": "INPUT",
"drive": drives.NONE,
"max_rcv": drives.POWER,
"min_rcv": drives.PASSIVE,
},
types.OUTPUT: {
"function": "OUTPUT",
"func_str": "OUTPUT",
"drive": drives.PUSHPULL,
"max_rcv": drives.PASSIVE,
"min_rcv": drives.NONE,
},
types.BIDIR: {
"function": "BIDIRECTIONAL",
"func_str": "BIDIR",
"drive": drives.TRISTATE,
"max_rcv": drives.POWER,
"min_rcv": drives.NONE,
},
types.TRISTATE: {
"function": "TRISTATE",
"func_str": "TRISTATE",
"drive": drives.TRISTATE,
"max_rcv": drives.TRISTATE,
"min_rcv": drives.NONE,
},
types.PASSIVE: {
"function": "PASSIVE",
"func_str": "PASSIVE",
"drive": drives.PASSIVE,
"max_rcv": drives.POWER,
"min_rcv": drives.NONE,
},
types.PULLUP: {
"function": "PULLUP",
"func_str": "PULLUP",
"drive": drives.PULLUPDN,
"max_rcv": drives.POWER,
"min_rcv": drives.NONE,
},
types.PULLDN: {
"function": "PULLDN",
"func_str": "PULLDN",
"drive": drives.PULLUPDN,
"max_rcv": drives.POWER,
"min_rcv": drives.NONE,
},
types.UNSPEC: {
"function": "UNSPECIFIED",
"func_str": "UNSPEC",
"drive": drives.NONE,
"max_rcv": drives.POWER,
"min_rcv": drives.NONE,
},
types.PWRIN: {
"function": "POWER-IN",
"func_str": "PWRIN",
"drive": drives.NONE,
"max_rcv": drives.POWER,
"min_rcv": drives.POWER,
},
types.PWROUT: {
"function": "POWER-OUT",
"func_str": "PWROUT",
"drive": drives.POWER,
"max_rcv": drives.PASSIVE,
"min_rcv": drives.NONE,
},
types.OPENCOLL: {
"function": "OPEN-COLLECTOR",
"func_str": "OPENCOLL",
"drive": drives.ONESIDE,
"max_rcv": drives.TRISTATE,
"min_rcv": drives.NONE,
},
types.OPENEMIT: {
"function": "OPEN-EMITTER",
"func_str": "OPENEMIT",
"drive": drives.ONESIDE,
"max_rcv": drives.TRISTATE,
"min_rcv": drives.NONE,
},
types.NOCONNECT: {
"function": "NO-CONNECT",
"func_str": "NOCONNECT",
"drive": drives.NOCONNECT,
"max_rcv": drives.NOCONNECT,
"min_rcv": drives.NOCONNECT,
},
}
def __init__(self, **attribs):
super().__init__()
self.nets = []
self.part = None
self.name = ""
self.num = ""
self.do_erc = True
self.func = self.types.UNSPEC # Pin function defaults to unspecified.
# Attach additional attributes to the pin.
for k, v in list(attribs.items()):
setattr(self, k, v)
def copy(self, num_copies=None, **attribs):
"""
Return copy or list of copies of a pin including any net connection.
Args:
num_copies: Number of copies to make of pin.
Keyword Args:
attribs: Name/value pairs for setting attributes for the pin.
Notes:
An instance of a pin can be copied just by calling it like so::
p = Pin() # Create a pin.
p_copy = p() # This is a copy of the pin.
"""
# If the number of copies is None, then a single copy will be made
# and returned as a scalar (not a list). Otherwise, the number of
# copies will be set by the num_copies parameter or the number of
# values supplied for each part attribute.
num_copies_attribs = find_num_copies(**attribs)
return_list = (num_copies is not None) or (num_copies_attribs > 1)
if num_copies is None:
num_copies = max(1, num_copies_attribs)
# Check that a valid number of copies is requested.
if not isinstance(num_copies, int):
log_and_raise(
logger,
ValueError,
"Can't make a non-integer number ({}) of copies of a pin!".format(
num_copies
),
)
if num_copies < 0:
log_and_raise(
logger,
ValueError,
"Can't make a negative number ({}) of copies of a pin!".format(
num_copies
),
)
copies = []
for _ in range(num_copies):
# Make a shallow copy of the pin.
cpy = copy(self)
# The copy is not on a net, yet.
cpy.nets = []
# Connect the new pin to the same net as the original.
if self.nets:
self.nets[0] += cpy
# Copy the aliases for the pin if it has them.
cpy.aliases = self.aliases
# Attach additional attributes to the pin.
for k, v in list(attribs.items()):
setattr(cpy, k, v)
copies.append(cpy)
# Return a list of the copies made or just a single copy.
if return_list:
return copies
return copies[0]
# Make copies with the multiplication operator or by calling the object.
__call__ = copy
def __mul__(self, num_copies):
if num_copies is None:
num_copies = 0
return self.copy(num_copies=num_copies)
__rmul__ = __mul__
def __getitem__(self, *ids):
"""
Return the pin if the indices resolve to a single index of 0.
Args:
ids: A list of indices. These can be individual
numbers, net names, nested lists, or slices.
Returns:
The pin, otherwise None or raises an Exception.
"""
# Resolve the indices.
indices = list(set(expand_indices(0, self.width - 1, False, *ids)))
if indices is None or len(indices) == 0:
return None
if len(indices) > 1:
log_and_raise(
logger, ValueError, "Can't index a pin with multiple indices."
)
if indices[0] != 0:
log_and_raise(logger, ValueError, "Can't use a non-zero index for a pin.")
return self
def __setitem__(self, ids, *pins_nets_buses):
"""
You can't assign to Pins. You must use the += operator.
This method is a work-around that allows the use of the += for making
connections to pins while prohibiting direct assignment. Python
processes something like net[0] += Net() as follows::
1. Pin.__getitem__ is called with '0' as the index. This
returns a single Pin.
2. The Pin.__iadd__ method is passed the pin and
the thing to connect to it (a Net in this case). This
method makes the actual connection to the net. Then
it creates an iadd_flag attribute in the object it returns.
3. Finally, Pin.__setitem__ is called. If the iadd_flag attribute
is true in the passed argument, then __setitem__ was entered
as part of processing the += operator. If there is no
iadd_flag attribute, then __setitem__ was entered as a result
of using a direct assignment, which is not allowed.
"""
# If the iadd_flag is set, then it's OK that we got
# here and don't issue an error. Also, delete the flag.
if getattr(pins_nets_buses[0], "iadd_flag", False):
del pins_nets_buses[0].iadd_flag
return
# No iadd_flag or it wasn't set. This means a direct assignment
# was made to the pin, which is not allowed.
log_and_raise(logger, TypeError, "Can't assign to a Net! Use the += operator.")
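    # A usage sketch of the += mechanism described in the docstring above (the pin
    # and net names are placeholders for illustration, not additional API):
    #
    #     p = Pin()
    #     n = Net()
    #     p[0] += n       # __getitem__ -> __iadd__ (connect) -> __setitem__ allowed
    #     # p[0] = n      # direct assignment -> TypeError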
def __iter__(self):
"""
Return an iterator for stepping through the pin.
"""
# You can only iterate a Pin one time.
return (self for i in [0]) # Return generator expr.
def is_connected(self):
"""Return true if a pin is connected to a net (but not a no-connect net)."""
from .Net import Net, NCNet
if not self.nets:
# This pin is not connected to any nets.
return False
# Get the types of things this pin is connected to.
net_types = set([type(n) for n in self.nets])
if set([NCNet]) == net_types:
# This pin is only connected to no-connect nets.
return False
if set([Net]) == net_types:
# This pin is only connected to normal nets.
return True
if set([Net, NCNet]) == net_types:
# Can't be connected to both normal and no-connect nets!
log_and_raise(
logger,
ValueError,
"{} is connected to both normal and no-connect nets!".format(
self.erc_desc()
),
)
# This is just strange...
log_and_raise(
logger,
ValueError,
"{} is connected to something strange: {}.".format(
self.erc_desc(), self.nets
),
)
def is_attached(self, pin_net_bus):
"""Return true if this pin is attached to the given pin, net or bus."""
from .Net import Net
from .Pin import Pin
if not self.is_connected():
return False
if isinstance(pin_net_bus, Pin):
if pin_net_bus.is_connected():
return pin_net_bus.net.is_attached(self.net)
return False
if isinstance(pin_net_bus, Net):
return pin_net_bus.is_attached(self.net)
if isinstance(pin_net_bus, Bus):
for net in pin_net_bus[:]:
if self.net.is_attached(net):
return True
return False
log_and_raise(
logger,
ValueError,
"Pins can't be attached to {}!".format(type(pin_net_bus)),
)
def connect(self, *pins_nets_buses):
"""
Return the pin after connecting it to one or more nets or pins.
Args:
pins_nets_buses: One or more Pin, Net or Bus objects or
lists/tuples of them.
Returns:
The updated pin with the new connections.
Notes:
You can connect nets or pins to a pin like so::
p = Pin() # Create a pin.
n = Net() # Create a net.
p += net # Connect the net to the pin.
"""
from .Net import Net
from .ProtoNet import ProtoNet
# Go through all the pins and/or nets and connect them to this pin.
for pn in expand_buses(flatten(pins_nets_buses)):
if isinstance(pn, ProtoNet):
pn += self
elif isinstance(pn, Pin):
# Connecting pin-to-pin.
if self.is_connected():
# If self is already connected to a net, then add the
# other pin to the same net.
self.nets[0] += pn
elif pn.is_connected():
# If self is unconnected but the other pin is, then
# connect self to the other pin's net.
pn.nets[0] += self
else:
# Neither pin is connected to a net, so create a net
# in the same circuit as the pin and attach both to it.
Net(circuit=self.part.circuit).connect(self, pn)
elif isinstance(pn, Net):
# Connecting pin-to-net, so just connect the pin to the net.
pn += self
else:
log_and_raise(
logger,
TypeError,
"Cannot attach non-Pin/non-Net {} to {}.".format(
type(pn), self.erc_desc()
),
)
# Set the flag to indicate this result came from the += operator.
self.iadd_flag = True # pylint: disable=attribute-defined-outside-init
return self
# Connect a net to a pin using the += operator.
__iadd__ = connect
def disconnect(self):
"""Disconnect this pin from all nets."""
if not self.net:
return
for n in self.nets:
n.disconnect(self)
self.nets = []
def get_nets(self):
"""Return a list containing the Net objects connected to this pin."""
return self.nets
def get_pins(self):
"""Return a list containing this pin."""
return to_list(self)
def create_network(self):
"""Create a network from a single pin."""
from .Network import Network
ntwk = Network()
ntwk.append(self)
return ntwk
def __and__(self, obj):
"""Attach a pin and another part/pin/net in serial."""
from .Network import Network
return Network(self) & obj
def __rand__(self, obj):
"""Attach a pin and another part/pin/net in serial."""
from .Network import Network
return obj & Network(self)
def __or__(self, obj):
"""Attach a pin and another part/pin/net in parallel."""
from .Network import Network
return Network(self) | obj
def __ror__(self, obj):
"""Attach a pin and another part/pin/net in parallel."""
from .Network import Network
return obj | Network(self)
def chk_conflict(self, other_pin):
"""Check for electrical rule conflicts between this pin and another."""
if not self.do_erc or not other_pin.do_erc:
return
[erc_result, erc_msg] = conflict_matrix[self.func][other_pin.func]
# Return if the pins are compatible.
if erc_result == OK:
return
# Otherwise, generate an error or warning message.
if not erc_msg:
erc_msg = " ".join(
(
self.pin_info[self.func]["function"],
"connected to",
other_pin.pin_info[other_pin.func]["function"],
)
)
n = self.net.name
p1 = self.erc_desc()
p2 = other_pin.erc_desc()
msg = "Pin conflict on net {n}, {p1} <==> {p2} ({erc_msg})".format(**locals())
if erc_result == WARNING:
erc_logger.warning(msg)
else:
erc_logger.error(msg)
def erc_desc(self):
"""Return a string describing this pin for ERC."""
desc = "{func} pin {num}/{name} of {part}".format(
part=self.part.erc_desc(),
num=self.num,
name=self.name,
func=Pin.pin_info[self.func]["function"],
)
return desc
def get_pin_info(self):
num = getattr(self, "num", "???")
names = [getattr(self, "name", "???")]
names.extend(self.aliases)
names = ",".join(names)
func = Pin.pin_info[self.func]["function"]
return num, names, func
def __str__(self):
"""Return a description of this pin as a string."""
ref = getattr(self.part, "ref", "???")
num, names, func = self.get_pin_info()
return "Pin {ref}/{num}/{names}/{func}".format(**locals())
__repr__ = __str__
def export(self):
"""Return a string to recreate a Pin object."""
attribs = []
for k in ["num", "name", "func", "do_erc"]:
v = getattr(self, k, None)
if v:
if k == "func":
# Assign the pin function using the actual name of the
# function, not its numerical value (in case that changes
# in the future if more pin functions are added).
v = "Pin.types." + Pin.pin_info[v]["func_str"]
else:
v = repr(v)
attribs.append("{}={}".format(k, v))
return "Pin({})".format(",".join(attribs))
@property
def net(self):
"""Return one of the nets the pin is connected to."""
if self.nets:
return self.nets[0]
return None
@property
def width(self):
"""Return width of a Pin, which is always 1."""
return 1
@property
def drive(self):
"""
Get, set and delete the drive strength of this pin.
"""
try:
return self._drive
except AttributeError:
# Drive unspecified, so use default drive for this type of pin.
return self.pin_info[self.func]["drive"]
@drive.setter
def drive(self, drive):
self._drive = drive
@drive.deleter
def drive(self):
try:
del self._drive
except AttributeError:
pass
def __bool__(self):
"""Any valid Pin is True."""
return True
__nonzero__ = __bool__ # Python 2 compatibility.
##############################################################################
class PhantomPin(Pin):
"""
A pin type that exists solely to tie two pinless nets together.
It will not participate in generating any netlists.
"""
def __init__(self, **attribs):
super().__init__(**attribs)
self.nets = []
self.part = None
self.do_erc = False
##############################################################################
class PinList(list):
"""
A list of Pin objects that's meant to look something like a Pin to a Part.
This is used for vector I/O of XSPICE parts.
"""
def __init__(self, num, name, part):
super().__init__()
# The list needs the following attributes to behave like a Pin.
self.num = num
self.name = name
self.part = part
def __getitem__(self, i):
"""
Get a Pin from the list. Add Pin objects to the list if they don't exist.
"""
if i >= len(self):
self.extend([Pin(num=j, part=self.part) for j in range(len(self), i + 1)])
return super().__getitem__(i)
def copy(self):
"""
Return a copy of a PinList for use when a Part is copied.
"""
cpy = PinList(self.num, self.name, self.part)
for pin in self:
cpy += pin.copy()
return cpy
def disconnect(self):
"""Disconnect all the pins in the list."""
for pin in self:
pin.disconnect()
##############################################################################
# This will make all the Pin.drive members into attributes of the Pin class
# so things like Pin.INPUT will work as well as Pin.types.INPUT.
Pin.add_type()
# Create the pin conflict matrix as a defaultdict of defaultdicts which
# returns OK if the given element is not in the matrix. This would indicate
# the pin types used to index that element have no contention if connected.
conflict_matrix = defaultdict(lambda: defaultdict(lambda: [OK, ""]))
# Add the non-OK pin connections to the matrix.
conflict_matrix[Pin.types.OUTPUT][Pin.types.OUTPUT] = [ERROR, ""]
conflict_matrix[Pin.types.TRISTATE][Pin.types.OUTPUT] = [WARNING, ""]
conflict_matrix[Pin.types.UNSPEC][Pin.types.INPUT] = [WARNING, ""]
conflict_matrix[Pin.types.UNSPEC][Pin.types.OUTPUT] = [WARNING, ""]
conflict_matrix[Pin.types.UNSPEC][Pin.types.BIDIR] = [WARNING, ""]
conflict_matrix[Pin.types.UNSPEC][Pin.types.TRISTATE] = [WARNING, ""]
conflict_matrix[Pin.types.UNSPEC][Pin.types.PASSIVE] = [WARNING, ""]
conflict_matrix[Pin.types.UNSPEC][Pin.types.PULLUP] = [WARNING, ""]
conflict_matrix[Pin.types.UNSPEC][Pin.types.PULLDN] = [WARNING, ""]
conflict_matrix[Pin.types.UNSPEC][Pin.types.UNSPEC] = [WARNING, ""]
conflict_matrix[Pin.types.PWRIN][Pin.types.TRISTATE] = [WARNING, ""]
conflict_matrix[Pin.types.PWRIN][Pin.types.UNSPEC] = [WARNING, ""]
conflict_matrix[Pin.types.PWROUT][Pin.types.OUTPUT] = [ERROR, ""]
conflict_matrix[Pin.types.PWROUT][Pin.types.BIDIR] = [WARNING, ""]
conflict_matrix[Pin.types.PWROUT][Pin.types.TRISTATE] = [ERROR, ""]
conflict_matrix[Pin.types.PWROUT][Pin.types.UNSPEC] = [WARNING, ""]
conflict_matrix[Pin.types.PWROUT][Pin.types.PWROUT] = [ERROR, ""]
conflict_matrix[Pin.types.OPENCOLL][Pin.types.OUTPUT] = [ERROR, ""]
conflict_matrix[Pin.types.OPENCOLL][Pin.types.TRISTATE] = [ERROR, ""]
conflict_matrix[Pin.types.OPENCOLL][Pin.types.UNSPEC] = [WARNING, ""]
conflict_matrix[Pin.types.OPENCOLL][Pin.types.PWROUT] = [ERROR, ""]
conflict_matrix[Pin.types.OPENEMIT][Pin.types.OUTPUT] = [ERROR, ""]
conflict_matrix[Pin.types.OPENEMIT][Pin.types.BIDIR] = [WARNING, ""]
conflict_matrix[Pin.types.OPENEMIT][Pin.types.TRISTATE] = [WARNING, ""]
conflict_matrix[Pin.types.OPENEMIT][Pin.types.UNSPEC] = [WARNING, ""]
conflict_matrix[Pin.types.OPENEMIT][Pin.types.PWROUT] = [ERROR, ""]
conflict_matrix[Pin.types.NOCONNECT][Pin.types.INPUT] = [ERROR, ""]
conflict_matrix[Pin.types.NOCONNECT][Pin.types.OUTPUT] = [ERROR, ""]
conflict_matrix[Pin.types.NOCONNECT][Pin.types.BIDIR] = [ERROR, ""]
conflict_matrix[Pin.types.NOCONNECT][Pin.types.TRISTATE] = [ERROR, ""]
conflict_matrix[Pin.types.NOCONNECT][Pin.types.PASSIVE] = [ERROR, ""]
conflict_matrix[Pin.types.NOCONNECT][Pin.types.PULLUP] = [ERROR, ""]
conflict_matrix[Pin.types.NOCONNECT][Pin.types.PULLDN] = [ERROR, ""]
conflict_matrix[Pin.types.NOCONNECT][Pin.types.UNSPEC] = [ERROR, ""]
conflict_matrix[Pin.types.NOCONNECT][Pin.types.PWRIN] = [ERROR, ""]
conflict_matrix[Pin.types.NOCONNECT][Pin.types.PWROUT] = [ERROR, ""]
conflict_matrix[Pin.types.NOCONNECT][Pin.types.OPENCOLL] = [ERROR, ""]
conflict_matrix[Pin.types.NOCONNECT][Pin.types.OPENEMIT] = [ERROR, ""]
conflict_matrix[Pin.types.NOCONNECT][Pin.types.NOCONNECT] = [ERROR, ""]
conflict_matrix[Pin.types.PULLUP][Pin.types.PULLUP] = [
WARNING,
"Multiple pull-ups connected.",
]
conflict_matrix[Pin.types.PULLDN][Pin.types.PULLDN] = [
WARNING,
"Multiple pull-downs connected.",
]
conflict_matrix[Pin.types.PULLUP][Pin.types.PULLDN] = [
ERROR,
"Pull-up connected to pull-down.",
]
# Fill-in the other half of the symmetrical contention matrix by looking
# for entries that != OK at position (r,c) and copying them to position
# (c,r).
cols = list(conflict_matrix.keys())
for c in cols:
for r in list(conflict_matrix[c].keys()):
conflict_matrix[r][c] = conflict_matrix[c][r]
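# Illustrative lookup, a sketch rather than part of the library: because the matrix is
# a defaultdict of defaultdicts, any pin-type pair not listed above resolves to
# [OK, ""], while listed pairs return their ERC severity and message. (Looking up a
# missing pair inserts a default entry, which is harmless here.)
def _example_conflict_lookup():
    ok_pair = conflict_matrix[Pin.types.INPUT][Pin.types.OUTPUT]    # unlisted -> [OK, ""]
    bad_pair = conflict_matrix[Pin.types.OUTPUT][Pin.types.OUTPUT]  # listed -> [ERROR, ""]
    return ok_pair, bad_pair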
| 34.57638
| 87
| 0.569742
| 21,292
| 0.790496
| 0
| 0
| 1,338
| 0.049675
| 0
| 0
| 11,516
| 0.427548
|
a14dc76d87023f8e5ab3f4a7babd9708c41bf004
| 34,030
|
py
|
Python
|
Project1/cl1_p1_wsd.py
|
Sanghyun-Hong/NLPProjects
|
9f81fa680946648f64ac25e5ca8197e9f3386deb
|
[
"MIT"
] | null | null | null |
Project1/cl1_p1_wsd.py
|
Sanghyun-Hong/NLPProjects
|
9f81fa680946648f64ac25e5ca8197e9f3386deb
|
[
"MIT"
] | null | null | null |
Project1/cl1_p1_wsd.py
|
Sanghyun-Hong/NLPProjects
|
9f81fa680946648f64ac25e5ca8197e9f3386deb
|
[
"MIT"
] | null | null | null |
import numpy as np
import operator
# SHHONG: custom modules imported
import json
import random
import itertools
from math import pow, log
from collections import Counter
import os
import sys
sys.stdout = open(os.devnull, 'w')
"""
CMSC723 / INST725 / LING723 -- Fall 2016
Project 1: Implementing Word Sense Disambiguation Systems
"""
"""
read one of train, dev, test subsets
subset - one of train, dev, test
output is a tuple of three lists
labels: one of the 6 possible senses <cord, division, formation, phone, product, text >
targets: the index within the text of the token to be disambiguated
texts: a list of tokenized and normalized text input (note that there can be multiple sentences)
"""
import nltk
#### added dev_manual to the subset of allowable files
def read_dataset(subset):
labels = []
texts = []
targets = []
if subset in ['train', 'dev', 'test', 'dev_manual']:
with open('data/wsd_'+subset+'.txt') as inp_hndl:
for example in inp_hndl:
label, text = example.strip().split('\t')
text = nltk.word_tokenize(text.lower().replace('" ','"'))
if 'line' in text:
ambig_ix = text.index('line')
elif 'lines' in text:
ambig_ix = text.index('lines')
else:
                    raise ValueError("no 'line' or 'lines' token found in: %s" % ' '.join(text))
targets.append(ambig_ix)
labels.append(label)
texts.append(text)
return (labels, targets, texts)
else:
print '>>>> invalid input !!! <<<<<'
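# Minimal sketch of the shapes read_dataset returns (illustrative only; it assumes the
# 'train' split exists under data/ as described in the docstring above).
def _example_read_dataset_shapes():
    labels, targets, texts = read_dataset('train')
    # one label, one target index and one token list per example
    assert len(labels) == len(targets) == len(texts)
    # each target points at the ambiguous 'line'/'lines' token inside its text
    for tgt, toks in zip(targets, texts):
        assert toks[tgt] in ('line', 'lines')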
"""
computes f1-score of the classification accuracy
gold_labels - is a list of the gold labels
predicted_labels - is a list of the predicted labels
output is a tuple of the micro averaged score and the macro averaged score
"""
import sklearn.metrics
#### changed method name from eval because of naming conflict with python keyword
def eval_performance(gold_labels, predicted_labels):
return ( sklearn.metrics.f1_score(gold_labels, predicted_labels, average='micro'),
sklearn.metrics.f1_score(gold_labels, predicted_labels, average='macro') )
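# Tiny worked example of micro vs. macro averaging; the gold/predicted labels below are
# made up purely to show that the two scores can differ on imbalanced data.
def _example_eval_performance():
    gold = ['phone', 'phone', 'phone', 'cord']
    pred = ['phone', 'phone', 'phone', 'phone']
    micro, macro = eval_performance(gold, pred)
    # micro weights every instance equally; macro averages the per-class F1 scores
    return micro, macro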
"""
a helper method that takes a list of predictions and writes them to a file (1 prediction per line)
predictions - list of predictions (strings)
file_name - name of the output file
"""
def write_predictions(predictions, file_name):
with open(file_name, 'w') as outh:
for p in predictions:
outh.write(p+'\n')
"""
Trains a naive bayes model with bag of words features and computes the accuracy on the test set
train_texts, train_targets, train_labels are as described in read_dataset above
The same thing applies to the rest of the parameters.
"""
def run_bow_naivebayes_classifier(train_texts, train_targets, train_labels,
dev_texts, dev_targets, dev_labels, test_texts, test_targets, test_labels):
# control variables
improved = True
alpha = 0.04
silent = True
# Part 2.1 (c_s/c_sw)
c_s = dict.fromkeys(set(train_labels), 0)
multiples = list(itertools.product(c_s.keys(), ['time', 'loss', 'export']))
c_sw = dict.fromkeys(multiples, 0)
t_w = [each_word for each_text in train_texts for each_word in each_text]
multiples = list(itertools.product(c_s.keys(), t_w))
t_sw = dict.fromkeys(multiples, 0)
for idx, label in enumerate(train_labels):
cur_text = train_texts[idx]
# compute c_s
c_s[label] += len(cur_text)
# compute c_sw
time_cnt = cur_text.count('time')
loss_cnt = cur_text.count('loss')
export_cnt = cur_text.count('export')
c_sw[(label, 'time')] += time_cnt
c_sw[(label, 'loss')] += loss_cnt
c_sw[(label, 'export')] += export_cnt
        # compute t_sw (total occurrences): (label, word) -> count
for each_word in cur_text:
t_sw[(label, each_word)] += 1
# total # of distinct words: will be used for smoothing
t_dw = Counter(t_w)
if not silent:
print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
format('s', 'cord', 'division', 'formation', 'phone', 'product', 'text')
print '------------------------------------------------------------------------------------------'
print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
format('c(s)', c_s['cord'], c_s['division'], c_s['formation'], c_s['phone'], c_s['product'], c_s['text'])
print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
format('c(s,time)', c_sw[('cord', 'time')], c_sw[('division', 'time')], c_sw[('formation', 'time')], \
c_sw[('phone', 'time')], c_sw[('product', 'time')], c_sw[('text', 'time')])
print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
format('c(s,loss)', c_sw[('cord', 'loss')], c_sw[('division', 'loss')], c_sw[('formation', 'loss')], \
c_sw[('phone', 'loss')], c_sw[('product', 'loss')], c_sw[('text', 'loss')])
print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
format('c(s,export)', c_sw[('cord', 'export')], c_sw[('division', 'export')], c_sw[('formation', 'export')], \
c_sw[('phone', 'export')], c_sw[('product', 'export')], c_sw[('text', 'export')])
print '------------------------------------------------------------------------------------------'
print ' total distinct words: %d ' % (len(t_dw.keys()))
# Part 2.2 (p_s/p_ws)
total_occurances = float(sum(c_s.values()))
label_count = Counter(train_labels)
p_s = {key: (value / float( sum( label_count.values() )) ) for key, value in label_count.iteritems()}
if improved:
p_ws = {key: ( (value + alpha) / \
(float(c_s[key[0]]) + alpha*len(t_dw.keys())) ) \
for key, value in c_sw.iteritems()}
t_ws = {key: ( (value + alpha) / \
(float(c_s[key[0]]) + alpha*len(t_dw.keys())) ) \
for key, value in t_sw.iteritems()}
else:
p_ws = {key: (value / float(c_s[key[0]])) for key, value in c_sw.iteritems()}
t_ws = {key: (value / float(c_s[key[0]])) for key, value in t_sw.iteritems()}
# normalization steps
norm_denominators = {
'time': 0.0,
'loss': 0.0,
'export': 0.0
}
for key, value in p_ws.iteritems():
norm_denominators[key[1]] += value
p_ws_norm = {key: (value / norm_denominators[key[1]]) for key, value in p_ws.iteritems()}
p_ws = p_ws_norm
if not silent:
print '------------------------------------------------------------------------------------------'
print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
format('p(s)', p_s['cord'], p_s['division'], p_s['formation'], p_s['phone'], p_s['product'], p_s['text'])
print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
format('p(time|s)', p_ws[('cord', 'time')], p_ws[('division', 'time')], p_ws[('formation', 'time')], \
p_ws[('phone', 'time')], p_ws[('product', 'time')], p_ws[('text', 'time')])
print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
format('p(loss|s)', p_ws[('cord', 'loss')], p_ws[('division', 'loss')], p_ws[('formation', 'loss')], \
p_ws[('phone', 'loss')], p_ws[('product', 'loss')], p_ws[('text', 'loss')])
print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
format('p(export|s)', p_ws[('cord', 'export')], p_ws[('division', 'export')], p_ws[('formation', 'export')], \
p_ws[('phone', 'export')], p_ws[('product', 'export')], p_ws[('text', 'export')])
# Part 2.3 (p_sxd, on the 1st line on test set)
p_sxd = dict.fromkeys(c_s.keys(), 0.0)
lp_sxd = dict.fromkeys(c_s.keys(), 0.0)
cur_text = dev_texts[0]
for key in p_sxd.keys():
# compute p for each class
if improved:
tp_sxd = p_s[key]
tlp_sxd = log(p_s[key])
else:
tp_sxd = p_s[key]
# compute for each word
for each_word in cur_text:
if t_ws.has_key((key, each_word)):
if improved:
tp_sxd *= t_ws[(key, each_word)]
tlp_sxd += log(t_ws[(key, each_word)])
else:
tp_sxd *= t_ws[(key, each_word)]
# add to the dict
if improved:
p_sxd[key] = tp_sxd
lp_sxd[key] = tlp_sxd
else:
p_sxd[key] = tp_sxd
if not silent:
print '------------------------------------------------------------------------------------------'
print ' %s | %s | %s | %s | %s | %s | %s |' % \
('p(s|X)', p_sxd['cord'], p_sxd['division'], p_sxd['formation'], \
p_sxd['phone'], p_sxd['product'], p_sxd['text'])
print '------------------------------------------------------------------------------------------'
print ' 1st label in dev : %s ' % (dev_labels[0])
print ' 1st text in dev[:5]: %s ' % (dev_texts[0][:5])
if improved:
print '------------------------------------------------------------------------------------------'
print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
format('log(p(s|X))', lp_sxd['cord'], lp_sxd['division'], lp_sxd['formation'], \
lp_sxd['phone'], lp_sxd['product'], lp_sxd['text'])
# Part 2.4: compute all the prob on the test dataset
p_sx = list()
for idx, text in enumerate(test_texts):
t_prob = dict.fromkeys(c_s.keys(), 0.0)
for key in t_prob.keys():
# compute p for each class
if improved:
tp_sxt = log(p_s[key])
else:
tp_sxt = p_s[key]
for each_word in text:
if t_ws.has_key((key, each_word)):
if improved:
tp_sxt += log(t_ws[(key, each_word)])
else:
tp_sxt *= t_ws[(key, each_word)]
# add to the dict
t_prob[key] = tp_sxt
# add dict to the entire list
p_sx.append(t_prob)
# Part 2.4 (run the classifier for all)
labels_predicted = list()
for idx, label in enumerate(test_labels):
maximum_probs = max(p_sx[idx].values())
label_prediction = [key for key, value in p_sx[idx].iteritems() if value == maximum_probs]
label_prediction = random.choice(label_prediction)
# based on the prob
labels_predicted.append(label_prediction)
naivebayes_performance = eval_performance(test_labels, labels_predicted)
# save the implementation to the file
with open('q4p2.txt', 'wb') as q4p2_output:
for each_label in labels_predicted:
q4p2_output.write(each_label+'\n')
# Part 2.5 (do more tuning for the classifier)
# - Laplace smoothing
# - Log likelihoods
if not silent:
print '------------------------------------------------------------------------------------------'
return 'Naive Bayes: micro/macro = [%.2f, %.2f] @ (alpha: %s)' % \
(naivebayes_performance[0]*100, naivebayes_performance[1]*100, alpha)
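# Hedged illustration of the add-alpha (Laplace) smoothing used above:
# p(w|s) = (count(s, w) + alpha) / (count(s) + alpha * |V|).
# The counts and vocabulary size are made-up numbers, only to show the arithmetic.
def _example_add_alpha_smoothing():
    alpha = 0.04
    count_sw = 3         # times word w was seen with sense s
    count_s = 200        # total tokens seen with sense s
    vocab_size = 5000    # number of distinct words |V|
    return (count_sw + alpha) / (count_s + alpha * vocab_size)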
## extract all the distinct words from a set of texts
## return a dictionary {word:index} that maps each word to a unique index
def extract_all_words(texts,prev_set=set()):
all_words = prev_set
for t in texts:
for w in t:
all_words.add(w)
all_words_idx = {}
for i,w in enumerate(all_words):
all_words_idx[w] = i
return all_words_idx
## extract all distinct labels from a dataset
## return a dictionary {label:index} that maps each label to a unique index
def extract_all_labels(labels):
distinct_labels = list(set(labels))
all_labels_idx = {}
for i,l in enumerate(distinct_labels):
all_labels_idx[l] = i
return all_labels_idx
## construct a bow feature matrix for a set of instances
## the returned matrix has the size NUM_INSTANCES X NUM_FEATURES
def extract_features(all_words_idx,all_labels_idx,texts):
NUM_FEATURES = len(all_words_idx.keys())
NUM_INSTANCES = len(texts)
features_matrix = np.zeros((NUM_INSTANCES,NUM_FEATURES))
for i,instance in enumerate(texts):
for word in instance:
if all_words_idx.get(word,None) is None:
continue
features_matrix[i][all_words_idx[word]] += 1
return features_matrix
## compute the feature vector for a set of words and a given label
## the features are computed as described in Slide #19 of:
## http://www.cs.umd.edu/class/fall2016/cmsc723/slides/slides_02.pdf
def get_features_for_label(instance,label,class_labels):
num_labels = len(class_labels)
num_feats = len(instance)
feats = np.zeros(len(instance)*num_labels+1)
assert len(feats[num_feats*label:num_feats*label+num_feats]) == len(instance)
feats[num_feats*label:num_feats*label+num_feats] = instance
return feats
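# Sketch of the blocked feature layout produced by get_features_for_label: the
# bag-of-words vector is copied into the slot owned by the chosen label and every
# other slot stays zero. The 3-word / 2-label sizes below are assumptions.
def _example_blocked_features():
    instance = np.array([1.0, 0.0, 2.0])   # toy bag-of-words counts
    class_labels = [0, 1]
    feats = get_features_for_label(instance, 1, class_labels)
    assert len(feats) == 3 * 2 + 1              # num_words * num_labels + 1 bias slot
    assert list(feats[3:6]) == [1.0, 0.0, 2.0]  # label 1 owns positions 3..5
    return feats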
## get the predicted label for a given instance
## the predicted label is the one with the highest dot product of theta*feature_vector
## return the predicted label, the dot product scores for all labels and the features computed for all labels for that instance
def get_predicted_label(inst,class_labels,theta):
all_labels_scores = {}
all_labels_features = {}
for lbl in class_labels:
feat_vec = get_features_for_label(inst,lbl,class_labels)
assert len(feat_vec) == len(theta)
all_labels_scores[lbl] = np.dot(feat_vec,theta)
predicted_label = max(all_labels_scores.iteritems(), key=operator.itemgetter(1))[0]
return predicted_label
## train the perceptron by iterating over the entire training dataset
## the algorithm is an implementation of the pseudocode from Slide #23 of:
## http://www.cs.umd.edu/class/fall2016/cmsc723/slides/slides_03.pdf
def train_perceptron(train_features,train_labels,class_labels,num_features):
NO_MAX_ITERATIONS = 20
np.random.seed(0)
theta = np.zeros(num_features)
print '# Training Instances:',len(train_features)
num_iterations = 0
cnt_updates_total = 0
cnt_updates_prev = 0
m = np.zeros(num_features)
print '# Total Updates / # Current Iteration Updates:'
for piter in range(NO_MAX_ITERATIONS):
shuffled_indices = np.arange(len(train_features))
np.random.shuffle(shuffled_indices)
cnt_updates_crt = 0
for i in shuffled_indices:
inst = train_features[i]
actual_label = train_labels[i]
predicted_label = get_predicted_label(inst,class_labels,theta)
if predicted_label != actual_label:
cnt_updates_total += 1
cnt_updates_crt += 1
theta = theta + get_features_for_label(inst,actual_label,class_labels) - get_features_for_label(inst,predicted_label,class_labels)
m = m + theta
num_iterations += 1
print cnt_updates_total,'/',cnt_updates_crt
if cnt_updates_crt == 0:
break
theta = m/cnt_updates_total
print '# Iterations:',piter
print '# Iterations over instances:',num_iterations
print '# Total Updates:',cnt_updates_total
return theta
## return the predictions of the perceptron on a test set
def test_perceptron(theta,test_features,test_labels,class_labels):
predictions = []
for inst in test_features:
predicted_label = get_predicted_label(inst,class_labels,theta)
predictions.append(predicted_label)
return predictions
"""
Trains a perceptron model with bag of words features and computes the accuracy on the test set
train_texts, train_targets, train_labels are as described in read_dataset above
The same thing applies to the reset of the parameters.
"""
def run_bow_perceptron_classifier(train_texts, train_targets,train_labels,
dev_texts, dev_targets,dev_labels, test_texts, test_targets, test_labels):
all_words_idx = extract_all_words(train_texts)
all_labels_idx = extract_all_labels(train_labels)
num_features = len(all_words_idx.keys())*len(all_labels_idx.keys())+1
class_labels = all_labels_idx.values()
train_features = extract_features(all_words_idx,all_labels_idx,train_texts)
train_labels = map(lambda e: all_labels_idx[e],train_labels)
test_features = extract_features(all_words_idx,all_labels_idx,test_texts)
test_labels = map(lambda e: all_labels_idx[e],test_labels)
for l in class_labels:
inst = train_features[0]
ffl = get_features_for_label(inst,l,class_labels)
assert False not in (inst == ffl[l*len(inst):(l+1)*len(inst)])
theta = train_perceptron(train_features,train_labels,class_labels,num_features)
test_predictions = test_perceptron(theta,test_features,test_labels,class_labels)
eval_test = eval_performance(test_labels,test_predictions)
inverse_labels_index = {}
for k in all_labels_idx.keys():
inverse_labels_index[all_labels_idx[k]] = k
test_predictions_names = map(lambda e: inverse_labels_index[e],test_predictions)
with open('q3p3.txt', 'wb') as file_output:
for each_label in test_predictions_names:
file_output.write(each_label+'\n')
return ('test-micro=%d%%, test-macro=%d%%' % (int(eval_test[0]*100),int(eval_test[1]*100)))
"""
Trains a naive bayes model with bag of words features + two additional features
and computes the accuracy on the test set
train_texts, train_targets, train_labels are as described in read_dataset above
The same thing applies to the rest of the parameters.
"""
def run_extended_bow_naivebayes_classifier(train_texts, train_targets,train_labels,
dev_texts, dev_targets,dev_labels, test_texts, test_targets, test_labels):
# control variables
improved = True
alpha = 0.04
silent = True
    RUN_EXP = 'Both' # set to 'A', 'B', 'Both', or None
# feature extensions (A)
    if RUN_EXP == 'A':
train_features, dev_features, test_features = get_feature_A(train_texts, train_targets, train_labels,
dev_texts, dev_targets, dev_labels,
test_texts, test_targets, test_labels)
for idx, each_text in enumerate(train_texts):
each_text.append(str(float(train_features[idx])))
for idx, each_text in enumerate(dev_texts):
each_text.append(str(float(dev_features[idx])))
for idx, each_text in enumerate(test_texts):
each_text.append(str(float(test_features[idx])))
# feature extensions (B)
    elif RUN_EXP == 'B':
train_features, dev_features, test_features = get_feature_B(train_texts, train_targets, train_labels,
dev_texts, dev_targets, dev_labels,
test_texts, test_targets, test_labels)
for idx, each_text in enumerate(train_texts):
each_text.append(str(int(train_features[idx])))
for idx, each_text in enumerate(dev_texts):
each_text.append(str(int(dev_features[idx])))
for idx, each_text in enumerate(test_texts):
each_text.append(str(int(test_features[idx])))
# feature extensions with both two A and B
    elif RUN_EXP == 'Both':
train_features_A, dev_features_A, test_features_A = get_feature_A(train_texts, train_targets, train_labels,
dev_texts, dev_targets, dev_labels,
test_texts, test_targets, test_labels)
train_features_B, dev_features_B, test_features_B = get_feature_B(train_texts, train_targets, train_labels,
dev_texts, dev_targets, dev_labels,
test_texts, test_targets, test_labels)
for idx, each_text in enumerate(train_texts):
each_text.append(str(float(train_features_A[idx])))
each_text.append(str(int(train_features_B[idx])))
for idx, each_text in enumerate(dev_texts):
each_text.append(str(float(dev_features_A[idx])))
            each_text.append(str(int(dev_features_B[idx])))
for idx, each_text in enumerate(test_texts):
each_text.append(str(float(test_features_A[idx])))
            each_text.append(str(int(test_features_B[idx])))
else:
train_features, dev_features, test_features = None, None, None
if not silent:
print ' extension of the Naive Bayes classifier w. feature set: [%s] ' % (RUN_EXP)
print '------------------------------------------------------------------------------------------'
# Part 2.1 (c_s/c_sw)
c_s = dict.fromkeys(set(train_labels), 0)
multiples = list(itertools.product(c_s.keys(), ['time', 'loss', 'export']))
c_sw = dict.fromkeys(multiples, 0)
t_w = [each_word for each_text in train_texts for each_word in each_text]
multiples = list(itertools.product(c_s.keys(), t_w))
t_sw = dict.fromkeys(multiples, 0)
for idx, label in enumerate(train_labels):
cur_text = train_texts[idx]
# compute c_s
c_s[label] += len(cur_text)
# compute c_sw
time_cnt = cur_text.count('time')
loss_cnt = cur_text.count('loss')
export_cnt = cur_text.count('export')
c_sw[(label, 'time')] += time_cnt
c_sw[(label, 'loss')] += loss_cnt
c_sw[(label, 'export')] += export_cnt
        # compute t_sw (total occurrences): (label, word) -> count
for each_word in cur_text:
t_sw[(label, each_word)] += 1
# total # of distinct words: will be used for smoothing
t_dw = Counter(t_w)
if not silent:
print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
format('s', 'cord', 'division', 'formation', 'phone', 'product', 'text')
print '------------------------------------------------------------------------------------------'
print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
format('c(s)', c_s['cord'], c_s['division'], c_s['formation'], c_s['phone'], c_s['product'], c_s['text'])
print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
format('c(s,time)', c_sw[('cord', 'time')], c_sw[('division', 'time')], c_sw[('formation', 'time')], \
c_sw[('phone', 'time')], c_sw[('product', 'time')], c_sw[('text', 'time')])
print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
format('c(s,loss)', c_sw[('cord', 'loss')], c_sw[('division', 'loss')], c_sw[('formation', 'loss')], \
c_sw[('phone', 'loss')], c_sw[('product', 'loss')], c_sw[('text', 'loss')])
print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
format('c(s,export)', c_sw[('cord', 'export')], c_sw[('division', 'export')], c_sw[('formation', 'export')], \
c_sw[('phone', 'export')], c_sw[('product', 'export')], c_sw[('text', 'export')])
print '------------------------------------------------------------------------------------------'
print ' total distinct words: %d ' % (len(t_dw.keys()))
# Part 2.2 (p_s/p_ws)
total_occurances = float(sum(c_s.values()))
label_count = Counter(train_labels)
p_s = {key: (value / float( sum( label_count.values() )) ) for key, value in label_count.iteritems()}
if improved:
p_ws = {key: ( (value + alpha) / \
(float(c_s[key[0]]) + alpha*len(t_dw.keys())) ) \
for key, value in c_sw.iteritems()}
t_ws = {key: ( (value + alpha) / \
(float(c_s[key[0]]) + alpha*len(t_dw.keys())) ) \
for key, value in t_sw.iteritems()}
else:
p_ws = {key: (value / float(c_s[key[0]])) for key, value in c_sw.iteritems()}
t_ws = {key: (value / float(c_s[key[0]])) for key, value in t_sw.iteritems()}
# normalization steps
norm_denominators = {
'time': 0.0,
'loss': 0.0,
'export': 0.0
}
for key, value in p_ws.iteritems():
norm_denominators[key[1]] += value
p_ws_norm = {key: (value / norm_denominators[key[1]]) for key, value in p_ws.iteritems()}
p_ws = p_ws_norm
if not silent:
print '------------------------------------------------------------------------------------------'
print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
format('p(s)', p_s['cord'], p_s['division'], p_s['formation'], p_s['phone'], p_s['product'], p_s['text'])
print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
format('p(time|s)', p_ws[('cord', 'time')], p_ws[('division', 'time')], p_ws[('formation', 'time')], \
p_ws[('phone', 'time')], p_ws[('product', 'time')], p_ws[('text', 'time')])
print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
format('p(loss|s)', p_ws[('cord', 'loss')], p_ws[('division', 'loss')], p_ws[('formation', 'loss')], \
p_ws[('phone', 'loss')], p_ws[('product', 'loss')], p_ws[('text', 'loss')])
print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
format('p(export|s)', p_ws[('cord', 'export')], p_ws[('division', 'export')], p_ws[('formation', 'export')], \
p_ws[('phone', 'export')], p_ws[('product', 'export')], p_ws[('text', 'export')])
# Part 2.3 (p_sxd, on the 1st line on test set)
p_sxd = dict.fromkeys(c_s.keys(), 0.0)
lp_sxd = dict.fromkeys(c_s.keys(), 0.0)
cur_text = dev_texts[0]
for key in p_sxd.keys():
# compute p for each class
if improved:
tp_sxd = p_s[key]
tlp_sxd = log(p_s[key])
else:
tp_sxd = p_s[key]
# compute for each word
for each_word in cur_text:
if t_ws.has_key((key, each_word)):
if improved:
tp_sxd *= t_ws[(key, each_word)]
tlp_sxd += log(t_ws[(key, each_word)])
else:
tp_sxd *= t_ws[(key, each_word)]
# add to the dict
if improved:
p_sxd[key] = tp_sxd
lp_sxd[key] = tlp_sxd
else:
p_sxd[key] = tp_sxd
if not silent:
print '------------------------------------------------------------------------------------------'
print ' %s | %s | %s | %s | %s | %s | %s |' % \
('p(s|X)', p_sxd['cord'], p_sxd['division'], p_sxd['formation'], \
p_sxd['phone'], p_sxd['product'], p_sxd['text'])
print '------------------------------------------------------------------------------------------'
print ' 1st label in dev : %s ' % (dev_labels[0])
print ' 1st text in dev[:5]: %s ' % (dev_texts[0][:5])
if improved:
print '------------------------------------------------------------------------------------------'
print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
format('log(p(s|X))', lp_sxd['cord'], lp_sxd['division'], lp_sxd['formation'], \
lp_sxd['phone'], lp_sxd['product'], lp_sxd['text'])
# Part 2.4: compute all the prob on the test dataset
p_sx = list()
for idx, text in enumerate(test_texts):
t_prob = dict.fromkeys(c_s.keys(), 0.0)
for key in t_prob.keys():
# compute p for each class
if improved:
tp_sxt = log(p_s[key])
else:
tp_sxt = p_s[key]
for each_word in text:
if t_ws.has_key((key, each_word)):
if improved:
tp_sxt += log(t_ws[(key, each_word)])
else:
tp_sxt *= t_ws[(key, each_word)]
# add to the dict
t_prob[key] = tp_sxt
# add dict to the entire list
p_sx.append(t_prob)
# Part 2.4 (run the classifier for all)
labels_predicted = list()
for idx, label in enumerate(test_labels):
maximum_probs = max(p_sx[idx].values())
label_prediction = [key for key, value in p_sx[idx].iteritems() if value == maximum_probs]
label_prediction = random.choice(label_prediction)
# based on the prob
labels_predicted.append(label_prediction)
naivebayes_performance = eval_performance(test_labels, labels_predicted)
# save the implementation to the file
with open('q4p4_nb.txt', 'wb') as q4p4_nb_output:
for each_label in labels_predicted:
q4p4_nb_output.write(each_label+'\n')
# Part 2.5 (do more tuning for the classifier)
# - Laplace smoothing
# - Log likelihoods
if not silent:
print '------------------------------------------------------------------------------------------'
return 'Naive Bayes: micro/macro = [%.2f, %.2f] @ (alpha: %s)' % \
(naivebayes_performance[0]*100, naivebayes_performance[1]*100, alpha)
## this feature is just a random number generated for each instance
def get_feature_A(train_texts, train_targets,train_labels,
dev_texts, dev_targets,dev_labels, test_texts, test_targets, test_label):
    # seeding here means the same pseudo-random feature values are produced on every call
np.random.seed(0)
train_feature_vector = np.random.random_sample((len(train_texts),))
dev_feature_vector = np.random.random_sample((len(dev_texts),))
test_feature_vector = np.random.random_sample((len(test_texts),))
return train_feature_vector,dev_feature_vector,test_feature_vector
## this feature encodes the number of distinct words in each instance
def get_feature_B(train_texts, train_targets,train_labels,
dev_texts, dev_targets,dev_labels, test_texts, test_targets, test_label):
train_feature_vector = np.zeros(len(train_texts))
dev_feature_vector = np.zeros(len(dev_texts))
test_feature_vector = np.zeros(len(test_texts))
for i,text in enumerate(train_texts):
nw = len(set(text))
train_feature_vector[i] = nw
for i,text in enumerate(dev_texts):
nw = len(set(text))
dev_feature_vector[i] = nw
for i,text in enumerate(test_texts):
nw = len(set(text))
test_feature_vector[i] = nw
return train_feature_vector,dev_feature_vector,test_feature_vector
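# Quick check of what feature B encodes (the number of distinct tokens per instance);
# the toy texts are assumptions used only for illustration.
def _example_feature_b():
    toy = [['a', 'b', 'a'], ['a']]
    tr, dv, te = get_feature_B(toy, None, None, toy, None, None, toy, None, None)
    assert list(tr) == [2.0, 1.0]
    return tr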
"""
Trains a perceptron model with bag of words features + two additional features
and computes the accuracy on the test set
train_texts, train_targets, train_labels are as described in read_dataset above
The same thing applies to the rest of the parameters.
"""
def run_extended_bow_perceptron_classifier(train_texts, train_targets,train_labels,
dev_texts, dev_targets,dev_labels, test_texts, test_targets, test_labels):
RUN_EXP_A = True # set to True for running on feature A
RUN_EXP_B = True # set to True for running on feature B
num_extra_features = 0
if RUN_EXP_A:
train_new_feature_vectorA,dev_new_feature_vectorA,test_new_feature_vectorA = get_feature_A(train_texts, train_targets,train_labels, dev_texts, dev_targets,dev_labels, test_texts, test_targets, test_labels)
num_extra_features += 1
if RUN_EXP_B:
train_new_feature_vectorB,dev_new_feature_vectorB,test_new_feature_vectorB = get_feature_B(train_texts, train_targets,train_labels, dev_texts, dev_targets,dev_labels, test_texts, test_targets, test_labels)
num_extra_features += 1
all_words_idx = extract_all_words(train_texts)
all_labels_idx = extract_all_labels(train_labels)
num_features = (len(all_words_idx.keys())+num_extra_features)*len(all_labels_idx.keys())+1
class_labels = all_labels_idx.values()
train_features = extract_features(all_words_idx,all_labels_idx,train_texts)
train_labels = map(lambda e: all_labels_idx[e],train_labels)
test_features = extract_features(all_words_idx,all_labels_idx,test_texts)
test_labels = map(lambda e: all_labels_idx[e],test_labels)
if RUN_EXP_A:
train_features = np.c_[train_features, train_new_feature_vectorA]
test_features = np.c_[test_features, test_new_feature_vectorA]
if RUN_EXP_B:
train_features = np.c_[train_features, train_new_feature_vectorB]
test_features = np.c_[test_features, test_new_feature_vectorB]
for l in class_labels:
inst = train_features[0]
ffl = get_features_for_label(inst,l,class_labels)
assert False not in (inst == ffl[l*len(inst):(l+1)*len(inst)])
theta = train_perceptron(train_features,train_labels,class_labels,num_features)
test_predictions = test_perceptron(theta,test_features,test_labels,class_labels)
eval_test = eval_performance(test_labels,test_predictions)
inverse_labels_index = {}
for k in all_labels_idx.keys():
inverse_labels_index[all_labels_idx[k]] = k
test_predictions_names = map(lambda e: inverse_labels_index[e],test_predictions)
with open('q4p4_pn.txt', 'wb') as file_output:
for each_label in test_predictions_names:
file_output.write(each_label+'\n')
return ('test-micro=%d%%, test-macro=%d%%' % (int(eval_test[0]*100),int(eval_test[1]*100)))
# Part 1.1
def run_most_frequent_class_classifier(train_texts, train_targets,train_labels,
dev_texts, dev_targets,dev_labels, test_texts, test_targets, test_labels):
labels_freq = {}
for l in train_labels:
if labels_freq.get(l,None) is None:
labels_freq[l] = 0
labels_freq[l] += 1
most_frequent_label = max(labels_freq.iteritems(), key=operator.itemgetter(1))[0]
train_pred = [most_frequent_label]*len(train_labels)
dev_pred = [most_frequent_label]*len(dev_labels)
assert train_pred[2] == train_labels[2]
eval_train = eval_performance(train_labels,train_pred)
eval_dev = eval_performance(dev_labels,dev_pred)
return ('training-micro=%d%%, training-macro=%d%%, dev-micro=%d%%, dev-macro=%d%%' % (int(eval_train[0]*100),int(eval_train[1]*100),int(eval_dev[0]*100),int(eval_dev[1]*100)))
# Part 1.2
def run_inner_annotator_agreement(train_texts, train_targets,train_labels,
dev_texts, dev_targets,dev_labels, test_texts, test_targets, test_labels):
dev_labels_manual, dev_targets_manual, dev_texts_manual = read_dataset('dev_manual')
return '%.2f' % sklearn.metrics.cohen_kappa_score(dev_labels[:20],dev_labels_manual)
"""
Main entry point (swap in a different classifier by changing the call below)
"""
if __name__ == "__main__":
# reading, tokenizing, and normalizing data
train_labels, train_targets, train_texts = read_dataset('train')
dev_labels, dev_targets, dev_texts = read_dataset('dev')
test_labels, test_targets, test_texts = read_dataset('test')
#running the classifier
test_scores = run_bow_perceptron_classifier(train_texts, train_targets, train_labels,
dev_texts, dev_targets, dev_labels, test_texts, test_targets, test_labels)
print test_scores
| 43.075949
| 211
| 0.614634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10,731
| 0.315339
|
a14fb8c57a2911a94e991dd47b577ec949e53771
| 640
|
py
|
Python
|
Week 7 Web pages/Task05.py
|
retverd/python_hse
|
cb9bfb092c1cf68ae0c53b9919ca24a71a8cbf88
|
[
"MIT"
] | null | null | null |
Week 7 Web pages/Task05.py
|
retverd/python_hse
|
cb9bfb092c1cf68ae0c53b9919ca24a71a8cbf88
|
[
"MIT"
] | null | null | null |
Week 7 Web pages/Task05.py
|
retverd/python_hse
|
cb9bfb092c1cf68ae0c53b9919ca24a71a8cbf88
|
[
"MIT"
] | null | null | null |
# Мы сохранили страницу с википедии про языки программирования и сохранили по адресу
# https://stepik.org/media/attachments/lesson/209717/1.html
#
# Скачайте её с помощью скрипта на Питоне и посчитайте, какой язык упоминается чаще Python или C++ (ответ должен быть
# одной из этих двух строк). Необходимо просто подсчитать количество вхождений слова Python или C++ как подстроки.
from urllib.request import urlopen
response = urlopen('https://stepik.org/media/attachments/lesson/209717/1.html')
html = response.read().decode('utf-8')
c = html.count('C++')
p = html.count('Python')
if c > p:
print('C++')
else:
print('Python')
| 30.47619
| 117
| 0.739063
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 709
| 0.803855
|
a150c0cbc599ebc411b4f81c6fa3b0405cf1395b
| 31,794
|
py
|
Python
|
tests/test_bio/test_cell.py
|
jfaccioni/clovars
|
64e24286a2dc185490384aeb08027d88eb9462c4
|
[
"MIT"
] | null | null | null |
tests/test_bio/test_cell.py
|
jfaccioni/clovars
|
64e24286a2dc185490384aeb08027d88eb9462c4
|
[
"MIT"
] | null | null | null |
tests/test_bio/test_cell.py
|
jfaccioni/clovars
|
64e24286a2dc185490384aeb08027d88eb9462c4
|
[
"MIT"
] | null | null | null |
import unittest
from unittest import mock
from unittest.mock import MagicMock
from clovars.abstract import Circle
from clovars.bio import Cell, Treatment
from clovars.scientific import ConstantCellSignal, CellSignal, GaussianCellSignal, Gaussian
from clovars.utils import SimulationError
from tests import NotEmptyTestCase
class TestCell(NotEmptyTestCase):
"""Class representing unit-tests for clovars.bio.cell.Cell class."""
default_delta = 100
control_treatment = Treatment(
name="Control",
division_curve=Gaussian(loc=24.0, scale=5),
death_curve=Gaussian(loc=32, scale=5),
)
@classmethod
def setUpClass(cls) -> None:
"""Sets up the entire test suite by setting the default Treatment."""
pass
def setUp(self) -> None:
"""Sets up the test case subject (a Cell instance)."""
self.cell = Cell()
# def test_cell_has_default_treatment_class_attribute(self) -> None:
# """Tests whether a Cell has a "default_treatment" class attribute (a Treatment instance)."""
# self.assertTrue(hasattr(self.cell, 'default_treatment'))
# self.assertTrue(hasattr(Cell, 'default_treatment'))
# self.assertIsInstance(self.cell.default_treatment, Treatment)
def test_cell_has_name_attribute(self) -> None:
"""Tests whether a Cell has a "name" attribute (a string)."""
self.assertTrue(hasattr(self.cell, 'name'))
self.assertIsInstance(self.cell.name, str)
def test_cell_has_max_speed_attribute(self) -> None:
"""Tests whether a Cell has a "max_speed" attribute (a float value)."""
self.assertTrue(hasattr(self.cell, 'max_speed'))
self.assertIsInstance(self.cell.max_speed, float)
def test_cell_has_fate_attribute(self) -> None:
"""Tests whether a Cell has a "fate" attribute (a string)."""
self.assertTrue(hasattr(self.cell, 'fate'))
self.assertIsInstance(self.cell.fate, str)
def test_fate_attribute_starts_as_migration(self) -> None:
"""Tests whether a Cell starts with its "fate" attribute set to "migration"."""
self.assertEqual(Cell().fate, "migration")
def test_cell_has_seconds_since_birth_attribute(self) -> None:
"""Tests whether a Cell has a "seconds_since_birth" attribute (an integer)."""
self.assertTrue(hasattr(self.cell, 'seconds_since_birth'))
self.assertIsInstance(self.cell.seconds_since_birth, int)
def test_seconds_since_birth_attribute_starts_at_zero(self) -> None:
"""Tests whether a Cell starts with its "seconds_since_birth" attribute set to 0."""
self.assertEqual(Cell().seconds_since_birth, 0)
def test_cell_has_alive_attribute(self) -> None:
"""Tests whether a Cell has an "alive" attribute (a boolean value)."""
self.assertTrue(hasattr(self.cell, 'alive'))
self.assertIsInstance(self.cell.alive, bool)
def test_alive_attribute_starts_true(self) -> None:
"""Tests whether a Cell starts with its "alive" attribute set to True."""
self.assertEqual(Cell().alive, True)
def test_cell_has_senescent_attribute(self) -> None:
"""Tests whether a Cell has a "senescent" attribute (a boolean value)."""
self.assertTrue(hasattr(self.cell, 'senescent'))
self.assertIsInstance(self.cell.senescent, bool)
def test_senescent_attribute_starts_false(self) -> None:
"""Tests whether a Cell starts with its "senescent" attribute set to False."""
self.assertEqual(Cell().senescent, False)
def test_cell_has_fitness_memory_attribute(self) -> None:
"""Tests whether a Cell has a "fitness_memory" attribute (a float)."""
self.assertTrue(hasattr(self.cell, 'fitness_memory'))
self.assertIsInstance(self.cell.fitness_memory, float)
def test_fitness_memory_outside_zero_one_range_raises_error(self) -> None:
"""
Tests whether a Cell raises a SimulationError only when its "fitness_memory"
attribute is initialized outside the [0, 1] interval.
"""
for fitness_memory in [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]:
try:
Cell(fitness_memory=fitness_memory)
except SimulationError:
self.fail(
"SimulationError was unexpectedly raised when initializing Cell"
f" with fitness_memory = {fitness_memory}"
)
for fitness_memory in [-0.1, 1.1]:
with self.assertRaises(SimulationError):
Cell(fitness_memory=fitness_memory)
def test_cell_has_division_threshold_attribute(self) -> None:
"""Tests whether a Cell has a "division_threshold" attribute (a float)."""
self.assertTrue(hasattr(self.cell, 'division_threshold'))
self.assertIsInstance(self.cell.division_threshold, float)
def test_division_threshold_outside_zero_one_range_raises_error(self) -> None:
"""
Tests whether a Cell raises a SimulationError only when its "division_threshold"
attribute is initialized outside the [0, 1] interval.
"""
for division_threshold in [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]:
try:
Cell(division_threshold=division_threshold)
except SimulationError:
self.fail(
"SimulationError was unexpectedly raised when initializing Cell"
f" with division_threshold = {division_threshold}"
)
for division_threshold in [-0.1, 1.1]:
with self.assertRaises(SimulationError):
Cell(division_threshold=division_threshold)
def test_cell_division_threshold_attribute_is_between_zero_and_one(self) -> None:
"""
Tests whether the "division_threshold" attribute (random float value) lies between 0 and 1
when it is initialized as a None value.
"""
for _ in range(10):
cell = Cell(division_threshold=None)
with self.subTest(cell=cell):
self.assertGreaterEqual(cell.division_threshold, 0)
self.assertLessEqual(cell.division_threshold, 1)
def test_cell_has_death_threshold_attribute(self) -> None:
"""Tests whether a Cell has a "death_threshold" attribute (a float)."""
self.assertTrue(hasattr(self.cell, 'death_threshold'))
self.assertIsInstance(self.cell.death_threshold, float)
def test_death_threshold_outside_zero_one_range_raises_error(self) -> None:
"""
Tests whether a Cell raises a SimulationError only when its "death_threshold"
attribute is initialized outside the [0, 1] interval.
"""
for death_threshold in [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]:
try:
Cell(death_threshold=death_threshold)
except SimulationError:
self.fail(
"SimulationError was unexpectedly raised when initializing Cell"
f" with death_threshold = {death_threshold}"
)
for death_threshold in [-0.1, 1.1]:
with self.assertRaises(SimulationError):
Cell(death_threshold=death_threshold)
def test_cell_death_threshold_attribute_is_between_zero_and_one(self) -> None:
"""
Tests whether the "death_threshold" attribute (random float value) lies between 0 and 1
when it is initialized as a None value.
"""
for _ in range(10):
cell = Cell(death_threshold=None)
with self.subTest(cell=cell):
self.assertGreaterEqual(cell.death_threshold, 0)
self.assertLessEqual(cell.death_threshold, 1)
def test_cell_has_death_threshold_attribute_is_between_zero_and_one(self) -> None:
"""Tests whether the "death_threshold" attribute (random float value) lies between 0 and 1."""
for _ in range(10):
cell = Cell()
with self.subTest(cell=cell):
self.assertGreaterEqual(cell.death_threshold, 0)
self.assertLessEqual(cell.death_threshold, 1)
def test_cell_has_circle_attribute(self) -> None:
"""Tests whether a Cell has a "circle" attribute (a Circle instance)."""
self.assertTrue(hasattr(self.cell, 'circle'))
self.assertIsInstance(self.cell.circle, Circle)
def test_cell_has_signal_attribute(self) -> None:
"""Tests whether a Cell has a "signal" attribute (a CellSignal instance)."""
self.assertTrue(hasattr(self.cell, 'signal'))
self.assertIsInstance(self.cell.signal, CellSignal)
def test_cell_uses_a_constant_signal_if_signal_argument_is_none(self) -> None:
"""Tests whether a Cell uses a ConstantCellSignal instance when initialized with signal=None."""
cell = Cell(signal=None)
self.assertIsInstance(cell.signal, ConstantCellSignal)
def test_cell_has_treatment_attribute(self) -> None:
"""Tests whether a Cell has a "treatment" attribute (a Treatment instance)."""
self.assertTrue(hasattr(self.cell, 'treatment'))
self.assertIsInstance(self.cell.treatment, Treatment)
# def test_cell_uses_the_default_treatment_if_treatment_argument_is_none(self) -> None:
# """Tests whether a Cell uses the "default_treatment" class attribute when initialized with treatment=None."""
# cell = Cell(signal=None)
# self.assertIs(cell.treatment, self.cell.default_treatment)
def test_calculate_division_chance_method_returns_chance_depending_on_the_cell_seconds_since_birth(self) -> None:
"""
Tests whether the "calculate_division_chance" method returns a chance between
[0, 1] proportional to the Cell's age.
"""
self.cell.treatment = self.control_treatment # division stats: 24 (+-5) hours
self.cell.seconds_since_birth = 0 # Very low chance of dividing right after birth
self.assertLess(self.cell.calculate_division_chance(delta=self.default_delta), 0.1)
self.cell.seconds_since_birth = 60 * 60 * 1000 # Very high chance of dividing after 1000 h
self.assertGreater(self.cell.calculate_division_chance(delta=self.default_delta), 0.9)
def test_calculate_death_chance_method_returns_chance_depending_on_the_cell_seconds_since_birth(self) -> None:
"""
Tests whether the "calculate_death_chance" method returns a chance between
[0, 1] proportional to the Cell's age.
"""
        self.cell.treatment = self.control_treatment  # death stats: 32 (+-5) hours
self.cell.seconds_since_birth = 0 # Very low chance of dying right after birth
self.assertLess(self.cell.calculate_death_chance(delta=self.default_delta), 0.1)
self.cell.seconds_since_birth = 60 * 60 * 1000 # Very high chance of dying after 1000 h
self.assertGreater(self.cell.calculate_death_chance(delta=self.default_delta), 0.9)
def test_cell_has_circle_attributes_as_properties(self) -> None:
"""Tests whether a Cell exposes relevant Circle attributes as properties."""
test_cell = Cell(x=10.0, y=20.0, radius=5.0)
for attr_name in ['x', 'y', 'radius', 'center', 'area']:
with self.subTest(attr_name=attr_name):
try:
value = getattr(test_cell, attr_name)
self.assertEqual(value, getattr(test_cell.circle, attr_name))
except AttributeError:
self.fail(f"Test failed: could not get attribute {attr_name} in Cell instance {test_cell}")
def test_cell_is_able_to_set_circle_attributes(self) -> None:
"""Tests whether a Cell is able to directly set its "x", "y" and "radius" Circle attributes."""
test_cell = Cell(x=10.0, y=20.0, radius=5.0)
for attr_name in ['x', 'y', 'radius']:
with self.subTest(attr_name=attr_name):
try:
setattr(test_cell, attr_name, 1.0)
except AttributeError:
self.fail(f"Test failed: could not set attribute {attr_name} in Cell instance {test_cell}")
def test_cell_distance_to_method_calculates_cell_distance_using_circles(self) -> None:
"""Tests whether the "distance_to" method uses Circles to calculate distance between Cells."""
other_cell = Cell()
with mock.patch("clovars.abstract.Circle.distance_to") as mock_circle_distance_to:
self.cell.distance_to(other_cell=other_cell)
mock_circle_distance_to.assert_called_once_with(other_cell.circle)
def test_cell_distance_to_method_raises_type_error_if_argument_is_not_a_cell(self) -> None:
"""
Tests whether the "distance_to" method raises a TypeError only when the
other_cell argument is not an actual Cell instance.
"""
valid_argument = Cell()
try:
self.cell.distance_to(other_cell=valid_argument)
except TypeError:
self.fail("Cell raised TypeError unexpectedly!")
invalid_argument = "WHATEVER ELSE"
with self.assertRaises(TypeError):
self.cell.distance_to(other_cell=invalid_argument) # noqa
def test_cell_has_hours_since_birth_property(self) -> None:
"""Tests whether a Cell has an "hours_since_birth" property (a float)."""
self.assertTrue(hasattr(self.cell, 'hours_since_birth'))
self.assertIsInstance(self.cell.hours_since_birth, float)
def test_hours_since_birth_calculations_are_correct(self) -> None:
"""Tests whether the "hours_since_birth" property correctly calculates the Cell's hours since birth."""
for seconds, hours in [(0, 0.0), (60, 1/60), (3600, 1.0), (7200, 2.0), (9000, 2.5)]:
with self.subTest(seconds=seconds, hours=hours):
self.cell.seconds_since_birth = seconds
self.assertEqual(self.cell.hours_since_birth, hours)
def test_cell_has_branch_name_property(self) -> None:
"""Tests whether a Cell has a "branch_name" property (a string)."""
self.assertTrue(hasattr(self.cell, 'branch_name'))
self.assertIsInstance(self.cell.branch_name, str)
def test_branch_name_returns_root_name_up_to_first_division(self) -> None:
"""Tests whether the "branch_name" property returns the Cell's root name, including the branch number."""
for cell_name, branch_name in [('1', '1'), ('3b.1', '3b'), ('15e-5.1.2', '15e-5'), ('4d-3.2.2.1.2', '4d-3')]:
with self.subTest(cell_name=cell_name, branch_name=branch_name):
self.cell.name = cell_name
self.assertEqual(self.cell.branch_name, branch_name)
def test_cell_has_colony_name_property(self) -> None:
"""Tests whether a Cell has a "colony_name" property (a string)."""
self.assertTrue(hasattr(self.cell, 'colony_name'))
self.assertIsInstance(self.cell.colony_name, str)
def test_colony_name_returns_root_name_up_to_branch_name(self) -> None:
"""Tests whether the "colony_name" property returns the Cell's root name, excluding the branch number."""
for cell_name, colony_name in [('1', '1'), ('3b.1', '3b'), ('15e-5.1.2', '15e'), ('4d-3.2.2.1.2', '4d')]:
with self.subTest(cell_name=cell_name, colony_name=colony_name):
self.cell.name = cell_name
self.assertEqual(self.cell.colony_name, colony_name)
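    # Descriptive note inferred from the two tests above: a cell name such as "15e-5.1.2"
    # is "<colony><branch suffix>" followed by one ".<child>" segment per division;
    # branch_name keeps everything before the first '.', and colony_name additionally
    # drops the trailing '-<number>' branch index when present.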
def test_cell_has_generation_property(self) -> None:
"""Tests whether a Cell has a "generation" property (an integer)."""
self.assertTrue(hasattr(self.cell, 'generation'))
self.assertIsInstance(self.cell.generation, int)
def test_generation_returns_cell_name_prefix(self) -> None:
"""
Tests whether the "generation" property returns the number of times that the Cell has divided
based on its name.
"""
for cell_name, generation in [('1', 0), ('3b.1', 1), ('15e-5.1.2', 2), ('4d-3.2.2.1.2', 4)]:
with self.subTest(cell_name=cell_name, generation=generation):
self.cell.name = cell_name
self.assertEqual(self.cell.generation, generation)
def test_cell_has_signal_value_property(self) -> None:
"""Tests whether a Cell has a "signal_value" property (a float)."""
self.assertTrue(hasattr(self.cell, 'signal_value'))
self.assertIsInstance(self.cell.signal_value, float)
def test_signal_value_returns_current_signal_value(self) -> None:
"""Tests whether the "signal_value" property returns the CellSignal's current value."""
signal = GaussianCellSignal()
test_cell = Cell(signal=signal)
for _ in range(10):
signal.oscillate(current_seconds=0)
current_signal_value = signal.value
with self.subTest(current_signal_value=current_signal_value):
self.assertEqual(test_cell.signal_value, current_signal_value)
def test_set_cell_fate_method_sets_fate_to_death_if_cell_should_die(self) -> None:
"""
Tests whether the "set_cell_fate" method sets the Cell fate to "death"
if the "should_die" method returns True.
"""
with mock.patch('clovars.bio.Cell.should_die', return_value=True):
self.cell.set_cell_fate(delta=self.default_delta)
self.assertEqual(self.cell.fate, "death")
def test_should_die_returns_boolean_based_on_death_chance_and_threshold(self) -> None:
"""Tests whether the "should_die" method returns True/False depending on the Cell's death chance."""
self.cell.death_threshold = 1.1 # death chance is in [0, 1], cell never dies here
self.assertFalse(self.cell.should_die(delta=self.default_delta))
self.cell.death_threshold = -0.1 # death chance is in [0, 1], cell always dies here
self.assertTrue(self.cell.should_die(delta=self.default_delta))
def test_set_cell_fate_method_sets_fate_to_division_if_cell_should_divide(self) -> None:
"""
Tests whether the "set_cell_fate" method sets the Cell fate to "division"
if the "should_die" method returns False and "should_divide" returns True.
"""
with (
mock.patch('clovars.bio.Cell.should_die', return_value=False),
mock.patch('clovars.bio.Cell.should_divide', return_value=True),
):
self.cell.set_cell_fate(delta=self.default_delta)
self.assertEqual(self.cell.fate, "division")
def test_should_divide_returns_boolean_based_on_division_chance_and_threshold(self) -> None:
"""Tests whether the "should_divide" method returns True/False depending on the Cell's division chance."""
        self.cell.division_threshold = 1.1  # division chance is in [0, 1], cell never divides here
self.assertFalse(self.cell.should_divide(delta=self.default_delta))
        self.cell.division_threshold = -0.1  # division chance is in [0, 1], cell always divides here
self.assertTrue(self.cell.should_divide(delta=self.default_delta))
def test_set_cell_fate_method_sets_fate_to_migration_if_cell_should_not_die_nor_divide(self) -> None:
"""
Tests whether the "set_cell_fate" method sets the Cell fate to "migration"
if both "should_die" and "should_divide" methods returns False.
"""
with (
mock.patch('clovars.bio.Cell.should_die', return_value=False),
mock.patch('clovars.bio.Cell.should_divide', return_value=False),
):
self.cell.set_cell_fate(delta=self.default_delta)
self.assertEqual(self.cell.fate, "migration")
@mock.patch('clovars.bio.Cell.migrate')
@mock.patch('clovars.bio.Cell.divide')
@mock.patch('clovars.bio.Cell.die')
def test_pass_time_method_calls_die_if_cell_fate_is_to_die(
self,
mock_die: MagicMock,
mock_divide: MagicMock,
mock_migrate: MagicMock,
) -> None:
"""Tests whether the "pass_time" method calls the "die" method if the Cell fate is set to "death"."""
self.cell.fate = 'death'
self.cell.pass_time(delta=self.default_delta, current_seconds=0)
mock_die.assert_called_once()
mock_divide.assert_not_called()
mock_migrate.assert_not_called()
def test_pass_time_method_returns_none_if_cell_fate_is_to_die(self) -> None:
"""Tests whether the "pass_time" method returns None if the Cell fate is set to "death"."""
self.cell.fate = 'death'
return_value = self.cell.pass_time(delta=self.default_delta, current_seconds=0)
self.assertIsNone(return_value)
@mock.patch('clovars.bio.Cell.migrate')
@mock.patch('clovars.bio.Cell.divide')
@mock.patch('clovars.bio.Cell.die')
def test_pass_time_method_calls_divide_if_cell_fate_is_to_divide(
self,
mock_die: MagicMock,
mock_divide: MagicMock,
mock_migrate: MagicMock,
) -> None:
"""Tests whether the "pass_time" method calls the "divide" method if the Cell fate is set to "division"."""
self.cell.fate = 'division'
self.cell.pass_time(delta=self.default_delta, current_seconds=0)
mock_die.assert_not_called()
mock_divide.assert_called_once()
mock_migrate.assert_not_called()
def test_pass_time_method_returns_a_tuple_of_child_cells_if_cell_fate_is_to_divide(self) -> None:
"""Tests whether the "pass_time" method returns a tuple of child Cells if the Cell fate is set to "division"."""
self.cell.fate = 'division'
return_value = self.cell.pass_time(delta=self.default_delta, current_seconds=0)
self.assertIsInstance(return_value, tuple)
for thing in return_value:
self.assertIsInstance(thing, Cell)
self.assertIsNot(thing, self.cell)
@mock.patch('clovars.bio.Cell.migrate')
@mock.patch('clovars.bio.Cell.divide')
@mock.patch('clovars.bio.Cell.die')
def test_pass_time_method_calls_migrate_if_cell_fate_is_to_migrate(
self,
mock_die: MagicMock,
mock_divide: MagicMock,
mock_migrate: MagicMock,
) -> None:
"""Tests whether the "pass_time" method calls the "migrate" method if the Cell fate is set to "migration"."""
self.cell.fate = 'migration'
self.cell.pass_time(delta=self.default_delta, current_seconds=0)
mock_die.assert_not_called()
mock_divide.assert_not_called()
mock_migrate.assert_called_once()
def test_pass_time_method_returns_the_same_cell_if_cell_fate_is_to_migrate(self) -> None:
"""Tests whether the "pass_time" method returns the own Cell instance if the Cell fate is set to "migration"."""
self.cell.fate = 'migration'
return_value = self.cell.pass_time(delta=self.default_delta, current_seconds=0)
self.assertIsInstance(return_value, Cell)
self.assertIs(return_value, self.cell)
def test_pass_time_method_raises_value_error_if_cell_fate_is_unexpected(self) -> None:
"""Tests whether the "pass_time" method raises a ValueError if the Cell fate value is unexpected."""
self.cell.fate = 'UNEXPECTED VALUE!'
with self.assertRaises(ValueError):
self.cell.pass_time(delta=self.default_delta, current_seconds=0)
def test_die_method_sets_the_state_of_the_alive_flag_to_false(self) -> None:
"""Tests whether the "die" method sets the state of the "alive" flag to False."""
self.assertTrue(self.cell.alive)
self.cell.die()
self.assertFalse(self.cell.alive)
def test_divide_method_returns_a_tuple_of_two_cells_with_matching_names(self) -> None:
"""Tests whether the "divide" returns a tuple of two child Cells with matching names (ending in .1 and .2)."""
children = self.cell.divide(delta=self.default_delta)
self.assertIsInstance(children[0], Cell)
self.assertEqual(children[0].name, self.cell.name + '.1')
self.assertIsInstance(children[1], Cell)
self.assertEqual(children[1].name, self.cell.name + '.2')
def test_get_child_cell_returns_a_new_cell_instance(self) -> None:
"""Tests whether the "get_child_cell" method returns a new Cell instance."""
child_cell = self.cell.get_child_cell(delta=self.default_delta, branch_name='')
self.assertIsInstance(child_cell, Cell)
self.assertIsNot(child_cell, self.cell)
def test_get_child_cell_adds_the_branch_name_to_the_parent_cell_name(self) -> None:
"""Tests whether the Cell returned from "get_child_cell" has the same base name as its parent + branch name."""
for branch_name in ['1', '2', 'BRANCH_NAME', '...', '']:
child_cell = self.cell.get_child_cell(delta=self.default_delta, branch_name=branch_name)
with self.subTest(branch_name=branch_name):
self.assertEqual(child_cell.name, f"{self.cell.name}.{branch_name}")
def test_get_child_cell_method_moves_cell(self) -> None:
"""Tests whether the "migrate" method moves the Cell from its previous position."""
previous_cell_center = self.cell.center
same_cell = self.cell.migrate(delta=self.default_delta)
self.assertNotEqual(same_cell.center, previous_cell_center) # unlikely to be equal, but it may happen...
def test_get_child_cell_copies_attributes_from_parent_cell(self) -> None:
"""Tests whether the Cell returned from "get_child_cell" has some identical attributes as its parent."""
child_cell = self.cell.get_child_cell(delta=self.default_delta, branch_name='')
for attr_name in ['max_speed', 'radius', 'fitness_memory', 'treatment']:
with self.subTest(attr_name=attr_name):
self.assertEqual(getattr(child_cell, attr_name), getattr(self.cell, attr_name))
def test_get_child_cell_calls_get_child_fitness_to_assign_a_the_child_thresholds(self) -> None:
"""
Tests whether the Cell returned from "get_child_cell" has a division and death threshold values
returned from the parent's "get_child_fitness" method.
"""
mock_fitness = (0.1, 0.2)
with mock.patch.object(self.cell, 'get_child_fitness', return_value=mock_fitness) as mock_get_cell_fitness:
child_cell = self.cell.get_child_cell(delta=self.default_delta, branch_name='')
mock_get_cell_fitness.assert_called()
self.assertIn(child_cell.division_threshold, mock_fitness)
self.assertIn(child_cell.death_threshold, mock_fitness)
def test_get_child_cell_uses_signal_split_to_assign_a_new_signal_to_child_cell(self) -> None:
"""
Tests whether the Cell returned from "get_child_cell" has a signal
returned from the parent's signal's "split" method.
"""
with mock.patch('clovars.scientific.CellSignal.split') as mock_split:
child_cell = self.cell.get_child_cell(delta=self.default_delta, branch_name='')
mock_split.assert_called_once()
self.assertIs(child_cell.signal, mock_split.return_value)
def test_get_new_xy_coordinates_method_returns_a_tuple_of_floats(self) -> None:
"""Tests whether the "get_new_xy_coordinates" method returns a tuple of floats."""
xy = self.cell.get_new_xy_coordinates(delta=self.default_delta, event_name='migration')
self.assertIsInstance(xy, tuple)
for thing in xy:
self.assertIsInstance(thing, float)
def test_get_new_xy_coordinates_method_raises_value_error_if_event_name_is_not_migration_or_division(self) -> None:
"""
Tests whether the "get_new_xy_coordinates" raises a ValueError if the
event name argument isn't "migration" or "division".
"""
for event_name in ['migration', 'division']:
with self.subTest(event_name=event_name):
try:
self.cell.get_new_xy_coordinates(delta=self.default_delta, event_name='migration')
except ValueError:
self.fail(f'Call to "get_new_xy_coordinates" failed unexpectedly with event_name="{event_name}"')
with self.assertRaises(ValueError):
self.cell.get_new_xy_coordinates(delta=self.default_delta, event_name="INVALID EVENT NAME")
def test_get_new_xy_coordinates_method_uses_smaller_search_radius_on_division(self) -> None:
"""Tests whether the "get_new_xy_coordinates" uses a smaller search radius when the event name is "division"."""
with mock.patch('clovars.bio.cell.Circle') as mock_circle_init_migration:
self.cell.get_new_xy_coordinates(delta=self.default_delta, event_name='migration')
migration_radius = mock_circle_init_migration.call_args[1]['radius']
with mock.patch('clovars.bio.cell.Circle') as mock_circle_init_division:
self.cell.get_new_xy_coordinates(delta=self.default_delta, event_name='division')
division_radius = mock_circle_init_division.call_args[1]['radius']
self.assertGreater(migration_radius, division_radius)
def test_get_child_fitness_method_returns_tuple_of_floats(self) -> None:
"""
Tests whether the "get_child_fitness" method returns a tuple of floats
representing the child Cell's division and death thresholds.
"""
return_value = self.cell.get_child_fitness()
self.assertIsInstance(return_value, tuple)
with self.assertSequenceNotEmpty(return_value):
for thing in return_value:
self.assertIsInstance(thing, float)
def test_get_child_fitness_method_returns_values_from_bounded_brownian_fluctuation_function(self) -> None:
"""
Tests whether the "get_child_fitness" method returns values from the
"bounded_brownian_fluctuation_function" function using the appropriate parameters from the Cell.
"""
with mock.patch('clovars.bio.cell.bounded_brownian_motion') as mock_brownian_motion:
self.cell.get_child_fitness()
mock_brownian_motion.assert_any_call(current_value=self.cell.division_threshold, scale=self.cell.fitness_memory)
mock_brownian_motion.assert_any_call(current_value=self.cell.death_threshold, scale=self.cell.fitness_memory)
def test_migrate_method_returns_the_same_cell(self) -> None:
"""Tests whether the "migrate" method returns the same Cell."""
same_cell = self.cell.migrate(delta=self.default_delta)
self.assertIs(same_cell, self.cell)
def test_migrate_method_adds_delta_seconds_to_the_cell_seconds_since_birth(self) -> None:
"""Tests whether the "migrate" method adds delta seconds to the Cell's "seconds_since_birth" attribute."""
previous_seconds_since_birth = self.cell.seconds_since_birth
same_cell = self.cell.migrate(delta=self.default_delta)
self.assertEqual(same_cell.seconds_since_birth, previous_seconds_since_birth + self.default_delta)
def test_migrate_method_moves_cell(self) -> None:
"""Tests whether the "migrate" method moves the Cell from its previous position."""
previous_cell_center = self.cell.center
same_cell = self.cell.migrate(delta=self.default_delta)
self.assertNotEqual(same_cell.center, previous_cell_center) # unlikely to be equal, but it may happen...
def test_fluctuate_signal_method_calls_signal_oscillate_method(self) -> None:
"""Tests whether the "fluctuate_signal" method calls the signal's "oscillate" method."""
self.cell.signal = (signal_mock := MagicMock())
self.cell.fluctuate_signal(current_seconds=0)
signal_mock.oscillate.assert_called_once_with(current_seconds=0)
if __name__ == '__main__':
unittest.main()
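# Hedged usage note: a single test can also be selected with the standard unittest CLI,
# e.g. (module path assumed from the repository layout tests/test_bio/test_cell.py):
#   python -m unittest tests.test_bio.test_cell.TestCell.test_cell_has_name_attribute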
| 53.345638
| 120
| 0.686482
| 31,418
| 0.988174
| 0
| 0
| 2,151
| 0.067654
| 0
| 0
| 10,440
| 0.328364
|
a151ad0affbfcc7813c745ba76d87908fc3a227a
| 2,959
|
py
|
Python
|
nutsml/examples/pytorch_/mnist/mlp_train.py
|
maet3608/nuts-ml
|
2551612a47bc6e9efa534eda0db5d8c5def51887
|
[
"Apache-2.0"
] | 39
|
2017-02-07T03:22:41.000Z
|
2021-11-24T20:27:57.000Z
|
nutsml/examples/pytorch_/mnist/mlp_train.py
|
maet3608/nuts-ml
|
2551612a47bc6e9efa534eda0db5d8c5def51887
|
[
"Apache-2.0"
] | 19
|
2017-02-13T22:22:30.000Z
|
2019-01-31T04:13:39.000Z
|
nutsml/examples/pytorch_/mnist/mlp_train.py
|
maet3608/nuts-ml
|
2551612a47bc6e9efa534eda0db5d8c5def51887
|
[
"Apache-2.0"
] | 13
|
2017-06-01T13:44:54.000Z
|
2020-09-08T04:51:36.000Z
|
"""
.. module:: mlp_train
:synopsis: Example nuts-ml pipeline for training a MLP on MNIST
"""
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import nutsflow as nf
import nutsml as nm
import numpy as np
from nutsml.network import PytorchNetwork
from utils import download_mnist, load_mnist
class Model(nn.Module):
"""Pytorch model"""
def __init__(self, device):
"""Construct model on given device, e.g. 'cpu' or 'cuda'"""
super(Model, self).__init__()
self.fc1 = nn.Linear(28 * 28, 500)
self.fc2 = nn.Linear(500, 256)
self.fc3 = nn.Linear(256, 10)
self.to(device) # set device before constructing optimizer
# required properties of a model to be wrapped as PytorchNetwork!
self.device = device # 'cuda', 'cuda:0' or 'gpu'
self.losses = nn.CrossEntropyLoss() # can be list of loss functions
self.optimizer = optim.Adam(self.parameters())
def forward(self, x):
"""Forward pass through network for input x"""
x = x.view(-1, 28 * 28)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
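# Descriptive note on Model.forward above (B = batch size, used only for illustration):
#   (B, 1, 28, 28) -> view -> (B, 784) -> fc1+ReLU -> (B, 500) -> fc2+ReLU -> (B, 256) -> fc3 -> (B, 10)
# The fc3 output is raw logits; nn.CrossEntropyLoss applies log-softmax internally,
# so no explicit softmax layer is needed here.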
def accuracy(y_true, y_pred):
"""Compute accuracy"""
from sklearn.metrics import accuracy_score
y_pred = [yp.argmax() for yp in y_pred]
return 100 * accuracy_score(y_true, y_pred)
def evaluate(network, x, y):
"""Evaluate network performance (here accuracy)"""
metrics = [accuracy]
build_batch = (nm.BuildBatch(64)
.input(0, 'vector', 'float32')
.output(1, 'number', 'int64'))
acc = zip(x, y) >> build_batch >> network.evaluate(metrics)
return acc
def train(network, epochs=3):
"""Train network for given number of epochs"""
print('loading data...')
filepath = download_mnist()
x_train, y_train, x_test, y_test = load_mnist(filepath)
plot = nm.PlotLines(None, every_sec=0.2)
build_batch = (nm.BuildBatch(128)
.input(0, 'vector', 'float32')
.output(1, 'number', 'int64'))
for epoch in range(epochs):
print('epoch', epoch + 1)
losses = (zip(x_train, y_train) >> nf.PrintProgress(x_train) >>
nf.Shuffle(1000) >> build_batch >>
network.train() >> plot >> nf.Collect())
acc_test = evaluate(network, x_test, y_test)
acc_train = evaluate(network, x_train, y_train)
print('train loss : {:.6f}'.format(np.mean(losses)))
print('train acc : {:.1f}'.format(acc_train))
print('test acc : {:.1f}'.format(acc_test))
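# Descriptive note: in nuts-flow/nuts-ml the `>>` operator pipes an iterable through
# processing "nuts", so the training expression above streams (x, y) samples through
# progress printing, shuffling, batching, network training and live loss plotting
# before collecting the per-batch losses.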
if __name__ == '__main__':
print('creating model...')
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = Model(device)
network = PytorchNetwork(model)
# network.load_weights()
network.print_layers((28 * 28,))
print('training network...')
train(network, epochs=3)
| 31.147368
| 76
| 0.613045
| 859
| 0.290301
| 0
| 0
| 0
| 0
| 0
| 0
| 739
| 0.249747
|
a152a29b6edc8d593cb4451e6903d733b234650c
| 2,317
|
py
|
Python
|
get_image.py
|
DanielJamesEvans/spectrophotometer_code
|
10957590a4b49fe91ec6a0111ef83da63cc4ee67
|
[
"MIT"
] | 3
|
2019-08-31T16:43:10.000Z
|
2019-10-07T20:35:13.000Z
|
get_image.py
|
DanielJamesEvans/spectrophotometer_code
|
10957590a4b49fe91ec6a0111ef83da63cc4ee67
|
[
"MIT"
] | null | null | null |
get_image.py
|
DanielJamesEvans/spectrophotometer_code
|
10957590a4b49fe91ec6a0111ef83da63cc4ee67
|
[
"MIT"
] | 1
|
2019-08-31T19:10:40.000Z
|
2019-08-31T19:10:40.000Z
|
"""This code contains functions called by gui.py.
This software is licensed under the MIT license.
"""
import time
from picamera.array import PiRGBArray
from picamera import PiCamera
from gpiozero import LED
import numpy as np
from PIL import Image
__author__ = "Daniel James Evans"
__copyright__ = "Copyright 2019, Daniel James Evans"
__license__ = "MIT"
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 24
time.sleep(0.5)
def get_color_image():
"""Take a color image using the camera. Return as a numpy array."""
led = LED(4)
led.on()
output = np.empty((480, 640, 3), dtype=np.uint8)
camera.capture(output, "rgb")
led.off()
return output
def get_bw_image():
"""Return a numpy array of a grayscale image from the camera.
I couldn't figure out the proper way
to do this, so the function saves the image as bw.png.
The function takes multiple pictures and averages the values from
each picture. This is done to reduce noise."""
led = LED(4)
led.on()
    # I couldn't find a way for the camera to pass a grayscale image directly to numpy,
    # so the code saves a grayscale image file and then reads it back.
    camera.color_effects = (128, 128)
    # Capture five frames and average them to reduce noise; accumulate in int16 so the
    # summed uint8 pixel values cannot overflow.
    frames = []
    for i in range(5):
        camera.capture("bw.png")
        frames.append(np.array(Image.open("bw.png")).astype(np.int16))
        if i < 4:
            time.sleep(0.1)
    image_arr = sum(frames) / 5
    image_arr = image_arr.astype(np.uint8)
camera.color_effects = None
led.off()
    # Each pixel has 3 values (plus a 4th), but the values are identical (+/- 1)
    # because of camera.color_effects.
return image_arr[:, :, 1]
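# Hedged demo (runs only when this file is executed directly on the Pi; assumes the camera
# and the LED on GPIO 4 are wired as expected by the functions above):
if __name__ == "__main__":
    rgb = get_color_image()    # (480, 640, 3) uint8 colour frame
    gray = get_bw_image()      # 2-D uint8 frame, average of five captures
    print(rgb.shape, gray.shape)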
| 25.461538
| 79
| 0.662063
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 857
| 0.369875
|
a1536df44cebf44b8ca6b21340ed07ba5ea74a42
| 15,346
|
py
|
Python
|
rave_ec/Lib/ec_mcgill.py
|
DanielMichelson/drqc_article
|
cd7df2f7290adedb557bbc6ba484d30039a23ce2
|
[
"CC-BY-4.0"
] | null | null | null |
rave_ec/Lib/ec_mcgill.py
|
DanielMichelson/drqc_article
|
cd7df2f7290adedb557bbc6ba484d30039a23ce2
|
[
"CC-BY-4.0"
] | null | null | null |
rave_ec/Lib/ec_mcgill.py
|
DanielMichelson/drqc_article
|
cd7df2f7290adedb557bbc6ba484d30039a23ce2
|
[
"CC-BY-4.0"
] | null | null | null |
'''
Copyright (C) 2016 The Crown (i.e. Her Majesty the Queen in Right of Canada)
This file is an add-on to RAVE.
RAVE is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
RAVE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with RAVE. If not, see <http://www.gnu.org/licenses/>.
'''
##
# McGill format reader
# McGill indices are base 1, except the bin_number!
##
# @file
# @author Daniel Michelson, Environment and Climate Change Canada
# @date 2016-01-22
import time
import _rave, _raveio
import _polarvolume, _polarscan, _polarscanparam
from Proj import dr
from numpy import *
HEADER_LENGTH = 4096
RECORD_LENGTH = 2048
SEGMENT_LENGTH = 19
SEGMENTS = 107
NRAYS = 360
SCANT = 10 # Time in seconds to acquire a sweep.
QUANTITIES = {1 : "DBZH", 4 : "VRADH", 16 : "ZDR", 17 : "PHIDP",
18 : "RHOHV", 19 : "KDP"} # Only 1 and 4 are available
# esteps are the times in seconds between tilts in the ascending scan strategy
# These are real times from an acquisition in April 2012. They are used to
# adjust the timing metadata backwards, as McGill timestamps the end of data
# acquisition. They are indicative only, but the best we can do.
esteps = (0.921875, 0.914062, 0.914062, 1.04688, 0.976562, 1.00000, 0.984375,
1.02344, 1.47656, 1.33594, 1.17188, 1.71094, 2.17188, 2.82812,
3.12500, 3.32031, 3.71875, 3.92969, 4.44531, 4.83594, 5.13281,
5.22656, 5.29688, 0.0) # Last value is a dummy
## Empty generic container, to be populated
# @param object
class McGill(object):
def __init__(self):
pass
## Is this a McGill file?
# @param string containing the input file name
# @returns True if the file is a McGill file, otherwise False
def isMcGill(filename):
fd = open(filename)
s = fd.read(6)
fd.close()
return s == "mcgill"
## Reads the contents of a McGill file, according to
# http://deneb.tor.ec.gc.ca/urpdoc/reference/science/mcgill_volume_scan.html
# Attribute naming follows this document.
# The generic container is used to represent the contents of the file as:
# mobj : top-level McGill() object
# mobj.logical_records : a list of McGill objects containing one logical record each
# mobj.logical_records[index].segments : a list of 107 McGill objects, each
# representing a segment
# @param string input file name
# @returns McGill object representing the file contents
def readMcGill(filename):
mobj = McGill()
fd = open(filename)
# Start reading header
fd.seek(46*2)
#mobj.dum0 = fd.read(46*2)
mobj.number_Logical_Records = int(fromstring(fd.read(2), int16))
fd.seek(3*2, 1)
#mobj.dum1 = fd.read(3*2)
mobj.Volume_Scan_Format = int(fromstring(fd.read(2), int16))
fd.seek(5*2, 1)
#mobj.dum2 = fd.read(2*5)
mobj.hours = int(fromstring(fd.read(4), int32))
mobj.minutes = int(fromstring(fd.read(4), int32))
mobj.seconds = int(fromstring(fd.read(4), int32))
mobj.day = int(fromstring(fd.read(4), int32))
mobj.month = int(fromstring(fd.read(4), int32))
mobj.year = int(fromstring(fd.read(4), int32))
mobj.radar_Id = int(fromstring(fd.read(4), int32))
mobj.radar_latitude = float(fromstring(fd.read(4), float32))
mobj.radar_longitude = float(fromstring(fd.read(4), float32))
mobj.number_elevations = int(fromstring(fd.read(4), int32))
mobj.elevation_angles = []
for i in range(mobj.number_elevations):
mobj.elevation_angles.append(float(fromstring(fd.read(4), float32)))
mobj.azimuth_offset = int(fromstring(fd.read(2), int16))
mobj.viraq_flag = fd.read(2)
mobj.clutter_filter = fd.read(2)
fd.seek(315*2, 1)
#mobj.dum3 = fd.read(315*2)
mobj.met_param = int(fromstring(fd.read(2), int16))
    fd.seek(2, 1)
#mobj.dum4 = fd.read(2)
mobj.value_offset = float(fromstring(fd.read(4), float32))
mobj.cal_slope = float(fromstring(fd.read(4), float32))
mobj.antenna_programme = int(fromstring(fd.read(2), int16))
fd.seek(4, 1)
#mobj.dum5 = fd.read(2)
#mobj.dum6 = fd.read(2)
mobj.cscan_format = int(fromstring(fd.read(2), int16))
mobj.range_unfolded = int(fromstring(fd.read(2), int16))
mobj.vad_velocity_unfolded = int(fromstring(fd.read(2), int16))
mobj.numb_vad_unf_pts = []
for i in range(mobj.number_elevations):
mobj.numb_vad_unf_pts.append(int(fromstring(fd.read(2), int16)))
mobj.numb_range_unf_pts = []
for i in range(mobj.number_elevations):
mobj.numb_range_unf_pts.append(int(fromstring(fd.read(2), int16)))
mobj.range_bins_array_size = int(fromstring(fd.read(2), int16))
fd.seek(2, 1)
#mobj.dum7 = fd.read(2)
mobj.shift_cscan_flag = int(fromstring(fd.read(2), int16))
mobj.shift_speed = int(fromstring(fd.read(2), int16))
mobj.shift_dir = int(fromstring(fd.read(2), int16))
fd.seek(48*4, 1)
#mobj.dum8 = fd.read(24*4)
#mobj.dum9 = fd.read(24*4)
mobj.vert_grad_unfolded = int(fromstring(fd.read(2), int16))
mobj.numb_vert_grad_unf_pts = []
for i in range(mobj.number_elevations):
mobj.numb_vert_grad_unf_pts.append(int(fromstring(fd.read(2), int16)))
fd.seek(12, 1)
#mobj.dum10 = fd.read(4) # documentation says 2 bytes, but it's 4
#mobj.dum11 = fd.read(4)
#mobj.dum12 = fd.read(4)
mobj.radial_grad_unfolded = int(fromstring(fd.read(2), int16))
mobj.numb_radial_grad_unf_pts = []
for i in range(mobj.number_elevations):
mobj.numb_radial_grad_unf_pts.append(int(fromstring(fd.read(2), int16)))
mobj.prf1 = []
for i in range(mobj.number_elevations):
mobj.prf1.append(int(fromstring(fd.read(2), int16)))
mobj.prf2 = []
for i in range(mobj.number_elevations):
mobj.prf2.append(int(fromstring(fd.read(2), int16)))
mobj.nyq_range = []
for i in range(mobj.number_elevations):
mobj.nyq_range.append(int(fromstring(fd.read(2), int16)))
mobj.max_range = []
for i in range(mobj.number_elevations):
mobj.max_range.append(int(fromstring(fd.read(2), int16)))
mobj.nyq_vel = []
for i in range(mobj.number_elevations):
mobj.nyq_vel.append(float(fromstring(fd.read(4), float32)))
mobj.max_vel = []
for i in range(mobj.number_elevations):
mobj.max_vel.append(float(fromstring(fd.read(4), float32)))
mobj.usable_elv = []
for i in range(mobj.number_elevations):
mobj.usable_elv.append(int(fromstring(fd.read(1), uint8)))
mobj.prev_sub_area_speed, mobj.prev_sub_area_dir = [], []
for i in range(9):
mobj.prev_sub_area_speed.append(int(fromstring(fd.read(2), int16)))
for i in range(9):
mobj.prev_sub_area_dir.append(int(fromstring(fd.read(2), int16)))
#mobj.dum_pad = fd.read(1166*2)
# Start reading data, by logical record
mobj.logical_records = []
fd.seek(HEADER_LENGTH)
last_record = 0
while last_record == 0:
lr = McGill()
record = fd.read(RECORD_LENGTH)
lr.high = int(fromstring(record[0], uint8))
lr.low = int(fromstring(record[1], uint8))
lr.logical_record_number = 64 * lr.high + lr.low
last_record = int(fromstring(record[2], uint8))
lr.beginning_elevation_number = int(fromstring(record[3], uint8))
lr.end_elevation_number = int(fromstring(record[4], uint8))
lr.segstr = record[14:2047]
lr.segments = []
# Read SEGMENTS, each SEGMENT_LENGTH bytes long.
segpos = 0
for i in range(SEGMENTS):
seg = McGill()
this_seg = lr.segstr[segpos:segpos+SEGMENT_LENGTH]
seg.N = int(fromstring(this_seg[0], uint8))
# Data segment
if 1 <= seg.N <= 30:
seg.type = "data"
seg.high = int(fromstring(this_seg[1], uint8))
seg.low = int(fromstring(this_seg[2], uint8))
seg.bin_number = 16 * (seg.N - 1)# + 1
seg.radial_number = 64 * seg.high + seg.low
seg.data = fromstring(this_seg[3:], uint8)
# Elevation segment
elif 31 <= seg.N <= 55:
seg.type = "elevation"
seg.elevation_number = seg.N - 31
seg.elevation_angle = mobj.elevation_angles[seg.elevation_number-1]
# End-of-data segment can be ignored
elif seg.N == 63:
seg.type = "eod"
# For some reason, there are segments of type 0, which are
# undocumented. Ignore these.
if seg.N > 0:
lr.segments.append(seg)
segpos += SEGMENT_LENGTH
mobj.logical_records.append(lr)
fd.close()
return mobj
## Takes the output of readMcGill and creates contiguous scans of data.
# This is done by pasting the contents of each McGill segment into the
# equivalent position in the corresponding contiguous scan.
# @param McGill object representing file contents
def makeScans(mobj):
mobj.scans = []
# Create empty arrays for each scan
for i in range(mobj.number_elevations):
mobj.scans.append(zeros((NRAYS, 120+(60*2)+(60*4)), uint8))
# Populate them
for lr in mobj.logical_records:
for seg in lr.segments:
# Elevation segment types always preceed data types
if seg.type == "elevation":
scan = seg.elevation_number -1
elif seg.type == "data":
ray = seg.radial_number - 1
# Bins 112-119 are 1 km, 120-128 are 2 km, 112-135 km
if seg.bin_number == 112:
part1 = seg.data[:8]
part2 = repeat(seg.data[8:], 2)
data = concatenate([part1, part2])
frombin = 112
# All 2 km, 136-231 km
elif 128 <= seg.bin_number < 176:
data = repeat(seg.data, 2)
diff = (seg.bin_number - 128) / 16.0
frombin = 136 + 32 * diff # 16 and 32 combo makes no sense?
# Bins 176-179 are 2 km, 180-239 are 4 km, 232-287 km
elif seg.bin_number == 176:
part1 = repeat(seg.data[:4], 2)
part2 = repeat(seg.data[4:], 4)
data = concatenate([part1, part2])
frombin = 232
# All 4 km, 288- km
elif 192 <= seg.bin_number:
data = repeat(seg.data, 4)
diff = (seg.bin_number - 192) / 32.0
frombin = 288 + 64 * diff # 32 and 64 combo makes no sense?
# All 1 km, 0-111 km
else:
data = seg.data
frombin = seg.bin_number
tobin = int(frombin) + len(data)
mobj.scans[scan][ray][frombin:tobin] = data
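# Descriptive note on the expansion above: a McGill ray arrives as 120 bins at 1 km,
# 60 bins at 2 km and 60 bins at 4 km resolution; repeating the coarser bins 2x and 4x
# yields 120 + 60*2 + 60*4 = 480 cells of 1 km each, exactly the ray length allocated
# at the top of makeScans.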
## McGill data times are the end of data acquisition. This function guestimates
# the beginning dates and times of each scan in the volume.
# @param McGill object representing file contents
def adjustTimes(mobj):
startdate, starttime, enddate, endtime = [], [], [], []
tt = (mobj.year, mobj.month, mobj.day,
mobj.hours, mobj.minutes, mobj.seconds, 0, 0, 0)
epochs = time.mktime(tt) - (sum(esteps) + SCANT*mobj.number_elevations)
for i in range(mobj.number_elevations):
start = time.gmtime(epochs)
startdate.append(time.strftime("%Y%m%d", start))
starttime.append(time.strftime("%H%M%S", start))
epochs += SCANT
end = time.gmtime(epochs)
enddate.append(time.strftime("%Y%m%d", end))
endtime.append(time.strftime("%H%M%S", end))
epochs += esteps[i]
mobj.startdate = startdate
mobj.starttime = starttime
mobj.enddate = enddate
mobj.endtime = endtime
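# Worked illustration of the adjustment above (numbers indicative only, assuming the 24
# elevations suggested by the length of esteps): sum(esteps) is roughly 57.5 s of
# inter-tilt moves, so the volume start is placed about 24 * SCANT + 57.5 ~= 297.5 s
# (close to five minutes) before the McGill end-of-acquisition timestamp, and each scan
# is then given a SCANT-second acquisition window.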
## Creates a PVOL from the McGill object
# @param McGill object representing file contents
# @returns BALTRAD/ODIM PVOL object
def makePVOL(mobj):
pvol = _polarvolume.new()
pvol.source = "NOD:cawmn,PLC:McGill QC"
pvol.longitude = mobj.radar_longitude * dr
pvol.latitude = mobj.radar_latitude * dr
pvol.height = 76.0 # From a URP Site.conf file
pvol.beamwidth = 0.85 * dr # From a URP Site.conf file
pvol.date = mobj.startdate[0]
pvol.time = mobj.starttime[0]
pvol.addAttribute("how/simulated", "False")
pvol.addAttribute("how/system", "McGill")
pvol.addAttribute("how/TXtype", "klystron")
pvol.addAttribute("how/polmode", "simultaneous-dual")
pvol.addAttribute("how/wavelength", 10.4) # According to the McGill spec
pvol.addAttribute("how/rpm", 6.0) # According to the McGill spec
for i in range(mobj.number_elevations):
scan = _polarscan.new()
scan.elangle = mobj.elevation_angles[i] * dr
scan.rscale = 1000.0
scan.rstart = 0.25 # According to URP decoder
scan.a1gate = 0 # Unknown
scan.startdate = mobj.startdate[i]
scan.starttime = mobj.starttime[i]
scan.enddate = mobj.enddate[i]
scan.endtime = mobj.endtime[i]
scan.addAttribute("how/astart", 0.5) # According to the McGill spec
scan.addAttribute("how/lowprf", mobj.prf1[i]) # PRFs are identical
#scan.addAttribute("how/midprf", )
scan.addAttribute("how/highprf", mobj.prf2[i])
param = _polarscanparam.new()
param.quantity = QUANTITIES[mobj.met_param] # Only DBZH and VRADH
param.nodata = 255.0 # Unknown
param.undetect = 0.0 # Implied
param.gain = mobj.cal_slope
param.offset = mobj.value_offset
param.setData(mobj.scans[i])
scan.addParameter(param)
pvol.addScan(scan)
return pvol
## Each PVOL contains only one moment, so merge several of these into one.
# Assume the first PVOL contains DBZH and the second VRADH.
# @param list of (two) PVOLs
# @returns PVOL object containing (both) moments per scan.
def mergePVOLs(pvols):
refl, wind = pvols
for i in range(wind.getNumberOfScans()):
zscan, vscan = refl.getScan(i), wind.getScan(i)
vradh = vscan.getParameter("VRADH")
zscan.addParameter(vradh)
return refl
## Reads McGill data from file and returns a BALTRAD/ODIM PVOL object for a
# single moment
# @param string of McGill file
# @returns PVOL object containing one moment for each scan.
def file2pvol(filename):
mobj = readMcGill(filename)
makeScans(mobj)
adjustTimes(mobj)
return makePVOL(mobj)
## Reads McGill data from two files into a single BALTRAD/ODIM PVOL
# @param string of the McGill file containing reflectivity (DBZH)
# @param string of the McGill file containing radial wind velocity (VRADH)
# @returns PVOL object containing both moments per scan
def read(zfile, vfile):
refl = file2pvol(zfile)
wind = file2pvol(vfile)
return mergePVOLs([refl, wind])
if __name__=="__main__":
pass
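## Hedged usage sketch (file names below are hypothetical):
##     pvol = read("radar_dbzh.mcgill", "radar_vradh.mcgill")
## The resulting polar volume can then be written to ODIM-HDF5 with RAVE's _raveio module
## (imported above), e.g. by creating an I/O object with _raveio.new(), assigning the
## volume to its `object` attribute and calling save(); the exact I/O calls are an
## assumption here, not verified against the RAVE API.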
| 37.891358
| 84
| 0.6373
| 58
| 0.003779
| 0
| 0
| 0
| 0
| 0
| 0
| 4,821
| 0.314154
|
a1537d70484481dc31d44d35ec4975bba8b264f5
| 1,038
|
py
|
Python
|
product/migrations/0001_initial.py
|
dnetochaves/e-commerce
|
97c2266934b6db883d520381520130b0472e9db4
|
[
"MIT"
] | null | null | null |
product/migrations/0001_initial.py
|
dnetochaves/e-commerce
|
97c2266934b6db883d520381520130b0472e9db4
|
[
"MIT"
] | null | null | null |
product/migrations/0001_initial.py
|
dnetochaves/e-commerce
|
97c2266934b6db883d520381520130b0472e9db4
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.4 on 2020-12-27 15:03
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('short_description', models.TextField(max_length=255)),
('long_description', models.TextField()),
('image', models.ImageField(blank=True, null=True, upload_to='product_pictures/%Y/%m')),
('slug', models.SlugField(unique=True)),
('price_marketing', models.FloatField()),
('price_marketing_promotion', models.FloatField(default=0)),
('FIELDNAME', models.CharField(choices=[('V', 'Variação'), ('S', 'Simples')], default='V', max_length=1)),
],
),
]
| 35.793103
| 122
| 0.575145
| 947
| 0.910577
| 0
| 0
| 0
| 0
| 0
| 0
| 229
| 0.220192
|
a155e11f0e425a96e53ea2166d51415855a2b463
| 921
|
py
|
Python
|
src/python/setup.py
|
Basasuya/tsne-cuda
|
dc518acd9fdf9109952ffe57d6cf12363e3ffd2c
|
[
"BSD-3-Clause"
] | 2
|
2021-04-30T16:48:47.000Z
|
2021-05-21T08:49:13.000Z
|
src/python/setup.py
|
Basasuya/tsne-cuda
|
dc518acd9fdf9109952ffe57d6cf12363e3ffd2c
|
[
"BSD-3-Clause"
] | null | null | null |
src/python/setup.py
|
Basasuya/tsne-cuda
|
dc518acd9fdf9109952ffe57d6cf12363e3ffd2c
|
[
"BSD-3-Clause"
] | 1
|
2021-04-25T23:11:05.000Z
|
2021-04-25T23:11:05.000Z
|
from setuptools import setup
setup(
name='tsnecuda',
version='2.1.0',
author='Chan, David M., Huang, Forrest., Rao, Roshan.',
author_email='davidchan@berkeley.edu',
packages=['tsnecuda', 'tsnecuda.test'],
package_data={'tsnecuda': ['libtsnecuda.so']},
scripts=[],
url='https://github.com/CannyLab/tsne-cuda',
license='LICENSE.txt',
description='CUDA Implementation of T-SNE with Python bindings',
long_description=open('README.txt').read(),
install_requires=[
'numpy >= 1.14.1',
],
classifiers=[
'Programming Language :: Python :: 3.6',
'Operating System :: POSIX :: Linux',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Artificial Intelligence'
],
keywords=[
'TSNE',
'CUDA',
'Machine Learning',
'AI'
]
)
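# Hedged usage note (standard setuptools workflow, nothing specific to this package):
#   pip install .            # build and install from this source tree
#   python setup.py sdist    # build a source distribution
# after which the bindings are importable as `import tsnecuda`.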
| 27.909091
| 68
| 0.598263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 512
| 0.555917
|
a15747184e94e78f55f7ab475ca0b1abe33741e3
| 107,889
|
py
|
Python
|
programs/parallels.py
|
ETCBC/parallells
|
f45f6cc3c4f933dba6e649f49cdb14a40dcf333f
|
[
"MIT"
] | 4
|
2017-10-01T05:14:59.000Z
|
2020-09-09T09:41:26.000Z
|
programs/parallels.py
|
ETCBC/parallells
|
f45f6cc3c4f933dba6e649f49cdb14a40dcf333f
|
[
"MIT"
] | null | null | null |
programs/parallels.py
|
ETCBC/parallells
|
f45f6cc3c4f933dba6e649f49cdb14a40dcf333f
|
[
"MIT"
] | 1
|
2020-10-16T13:21:51.000Z
|
2020-10-16T13:21:51.000Z
|
#!/usr/bin/env python
# coding: utf-8
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# Auto-generated notebook table of contents (HTML widget markup trimmed); sections:
#   0. Introduction: 0.1 Motivation, 0.3 Open Source, 0.4 What are parallel passages?, 0.5 Authors, 0.6 Status
#   2.1 Assessing the outcomes (2.1.1 Assessment criteria)
#   3.1 Similarity (3.1.1 SET, 3.1.2 LCS); 3.2 Performance
#   4.1 Chunking (4.1.1 Fixed chunking, 4.1.2 Object chunking); 4.2 Preparing; 4.3 Cliques (4.3.1 Organizing the cliques, 4.3.2 Evaluating clique sets)
#   5.1 Loading the feature data; 5.2 Configuration; 5.3 Experiment settings; 5.4 Chunking; 5.5 Preparing (SET, LCS)
#   5.6 Similarity computation (SET, LCS); 5.7 Cliques (5.7.1 Selecting passages, 5.7.2 Growing cliques)
#   5.8 Output (format definitions, formatting clique lists, table of experiments, high-level formatting functions)
#   5.9 Running experiments; Discussion
# <img align="right" src="images/dans-small.png"/>
# <img align="right" src="images/tf-small.png"/>
# <img align="right" src="images/etcbc.png"/>
#
#
# # Parallel Passages in the MT
#
# # 0. Introduction
#
# ## 0.1 Motivation
# We want to make a list of **all** parallel passages in the Masoretic Text (MT) of the Hebrew Bible.
#
# Here is a quote that triggered Dirk to write this notebook:
#
# > Finally, the Old Testament Parallels module in Accordance is a helpful resource that enables the researcher to examine 435 sets of parallel texts, or in some cases very similar wording in different texts, in both the MT and translation, but the large number of sets of texts in this database should not fool one to think it is complete or even nearly complete for all parallel writings in the Hebrew Bible.
#
# Robert Rezetko and Ian Young.
# Historical linguistics & Biblical Hebrew. Steps Toward an Integrated Approach.
# *Ancient Near East Monographs, Number 9*. SBL Press Atlanta. 2014.
# [PDF Open access available](https://www.google.nl/url?sa=t&rct=j&q=&esrc=s&source=web&cd=2&ved=0CCgQFjAB&url=http%3A%2F%2Fwww.sbl-site.org%2Fassets%2Fpdfs%2Fpubs%2F9781628370461_OA.pdf&ei=2QSdVf-vAYSGzAPArJeYCg&usg=AFQjCNFA3TymYlsebQ0MwXq2FmJCSHNUtg&sig2=LaXuAC5k3V7fSXC6ZVx05w&bvm=bv.96952980,d.bGQ)
# <img align="right" width="50%" src="parallel.png"/>
#
# ## 0.3 Open Source
# This is an IPython notebook.
# It contains a working program to carry out the computations needed to obtain the results reported here.
#
# You can download this notebook and run it on your computer, provided you have
# [Text-Fabric](https://github.com/Dans-labs/text-fabric) installed.
#
# It is a pity that we cannot compare our results with the Accordance resource mentioned above,
# since that resource has not been published in an accessible manner.
# We also do not have information about how this resource was constructed from the raw data.
# In contrast with that, we present our results in a completely reproducible manner.
# This notebook itself can serve as the method of replication,
# provided you have obtained the necessary resources.
# See [sources](https://github.com/ETCBC/shebanq/wiki/Sources), which are all Open Access.
#
# ## 0.4 What are parallel passages?
# The notion of *parallel passage* is not a simple, straightforward one.
# There are parallels on the basis of lexical content in the passages on the one hand,
# but on the other hand there are also correspondences in certain syntactical structures,
# or even in similarities in text structure.
#
# In this notebook we do select a straightforward notion of parallel, based on lexical content only.
# We investigate two measures of similarity, one that ignores word order completely,
# and one that takes word order into account.
#
# Two kinds of shortcomings of this approach must be mentioned:
#
# 1. We will not find parallels based on non-lexical criteria (unless they are also lexical parallels)
# 1. We will find too many parallels: certain short sentences (*and he said*) and formula-like passages (*and the word of God came to Moses*) occur so often that their bearing on whether there is a common text history is much more subtle.
#
# For a more full treatment of parallel passages, see
#
# **Wido Th. van Peursen and Eep Talstra**:
# Computer-Assisted Analysis of Parallel Texts in the Bible -
# The Case of 2 Kings xviii-xix and its Parallels in Isaiah and Chronicles.
# *Vetus Testamentum* 57, pp. 45-72.
# 2007, Brill, Leiden.
#
# Note that our method fails to identify any parallels with Chronica_II 32.
# Van Peursen and Talstra state about this chapter and 2 Kings 18:
#
# > These chapters differ so much, that it is sometimes impossible to establish
# which verses should be considered parallel.
#
# In this notebook we produce a set of *cliques*,
# a clique being a set of passages that are *quite* similar, based on lexical information.
#
#
# ## 0.5 Authors
# This notebook is by [Dirk Roorda](mailto:dirk.roorda@dans.knaw.nl) and owes a lot to
# discussions with [Martijn Naaijer](mailto:m.naaijer@vu.nl).
#
#
# ## 0.6 Status
#
# * **modified: 2017-09-28** Is now part of a pipeline for transferring data from the ETCBC to Text-Fabric.
# * **modified: 2016-03-03** Added experiments based on chapter chunks and lower similarities.
#
# 165 experiments have been carried out, of which 18 yielded promising results.
# All results can be easily inspected, just by clicking in your browser.
# One of the experiments has been chosen as the basis for
# [crossref](https://shebanq.ancient-data.org/hebrew/note?version=4b&id=Mnxjcm9zc3JlZg__&tp=txt_tb1&nget=v)
# annotations in SHEBANQ.
#
# # 1. Results
#
# Click in a green cell to see interesting results. The numbers in the cell indicate
#
# * the number of passages that have a variant elsewhere
# * the number of *cliques* they form (cliques are sets of similar passages)
# * the number of passages in the biggest clique
#
# Below the results is an account of the method that we used, followed by the actual code to produce these results.
# # Pipeline
# See [operation](https://github.com/ETCBC/pipeline/blob/master/README.md#operation)
# for how to run this script in the pipeline.
#
# The pipeline comes in action in Section [6a](#6a) below: TF features.
# # Caveat
#
# This notebook makes use of a new feature of text-fabric, first present in 2.3.15.
# Make sure to upgrade first.
#
# ```
# sudo -H pip3 install --upgrade text-fabric
# ```
# In[1]:
import sys
import os
import re
import collections
import pickle
import math
import difflib
import yaml
from difflib import SequenceMatcher
from IPython.display import HTML
import matplotlib.pyplot as plt
from tf.core.helpers import formatMeta
# pip3 install python-Levenshtein
# In[2]:
from Levenshtein import ratio
# In[3]:
import utils
from tf.fabric import Fabric
# In[4]:
get_ipython().run_line_magic("load_ext", "autoreload") # noqa F821
get_ipython().run_line_magic("autoreload", "2") # noqa F821
get_ipython().run_line_magic("matplotlib", "inline") # noqa F821
# In[5]:
if "SCRIPT" not in locals():
# SCRIPT = False
SCRIPT = False
FORCE = True
FORCE_MATRIX = False
LANG_FEATURE = "languageISO"
OCC_FEATURE = "g_cons"
LEX_FEATURE = "lex"
TEXT_FEATURE = "g_word_utf8"
TRAILER_FEATURE = "trailer_utf8"
CORE_NAME = "bhsa"
NAME = "parallels"
VERSION = "2021"
# In[6]:
def stop(good=False):
if SCRIPT:
sys.exit(0 if good else 1)
# In[7]:
# run this cell after all other cells
if False and not SCRIPT:
HTML(other_exps)
# # 2. Experiments
#
# We have conducted 165 experiments, each corresponding to a specific choice of parameters.
# Every experiment is an attempt to identify variants and collect them in *cliques*.
#
# The table gives an overview of the experiments conducted.
#
# Every *row* corresponds to a particular way of chunking and a method of measuring the similarity.
#
# There are *columns* for each similarity *threshold* that we have tried.
# The idea is that chunks are similar if their similarity is above the threshold.
#
# The outcomes of one experiment have been added to SHEBANQ as the note set
# [crossref](https://shebanq.ancient-data.org/hebrew/note?version=4b&id=Mnxjcm9zc3JlZg__&tp=txt_tb1&nget=v).
# The experiment chosen for this is currently
#
# * *chunking*: **object verse**
# * *similarity method*: **SET**
# * *similarity threshold*: **65**
#
#
# ## 2.1 Assessing the outcomes
#
# Not all experiments lead to useful results.
# We have indicated the value of a result by a color coding, based on objective characteristics,
# such as the number of parallel passages, the number of cliques, the size of the greatest clique, and the way of chunking.
# These numbers are shown in the cells.
#
# ### 2.1.1 Assessment criteria
#
# If the method is based on *fixed* chunks, we deprecate both the method and its results,
# because two perfectly similar verses can be missed when a fixed window (say 100 words wide) that shifts over the text aligns differently with the two verses, which will usually be the case.
#
# Otherwise, we consider *ll*, the length of the longest clique, and *nc*, the number of cliques.
# We set three quality parameters:
# * `REC_CLIQUE_RATIO` = 5 : recommended clique ratio
# * `DUB_CLIQUE_RATIO` = 15 : dubious clique ratio
# * `DEP_CLIQUE_RATIO` = 25 : deprecated clique ratio
#
# where the *clique ratio* is $100 (ll/nc)$,
# i.e. the length of the longest clique divided by the number of cliques as percentage.
#
# An experiment is *recommended* if its clique ratio is between the recommended and dubious clique ratios.
#
# It is *dubious* if its clique ratio is between the dubious and deprecated clique ratios.
#
# It is *deprecated* if its clique ratio is above the deprecated clique ratio.
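#
# In outline, the classification works as follows (a condensed sketch of the `assess_exp`
# function in section 5.8.4 below, which also covers the deprecated fixed chunking method):
#
# ```
# def assess(ll, nc, fixed_chunks):
#     # ll: length of the longest clique, nc: number of cliques
#     if fixed_chunks:
#         return "deprecated"  # fixed chunking is deprecated as a method
#     clique_ratio = 100 * ll / nc
#     if clique_ratio > 25:
#         return "deprecated"
#     if clique_ratio > 15:
#         return "dubious"
#     if clique_ratio > 5:
#         return "recommended"
#     return "unassessed"
# ```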
#
# ## 2.2 Inspecting results
# If you click on the hyperlink in the cell, you are taken to a page that gives you
# all the details of the results:
#
# 1. A link to a file with all *cliques* (which are the sets of similar passages)
# 1. A list of links to chapter-by-chapter diff files (for cliques with just two members), and only for
# experiments with outcomes that are labeled as *promising* or *unassessed quality* or *mixed results*.
#
# To get into the variants quickly, inspect the list (2) and click through
# to see the actual variant material in chapter context.
#
# Not all variants occur here, so continue with (1) to see the remaining cliques.
#
# Sometimes in (2) a chapter diff file does not indicate clearly the relevant common part of both chapters.
# In that case you have to consult the big list (1)
#
# All these results can be downloaded from the
# [SHEBANQ github repo](https://github.com/ETCBC/shebanq/tree/master/static/docs/tools/parallel/files)
# After downloading the whole directory, open ``experiments.html`` in your browser.
# # 3. Method
#
# Here we discuss the method we used to arrive at a list of parallel passages
# in the Masoretic Text (MT) of the Hebrew Bible.
#
# ## 3.1 Similarity
#
# We have to find passages in the MT that are *similar*.
# Therefore we *chunk* the text in some way, and then compute the similarities between pairs of chunks.
#
# There are many ways to define and compute similarity between texts.
# Here, we have tried two methods ``SET`` and ``LCS``.
# Both methods define similarity as the fraction of common material with respect to the total material.
#
# ### 3.1.1 SET
#
# The ``SET`` method reduces textual chunks to *sets* of *lexemes*.
# This method abstracts from the order and number of occurrences of words in chunks.
#
# We use as measure for the similarity of chunks $C_1$ and $C_2$ (taken as sets):
#
# $$ s_{\rm set}(C_1, C_2) = {\vert C_1 \cap C_2\vert \over \vert C_1 \cup C_2 \vert} $$
#
# where $\vert X \vert$ is the number of elements in set $X$.
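#
# As a concrete illustration, here is a minimal, self-contained sketch of this measure,
# expressed as a percentage (as it is used in the experiments below); the example sets are made up:
#
# ```
# def set_similarity(c1, c2):
#     """Percentage of shared lexemes relative to all lexemes in both chunks."""
#     union = c1 | c2
#     if not union:
#         return 0
#     return 100 * len(c1 & c2) / len(union)
#
# set_similarity({"A", "B", "C"}, {"A", "B", "D"})  # 50.0: 2 shared out of 4 in total
# ```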
#
# ### 3.1.2 LCS
#
# The ``LCS`` method is less reductive: chunks are *strings* of *lexemes*,
# so the order and number of occurrences of words is retained.
#
# We use as measure for the similarity of chunks $C_1$ and $C_2$ (taken as strings):
#
# $$ s_{\rm lcs}(C_1, C_2) = {\vert {\rm LCS}(C_1,C_2)\vert \over \vert C_1\vert + \vert C_2 \vert -
# \vert {\rm LCS}(C_1,C_2)\vert} $$
#
# where ${\rm LCS}(C_1, C_2)$ is the
# [longest common subsequence](https://en.wikipedia.org/wiki/Longest_common_subsequence_problem)
# of $C_1$ and $C_2$ and
# $\vert X\vert$ is the length of sequence $X$.
#
# It remains to be seen whether we need the extra sophistication of ``LCS``.
# The risk is that ``LCS`` could fail to spot related passages when there is a large amount of transposition going on.
# The results should have the last word.
#
# We need to compute the LCS efficiently, and for this we used the python ``Levenshtein`` module:
#
# ``pip install python-Levenshtein``
#
# whose documentation is
# [here](http://www.coli.uni-saarland.de/courses/LT1/2011/slides/Python-Levenshtein.html).
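#
# A minimal usage sketch (the strings are made up; note that ``ratio`` is based on edit
# distance, so it approximates rather than literally computes the LCS formula above):
#
# ```
# from Levenshtein import ratio
#
# a = "A B C D"
# b = "A C D E"
# similarity = 100 * ratio(a, b)  # about 71 for these two strings
# ```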
#
# ## 3.2 Performance
#
# Similarity computation is the part where the heavy lifting occurs.
# It is basically quadratic in the number of chunks, so if you have verses as chunks (~ 23,000),
# you need to do ~ 270,000,000 similarity computations, and if you use sentences (~ 64,000),
# you need to do ~ 2,000,000,000 ones!
# The computation of a single similarity should be *really* fast.
#
# Besides that, we use two ways to economize:
#
# * after having computed a matrix for a specific set of parameter values, we save the matrix to disk;
# new runs can load the matrix from disk in a matter of seconds;
# * we do not store low similarity values in the matrix, low being < ``MATRIX_THRESHOLD``.
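#
# The ``similarity()`` function in section 5.6 follows the usual pickle-cache pattern;
# in outline (the helper below is a simplification, not the actual code):
#
# ```
# import os
# import pickle
#
# def cached_matrix(path, compute):
#     """Load a previously computed similarity matrix, or compute and store it."""
#     if os.path.exists(path):
#         with open(path, "rb") as f:
#             return pickle.load(f)
#     matrix = compute()
#     with open(path, "wb") as f:
#         pickle.dump(matrix, f, protocol=3)
#     return matrix
# ```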
#
# The ``LCS`` method is more complicated.
# We have tried the ``ratio`` method from the ``difflib`` package that is present in the standard python distribution.
# This is unbearably slow for our purposes.
# The ``ratio`` method in the ``Levenshtein`` package is much quicker.
#
# See the table for an indication of the amount of work to create the similarity matrix
# and the performance per similarity method.
#
# The *matrix threshold* is the lower bound of similarities that are stored in the matrix.
# If a pair of chunks has a lower similarity, no entry will be made in the matrix.
#
# The computing has been done on a Macbook Air (11", mid 2012, 1.7 GHz Intel Core i5, 8GB RAM).
#
# |chunk type |chunk size|similarity method|matrix threshold|# of comparisons|size of matrix (KB)|computing time (min)|
# |:----------|---------:|----------------:|---------------:|---------------:|------------------:|-------------------:|
# |fixed |100 |LCS |60 | 9,003,646| 7| ? |
# |fixed |100 |SET |50 | 9,003,646| 7| ? |
# |fixed |50 |LCS |60 | 36,197,286| 37| ? |
# |fixed |50 |SET |50 | 36,197,286| 18| ? |
# |fixed |20 |LCS |60 | 227,068,705| 2,400| ? |
# |fixed |20 |SET |50 | 227,068,705| 113| ? |
# |fixed |10 |LCS |60 | 909,020,841| 59,000| ? |
# |fixed |10 |SET |50 | 909,020,841| 1,800| ? |
# |object |verse |LCS |60 | 269,410,078| 2,300| 31|
# |object |verse |SET |50 | 269,410,078| 509| 14|
# |object |half_verse|LCS |60 | 1,016,396,241| 40,000| 50|
# |object |half_verse|SET |50 | 1,016,396,241| 3,600| 41|
# |object |sentence |LCS |60 | 2,055,975,750| 212,000| 68|
# |object |sentence |SET |50 | 2,055,975,750| 82,000| 63|
# # 4. Workflow
#
# ## 4.1 Chunking
#
# There are several ways to chunk the text:
#
# * fixed chunks of approximately ``CHUNK_SIZE`` words
# * by object, such as verse, sentence and even chapter
#
# After chunking, we prepare the chunks for similarity measuring.
#
# ### 4.1.1 Fixed chunking
# Fixed chunking is unnatural, but if the chunk size is small, it can yield fair results.
# The results are somewhat difficult to inspect, because they generally do not respect constituent boundaries.
# It is to be expected that fixed chunks in variant passages will be mutually *out of phase*,
# meaning that the chunks involved in these passages are not aligned with each other.
# So they will have a lower similarity than they could have if they were aligned.
# This is a source of artificial noise in the outcome and/or missed cases.
#
# If the chunking respects "natural" boundaries in the text, there is far less misalignment.
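#
# The essence of fixed chunking, simplified with respect to the ``chunking()`` function in
# section 5.4, which distributes the remainder words more carefully over the chunks:
#
# ```
# def fixed_chunks(words, size):
#     """Split the word nodes of one book into chunks of roughly `size` words."""
#     nchunks = max(1, len(words) // size)
#     step = len(words) / nchunks
#     return [words[round(i * step):round((i + 1) * step)] for i in range(nchunks)]
# ```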
#
# ### 4.1.2 Object chunking
# We can also chunk by object, such as verse, half_verse or sentence.
#
# Chunking by *verse* is very much like chunking in fixed chunks of size 20, performance-wise.
#
# Chunking by *half_verse* is comparable to fixed chunks of size 10.
#
# Chunking by *sentence* will generate an enormous number of
# false positives, because there are very many very short sentences (down to one word) in the text.
# Besides that, the performance overhead is huge.
#
# The *half_verses* seem to be very interesting candidates.
# They are smaller than verses, but there are fewer *degenerate cases* than with sentences.
# From the table above it can be read that half verses require only half as many similarity computations as sentences.
#
#
# ## 4.2 Preparing
#
# We prepare the chunks for the application of the chosen method of similarity computation (``SET`` or ``LCS``).
#
# In both cases we reduce the text to a sequence of transliterated consonantal *lexemes* without disambiguation.
# In fact, we go one step further: we remove the consonants (aleph, waw, yod) that are often silent.
#
# For ``SET``, we represent each chunk as the set of its reduced lexemes.
#
# For ``LCS``, we represent each chunk as the string obtained by joining its reduced lexemes separated by white spaces.
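#
# In code, the reduction and the two representations amount to the following
# (this mirrors the ``EXCLUDED_CONS`` pattern and the ``preparing()`` function of section 5.5):
#
# ```
# import re
#
# EXCLUDED = re.compile(r"[>WJ=/\[]")  # aleph, waw, yod and homonym disambiguation marks
#
# def reduce_lexeme(lex):
#     return EXCLUDED.sub("", lex.replace("<", "O"))
#
# def as_set(lexemes):     # SET representation: order and multiplicity are ignored
#     return frozenset(lx for lx in map(reduce_lexeme, lexemes) if lx)
#
# def as_string(lexemes):  # LCS representation: order and multiplicity are kept
#     return " ".join(lx for lx in map(reduce_lexeme, lexemes) if lx)
# ```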
#
# ## 4.3 Cliques
#
# After having computed a sufficient part of the similarity matrix, we set a value for ``SIMILARITY_THRESHOLD``.
# All pairs of chunks having at least that similarity are deemed *interesting*.
#
# We organize the members of such pairs in *cliques*, groups of chunks of which each member is
# similar (*similarity* > ``SIMILARITY_THRESHOLD``) to at least one other member.
#
# We start with no cliques and walk through the pairs whose similarity is above ``SIMILARITY_THRESHOLD``,
# and try to put each member into a clique.
#
# If there is not yet a clique, we make the member in question into a new singleton clique.
#
# If there are cliques, we find the cliques that have a member similar to the member in question.
# If we find several, we merge them all into one clique.
#
# If there is no such clique, we put the member in a new singleton clique.
#
# NB: Cliques may *drift*, meaning that they contain members that are completely different from each other.
# They are in the same clique, because there is a path of pairwise similar members leading from the one chunk to the other.
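#
# The clique growing procedure, stripped of progress reporting and caching (the full version
# is the ``cliqueing()`` function in section 5.7; ``similar(i, j)`` stands for a lookup in the
# similarity matrix against ``SIMILARITY_THRESHOLD``):
#
# ```
# def grow_cliques(passages, similar):
#     cliques = []
#     for p in passages:
#         # all existing cliques that contain a member similar to p
#         hits = [c for c in cliques if any(similar(p, q) for q in c)]
#         if not hits:
#             cliques.append({p})  # start a new singleton clique
#         else:
#             merged = set().union(*hits) | {p}  # merge all hit cliques and add p
#             cliques = [c for c in cliques if c not in hits] + [merged]
#     return cliques
# ```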
#
# ### 4.3.1 Organizing the cliques
# In order to handle cases where there are many corresponding verses in corresponding chapters, we produce
# chapter-by-chapter diffs in the following way.
#
# We make a list of all chapters that are involved in cliques.
# This yields a list of chapter cliques.
# For all *binary* chapter cliques, we generate a colorful diff rendering (as HTML) of the two complete chapters.
#
# We only do this for *promising* experiments.
#
# ### 4.3.2 Evaluating clique sets
#
# Not all clique sets are equally worthwhile.
# For example, if we set the ``SIMILARITY_THRESHOLD`` too low, we might get one gigantic clique, especially
# in combination with a fine-grained chunking. In other words: we suffer from *clique drifting*.
#
# We detect clique drifting by looking at the size of the largest clique.
# If that is large compared to the total number of chunks, we deem the results unsatisfactory.
#
# On the other hand, when the ``SIMILARITY_THRESHOLD`` is too high, you might miss a lot of correspondences,
# especially when chunks are large, or when we have fixed-size chunks that are out of phase.
#
# We deem the results of experiments based on a partitioning into fixed length chunks as unsatisfactory, although it
# might be interesting to inspect what exactly the damage is.
#
# At the moment, we have not yet analyzed the relative merits of the similarity methods ``SET`` and ``LCS``.
# # 5. Implementation
#
#
# The rest is code. From here we fire up the engines and start computing.
# In[8]:
PICKLE_PROTOCOL = 3
# # Setting up the context: source file and target directories
#
# The conversion is executed in an environment of directories, so that sources, temp files and
# results are in convenient places and do not have to be shifted around.
# In[9]:
repoBase = os.path.expanduser("~/github/etcbc")
coreRepo = "{}/{}".format(repoBase, CORE_NAME)
thisRepo = "{}/{}".format(repoBase, NAME)
# In[10]:
coreTf = "{}/tf/{}".format(coreRepo, VERSION)
# In[11]:
allTemp = "{}/_temp".format(thisRepo)
thisTemp = "{}/_temp/{}".format(thisRepo, VERSION)
thisTempTf = "{}/tf".format(thisTemp)
# In[12]:
thisTf = "{}/tf/{}".format(thisRepo, VERSION)
thisNotes = "{}/shebanq/{}".format(thisRepo, VERSION)
# In[13]:
notesFile = "crossrefNotes.csv"
if not os.path.exists(thisNotes):
os.makedirs(thisNotes)
# # Test
#
# Check whether this conversion is needed in the first place.
# Only when run as a script.
# In[14]:
if SCRIPT:
(good, work) = utils.mustRun(
None, "{}/.tf/{}.tfx".format(thisTf, "crossref"), force=FORCE
)
if not good:
stop(good=False)
if not work:
stop(good=True)
# ## 5.1 Loading the feature data
#
# We load the features we need from the BHSA core database.
# In[15]:
utils.caption(4, "Load the existing TF dataset")
TF = Fabric(locations=coreTf, modules=[""])
# In[16]:
api = TF.load(
"""
otype
{} {} {}
book chapter verse number
""".format(
LEX_FEATURE,
TEXT_FEATURE,
TRAILER_FEATURE,
)
)
api.makeAvailableIn(globals())
# ## 5.2 Configuration
#
# Here are the parameters on which the results crucially depend.
#
# There are also parameters that control the reporting of the results, such as file locations.
# In[17]:
# chunking
CHUNK_LABELS = {True: "fixed", False: "object"}
CHUNK_LBS = {True: "F", False: "O"}
CHUNK_SIZES = (100, 50, 20, 10)
CHUNK_OBJECTS = ("chapter", "verse", "half_verse", "sentence")
# In[18]:
# preparing
EXCLUDED_CONS = r"[>WJ=/\[]" # weed out weak consonants
EXCLUDED_PAT = re.compile(EXCLUDED_CONS)
# In[19]:
# similarity
MATRIX_THRESHOLD = 50
SIM_METHODS = ("SET", "LCS")
SIMILARITIES = (100, 95, 90, 85, 80, 75, 70, 65, 60, 55, 50, 45, 40, 35, 30)
# In[20]:
# printing
DEP_CLIQUE_RATIO = 25
DUB_CLIQUE_RATIO = 15
REC_CLIQUE_RATIO = 5
LARGE_CLIQUE_SIZE = 50
CLIQUES_PER_FILE = 50
# In[21]:
# assessing results
VALUE_LABELS = dict(
mis="no results available",
rec="promising results: recommended",
dep="messy results: deprecated",
dub="mixed quality: take care",
out="method deprecated",
nor="unassessed quality: inspection needed",
lr="this experiment is the last one run",
)
# note that the TF_TABLE and LOCAL_BASE_COMP are deliberately
# located in the version independent
# part of the tempdir.
# Here the results of expensive calculations are stored,
# to be used by all versions
# In[22]:
# crossrefs for TF
TF_TABLE = "{}/parallelTable.tsv".format(allTemp)
# In[23]:
# crossrefs for SHEBANQ
SHEBANQ_MATRIX = (False, "verse", "SET")
SHEBANQ_SIMILARITY = 65
SHEBANQ_TOOL = "parallel"
CROSSREF_STATUS = "!"
CROSSREF_KEYWORD = "crossref"
# In[24]:
# progress indication
VERBOSE = False
MEGA = 1000000
KILO = 1000
SIMILARITY_PROGRESS = 5 * MEGA
CLIQUES_PROGRESS = 1 * KILO
# In[25]:
# locations and hyperlinks
LOCAL_BASE_COMP = "{}/calculus".format(allTemp)
LOCAL_BASE_OUTP = "files"
EXPERIMENT_DIR = "experiments"
EXPERIMENT_FILE = "experiments"
EXPERIMENT_PATH = "{}/{}.txt".format(LOCAL_BASE_OUTP, EXPERIMENT_FILE)
EXPERIMENT_HTML = "{}/{}.html".format(LOCAL_BASE_OUTP, EXPERIMENT_FILE)
NOTES_FILE = "crossref"
NOTES_PATH = "{}/{}.csv".format(LOCAL_BASE_OUTP, NOTES_FILE)
STORED_CLIQUE_DIR = "stored/cliques"
STORED_MATRIX_DIR = "stored/matrices"
STORED_CHUNK_DIR = "stored/chunks"
CHAPTER_DIR = "chapters"
CROSSREF_DB_FILE = "crossrefdb.csv"
CROSSREF_DB_PATH = "{}/{}".format(LOCAL_BASE_OUTP, CROSSREF_DB_FILE)
# ## 5.3 Experiment settings
#
# For each experiment we have to adapt the configuration settings to the parameters that define the experiment.
# In[26]:
def reset_params():
global CHUNK_FIXED, CHUNK_SIZE, CHUNK_OBJECT, CHUNK_LB, CHUNK_DESC
global SIMILARITY_METHOD, SIMILARITY_THRESHOLD, MATRIX_THRESHOLD
global meta
meta = collections.OrderedDict()
# chunking
CHUNK_FIXED = None # kind of chunking: fixed size or by object
CHUNK_SIZE = None # only relevant for CHUNK_FIXED = True
CHUNK_OBJECT = (
None # only relevant for CHUNK_FIXED = False; see CHUNK_OBJECTS in next cell
)
CHUNK_LB = None # computed from CHUNK_FIXED, CHUNK_SIZE, CHUNK_OBJ
CHUNK_DESC = None # computed from CHUNK_FIXED, CHUNK_SIZE, CHUNK_OBJ
# similarity
MATRIX_THRESHOLD = (
None # minimal similarity used to fill the matrix of similarities
)
SIMILARITY_METHOD = None # see SIM_METHODS in next cell
SIMILARITY_THRESHOLD = (
None # minimal similarity used to put elements together in cliques
)
meta = collections.OrderedDict()
# In[27]:
def set_matrix_threshold(sim_m=None, chunk_o=None):
global MATRIX_THRESHOLD
the_sim_m = SIMILARITY_METHOD if sim_m is None else sim_m
the_chunk_o = CHUNK_OBJECT if chunk_o is None else chunk_o
MATRIX_THRESHOLD = 50 if the_sim_m == "SET" else 60
if the_sim_m == "SET":
if the_chunk_o == "chapter":
MATRIX_THRESHOLD = 30
else:
MATRIX_THRESHOLD = 50
else:
if the_chunk_o == "chapter":
MATRIX_THRESHOLD = 55
else:
MATRIX_THRESHOLD = 60
# In[28]:
def do_params_chunk(chunk_f, chunk_i):
global CHUNK_FIXED, CHUNK_SIZE, CHUNK_OBJECT, CHUNK_LB, CHUNK_DESC
do_chunk = False
if (
chunk_f != CHUNK_FIXED
or (chunk_f and chunk_i != CHUNK_SIZE)
or (not chunk_f and chunk_i != CHUNK_OBJECT)
):
do_chunk = True
CHUNK_FIXED = chunk_f
if chunk_f:
CHUNK_SIZE = chunk_i
else:
CHUNK_OBJECT = chunk_i
CHUNK_LB = CHUNK_LBS[CHUNK_FIXED]
CHUNK_DESC = CHUNK_SIZE if CHUNK_FIXED else CHUNK_OBJECT
for p in (
"{}/{}".format(LOCAL_BASE_OUTP, EXPERIMENT_DIR),
"{}/{}".format(LOCAL_BASE_COMP, STORED_CHUNK_DIR),
):
if not os.path.exists(p):
os.makedirs(p)
return do_chunk
# In[29]:
def do_params(chunk_f, chunk_i, sim_m, sim_thr):
global CHUNK_FIXED, CHUNK_SIZE, CHUNK_OBJECT, CHUNK_LB, CHUNK_DESC
global SIMILARITY_METHOD, SIMILARITY_THRESHOLD, MATRIX_THRESHOLD
global meta
do_chunk = False
do_prep = False
do_sim = False
do_clique = False
meta = collections.OrderedDict()
if (
chunk_f != CHUNK_FIXED
or (chunk_f and chunk_i != CHUNK_SIZE)
or (not chunk_f and chunk_i != CHUNK_OBJECT)
):
do_chunk = True
do_prep = True
do_sim = True
do_clique = True
CHUNK_FIXED = chunk_f
if chunk_f:
CHUNK_SIZE = chunk_i
else:
CHUNK_OBJECT = chunk_i
if sim_m != SIMILARITY_METHOD:
do_prep = True
do_sim = True
do_clique = True
SIMILARITY_METHOD = sim_m
if sim_thr != SIMILARITY_THRESHOLD:
do_clique = True
SIMILARITY_THRESHOLD = sim_thr
set_matrix_threshold()
if SIMILARITY_THRESHOLD < MATRIX_THRESHOLD:
return (False, False, False, False, True)
CHUNK_LB = CHUNK_LBS[CHUNK_FIXED]
CHUNK_DESC = CHUNK_SIZE if CHUNK_FIXED else CHUNK_OBJECT
meta["CHUNK TYPE"] = (
"FIXED {}".format(CHUNK_SIZE)
if CHUNK_FIXED
else "OBJECT {}".format(CHUNK_OBJECT)
)
meta["MATRIX THRESHOLD"] = MATRIX_THRESHOLD
meta["SIMILARITY METHOD"] = SIMILARITY_METHOD
meta["SIMILARITY THRESHOLD"] = SIMILARITY_THRESHOLD
for p in (
"{}/{}".format(LOCAL_BASE_OUTP, EXPERIMENT_DIR),
"{}/{}".format(LOCAL_BASE_OUTP, CHAPTER_DIR),
"{}/{}".format(LOCAL_BASE_COMP, STORED_CLIQUE_DIR),
"{}/{}".format(LOCAL_BASE_COMP, STORED_MATRIX_DIR),
"{}/{}".format(LOCAL_BASE_COMP, STORED_CHUNK_DIR),
):
if not os.path.exists(p):
os.makedirs(p)
return (do_chunk, do_prep, do_sim, do_clique, False)
# In[30]:
reset_params()
# ## 5.4 Chunking
#
# We divide the text into chunks to be compared. The result is ``chunks``,
# which is a list of lists.
# Every chunk is a list of word nodes.
# In[31]:
def chunking(do_chunk):
global chunks, book_rank
if not do_chunk:
TF.info(
"CHUNKING ({} {}): already chunked into {} chunks".format(
CHUNK_LB, CHUNK_DESC, len(chunks)
)
)
meta["# CHUNKS"] = len(chunks)
return
chunk_path = "{}/{}/chunk_{}_{}".format(
LOCAL_BASE_COMP,
STORED_CHUNK_DIR,
CHUNK_LB,
CHUNK_DESC,
)
if os.path.exists(chunk_path):
with open(chunk_path, "rb") as f:
chunks = pickle.load(f)
TF.info(
"CHUNKING ({} {}): Loaded: {:>5} chunks".format(
CHUNK_LB,
CHUNK_DESC,
len(chunks),
)
)
else:
TF.info("CHUNKING ({} {})".format(CHUNK_LB, CHUNK_DESC))
chunks = []
book_rank = {}
for b in F.otype.s("book"):
book_name = F.book.v(b)
book_rank[book_name] = b
words = L.d(b, otype="word")
nwords = len(words)
if CHUNK_FIXED:
nchunks = nwords // CHUNK_SIZE
if nchunks == 0:
nchunks = 1
common_incr = nwords
special_incr = 0
else:
rem = nwords % CHUNK_SIZE
common_incr = rem // nchunks
special_incr = rem % nchunks
word_in_chunk = -1
cur_chunk = -1
these_chunks = []
for w in words:
word_in_chunk += 1
if word_in_chunk == 0 or (
word_in_chunk
>= CHUNK_SIZE
+ common_incr
+ (1 if cur_chunk < special_incr else 0)
):
word_in_chunk = 0
these_chunks.append([])
cur_chunk += 1
these_chunks[-1].append(w)
else:
these_chunks = [
L.d(c, otype="word") for c in L.d(b, otype=CHUNK_OBJECT)
]
chunks.extend(these_chunks)
chunkvolume = sum(len(c) for c in these_chunks)
if VERBOSE:
TF.info(
"CHUNKING ({} {}): {:<20s} {:>5} words; {:>5} chunks; sizes {:>5} to {:>5}; {:>5}".format(
CHUNK_LB,
CHUNK_DESC,
book_name,
nwords,
len(these_chunks),
min(len(c) for c in these_chunks),
max(len(c) for c in these_chunks),
"OK" if chunkvolume == nwords else "ERROR",
)
)
with open(chunk_path, "wb") as f:
pickle.dump(chunks, f, protocol=PICKLE_PROTOCOL)
TF.info("CHUNKING ({} {}): Made {} chunks".format(CHUNK_LB, CHUNK_DESC, len(chunks)))
meta["# CHUNKS"] = len(chunks)
# ## 5.5 Preparing
#
# In order to compute similarities between chunks, we have to compile each chunk into the information that really matters for the comparison. This is dependent on the chosen method of similarity computing.
#
# ### 5.5.1 Preparing for SET comparison
#
# We reduce words to their lexemes (dictionary entries) and from them we also remove the aleph, waw, and yod.
# The lexeme feature also contains characters (`/ [ =`) to disambiguate homonyms. We also remove these.
# If we end up with something empty, we skip it.
# Eventually, we take the set of these reduced word lexemes, so that we effectively ignore order and multiplicity of words. In other words: the resulting similarity will be based on lexeme content.
#
# ### 5.5.2 Preparing for LCS comparison
#
# Again, we reduce words to their lexemes as for the SET preparation, and we do the same weeding of consonants and empty strings. But then we concatenate everything, separated by a space. So we preserve order and multiplicity.
# In[32]:
def preparing(do_prepare):
global chunk_data
if not do_prepare:
TF.info(
"PREPARING ({} {} {}): Already prepared".format(
CHUNK_LB, CHUNK_DESC, SIMILARITY_METHOD
)
)
return
TF.info("PREPARING ({} {} {})".format(CHUNK_LB, CHUNK_DESC, SIMILARITY_METHOD))
chunk_data = []
if SIMILARITY_METHOD == "SET":
for c in chunks:
words = (
EXCLUDED_PAT.sub("", Fs(LEX_FEATURE).v(w).replace("<", "O")) for w in c
)
clean_words = (w for w in words if w != "")
this_data = frozenset(clean_words)
chunk_data.append(this_data)
else:
for c in chunks:
words = (
EXCLUDED_PAT.sub("", Fs(LEX_FEATURE).v(w).replace("<", "O")) for w in c
)
clean_words = (w for w in words if w != "")
this_data = " ".join(clean_words)
chunk_data.append(this_data)
TF.info(
"PREPARING ({} {} {}): Done {} chunks.".format(
CHUNK_LB, CHUNK_DESC, SIMILARITY_METHOD, len(chunk_data)
)
)
# ## 5.6 Similarity computation
#
# Here we implement our two ways of similarity computation.
# Both need a massive amount of work, especially for experiments with many small chunks.
# The similarities are stored in a ``matrix``, a data structure that stores a similarity number for each pair of chunk indexes.
# Most pairs of chunks will be dissimilar. In order to save space, we do not store similarities below a certain threshold.
# We store matrices for re-use.
#
# ### 5.6.1 SET similarity
# The core is an operation on the sets associated with the chunks by the prepare step. We take the cardinality of the intersection divided by the cardinality of the union.
# Intuitively, we compute the proportion of what two chunks have in common against their total material.
#
# In case the union is empty (both chunks have yielded an empty set), we deem the chunks not to be interesting as a parallel pair, and we set the similarity to 0.
#
# ### 5.6.2 LCS similarity
# The core is the method `ratio()`, taken from the Levenshtein module.
# Remember that the preparation step yielded a space-separated string of lexemes, and these strings are compared on the basis of edit distance.
# In[33]:
def similarity_post():
nequals = len({x for x in chunk_dist if chunk_dist[x] >= 100})
cmin = min(chunk_dist.values()) if len(chunk_dist) else "!empty set!"
cmax = max(chunk_dist.values()) if len(chunk_dist) else "!empty set!"
meta["LOWEST AVAILABLE SIMILARITY"] = cmin
meta["HIGHEST AVAILABLE SIMILARITY"] = cmax
meta["# EQUAL COMPARISONS"] = nequals
TF.info(
"SIMILARITY ({} {} {} M>{}): similarities between {} and {}. {} are 100%".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
cmin,
cmax,
nequals,
)
)
# In[34]:
def similarity(do_sim):
global chunk_dist
total_chunks = len(chunks)
total_distances = total_chunks * (total_chunks - 1) // 2
meta["# SIMILARITY COMPARISONS"] = total_distances
SIMILARITY_PROGRESS = total_distances // 100
if SIMILARITY_PROGRESS >= MEGA:
sim_unit = MEGA
sim_lb = "M"
else:
sim_unit = KILO
sim_lb = "K"
if not do_sim:
TF.info(
"SIMILARITY ({} {} {} M>{}): Using {:>5} {} ({}) comparisons with {} entries in matrix".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
total_distances // sim_unit,
sim_lb,
total_distances,
len(chunk_dist),
)
)
meta["# STORED SIMILARITIES"] = len(chunk_dist)
similarity_post()
return
matrix_path = "{}/{}/matrix_{}_{}_{}_{}".format(
LOCAL_BASE_COMP,
STORED_MATRIX_DIR,
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
)
if os.path.exists(matrix_path):
with open(matrix_path, "rb") as f:
chunk_dist = pickle.load(f)
TF.info(
"SIMILARITY ({} {} {} M>{}): Loaded: {:>5} {} ({}) comparisons with {} entries in matrix".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
total_distances // sim_unit,
sim_lb,
total_distances,
len(chunk_dist),
)
)
meta["# STORED SIMILARITIES"] = len(chunk_dist)
similarity_post()
return
TF.info(
"SIMILARITY ({} {} {} M>{}): Computing {:>5} {} ({}) comparisons and saving entries in matrix".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
total_distances // sim_unit,
sim_lb,
total_distances,
)
)
chunk_dist = {}
wc = 0
wt = 0
if SIMILARITY_METHOD == "SET":
# method SET: all chunks have been reduced to sets, ratio between lengths of intersection and union
for i in range(total_chunks):
c_i = chunk_data[i]
for j in range(i + 1, total_chunks):
c_j = chunk_data[j]
u = len(c_i | c_j)
# HERE COMES THE SIMILARITY COMPUTATION
d = 100 * len(c_i & c_j) / u if u != 0 else 0
# HERE WE STORE THE OUTCOME
if d >= MATRIX_THRESHOLD:
chunk_dist[(i, j)] = d
wc += 1
wt += 1
if wc == SIMILARITY_PROGRESS:
wc = 0
TF.info(
"SIMILARITY ({} {} {} M>{}): Computed {:>5} {} comparisons and saved {} entries in matrix".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
wt // sim_unit,
sim_lb,
len(chunk_dist),
)
)
elif SIMILARITY_METHOD == "LCS":
# method LCS: chunks are sequence aligned, ratio between length of all common parts and total length
for i in range(total_chunks):
c_i = chunk_data[i]
for j in range(i + 1, total_chunks):
c_j = chunk_data[j]
# HERE COMES THE SIMILARITY COMPUTATION
d = 100 * ratio(c_i, c_j)
# HERE WE STORE THE OUTCOME
if d >= MATRIX_THRESHOLD:
chunk_dist[(i, j)] = d
wc += 1
wt += 1
if wc == SIMILARITY_PROGRESS:
wc = 0
TF.info(
"SIMILARITY ({} {} {} M>{}): Computed {:>5} {} comparisons and saved {} entries in matrix".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
wt // sim_unit,
sim_lb,
len(chunk_dist),
)
)
with open(matrix_path, "wb") as f:
pickle.dump(chunk_dist, f, protocol=PICKLE_PROTOCOL)
TF.info(
"SIMILARITY ({} {} {} M>{}): Computed {:>5} {} ({}) comparisons and saved {} entries in matrix".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
wt // sim_unit,
sim_lb,
wt,
len(chunk_dist),
)
)
meta["# STORED SIMILARITIES"] = len(chunk_dist)
similarity_post()
# ## 5.7 Cliques
#
# Based on the value for the ``SIMILARITY_THRESHOLD`` we use the similarity matrix to pick the *interesting*
# similar pairs out of it. From these pairs we lump together our cliques.
#
# Our list of experiments will select various values for ``SIMILARITY_THRESHOLD``, which will result
# in various types of clique behavior.
#
# We store computed cliques for re-use.
#
# ### 5.7.1 Selecting passages
#
# We take all pairs from the similarity matrix which are above the threshold, and add both members to a list of passages.
#
# ### 5.7.2 Growing cliques
# We inspect all passages in our set, and try to add them to the cliques we are growing.
# We start with an empty set of cliques.
# Each passage is added to a clique with which it has *enough familiarity*, otherwise it is added to a new clique.
# *Enough familiarity* means: the passage is similar to at least one member of the clique, i.e. their similarity is at least ``SIMILARITY_THRESHOLD``.
# It is possible that a passage is thus added to more than one clique. In that case, those cliques are merged.
# This may lead to growing very large cliques if ``SIMILARITY_THRESHOLD`` is too low.
# In[35]:
def key_chunk(i):
c = chunks[i]
w = c[0]
return (
-len(c),
L.u(w, otype="book")[0],
L.u(w, otype="chapter")[0],
L.u(w, otype="verse")[0],
)
# In[36]:
def meta_clique_pre():
global similars, passages
TF.info(
"CLIQUES ({} {} {} M>{} S>{}): inspecting the similarity matrix".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
SIMILARITY_THRESHOLD,
)
)
similars = {x for x in chunk_dist if chunk_dist[x] >= SIMILARITY_THRESHOLD}
passage_set = set()
for (i, j) in similars:
passage_set.add(i)
passage_set.add(j)
passages = sorted(passage_set, key=key_chunk)
meta["# SIMILAR COMPARISONS"] = len(similars)
meta["# SIMILAR PASSAGES"] = len(passages)
# In[37]:
def meta_clique_pre2():
TF.info(
"CLIQUES ({} {} {} M>{} S>{}): {} relevant similarities between {} passages".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
SIMILARITY_THRESHOLD,
len(similars),
len(passages),
)
)
# In[38]:
def meta_clique_post():
global l_c_l
meta["# CLIQUES"] = len(cliques)
scliques = collections.Counter()
for c in cliques:
scliques[len(c)] += 1
l_c_l = max(scliques.keys()) if len(scliques) > 0 else 0
totmn = 0
totcn = 0
for (ln, n) in sorted(scliques.items(), key=lambda x: x[0]):
totmn += ln * n
totcn += n
if VERBOSE:
TF.info(
"CLIQUES ({} {} {} M>{} S>{}): {:>4} cliques of length {:>4}".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
SIMILARITY_THRESHOLD,
n,
ln,
)
)
meta["# CLIQUES of LENGTH {:>4}".format(ln)] = n
TF.info(
"CLIQUES ({} {} {} M>{} S>{}): {} members in {} cliques".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
SIMILARITY_THRESHOLD,
totmn,
totcn,
)
)
# In[39]:
def cliqueing(do_clique):
global cliques
if not do_clique:
TF.info(
"CLIQUES ({} {} {} M>{} S>{}): Already loaded {} cliques out of {} candidates from {} comparisons".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
SIMILARITY_THRESHOLD,
len(cliques),
len(passages),
len(similars),
)
)
meta_clique_pre2()
meta_clique_post()
return
TF.info(
"CLIQUES ({} {} {} M>{} S>{}): fetching similars and chunk candidates".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
SIMILARITY_THRESHOLD,
)
)
meta_clique_pre()
meta_clique_pre2()
clique_path = "{}/{}/clique_{}_{}_{}_{}_{}".format(
LOCAL_BASE_COMP,
STORED_CLIQUE_DIR,
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
SIMILARITY_THRESHOLD,
)
if os.path.exists(clique_path):
with open(clique_path, "rb") as f:
cliques = pickle.load(f)
TF.info(
"CLIQUES ({} {} {} M>{} S>{}): Loaded: {:>5} cliques out of {:>6} chunks from {} comparisons".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
SIMILARITY_THRESHOLD,
len(cliques),
len(passages),
len(similars),
)
)
meta_clique_post()
return
TF.info(
"CLIQUES ({} {} {} M>{} S>{}): Composing cliques out of {:>6} chunks from {} comparisons".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
SIMILARITY_THRESHOLD,
len(passages),
len(similars),
)
)
cliques_unsorted = []
np = 0
npc = 0
for i in passages:
added = None
removable = set()
for (k, c) in enumerate(cliques_unsorted):
origc = tuple(c)
for j in origc:
d = (
chunk_dist.get((i, j), 0)
if i < j
else chunk_dist.get((j, i), 0)
if j < i
else 0
)
if d >= SIMILARITY_THRESHOLD:
if (
added is None
): # the passage has not been added to any clique yet
c.add(i)
added = k # remember that we added the passage to this clique
else: # the passage has alreay been added to another clique:
# we merge this clique with that one
cliques_unsorted[added] |= c
removable.add(
k
) # we remember that we have merged this clicque into another one,
# so we can throw away this clicque later
break
if added is None:
cliques_unsorted.append({i})
else:
if len(removable):
cliques_unsorted = [
c for (k, c) in enumerate(cliques_unsorted) if k not in removable
]
np += 1
npc += 1
if npc == CLIQUES_PROGRESS:
npc = 0
TF.info(
"CLIQUES ({} {} {} M>{} S>{}): Composed {:>5} cliques out of {:>6} chunks".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
SIMILARITY_THRESHOLD,
len(cliques_unsorted),
np,
)
)
cliques = sorted([tuple(sorted(c, key=key_chunk)) for c in cliques_unsorted])
with open(clique_path, "wb") as f:
pickle.dump(cliques, f, protocol=PICKLE_PROTOCOL)
meta_clique_post()
TF.info(
"CLIQUES ({} {} {} M>{} S>{}): Composed and saved {:>5} cliques out of {:>6} chunks from {} comparisons".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
SIMILARITY_THRESHOLD,
len(cliques),
len(passages),
len(similars),
)
)
# ## 5.8 Output
#
# We deliver the output of our experiments in various ways, all in HTML.
#
# We generate chapter based diff outputs with color-highlighted differences between the chapters for every pair of chapters that merit it.
#
# For every (*good*) experiment, we produce a big list of its cliques, and for
# every such clique, we produce a diff-view of its members.
#
# Big cliques will be split into several files.
#
# Clique listings will also contain metadata: the value of the experiment parameters.
#
# ### 5.8.1 Format definitions
# Here are the definitions for formatting the (HTML) output.
# In[40]:
# clique lists
css = """
td.vl {
font-family: Verdana, Arial, sans-serif;
font-size: small;
text-align: right;
color: #aaaaaa;
width: 10%;
direction: ltr;
border-left: 2px solid #aaaaaa;
border-right: 2px solid #aaaaaa;
}
td.ht {
font-family: Ezra SIL, SBL Hebrew, Verdana, sans-serif;
font-size: x-large;
line-height: 1.7;
text-align: right;
direction: rtl;
}
table.ht {
width: 100%;
direction: rtl;
border-collapse: collapse;
}
td.ht {
border-left: 2px solid #aaaaaa;
border-right: 2px solid #aaaaaa;
}
tr.ht.tb {
border-top: 2px solid #aaaaaa;
border-left: 2px solid #aaaaaa;
border-right: 2px solid #aaaaaa;
}
tr.ht.bb {
border-bottom: 2px solid #aaaaaa;
border-left: 2px solid #aaaaaa;
border-right: 2px solid #aaaaaa;
}
span.m {
background-color: #aaaaff;
}
span.f {
background-color: #ffaaaa;
}
span.x {
background-color: #ffffaa;
color: #bb0000;
}
span.delete {
background-color: #ffaaaa;
}
span.insert {
background-color: #aaffaa;
}
span.replace {
background-color: #ffff00;
}
"""
# In[41]:
# chapter diffs
diffhead = """
<head>
<meta http-equiv="Content-Type"
content="text/html; charset=UTF-8" />
<title></title>
<style type="text/css">
table.diff {
font-family: Ezra SIL, SBL Hebrew, Verdana, sans-serif;
font-size: x-large;
text-align: right;
}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}
</style>
</head>
"""
# In[42]:
# table of experiments
ecss = """
<style type="text/css">
.mis {background-color: #cccccc;}
.rec {background-color: #aaffaa;}
.dep {background-color: #ffaaaa;}
.dub {background-color: #ffddaa;}
.out {background-color: #ffddff;}
.nor {background-color: #fcfcff;}
.ps {font-weight: normal;}
.mx {font-style: italic;}
.cl {font-weight: bold;}
.lr {font-weight: bold; background-color: #ffffaa;}
p,td {font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: small;}
td {border: 1pt solid #000000; padding: 4pt;}
table {border: 1pt solid #000000; border-collapse: collapse;}
</style>
"""
# In[43]:
legend = """
<table>
<tr><td class="mis">{mis}</td></tr>
<tr><td class="rec">{rec}</td></tr>
<tr><td class="dep">{dep}</td></tr>
<tr><td class="dub">{dub}</td></tr>
<tr><td class="out">{out}</td></tr>
<tr><td class="nor">{nor}</td></tr>
</table>
""".format(
**VALUE_LABELS
)
# ### 5.8.2 Formatting clique lists
# In[44]:
def xterse_chunk(i):
chunk = chunks[i]
fword = chunk[0]
book = L.u(fword, otype="book")[0]
chapter = L.u(fword, otype="chapter")[0]
return (book, chapter)
# In[45]:
def xterse_clique(ii):
return tuple(sorted({xterse_chunk(i) for i in ii}))
# In[46]:
def terse_chunk(i):
chunk = chunks[i]
fword = chunk[0]
book = L.u(fword, otype="book")[0]
chapter = L.u(fword, otype="chapter")[0]
verse = L.u(fword, otype="verse")[0]
return (book, chapter, verse)
# In[47]:
def terse_clique(ii):
return tuple(sorted({terse_chunk(i) for i in ii}))
# In[48]:
def verse_chunk(i):
(bk, ch, vs) = i
book = F.book.v(bk)
chapter = F.chapter.v(ch)
verse = F.verse.v(vs)
text = "".join(
"{}{}".format(Fs(TEXT_FEATURE).v(w), Fs(TRAILER_FEATURE).v(w))
for w in L.d(vs, otype="word")
)
verse_label = '<td class="vl">{} {}:{}</td>'.format(book, chapter, verse)
htext = '{}<td class="ht">{}</td>'.format(verse_label, text)
return '<tr class="ht">{}</tr>'.format(htext)
# In[49]:
def verse_clique(ii):
return '<table class="ht">{}</table>\n'.format(
"".join(verse_chunk(i) for i in sorted(ii))
)
# In[50]:
def condense(vlabels):
cnd = ""
(cur_b, cur_c) = (None, None)
for (b, c, v) in vlabels:
c = str(c)
v = str(v)
sep = (
""
if cur_b is None
else ". "
if cur_b != b
else "; "
if cur_c != c
else ", "
)
show_b = b + " " if cur_b != b else ""
show_c = c + ":" if cur_b != b or cur_c != c else ""
(cur_b, cur_c) = (b, c)
cnd += "{}{}{}{}".format(sep, show_b, show_c, v)
return cnd
# In[51]:
def print_diff(a, b):
arep = ""
brep = ""
for (lb, ai, aj, bi, bj) in SequenceMatcher(
isjunk=None, a=a, b=b, autojunk=False
).get_opcodes():
if lb == "equal":
arep += a[ai:aj]
brep += b[bi:bj]
elif lb == "delete":
arep += '<span class="{}">{}</span>'.format(lb, a[ai:aj])
elif lb == "insert":
brep += '<span class="{}">{}</span>'.format(lb, b[bi:bj])
else:
arep += '<span class="{}">{}</span>'.format(lb, a[ai:aj])
brep += '<span class="{}">{}</span>'.format(lb, b[bi:bj])
return (arep, brep)
# In[52]:
def print_chunk_fine(prev, text, verse_labels, prevlabels):
if prev is None:
return """
<tr class="ht tb bb"><td class="vl">{}</td><td class="ht">{}</td></tr>
""".format(
condense(verse_labels),
text,
)
else:
(prevline, textline) = print_diff(prev, text)
return """
<tr class="ht tb"><td class="vl">{}</td><td class="ht">{}</td></tr>
<tr class="ht bb"><td class="vl">{}</td><td class="ht">{}</td></tr>
""".format(
condense(prevlabels) if prevlabels is not None else "previous",
prevline,
condense(verse_labels),
textline,
)
# In[53]:
def print_chunk_coarse(text, verse_labels):
return """
<tr class="ht tb bb"><td class="vl">{}</td><td class="ht">{}</td></tr>
""".format(
condense(verse_labels),
text,
)
# In[54]:
def print_clique(ii, ncliques):
return (
print_clique_fine(ii)
if len(ii) < ncliques * DEP_CLIQUE_RATIO / 100
else print_clique_coarse(ii)
)
# In[55]:
def print_clique_fine(ii):
condensed = collections.OrderedDict()
for i in sorted(ii, key=lambda c: (-len(chunks[c]), c)):
chunk = chunks[i]
fword = chunk[0]
book = F.book.v(L.u(fword, otype="book")[0])
chapter = F.chapter.v(L.u(fword, otype="chapter")[0])
verse = F.verse.v(L.u(fword, otype="verse")[0])
text = "".join(
"{}{}".format(Fs(TEXT_FEATURE).v(w), Fs(TRAILER_FEATURE).v(w))
for w in chunk
)
condensed.setdefault(text, []).append((book, chapter, verse))
result = []
nv = len(condensed.items())
prev = None
for (text, verse_labels) in condensed.items():
if prev is None:
if nv == 1:
result.append(print_chunk_fine(None, text, verse_labels, None))
else:
prev = text
prevlabels = verse_labels
continue
else:
result.append(print_chunk_fine(prev, text, verse_labels, prevlabels))
prev = text
prevlabels = None
return '<table class="ht">{}</table>\n'.format("".join(result))
# In[56]:
def print_clique_coarse(ii):
condensed = collections.OrderedDict()
for i in sorted(ii, key=lambda c: (-len(chunks[c]), c))[0:LARGE_CLIQUE_SIZE]:
chunk = chunks[i]
fword = chunk[0]
book = F.book.v(L.u(fword, otype="book")[0])
chapter = F.chapter.v(L.u(fword, otype="chapter")[0])
verse = F.verse.v(L.u(fword, otype="verse")[0])
text = "".join(
"{}{}".format(Fs(TEXT_FEATURE).v(w), Fs(TRAILER_FEATURE).v(w))
for w in chunk
)
condensed.setdefault(text, []).append((book, chapter, verse))
result = []
for (text, verse_labels) in condensed.items():
result.append(print_chunk_coarse(text, verse_labels))
if len(ii) > LARGE_CLIQUE_SIZE:
result.append(
print_chunk_coarse("+ {} ...".format(len(ii) - LARGE_CLIQUE_SIZE), [])
)
return '<table class="ht">{}</table>\n'.format("".join(result))
# In[57]:
def index_clique(bnm, n, ii, ncliques):
return (
index_clique_fine(bnm, n, ii)
if len(ii) < ncliques * DEP_CLIQUE_RATIO / 100
else index_clique_coarse(bnm, n, ii)
)
# In[58]:
def index_clique_fine(bnm, n, ii):
verse_labels = []
for i in sorted(ii, key=lambda c: (-len(chunks[c]), c)):
chunk = chunks[i]
fword = chunk[0]
book = F.book.v(L.u(fword, otype="book")[0])
chapter = F.chapter.v(L.u(fword, otype="chapter")[0])
verse = F.verse.v(L.u(fword, otype="verse")[0])
verse_labels.append((book, chapter, verse))
reffl = "{}_{}".format(bnm, n // CLIQUES_PER_FILE)
return '<p><b>{}</b> <a href="{}.html#c_{}">{}</a></p>'.format(
n,
reffl,
n,
condense(verse_labels),
)
# In[59]:
def index_clique_coarse(bnm, n, ii):
verse_labels = []
for i in sorted(ii, key=lambda c: (-len(chunks[c]), c))[0:LARGE_CLIQUE_SIZE]:
chunk = chunks[i]
fword = chunk[0]
book = F.book.v(L.u(fword, otype="book")[0])
chapter = F.chapter.v(L.u(fword, otype="chapter")[0])
verse = F.verse.v(L.u(fword, otype="verse")[0])
verse_labels.append((book, chapter, verse))
reffl = "{}_{}".format(bnm, n // CLIQUES_PER_FILE)
extra = (
"+ {} ...".format(len(ii) - LARGE_CLIQUE_SIZE)
if len(ii) > LARGE_CLIQUE_SIZE
else ""
)
return '<p><b>{}</b> <a href="{}.html#c_{}">{}{}</a></p>'.format(
n,
reffl,
n,
condense(verse_labels),
extra,
)
# In[60]:
def lines_chapter(c):
lines = []
for v in L.d(c, otype="verse"):
vl = F.verse.v(v)
text = "".join(
"{}{}".format(Fs(TEXT_FEATURE).v(w), Fs(TRAILER_FEATURE).v(w))
for w in L.d(v, otype="word")
)
lines.append("{} {}".format(vl, text.replace("\n", " ")))
return lines
# In[61]:
def compare_chapters(c1, c2, lb1, lb2):
dh = difflib.HtmlDiff(wrapcolumn=80)
table_html = dh.make_table(
lines_chapter(c1),
lines_chapter(c2),
fromdesc=lb1,
todesc=lb2,
context=False,
numlines=5,
)
htext = """<html>{}<body>{}</body></html>""".format(diffhead, table_html)
return htext
# ### 5.8.3 Compiling the table of experiments
#
# Here we generate the table of experiments, complete with the coloring according to their assessments.
# In[62]:
# generate the table of experiments
def gen_html(standalone=False):
global other_exps
TF.info(
"EXPERIMENT: Generating html report{}".format(
"(standalone)" if standalone else ""
)
)
stats = collections.Counter()
pre = (
"""
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
{}
</head>
<body>
""".format(
ecss
)
if standalone
else ""
)
post = (
"""
</body></html>
"""
if standalone
else ""
)
experiments = """
{}
{}
<table>
<tr><th>chunk type</th><th>chunk size</th><th>similarity method</th>{}</tr>
""".format(
pre, legend, "".join("<th>{}</th>".format(sim_thr) for sim_thr in SIMILARITIES)
)
for chunk_f in (True, False):
if chunk_f:
chunk_items = CHUNK_SIZES
else:
chunk_items = CHUNK_OBJECTS
chunk_lb = CHUNK_LBS[chunk_f]
for chunk_i in chunk_items:
for sim_m in SIM_METHODS:
set_matrix_threshold(sim_m=sim_m, chunk_o=chunk_i)
these_outputs = outputs.get(MATRIX_THRESHOLD, {})
experiments += "<tr><td>{}</td><td>{}</td><td>{}</td>".format(
CHUNK_LABELS[chunk_f],
chunk_i,
sim_m,
)
for sim_thr in SIMILARITIES:
okey = (chunk_lb, chunk_i, sim_m, sim_thr)
values = these_outputs.get(okey)
if values is None:
result = '<td class="mis"> </td>'
stats["mis"] += 1
else:
(npassages, ncliques, longest_clique_len) = values
cls = assess_exp(
chunk_f, npassages, ncliques, longest_clique_len
)
stats[cls] += 1
(lr_el, lr_lb) = ("", "")
if (
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
SIMILARITY_THRESHOLD,
) == (
chunk_lb,
chunk_i,
sim_m,
sim_thr,
):
lr_el = '<span class="lr">*</span>'
lr_lb = VALUE_LABELS["lr"]
result = """
<td class="{}" title="{}">{}
<span class="ps">{}</span><br/>
<a target="_blank" href="{}{}/{}_{}_{}_M{}_S{}.html"><span class="cl">{}</span></a><br/>
<span class="mx">{}</span>
</td>""".format(
cls,
lr_lb,
lr_el,
npassages,
"" if standalone else LOCAL_BASE_OUTP + "/",
EXPERIMENT_DIR,
chunk_lb,
chunk_i,
sim_m,
MATRIX_THRESHOLD,
sim_thr,
ncliques,
longest_clique_len,
)
experiments += result
experiments += "</tr>\n"
experiments += "</table>\n{}".format(post)
if standalone:
with open(EXPERIMENT_HTML, "w") as f:
f.write(experiments)
else:
other_exps = experiments
for stat in sorted(stats):
TF.info("EXPERIMENT: {:>3} {}".format(stats[stat], VALUE_LABELS[stat]))
TF.info("EXPERIMENT: Generated html report")
# ### 5.8.4 High level formatting functions
#
# Here everything concerning output is brought together.
# In[63]:
def assess_exp(cf, np, nc, ll):
return (
"out"
if cf
else "rec"
if ll > nc * REC_CLIQUE_RATIO / 100 and ll <= nc * DUB_CLIQUE_RATIO / 100
else "dep"
if ll > nc * DEP_CLIQUE_RATIO / 100
else "dub"
if ll > nc * DUB_CLIQUE_RATIO / 100
else "nor"
)
# In[64]:
def printing():
global outputs, bin_cliques, base_name
TF.info(
"PRINT ({} {} {} M>{} S>{}): sorting out cliques".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
SIMILARITY_THRESHOLD,
)
)
xt_cliques = {
xterse_clique(c) for c in cliques
} # chapter cliques as tuples of (b, ch) tuples
bin_cliques = {
c for c in xt_cliques if len(c) == 2
} # chapter cliques with exactly two chapters
# all chapters that occur in binary chapter cliques
meta["# BINARY CHAPTER DIFFS"] = len(bin_cliques)
# We generate one kind of info for binary chapter cliques (the majority of cases).
# The remaining cases are verse cliques that do not occur in such chapters, e.g. because they
# have member chunks in the same chapter, or in multiple (more than two) chapters.
ncliques = len(cliques)
chapters_ok = assess_exp(CHUNK_FIXED, len(passages), ncliques, l_c_l) in {
"rec",
"nor",
"dub",
}
cdoing = "involving" if chapters_ok else "skipping"
TF.info(
"PRINT ({} {} {} M>{} S>{}): formatting {} cliques {} {} binary chapter diffs".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
SIMILARITY_THRESHOLD,
ncliques,
cdoing,
len(bin_cliques),
)
)
meta_html = "\n".join("{:<40} : {:>10}".format(k, str(meta[k])) for k in meta)
base_name = "{}_{}_{}_M{}_S{}".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
SIMILARITY_THRESHOLD,
)
param_spec = """
<table>
<tr><th>chunking method</th><td>{}</td></tr>
<tr><th>chunking description</th><td>{}</td></tr>
<tr><th>similarity method</th><td>{}</td></tr>
<tr><th>similarity threshold</th><td>{}</td></tr>
</table>
""".format(
CHUNK_LABELS[CHUNK_FIXED],
CHUNK_DESC,
SIMILARITY_METHOD,
SIMILARITY_THRESHOLD,
)
param_lab = "chunk-{}-{}-sim-{}-m{}-s{}".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
SIMILARITY_THRESHOLD,
)
index_name = base_name
all_name = "{}_{}".format("all", base_name)
cliques_name = "{}_{}".format("clique", base_name)
clique_links = []
clique_links.append(
("{}/{}.html".format(base_name, all_name), "Big list of all cliques")
)
nexist = 0
nnew = 0
if chapters_ok:
chapter_diffs = []
TF.info(
"PRINT ({} {} {} M>{} S>{}): Chapter diffs needed: {}".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
SIMILARITY_THRESHOLD,
len(bin_cliques),
)
)
bcc_text = "<p>These results look good, so a binary chapter comparison has been generated</p>"
for cl in sorted(bin_cliques):
lb1 = "{} {}".format(F.book.v(cl[0][0]), F.chapter.v(cl[0][1]))
lb2 = "{} {}".format(F.book.v(cl[1][0]), F.chapter.v(cl[1][1]))
hfilename = "{}_vs_{}.html".format(lb1, lb2).replace(" ", "_")
hfilepath = "{}/{}/{}".format(LOCAL_BASE_OUTP, CHAPTER_DIR, hfilename)
chapter_diffs.append(
(
lb1,
cl[0][1],
lb2,
cl[1][1],
"{}/{}/{}/{}".format(
SHEBANQ_TOOL,
LOCAL_BASE_OUTP,
CHAPTER_DIR,
hfilename,
),
)
)
if not os.path.exists(hfilepath):
htext = compare_chapters(cl[0][1], cl[1][1], lb1, lb2)
with open(hfilepath, "w") as f:
f.write(htext)
if VERBOSE:
TF.info(
"PRINT ({} {} {} M>{} S>{}): written {}".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
SIMILARITY_THRESHOLD,
hfilename,
)
)
nnew += 1
else:
nexist += 1
clique_links.append(
(
"../{}/{}".format(CHAPTER_DIR, hfilename),
"{} versus {}".format(lb1, lb2),
)
)
TF.info(
"PRINT ({} {} {} M>{} S>{}): Chapter diffs: {} newly created and {} already existing".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
SIMILARITY_THRESHOLD,
nnew,
nexist,
)
)
else:
bcc_text = "<p>These results look dubious at best, so no binary chapter comparison has been generated</p>"
allgeni_html = (
index_clique(cliques_name, i, c, ncliques) for (i, c) in enumerate(cliques)
)
allgen_htmls = []
allgen_html = ""
for (i, c) in enumerate(cliques):
if i % CLIQUES_PER_FILE == 0:
if i > 0:
allgen_htmls.append(allgen_html)
allgen_html = ""
allgen_html += '<h3><a name="c_{}">Clique {}</a></h3>\n{}'.format(
i, i, print_clique(c, ncliques)
)
allgen_htmls.append(allgen_html)
index_html_tpl = """
{}
<h1>Binary chapter comparisons</h1>
{}
{}
"""
content_file_tpl = """<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>{}</title>
<style type="text/css">
{}
</style>
</head>
<body>
<h1>{}</h1>
{}
<p><a href="#meta">more parameters and stats</a></p>
{}
<h1><a name="meta">Parameters and stats</a></h1>
<pre>{}</pre>
</body>
</html>"""
a_tpl_file = '<p><a target="_blank" href="{}">{}</a></p>'
index_html_file = index_html_tpl.format(
a_tpl_file.format(*clique_links[0]),
bcc_text,
"\n".join(a_tpl_file.format(*c) for c in clique_links[1:]),
)
listing_html = "{}\n".format(
"\n".join(allgeni_html),
)
for (subdir, fname, content_html, tit) in (
(None, index_name, index_html_file, "Index " + param_lab),
(base_name, all_name, listing_html, "Listing " + param_lab),
(base_name, cliques_name, allgen_htmls, "Cliques " + param_lab),
):
subdir = "" if subdir is None else (subdir + "/")
subdirabs = "{}/{}/{}".format(LOCAL_BASE_OUTP, EXPERIMENT_DIR, subdir)
if not os.path.exists(subdirabs):
os.makedirs(subdirabs)
if type(content_html) is list:
for (i, c_h) in enumerate(content_html):
fn = "{}_{}".format(fname, i)
t = "{}_{}".format(tit, i)
with open(
"{}/{}/{}{}.html".format(
LOCAL_BASE_OUTP, EXPERIMENT_DIR, subdir, fn
),
"w",
) as f:
f.write(
content_file_tpl.format(t, css, t, param_spec, c_h, meta_html)
)
else:
with open(
"{}/{}/{}{}.html".format(
LOCAL_BASE_OUTP, EXPERIMENT_DIR, subdir, fname
),
"w",
) as f:
f.write(
content_file_tpl.format(
tit, css, tit, param_spec, content_html, meta_html
)
)
destination = outputs.setdefault(MATRIX_THRESHOLD, {})
destination[(CHUNK_LB, CHUNK_DESC, SIMILARITY_METHOD, SIMILARITY_THRESHOLD)] = (
len(passages),
len(cliques),
l_c_l,
)
TF.info(
"PRINT ({} {} {} M>{} S>{}): formatted {} cliques ({} files) {} {} binary chapter diffs".format(
CHUNK_LB,
CHUNK_DESC,
SIMILARITY_METHOD,
MATRIX_THRESHOLD,
SIMILARITY_THRESHOLD,
len(cliques),
len(allgen_htmls),
cdoing,
len(bin_cliques),
)
)
# ## 5.9 Running experiments
#
# The workflows of doing a single experiment, and then all experiments, are defined.
# In[20]:
# In[65]:
outputs = {}
# In[66]:
def writeoutputs():
global outputs
with open(EXPERIMENT_PATH, "wb") as f:
pickle.dump(outputs, f, protocol=PICKLE_PROTOCOL)
# In[67]:
def readoutputs():
global outputs
if not os.path.exists(EXPERIMENT_PATH):
outputs = {}
else:
with open(EXPERIMENT_PATH, "rb") as f:
outputs = pickle.load(f)
# In[68]:
def do_experiment(chunk_f, chunk_i, sim_m, sim_thr, do_index):
if do_index:
readoutputs()
(do_chunk, do_prep, do_sim, do_clique, skip) = do_params(
chunk_f, chunk_i, sim_m, sim_thr
)
if skip:
return
chunking(do_chunk)
preparing(do_prep)
similarity(do_sim)
cliqueing(do_clique)
printing()
if do_index:
writeoutputs()
gen_html()
# In[69]:
def do_only_chunk(chunk_f, chunk_i):
do_chunk = do_params_chunk(chunk_f, chunk_i)
chunking(do_chunk)
# In[70]:
def reset_experiments():
global outputs
readoutputs()
outputs = {}
reset_params()
writeoutputs()
gen_html()
# In[71]:
def do_all_experiments(no_fixed=False, only_object=None):
global outputs
reset_experiments()
for chunk_f in (False,) if no_fixed else (True, False):
if chunk_f:
chunk_items = CHUNK_SIZES
else:
chunk_items = CHUNK_OBJECTS if only_object is None else (only_object,)
for chunk_i in chunk_items:
for sim_m in SIM_METHODS:
for sim_thr in SIMILARITIES:
do_experiment(chunk_f, chunk_i, sim_m, sim_thr, False)
writeoutputs()
gen_html()
gen_html(standalone=True)
# In[72]:
def do_all_chunks(no_fixed=False, only_object=None):
global outputs
reset_experiments()
for chunk_f in (False,) if no_fixed else (True, False):
if chunk_f:
chunk_items = CHUNK_SIZES
else:
chunk_items = CHUNK_OBJECTS if only_object is None else (only_object,)
for chunk_i in chunk_items:
do_only_chunk(chunk_f, chunk_i)
# In[73]:
def show_all_experiments():
readoutputs()
gen_html()
gen_html(standalone=True)
# # 6a
# # TF features
#
# Based on selected similarity matrices, we produce
# edge features between verses, containing weighted links to parallel verses.
#
# The features to deliver are called `crossrefSET`, `crossrefLCS`, and `crossref`.
#
# These are edge features; they are symmetric, and hence redundant:
# for every node, the *from* and *to* edges are identical.
#
# The `SET` variant is based on set similarity, the `LCS` one on longest-common-subsequence
# similarity.
#
# The `crossref` feature takes the union of both methods, with the average confidence.
#
# The weight is the similarity as an integer percentage, as it comes from the similarity matrix.
#
# ## Discussion
# We only produce the results of the similarity computation (the matrix); we do not do the cliqueing.
# There are many ways to make cliques, and that can easily be done by users of the data, once the
# matrix results are in place.
# We also do not produce pretty outputs, chapter diffs and other goodies.
# Just the raw similarity data.
#
# The matrix computation is expensive.
# We use fixed settings:
# * verse chunks
# * `SET` method / `LCS` method,
# * matrix threshold 50 / 60
# * similarity threshold 75
#
# That is, we compute a matrix that contains all pairs with similarity above 50 or 60
# depending on whether we do the `SET` method or the `LCS` method.
#
# From that matrix, we only use the similarities above 75.
# This gives us room to play without recomputing the matrix.
#
# We do not want to redo this computation if it can be avoided.
#
# Verse similarity is not something that is very sensitive to change in the encoding.
# It is very likely that similar verses in one version of the data agree with similar
# verses in all other versions.
#
# However, the node numbers of verses may change from version to version, so that part
# must be done again for each version.
#
# This is how we proceed:
# * the matrix computation gives us triples (v1, v2, d), where v1 and v2 are verse nodes and d is their similarity
# * we store the result of the matrix computation in a tab-separated file with the following fields:
#   method, v1, v2, d, v1Ref, v2Ref, where v1Ref and v2Ref are verse references,
#   each consisting of exactly 3 fields: book, chapter, verse
# * NB: the similarity table has only one entry for each pair of similar verses per method.
#   If (v1, v2) is in the table, (v2, v1) is not in the table for that method.
#
# When we run this notebook for the pipeline, we check for the presence of this file.
# If it is present, we use the verse references in it to compute the verse nodes that are valid for the
# version we are going to produce.
# That gives us all the data we need, so we can skip the matrix computation.
#
# If the file is not present, we have to compute the matrix.
# There is also a parameter, FORCE_MATRIX, which can force a recomputation of the matrix.
# We need some utility functions geared to TF feature production.
# The `get_verse()` function is simpler here, and we do not have to run full experiments.
# In[21]:
# In[74]:
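# To make the record layout described above concrete, here is a small illustrative
# sketch (not part of the pipeline; the node numbers, similarity and references are
# made up) of how one entry is serialized, in the same way writeSimTable() does below:
exampleEntry = (
    "SET",                 # method
    1414389,               # v1: verse node (hypothetical)
    1500202,               # v2: verse node (hypothetical)
    82,                    # similarity as integer percentage
    "Genesis", 10, 1,      # v1Ref: book, chapter, verse
    "1_Chronicles", 1, 5,  # v2Ref: book, chapter, verse
)
exampleLine = "\t".join(str(x) for x in exampleEntry)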
def writeSimTable(similars):
with open(TF_TABLE, "w") as h:
for entry in similars:
h.write("{}\n".format("\t".join(str(x) for x in entry)))
# In[75]:
def readSimTable():
similars = []
stats = set()
with open(TF_TABLE) as h:
for line in h:
(
method,
v1,
v2,
sim,
book1,
chapter1,
verse1,
book2,
chapter2,
verse2,
) = line.rstrip("\n").split("\t")
verseNode1 = T.nodeFromSection((book1, int(chapter1), int(verse1)))
verseNode2 = T.nodeFromSection((book2, int(chapter2), int(verse2)))
if verseNode1 != int(v1):
stats.add(verseNode1)
if verseNode2 != int(v2):
stats.add(verseNode2)
similars.append(
(
method,
verseNode1,
verseNode2,
int(sim),
book1,
int(chapter1),
int(verse1),
book2,
int(chapter2),
int(verse2),
)
)
nStats = len(stats)
if nStats:
utils.caption(
0,
"\t\tINFO: {} verse nodes have been changed between versions".format(
nStats
),
)
utils.caption(0, "\t\tINFO: We will save and use the recomputed ones")
writeSimTable(similars)
else:
utils.caption(
0, "\t\tINFO: All verse nodes are the same as in the previous version"
)
return similars
# In[76]:
def makeSimTable():
similars = []
for (method, similarityCutoff) in (
("SET", 75),
("LCS", 75),
):
(do_chunk, do_prep, do_sim, do_clique, skip) = do_params(
False, "verse", method, similarityCutoff
)
chunking(do_chunk)
preparing(do_prep)
similarity(do_sim or FORCE_MATRIX)
theseSimilars = []
for ((chunk1, chunk2), sim) in sorted(
(x, d) for (x, d) in chunk_dist.items() if d >= similarityCutoff
):
verseNode1 = L.u(chunks[chunk1][0], otype="verse")[0]
verseNode2 = L.u(chunks[chunk2][0], otype="verse")[0]
simInt = int(round(sim))
heading1 = T.sectionFromNode(verseNode1)
heading2 = T.sectionFromNode(verseNode2)
theseSimilars.append(
(method, verseNode1, verseNode2, simInt, *heading1, *heading2)
)
utils.caption(
0,
"\tMethod {}: found {} similar pairs of verses".format(
method, len(theseSimilars)
),
)
similars.extend(theseSimilars)
writeSimTable(similars)
return similars
# In[22]:
# In[77]:
utils.caption(4, "CROSSREFS: Fetching crossrefs")
# In[78]:
xTable = os.path.exists(TF_TABLE)
if FORCE_MATRIX:
utils.caption(
0,
"\t{} requested of {}".format(
"Recomputing" if xTable else "computing",
TF_TABLE,
),
)
else:
if xTable:
utils.caption(0, "\tReading existing {}".format(TF_TABLE))
else:
utils.caption(0, "\tComputing missing {}".format(TF_TABLE))
# In[79]:
if FORCE_MATRIX or not xTable:
similars = makeSimTable()
else:
similars = readSimTable()
# In[23]:
# In[80]:
if not SCRIPT:
print("\n".join(sorted(repr(sim) for sim in similars if sim[0] == "LCS")[0:10]))
print("\n".join(sorted(repr(sim) for sim in similars if sim[0] == "SET")[0:10]))
# In[81]:
crossrefData = {}
otherMethod = dict(LCS="SET", SET="LCS")
# In[82]:
for (method, v1, v2, sim, *x) in similars:
crossrefData.setdefault(method, {}).setdefault(v1, {})[v2] = sim
crossrefData.setdefault(method, {}).setdefault(v2, {})[v1] = sim
omethod = otherMethod[method]
otherSim = crossrefData.get(omethod, {}).get(v1, {}).get(v2, None)
thisSim = sim if otherSim is None else int(round((otherSim + sim) / 2))
crossrefData.setdefault("", {}).setdefault(v1, {})[v2] = thisSim
crossrefData.setdefault("", {}).setdefault(v2, {})[v1] = thisSim
# # Generating parallels module for Text-Fabric
#
# We generate the features `crossref`, `crossrefSET`, and `crossrefLCS`.
# They are edge features between verse nodes, with the similarity as weight.
# In[89]:
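# For orientation: each of these features maps a verse node to a dict of related verse
# nodes with the similarity percentage as edge value. A minimal sketch with made-up
# node numbers and weights (not actual data):
exampleEdgeFeature = {
    1414389: {1500202: 82},
    1500202: {1414389: 82},
}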
utils.caption(4, "Writing TF parallel features")
# In[90]:
newFeatureStr = "crossref crossrefSET crossrefLCS"
newFeatures = newFeatureStr.strip().split()
# In[91]:
genericMetaPath = f"{thisRepo}/yaml/generic.yaml"
parallelsMetaPath = f"{thisRepo}/yaml/parallels.yaml"
with open(genericMetaPath) as fh:
genericMeta = yaml.load(fh, Loader=yaml.FullLoader)
genericMeta["version"] = VERSION
with open(parallelsMetaPath) as fh:
parallelsMeta = formatMeta(yaml.load(fh, Loader=yaml.FullLoader))
metaData = {"": genericMeta, **parallelsMeta}
# In[92]:
nodeFeatures = dict()
edgeFeatures = dict()
for method in [""] + list(otherMethod):
edgeFeatures["crossref{}".format(method)] = crossrefData[method]
# In[93]:
for newFeature in newFeatures:
metaData[newFeature]["valueType"] = "int"
metaData[newFeature]["edgeValues"] = True
# In[94]:
TF = Fabric(locations=thisTempTf, silent=True)
TF.save(nodeFeatures=nodeFeatures, edgeFeatures=edgeFeatures, metaData=metaData)
# # Generating simple crossref notes for SHEBANQ
# We base them on the average of both methods and we supply the confidence.
# In[33]:
# In[ ]:
MAX_REFS = 10
# In[ ]:
def condenseX(vlabels):
cnd = []
(cur_b, cur_c) = (None, None)
for (b, c, v, d) in vlabels:
sep = (
""
if cur_b is None
else ". "
if cur_b != b
else "; "
if cur_c != c
else ", "
)
show_b = b + " " if cur_b != b else ""
show_c = str(c) + ":" if cur_b != b or cur_c != c else ""
(cur_b, cur_c) = (b, c)
cnd.append("{}[{}{}{}{}]".format(sep, show_b, show_c, v, d))
return cnd
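# A small illustrative call (with made-up references) showing the condensed format
# that condenseX() produces: book and chapter are only repeated when they change.
exampleLabels = [
    ("Genesis", 10, 1, " ~82%"),
    ("Genesis", 10, 5, " ~76%"),
    ("Genesis", 11, 2, " ~80%"),
]
exampleCondensed = condenseX(exampleLabels)
# exampleCondensed == ['[Genesis 10:1 ~82%]', ', [5 ~76%]', '; [11:2 ~80%]']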
# In[ ]:
crossrefBase = crossrefData[""]
# In[ ]:
# Group the cross references of each verse into chunks of at most MAX_REFS,
# so that no single note gets too long.
refsGrouped = []
nCrossrefs = 0
for (x, refs) in crossrefBase.items():
vys = sorted(refs.keys())
nCrossrefs += len(vys)
currefs = []
for vy in vys:
nr = len(currefs)
if nr == MAX_REFS:
refsGrouped.append((x, tuple(currefs)))
currefs = []
currefs.append(vy)
if len(currefs):
refsGrouped.append((x, tuple(currefs)))
# In[33]:
refsCompiled = []
for (x, vys) in refsGrouped:
vysd = [
(*T.sectionFromNode(vy, lang="la"), " ~{}%".format(crossrefBase[x][vy]))
for vy in vys
]
vysl = condenseX(vysd)
these_refs = []
for (i, vy) in enumerate(vysd):
link_text = vysl[i]
link_target = "{} {}:{}".format(vy[0], vy[1], vy[2])
these_refs.append("{}({})".format(link_text, link_target))
refsCompiled.append((x, " ".join(these_refs)))
utils.caption(
0,
"Compiled {} cross references into {} notes".format(nCrossrefs, len(refsCompiled)),
)
# In[34]:
# In[ ]:
sfields = """
version
book
chapter
verse
clause_atom
is_shared
is_published
status
keywords
ntext
""".strip().split()
# In[ ]:
sfields_fmt = ("{}\t" * (len(sfields) - 1)) + "{}\n"
# In[ ]:
ofs = open("{}/{}".format(thisNotes, notesFile), "w")
ofs.write("{}\n".format("\t".join(sfields)))
# In[ ]:
for (v, refs) in refsCompiled:
firstWord = L.d(v, otype="word")[0]
ca = F.number.v(L.u(firstWord, otype="clause_atom")[0])
(bk, ch, vs) = T.sectionFromNode(v, lang="la")
ofs.write(
sfields_fmt.format(
VERSION,
bk,
ch,
vs,
ca,
"T",
"",
CROSSREF_STATUS,
CROSSREF_KEYWORD,
refs,
)
)
# In[34]:
utils.caption(0, "Generated {} notes".format(len(refsCompiled)))
ofs.close()
# # Diffs
#
# Check differences with previous versions.
# In[35]:
# In[35]:
utils.checkDiffs(thisTempTf, thisTf, only=set(newFeatures))
# # Deliver
#
# Copy the new TF feature from the temporary location where it has been created to its final destination.
# In[36]:
# In[36]:
utils.deliverDataset(thisTempTf, thisTf)
# # Compile TF
# In[38]:
# In[ ]:
utils.caption(4, "Load and compile the new TF features")
# In[38]:
TF = Fabric(locations=[coreTf, thisTf], modules=[""])
api = TF.load(newFeatureStr)
api.makeAvailableIn(globals())
# # Examples
# We list all the crossrefs that the verses of Genesis 10 are involved in.
# In[39]:
# In[ ]:
utils.caption(4, "Test: crossrefs of Genesis 10")
# In[ ]:
chapter = ("Genesis", 10)
chapterNode = T.nodeFromSection(chapter)
startVerses = {}
# In[39]:
for method in ["", "SET", "LCS"]:
utils.caption(0, "\tMethod {}".format(method))
for verseNode in L.d(chapterNode, otype="verse"):
crossrefs = Es("crossref{}".format(method)).f(verseNode)
if crossrefs:
startVerses[T.sectionFromNode(verseNode)] = crossrefs
utils.caption(0, "\t\t{} start verses".format(len(startVerses)))
for (start, crossrefs) in sorted(startVerses.items()):
utils.caption(0, "\t\t{} {}:{}".format(*start), continuation=True)
for (target, confidence) in crossrefs:
utils.caption(
0,
"\t\t{:>20} {:<20} confidende {:>3}%".format(
"-" * 10 + ">",
"{} {}:{}".format(*T.sectionFromNode(target)),
confidence,
),
)
# In[29]:
# In[29]:
if SCRIPT:
stop(good=True)
# # 6b. SHEBANQ annotations
#
# The code below generates extensive crossref notes for `4b`, including clique overviews and chapter diffs.
# But since the pipeline in October 2017, we generate much simpler notes.
# That code is above.
#
# We retain this code here, in case we want to expand the crossref functionality in the future again.
#
# Based on selected similarity matrices, we produce a SHEBANQ note set of cross references for similar passages.
# In[30]:
# In[ ]:
def get_verse(i, ca=False):
return get_verse_w(chunks[i][0], ca=ca)
# In[ ]:
def get_verse_o(o, ca=False):
return get_verse_w(L.d(o, otype="word")[0], ca=ca)
# In[ ]:
def get_verse_w(w, ca=False):
book = F.book.v(L.u(w, otype="book")[0])
chapter = F.chapter.v(L.u(w, otype="chapter")[0])
verse = F.verse.v(L.u(w, otype="verse")[0])
if ca:
ca = F.number.v(L.u(w, otype="clause_atom")[0])
return (book, chapter, verse, ca) if ca else (book, chapter, verse)
# In[ ]:
def key_verse(x):
return (book_rank[x[0]], int(x[1]), int(x[2]))
# In[ ]:
MAX_REFS = 10
# In[ ]:
def condensex(vlabels):
cnd = []
(cur_b, cur_c) = (None, None)
for (b, c, v, d) in vlabels:
sep = (
""
if cur_b is None
else ". "
if cur_b != b
else "; "
if cur_c != c
else ", "
)
show_b = b + " " if cur_b != b else ""
show_c = c + ":" if cur_b != b or cur_c != c else ""
(cur_b, cur_c) = (b, c)
cnd.append("{}{}{}{}{}".format(sep, show_b, show_c, v, d))
return cnd
# In[ ]:
dfields = """
book1
chapter1
verse1
book2
chapter2
verse2
similarity
""".strip().split()
# In[ ]:
dfields_fmt = ("{}\t" * (len(dfields) - 1)) + "{}\n"
# In[ ]:
def get_crossrefs():
global crossrefs
TF.info("CROSSREFS: Fetching crossrefs")
crossrefs_proto = {}
crossrefs = {}
(chunk_f, chunk_i, sim_m) = SHEBANQ_MATRIX
sim_thr = SHEBANQ_SIMILARITY
(do_chunk, do_prep, do_sim, do_clique, skip) = do_params(
chunk_f, chunk_i, sim_m, sim_thr
)
if skip:
return
TF.info(
"CROSSREFS ({} {} {} S>{})".format(CHUNK_LBS[chunk_f], chunk_i, sim_m, sim_thr)
)
crossrefs_proto = {x for x in chunk_dist.items() if x[1] >= sim_thr}
TF.info(
"CROSSREFS ({} {} {} S>{}): found {} pairs".format(
CHUNK_LBS[chunk_f],
chunk_i,
sim_m,
sim_thr,
len(crossrefs_proto),
)
)
f = open(CROSSREF_DB_PATH, "w")
f.write("{}\n".format("\t".join(dfields)))
for ((x, y), d) in crossrefs_proto:
vx = get_verse(x)
vy = get_verse(y)
rd = int(round(d))
crossrefs.setdefault(x, {})[vy] = rd
crossrefs.setdefault(y, {})[vx] = rd
f.write(dfields_fmt.format(*(vx + vy + (rd,))))
total = sum(len(x) for x in crossrefs.values())
f.close()
TF.info(
"CROSSREFS: Found {} crossreferences and wrote {} pairs".format(
total, len(crossrefs_proto)
)
)
# In[ ]:
def get_specific_crossrefs(chunk_f, chunk_i, sim_m, sim_thr, write_to):
(do_chunk, do_prep, do_sim, do_clique, skip) = do_params(
chunk_f, chunk_i, sim_m, sim_thr
)
if skip:
return
chunking(do_chunk)
preparing(do_prep)
similarity(do_sim)
TF.info("CROSSREFS: Fetching crossrefs")
crossrefs_proto = {}
crossrefs = {}
(do_chunk, do_prep, do_sim, do_clique, skip) = do_params(
chunk_f, chunk_i, sim_m, sim_thr
)
if skip:
return
TF.info(
"CROSSREFS ({} {} {} S>{})".format(CHUNK_LBS[chunk_f], chunk_i, sim_m, sim_thr)
)
crossrefs_proto = {x for x in chunk_dist.items() if x[1] >= sim_thr}
TF.info(
"CROSSREFS ({} {} {} S>{}): found {} pairs".format(
CHUNK_LBS[chunk_f],
chunk_i,
sim_m,
sim_thr,
len(crossrefs_proto),
)
)
f = open("files/{}".format(write_to), "w")
f.write("{}\n".format("\t".join(dfields)))
for ((x, y), d) in crossrefs_proto:
vx = get_verse(x)
vy = get_verse(y)
rd = int(round(d))
crossrefs.setdefault(x, {})[vy] = rd
crossrefs.setdefault(y, {})[vx] = rd
f.write(dfields_fmt.format(*(vx + vy + (rd,))))
total = sum(len(x) for x in crossrefs.values())
f.close()
TF.info(
"CROSSREFS: Found {} crossreferences and wrote {} pairs".format(
total, len(crossrefs_proto)
)
)
# In[ ]:
def compile_refs():
global refs_compiled
refs_grouped = []
for x in sorted(crossrefs):
refs = crossrefs[x]
vys = sorted(refs.keys(), key=key_verse)
currefs = []
for vy in vys:
nr = len(currefs)
if nr == MAX_REFS:
refs_grouped.append((x, tuple(currefs)))
currefs = []
currefs.append(vy)
if len(currefs):
refs_grouped.append((x, tuple(currefs)))
refs_compiled = []
for (x, vys) in refs_grouped:
vysd = [(vy[0], vy[1], vy[2], " ~{}%".format(crossrefs[x][vy])) for vy in vys]
vysl = condensex(vysd)
these_refs = []
for (i, vy) in enumerate(vysd):
link_text = vysl[i]
link_target = "{} {}:{}".format(vy[0], vy[1], vy[2])
these_refs.append("[{}]({})".format(link_text, link_target))
refs_compiled.append((x, " ".join(these_refs)))
TF.info(
"CROSSREFS: Compiled cross references into {} notes".format(len(refs_compiled))
)
# In[ ]:
def get_chapter_diffs():
global chapter_diffs
chapter_diffs = []
for cl in sorted(bin_cliques):
lb1 = "{} {}".format(F.book.v(cl[0][0]), F.chapter.v(cl[0][1]))
lb2 = "{} {}".format(F.book.v(cl[1][0]), F.chapter.v(cl[1][1]))
hfilename = "{}_vs_{}.html".format(lb1, lb2).replace(" ", "_")
chapter_diffs.append(
(
lb1,
cl[0][1],
lb2,
cl[1][1],
"{}/{}/{}/{}".format(
SHEBANQ_TOOL,
LOCAL_BASE_OUTP,
CHAPTER_DIR,
hfilename,
),
)
)
TF.info("CROSSREFS: Added {} chapter diffs".format(2 * len(chapter_diffs)))
# In[ ]:
def get_clique_refs():
global clique_refs
clique_refs = []
for (i, c) in enumerate(cliques):
for j in c:
seq = i // CLIQUES_PER_FILE
clique_refs.append(
(
j,
i,
"{}/{}/{}/{}/clique_{}_{}.html#c_{}".format(
SHEBANQ_TOOL,
LOCAL_BASE_OUTP,
EXPERIMENT_DIR,
base_name,
base_name,
seq,
i,
),
)
)
TF.info("CROSSREFS: Added {} clique references".format(len(clique_refs)))
# In[ ]:
sfields = """
version
book
chapter
verse
clause_atom
is_shared
is_published
status
keywords
ntext
""".strip().split()
# In[ ]:
sfields_fmt = ("{}\t" * (len(sfields) - 1)) + "{}\n"
# In[ ]:
def generate_notes():
with open(NOTES_PATH, "w") as f:
f.write("{}\n".format("\t".join(sfields)))
x = next(F.otype.s("word"))
(bk, ch, vs, ca) = get_verse(x, ca=True)
f.write(
sfields_fmt.format(
VERSION,
bk,
ch,
vs,
ca,
"T",
"",
CROSSREF_STATUS,
CROSSREF_KEYWORD,
"""The crossref notes are the result of a computation without manual tweaks.
Parameters: chunk by verse, similarity method SET with threshold 65.
[Here](tool=parallel) is an account of the generation method.""".replace(
"\n", " "
),
)
)
for (lb1, ch1, lb2, ch2, fl) in chapter_diffs:
(bk1, ch1, vs1, ca1) = get_verse_o(ch1, ca=True)
(bk2, ch2, vs2, ca2) = get_verse_o(ch2, ca=True)
f.write(
sfields_fmt.format(
VERSION,
bk1,
ch1,
vs1,
ca1,
"T",
"",
CROSSREF_STATUS,
CROSSREF_KEYWORD,
"[chapter diff with {}](tool:{})".format(lb2, fl),
)
)
f.write(
sfields_fmt.format(
VERSION,
bk2,
ch2,
vs2,
ca2,
"T",
"",
CROSSREF_STATUS,
CROSSREF_KEYWORD,
"[chapter diff with {}](tool:{})".format(lb1, fl),
)
)
for (x, refs) in refs_compiled:
(bk, ch, vs, ca) = get_verse(x, ca=True)
f.write(
sfields_fmt.format(
VERSION,
bk,
ch,
vs,
ca,
"T",
"",
CROSSREF_STATUS,
CROSSREF_KEYWORD,
refs,
)
)
for (chunk, clique, fl) in clique_refs:
(bk, ch, vs, ca) = get_verse(chunk, ca=True)
f.write(
sfields_fmt.format(
VERSION,
bk,
ch,
vs,
ca,
"T",
"",
CROSSREF_STATUS,
CROSSREF_KEYWORD,
"[all variants (clique {})](tool:{})".format(clique, fl),
)
)
TF.info(
"CROSSREFS: Generated {} notes".format(
1 + len(refs_compiled) + 2 * len(chapter_diffs) + len(clique_refs)
)
)
# In[30]:
def crossrefs2shebanq():
expr = SHEBANQ_MATRIX + (SHEBANQ_SIMILARITY,)
do_experiment(*(expr + (True,)))
get_crossrefs()
compile_refs()
get_chapter_diffs()
get_clique_refs()
generate_notes()
# # 7. Main
#
# In the cell below you can select the experiments you want to carry out.
#
# The previous cells contain just definitions and parameters.
# The next cell will do work.
#
# If none of the matrices and cliques have been computed before on the system where this runs, doing all experiments might take multiple hours (4-8).
# In[ ]:
# In[ ]:
reset_params()
# do_experiment(False, 'sentence', 'LCS', 60, False)
# In[ ]:
do_all_experiments()
# do_all_experiments(no_fixed=True, only_object='chapter')
# crossrefs2shebanq()
# show_all_experiments()
# get_specific_crossrefs(False, 'verse', 'LCS', 60, 'crossrefs_lcs_db.txt')
# do_all_chunks()
# In[ ]:
# In[ ]:
HTML(ecss)
# # 8. Overview of the similarities
#
# Here are the plots of two similarity matrices
# * with verses as chunks and SET as similarity method
# * with verses as chunks and LCS as similarity method
#
# Horizontally you see the degree of similarity from the matrix threshold up to 100%, vertically the (logarithm of the) number of pairs that have that (rounded) similarity.
# In[ ]:
# In[ ]:
do_experiment(False, "verse", "SET", 60, False)
distances = collections.Counter()
for (x, d) in chunk_dist.items():
distances[int(round(d))] += 1
# In[ ]:
x = range(MATRIX_THRESHOLD, 101)
fig = plt.figure(figsize=[15, 4])
plt.plot(x, [math.log(max((1, distances[y]))) for y in x], "b-")
plt.axis([MATRIX_THRESHOLD, 101, 0, 15])
plt.xlabel("similarity as %")
plt.ylabel("log # similarities")
plt.xticks(x, x, rotation="vertical")
plt.margins(0.2)
plt.subplots_adjust(bottom=0.15)
plt.title("distances")
# In[ ]:
# In[ ]:
do_experiment(False, "verse", "LCS", 60, False)
distances = collections.Counter()
for (x, d) in chunk_dist.items():
distances[int(round(d))] += 1
# In[ ]:
x = range(MATRIX_THRESHOLD, 101)
fig = plt.figure(figsize=[15, 4])
plt.plot(x, [math.log(max((1, distances[y]))) for y in x], "b-")
plt.axis([MATRIX_THRESHOLD, 101, 0, 15])
plt.xlabel("similarity as %")
plt.ylabel("log # similarities")
plt.xticks(x, x, rotation="vertical")
plt.margins(0.2)
plt.subplots_adjust(bottom=0.15)
plt.title("distances")
# In[ ]:
| 30.896048
| 6,888
| 0.579503
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 51,128
| 0.473894
|
a157d32f7b13b416fb6bf59f5d4cfdbbe25ce080
| 4,870
|
py
|
Python
|
src/python/pants/goal/initialize_reporting.py
|
WamBamBoozle/pants
|
98cadfa1a5d337146903eb66548cfe955f2627b3
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/goal/initialize_reporting.py
|
WamBamBoozle/pants
|
98cadfa1a5d337146903eb66548cfe955f2627b3
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/goal/initialize_reporting.py
|
WamBamBoozle/pants
|
98cadfa1a5d337146903eb66548cfe955f2627b3
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import errno
import os
import sys
from six import StringIO
from pants.reporting.html_reporter import HtmlReporter
from pants.reporting.plaintext_reporter import PlainTextReporter
from pants.reporting.quiet_reporter import QuietReporter
from pants.reporting.report import Report, ReportingError
from pants.reporting.reporting_server import ReportingServerManager
from pants.util.dirutil import safe_mkdir, safe_rmtree
def initial_reporting(config, run_tracker):
"""Sets up the initial reporting configuration.
Will be changed after we parse cmd-line flags.
"""
reports_dir = os.path.join(config.getdefault('pants_workdir'), 'reports')
link_to_latest = os.path.join(reports_dir, 'latest')
run_id = run_tracker.run_info.get_info('id')
if run_id is None:
raise ReportingError('No run_id set')
run_dir = os.path.join(reports_dir, run_id)
safe_rmtree(run_dir)
html_dir = os.path.join(run_dir, 'html')
safe_mkdir(html_dir)
try:
if os.path.lexists(link_to_latest):
os.unlink(link_to_latest)
os.symlink(run_dir, link_to_latest)
except OSError as e:
# Another run may beat us to deletion or creation.
if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT):
raise
report = Report()
# Capture initial console reporting into a buffer. We'll do something with it once
# we know what the cmd-line flag settings are.
outfile = StringIO()
capturing_reporter_settings = PlainTextReporter.Settings(outfile=outfile, log_level=Report.INFO,
color=False, indent=True, timing=False,
cache_stats=False)
capturing_reporter = PlainTextReporter(run_tracker, capturing_reporter_settings)
report.add_reporter('capturing', capturing_reporter)
# Set up HTML reporting. We always want that.
template_dir = config.get('reporting', 'reports_template_dir')
html_reporter_settings = HtmlReporter.Settings(log_level=Report.INFO,
html_dir=html_dir,
template_dir=template_dir)
html_reporter = HtmlReporter(run_tracker, html_reporter_settings)
report.add_reporter('html', html_reporter)
# Add some useful RunInfo.
run_tracker.run_info.add_info('default_report', html_reporter.report_path())
(_, port) = ReportingServerManager.get_current_server_pid_and_port()
if port:
run_tracker.run_info.add_info('report_url', 'http://localhost:{}/run/{}'.format(port, run_id))
return report
def update_reporting(options, is_quiet_task, run_tracker):
"""Updates reporting config once we've parsed cmd-line flags."""
# Get any output silently buffered in the old console reporter, and remove it.
old_outfile = run_tracker.report.remove_reporter('capturing').settings.outfile
old_outfile.flush()
buffered_output = old_outfile.getvalue()
old_outfile.close()
log_level = Report.log_level_from_string(options.level or 'info')
  # Ideally, we'd use terminfo or somesuch to discover whether a
  # terminal truly supports color, but most terminals that don't support color set TERM=dumb.
color = (options.colors) and (os.getenv('TERM') != 'dumb')
timing = options.time
cache_stats = options.time # TODO: Separate flag for this?
if options.quiet or is_quiet_task:
console_reporter = QuietReporter(run_tracker,
QuietReporter.Settings(log_level=log_level, color=color))
else:
# Set up the new console reporter.
settings = PlainTextReporter.Settings(log_level=log_level, outfile=sys.stdout, color=color,
indent=True, timing=timing, cache_stats=cache_stats)
console_reporter = PlainTextReporter(run_tracker, settings)
console_reporter.emit(buffered_output)
console_reporter.flush()
run_tracker.report.add_reporter('console', console_reporter)
if options.logdir:
# Also write plaintext logs to a file. This is completely separate from the html reports.
safe_mkdir(options.logdir)
run_id = run_tracker.run_info.get_info('id')
outfile = open(os.path.join(options.logdir, '{}.log'.format(run_id)), 'w')
settings = PlainTextReporter.Settings(log_level=log_level, outfile=outfile, color=False,
indent=True, timing=True, cache_stats=True)
logfile_reporter = PlainTextReporter(run_tracker, settings)
logfile_reporter.emit(buffered_output)
logfile_reporter.flush()
run_tracker.report.add_reporter('logfile', logfile_reporter)
| 42.719298
| 98
| 0.716222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,145
| 0.235113
|
a1586b7c08a86b032589e3a797f710af94eef3ed
| 4,947
|
py
|
Python
|
ResolvePageSwitcher.py
|
IgorRidanovic/DaVinciResolve-PageSwitcher
|
5a771d8fa319454dbcf986b8921e5fa0c665baa9
|
[
"MIT"
] | 17
|
2018-06-01T07:30:33.000Z
|
2021-12-22T21:05:29.000Z
|
ResolvePageSwitcher.py
|
IgorRidanovic/DaVinciResolve-PageSwitcher
|
5a771d8fa319454dbcf986b8921e5fa0c665baa9
|
[
"MIT"
] | 2
|
2018-10-23T17:32:45.000Z
|
2020-12-09T07:48:06.000Z
|
ResolvePageSwitcher.py
|
IgorRidanovic/DaVinciResolve-PageSwitcher
|
5a771d8fa319454dbcf986b8921e5fa0c665baa9
|
[
"MIT"
] | 5
|
2018-09-06T02:11:56.000Z
|
2020-10-25T11:25:22.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# DaVinci Resolve scripting proof of concept. Resolve page external switcher.
# Local or TCP/IP control mode.
# Refer to Resolve V15 public beta 2 scripting API documentation for host setup.
# Copyright 2018 Igor Riđanović, www.hdhead.com
from PyQt4 import QtCore, QtGui
import sys
import socket
# If API module not found assume we're working as a remote control
try:
import DaVinciResolveScript
#Instantiate Resolve object
resolve = DaVinciResolveScript.scriptapp('Resolve')
checkboxState = False
except ImportError:
print 'Resolve API not found.'
checkboxState = True
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8('Resolve Page Switcher'))
Form.resize(561, 88)
Form.setStyleSheet(_fromUtf8('background-color: #282828;\
border-color: #555555;\
color: #929292;\
font-size: 13px;'\
))
self.horizontalLayout = QtGui.QHBoxLayout(Form)
self.horizontalLayout.setObjectName(_fromUtf8('horizontalLayout'))
self.mediaButton = QtGui.QPushButton(Form)
self.mediaButton.setObjectName(_fromUtf8('mediaButton'))
self.horizontalLayout.addWidget(self.mediaButton)
self.editButton = QtGui.QPushButton(Form)
self.editButton.setObjectName(_fromUtf8('editButton'))
self.horizontalLayout.addWidget(self.editButton)
self.fusionButton = QtGui.QPushButton(Form)
self.fusionButton.setObjectName(_fromUtf8('fusionButton'))
self.horizontalLayout.addWidget(self.fusionButton)
self.colorButton = QtGui.QPushButton(Form)
self.colorButton.setObjectName(_fromUtf8('colorButton'))
self.horizontalLayout.addWidget(self.colorButton)
self.fairlightButton = QtGui.QPushButton(Form)
self.fairlightButton.setObjectName(_fromUtf8('fairlightButton'))
self.horizontalLayout.addWidget(self.fairlightButton)
self.deliverButton = QtGui.QPushButton(Form)
self.deliverButton.setObjectName(_fromUtf8('deliverButton'))
self.horizontalLayout.addWidget(self.deliverButton)
self.tcpipcheckBox = QtGui.QCheckBox(Form)
self.tcpipcheckBox.setObjectName(_fromUtf8('tcpipcheckBox'))
self.tcpipcheckBox.setChecked(checkboxState)
self.horizontalLayout.addWidget(self.tcpipcheckBox)
self.mediaButton.clicked.connect(lambda: self.pageswitch('media'))
self.editButton.clicked.connect(lambda: self.pageswitch('edit'))
self.fusionButton.clicked.connect(lambda: self.pageswitch('fusion'))
self.colorButton.clicked.connect(lambda: self.pageswitch('color'))
self.fairlightButton.clicked.connect(lambda: self.pageswitch('fairlight'))
self.deliverButton.clicked.connect(lambda: self.pageswitch('deliver'))
self.mediaButton.setStyleSheet(_fromUtf8('background-color: #181818;'))
self.editButton.setStyleSheet(_fromUtf8('background-color: #181818;'))
self.fusionButton.setStyleSheet(_fromUtf8('background-color: #181818;'))
self.colorButton.setStyleSheet(_fromUtf8('background-color: #181818;'))
self.fairlightButton.setStyleSheet(_fromUtf8('background-color: #181818;'))
self.deliverButton.setStyleSheet(_fromUtf8('background-color: #181818;'))
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate('Resolve Page Switcher',\
'Resolve Page Switcher', None))
self.mediaButton.setText(_translate('Form', 'Media', None))
self.editButton.setText(_translate('Form', 'Edit', None))
self.fusionButton.setText(_translate('Form', 'Fusion', None))
self.colorButton.setText(_translate('Form', 'Color', None))
self.fairlightButton.setText(_translate('Form', 'Fairlight', None))
self.deliverButton.setText(_translate('Form', 'Deliver', None))
self.tcpipcheckBox.setText(_translate("Form", "TCP/IP remote", None))
    def send(self, message):
        s = socket.socket()
        try:
            s.connect((server, port))
        except socket.error:
            # Bail out when the server cannot be reached, as the message says.
            print 'Server unavailable. Exiting.'
            sys.exit(1)
        s.send(message)
        return s.recv(32)
def pageswitch(self, page):
# Send page name to server to switch remote Resolve's page
if self.tcpipcheckBox.isChecked():
response = self.send(page)
print 'Server echo:', response
# Switch local Resolve's page if API is available
else:
try:
resolve.OpenPage(page)
print 'Switched to', page
except NameError:
print 'Resolve API not found. Run in remote mode instead?'
if __name__ == '__main__':
# Assign server parameters
server = '192.168.1.1'
port = 7779
app = QtGui.QApplication(sys.argv)
Form = QtGui.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
| 36.91791
| 80
| 0.761472
| 3,703
| 0.748232
| 0
| 0
| 0
| 0
| 0
| 0
| 1,291
| 0.260861
|
a15a0aec2c8adfc46228db42100cded4658cf98f
| 14,022
|
py
|
Python
|
Make Data Files.py
|
micitz/Dune_Aspect_Ratio_XB_Paper
|
25395219886facb3a7e68835e8aae406dbff0b4d
|
[
"MIT"
] | null | null | null |
Make Data Files.py
|
micitz/Dune_Aspect_Ratio_XB_Paper
|
25395219886facb3a7e68835e8aae406dbff0b4d
|
[
"MIT"
] | null | null | null |
Make Data Files.py
|
micitz/Dune_Aspect_Ratio_XB_Paper
|
25395219886facb3a7e68835e8aae406dbff0b4d
|
[
"MIT"
] | null | null | null |
"""
All the data sources are scattered around the D drive, this script
organizes it and consolidates it into the "Data" subfolder in the
"Chapter 2 Dune Aspect Ratio" folder.
Michael Itzkin, 5/6/2020
"""
import shutil as sh
import pandas as pd
import numpy as np
import os
# Set the data directory to save files into
DATA_DIR = os.path.join('..', 'Data')
# Set the directory with most of the XBeach data
XB_DIR = os.path.join('..', '..', 'XBeach Modelling', 'Dune Complexity Experiments')
def bogue_lidar_data():
"""
Load all Bogue Banks morphometrics from 1997-2016
and return a dataframe of aspect ratios and natural
dune volumes
"""
# Set a list of years
years = [1997, 1998, 1999, 2000, 2004, 2005, 2010, 2011, 2014, 2016]
# Set an empty dataframe
morpho = pd.DataFrame()
# Loop through the years and load the data
for year in years:
# Set a path to the data and load
path = os.path.join('..', '..', 'Chapter 1 Sand Fences', 'Data', f'Morphometrics for Bogue {year}.csv')
temp = pd.read_csv(path, delimiter=',', header=0)
# Add a column for the year
temp['Year'] = year
# Append the data to the main dataframe
morpho = pd.concat([morpho, temp])
# Make a new dataframe with just aspect ratios and volumes
data = pd.DataFrame()
data['Year'] = morpho['Year']
data['Ratio'] = (morpho['y_crest'] - morpho['y_toe']) / (morpho['x_heel'] - morpho['x_toe'])
data['Volume'] = morpho['Natural Dune Volume']
# Save the Dataframe to the data folder
save_name = os.path.join(DATA_DIR, 'Bogue Banks Volumes and Aspect Ratios.csv')
data.to_csv(save_name, index=False)
print(f'File Saved: {save_name}')
def initial_profiles():
"""
Take all the initial profiles and place them
into a Dataframe to save as a .csv
Make a column for the experiment names, a column for
the X-grids, and columns for the profiles
"""
# Set the experiment names. The initial profiles are the same regardless of
# the surge level so just take from the half surge simulations
experiments = ['Toes Joined', 'Crests Joined', 'Heels Joined', 'Fenced']
# Set an empty dataframe
profiles = pd.DataFrame()
# Loop through the experiments
for experiment in experiments:
# Set a path to the profiles
PROFILE_DIR = os.path.join(XB_DIR, f'{experiment} Half Surge')
# Load the x-grid
x_grid_fname = os.path.join(PROFILE_DIR, 'Dune Complexity 1 1', 'x.grd')
x_grid = np.loadtxt(x_grid_fname)
# Load the dunes
dune_1 = np.loadtxt(fname=os.path.join(PROFILE_DIR, 'Dune Complexity 1 1', 'bed.dep'))
dune_2 = np.loadtxt(fname=os.path.join(PROFILE_DIR, 'Dune Complexity 20 1', 'bed.dep'))
dune_3 = np.loadtxt(fname=os.path.join(PROFILE_DIR, 'Dune Complexity 40 1', 'bed.dep'))
dune_4 = np.loadtxt(fname=os.path.join(PROFILE_DIR, 'Dune Complexity 60 1', 'bed.dep'))
dune_5 = np.loadtxt(fname=os.path.join(PROFILE_DIR, 'Dune Complexity -20 1', 'bed.dep'))
dune_6 = np.loadtxt(fname=os.path.join(PROFILE_DIR, 'Dune Complexity -40 1', 'bed.dep'))
dune_7 = np.loadtxt(fname=os.path.join(PROFILE_DIR, 'Dune Complexity -60 1', 'bed.dep'))
# Put all of the stretched dunes into a dataframe
dune_dict = {
'Experiment': experiment.replace('Joined', 'Aligned'),
'X': x_grid,
'1 pct': dune_1,
'20 pct': dune_2,
'40 pct': dune_3,
'60 pct': dune_4,
'-20 pct': dune_5,
'-40 pct': dune_6,
'-60 pct': dune_7,
}
dune_data = pd.DataFrame(data=dune_dict)
# Concatenate the Dataframes
profiles = pd.concat([profiles, dune_data])
# Save the Dataframe to the data folder
save_name = os.path.join(DATA_DIR, 'Initial Profiles.csv')
profiles.to_csv(save_name, index=False)
print(f'File Saved: {save_name}')
def initial_ratios():
"""
Make a .csv file with the initial dune aspect ratios and
dune volumes for the profiles used in the simulations
"""
# Set the experiment names. The initial profiles are the same regardless of
# the surge level so just take from the half surge simulations
experiments = ['Toes Joined', 'Crests Joined', 'Heels Joined', 'Fenced']
# Set an empty dataframe
ratios = pd.DataFrame()
# Loop through the experiments
for experiment in experiments:
# Load the initial dune ratios
init_ratio_fname = os.path.join(XB_DIR, f'{experiment} Half Surge', 'Setup Data', 'Initial Dune Ratios.csv')
init_ratios = pd.read_csv(init_ratio_fname, delimiter=',', header=None, names=['Stretch', 'Ratio', 'Volume'])
# Add a column for the experiment name
init_ratios['Experiment'] = experiment.replace('Joined', 'Aligned')
# Concatenate the data
ratios = pd.concat([ratios, init_ratios])
# Save the Dataframe to the data folder
save_name = os.path.join(DATA_DIR, 'Initial Dune Ratios.csv')
ratios.to_csv(save_name, index=False)
print(f'File Saved: {save_name}')
def joaquin_and_florence():
"""
Load the storm surge time series' from
Tropical Storm Joaquin and Hurricane
Florence, put them in a .csv file
"""
# Loop through the storms
for storm in ['Joaquin', 'Florence']:
# Load the tide predictions and observations as a Pandas dataframe
filename = os.path.join(XB_DIR, 'Setup Data', f'{storm}.csv')
if storm == 'Joaquin':
parse_dates_cols = ['Date', 'Time']
data_columns = ['Time', 'Predicted', 'Observed']
else:
parse_dates_cols = ['Date', 'Time (GMT)']
data_columns = ['Time', 'Predicted', 'Preliminary', 'Observed']
data = pd.read_csv(filename, delimiter=',', parse_dates=[parse_dates_cols], header=0)
data.columns = data_columns
# Calculate the non-tidal residual
data['NTR'] = data['Observed'] - data['Predicted']
# Load the time data
times = data['Time'].tolist()
data['String Times'] = [t.strftime('%Y-%m-%d %H') for t in times]
# Save the DataFrame as a .csv
save_name = os.path.join(DATA_DIR, f'{storm}.csv')
data.to_csv(save_name, index=False)
def move_csv_output():
"""
Take the .csv files and move them into the "Data" folder,
then rename them from "xboutput.nc" to the name of the simulation
"""
# Set lists with the dune configurations, storm surge
# modifications, storm duration increases, and dune aspect
# ratio stretches
dunes = ['Toes Joined', 'Crests Joined', 'Heels Joined', 'Fenced']
surges = ['Half', 'Normal', 'One Half']
durations = [1, 12, 18, 24, 36, 48]
stretches = [-60, -40, -20, 1, 20, 40, 60]
# Loop through the dunes and surges
for dune in dunes:
for surge in surges:
# Set the experiment folder name
experiment_name = f'{dune} {surge} Surge'
experiment_folder = os.path.join(XB_DIR, experiment_name)
# Make a target folder to move the runs into
save_folder = os.path.join(DATA_DIR, 'XBeach Morphometrics', experiment_name)
if not os.path.exists(save_folder):
os.mkdir(save_folder)
# Loop through the dunes and durations within the experiment
for stretch in stretches:
for duration in durations:
# Set the simulation folder
run_name = f'Dune Complexity {stretch} {duration}'
simulation_folder = os.path.join(experiment_folder, run_name)
# Set the XBeach output file as the source. Set the destination
# name. Then copy the file over
source = os.path.join(simulation_folder, f'{run_name} Morphometrics.csv')
if os.path.exists(source):
destination = os.path.join(save_folder, f'{run_name} Morphometrics.csv')
if not os.path.exists(destination):
sh.copy(source, destination)
print(f'File Successfully Copied: {destination}')
else:
print(f'File already exists: {destination}')
else:
print(f'FILE DOES NOT EXIST: {source}')
def move_field_data():
"""
Move the field data morphometrics from 2017
and 2018 into the data folder
"""
# Set the years
years = [2017, 2018]
# Set a path to the field data
field_dir = os.path.join('..', '..', 'Bogue Banks Field Data')
# Loop through the years
for year in years:
# Identify the source file
source = os.path.join(field_dir, str(year), f'Morphometrics for Bogue Banks {year}.csv')
# Set the target
destination = os.path.join(DATA_DIR, f'Morphometrics for Bogue Banks {year}.csv')
# Copy the file
sh.copy(source, destination)
def move_netcdf_output():
"""
Take the netCDF files and move them into the "Data" folder,
then rename them from "xboutput.nc" to the name of the simulation
"""
# Set lists with the dune configurations, storm surge
# modifications, storm duration increases, and dune aspect
# ratio stretches
dunes = ['Toes Joined', 'Crests Joined', 'Heels Joined', 'Fenced']
surges = ['Half', 'Normal', 'One Half']
durations = [1, 12, 18, 24, 36, 48]
stretches = [-60, -40, -20, 1, 20, 40, 60]
# Loop through the dunes and surges
for dune in dunes:
for surge in surges:
# Set the experiment folder name
experiment_name = f'{dune} {surge} Surge'
experiment_folder = os.path.join(XB_DIR, experiment_name)
# Make a target folder to move the runs into
save_folder = os.path.join(DATA_DIR, 'XBeach Output', experiment_name)
if not os.path.exists(save_folder):
os.mkdir(save_folder)
# Loop through the dunes and durations within the experiment
for stretch in stretches:
for duration in durations:
# Set the simulation folder
run_name = f'Dune Complexity {stretch} {duration}'
simulation_folder = os.path.join(experiment_folder, run_name)
# Set the XBeach output file as the source. Set the destination
# name. Then copy the file over
source = os.path.join(simulation_folder, 'xboutput.nc')
if os.path.exists(source):
destination = os.path.join(save_folder, f'{run_name}.nc')
if not os.path.exists(destination):
sh.copy(source, destination)
print(f'File Successfully Copied: {destination}')
else:
print(f'File already exists: {destination}')
else:
print(f'FILE DOES NOT EXIST: {source}')
def surge_time_series():
"""
Put all the storm time series' into
a .csv file that can be loaded as a
DataFrame
"""
# Set a list of storm surge modifiers
# and storm duration increases
surges, surge_labels = [0.5, 1.0, 1.5], ['Half', 'Normal', 'One Half']
durations = [1, 12, 18, 24, 36, 48]
# Make an empty DataFrame to loop into
surge_df = pd.DataFrame()
# Loop through the surges
for surge, label in zip(surges, surge_labels):
# Loop through the durations
for duration in durations:
# The DataFrame won't work if the columns are different
# lengths so place them all in a preset 125 "hour" long
# array so that they'll fit in the DataFrame
time_series = np.full((1, 125), fill_value=np.nan)[0]
# Load the data and place it in the time series NaN array
filename = os.path.join(XB_DIR, f'Toes Joined {label} Surge', f'Dune Complexity 1 {duration}', 'ntr.txt')
ntr = np.genfromtxt(filename, dtype=np.float32)
time_series[:len(ntr)] = ntr
# Place the time series in the dict
surge_df[f'{label} {duration}'] = time_series
# Save the DataFrame as a .csv file
save_name = os.path.join(DATA_DIR, 'Storm Surge Time Series.csv')
surge_df.to_csv(save_name, index=False)
def main():
"""
Main program function to consolidate all the
data sources
"""
# Make a .csv file with the initial profiles used
# initial_profiles()
# Make a .csv file with the initial dune ratios
# initial_ratios()
# Make a .csv file with all the natural dune volumes
# and aspect ratios measured from Bogue Banks LiDAR
# bogue_lidar_data()
# Make a .csv file with the storm surge time
# series' for all the model runs
# surge_time_series()
# Make a .csv file with storm surge data
# for Tropical Storm Joaquin and Hurricane Florence
# joaquin_and_florence()
# Move the netCDF output files into the Data folder
# and rename them for the run name. Move the .csv
# files with the morphometrics from the runs too
# move_csv_output()
# move_netcdf_output()
# Move the Bogue Banks field data morphometrics
# from 2017 and 2018 into the data folder
move_field_data()
if __name__ == '__main__':
main()
| 36.80315
| 118
| 0.597276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,984
| 0.498074
|
a15ae079911483a5e3b82012f76254443eb7a059
| 339
|
py
|
Python
|
counter-test-applications/lr100000/linear-regr-100k.py
|
EsperLiu/vPython
|
f1005f011d6d9fd079cf72e8f78bab6d95a9f993
|
[
"0BSD"
] | 1
|
2021-11-21T03:31:32.000Z
|
2021-11-21T03:31:32.000Z
|
counter-test-applications/lr100000/linear-regr-100k.py
|
EsperLiu/vPython
|
f1005f011d6d9fd079cf72e8f78bab6d95a9f993
|
[
"0BSD"
] | null | null | null |
counter-test-applications/lr100000/linear-regr-100k.py
|
EsperLiu/vPython
|
f1005f011d6d9fd079cf72e8f78bab6d95a9f993
|
[
"0BSD"
] | 1
|
2021-11-28T05:57:55.000Z
|
2021-11-28T05:57:55.000Z
|
def end_of_import():
return 0
def end_of_init():
return 0
def end_of_computing():
return 0
import numpy as np
from sklearn.linear_model import LinearRegression
end_of_import()
X = np.array(range(0,100000)).reshape(-1, 1)
# y = 2x + 3
y = np.dot(X, 2) + 3
end_of_init()
reg = LinearRegression().fit(X, y)
end_of_computing()
| 16.95
| 49
| 0.696165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0.035398
|
a15b3e54d6303597b66c9ac9aa7e5fefcc34013d
| 262
|
py
|
Python
|
python/bitcoin/ch04/04_08.py
|
gangserver/py_test
|
869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4
|
[
"Apache-2.0"
] | null | null | null |
python/bitcoin/ch04/04_08.py
|
gangserver/py_test
|
869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4
|
[
"Apache-2.0"
] | null | null | null |
python/bitcoin/ch04/04_08.py
|
gangserver/py_test
|
869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4
|
[
"Apache-2.0"
] | null | null | null |
import requests
url = "https://api.korbit.co.kr/v1/ticker/detailed?currency_pair=btc_krw"
r = requests.get(url)
bitcoin = r.json()
print(bitcoin)
print(type(bitcoin))
print(bitcoin['last'])
print(bitcoin['bid'])
print(bitcoin['ask'])
print(bitcoin['volume'])
| 18.714286
| 73
| 0.725191
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 91
| 0.347328
|
a15b9e2b4f9954059a9f62e3b0c43fda6866814f
| 3,938
|
py
|
Python
|
jackselect/indicator.py
|
SpotlightKid/jack-select
|
acb6cfa5a48846fa7640373d4976d4df1ab0bbd7
|
[
"MIT"
] | 12
|
2016-03-30T18:32:35.000Z
|
2022-01-18T21:12:51.000Z
|
jackselect/indicator.py
|
SpotlightKid/jack-select
|
acb6cfa5a48846fa7640373d4976d4df1ab0bbd7
|
[
"MIT"
] | 8
|
2018-09-03T15:26:51.000Z
|
2020-04-20T14:44:00.000Z
|
jackselect/indicator.py
|
SpotlightKid/jack-select
|
acb6cfa5a48846fa7640373d4976d4df1ab0bbd7
|
[
"MIT"
] | null | null | null |
"""A convenience class for a GTK 3 system tray indicator."""
from pkg_resources import resource_filename
import gi
gi.require_version('Gtk', '3.0') # noqa
from gi.repository import Gtk
from gi.repository.GdkPixbuf import Pixbuf
class Indicator:
"""This class defines a standard GTK3 system tray indicator.
Class Indicator can be easily reused in any other project.
"""
def __init__(self, icon, title=None):
"""Create indicator icon and add menu.
Args:
icon (str): path to initial icon that will be shown on system panel
"""
self._icon_cache = {}
self.icon = Gtk.StatusIcon.new_from_pixbuf(self._get_icon(icon))
self.menu = Gtk.Menu()
self.icon.connect('activate', self.on_popup_menu_open)
self.icon.connect('popup-menu', self.on_popup_menu_open)
if title:
self.icon.set_title(title)
def _get_icon(self, icon):
"""Return icon from package as GdkPixbuf.Pixbuf.
Extracts the image from package to a file, stores it in the icon cache
if it's not in there yet and returns it. Otherwise just returns the
image stored in the cache.
"""
if icon not in self._icon_cache:
filename = resource_filename(__name__, "images/%s" % icon)
self._icon_cache[icon] = Pixbuf.new_from_file(filename)
return self._icon_cache[icon]
def set_icon(self, icon):
"""Set new icon in system tray.
Args:
icon (str): path to file with new icon
"""
self.icon.set_from_pixbuf(self._get_icon(icon))
def set_tooltip(self, callback):
self.icon.set_has_tooltip(True)
self.icon.connect("query-tooltip", callback)
def clear_menu(self):
"""Clear all entries from the main menu."""
self.menu = Gtk.Menu()
    def add_menu_item(self, command=None, title=None, icon=None, enabled=True, is_check=False,
                      active=False, menu=None, data=None):
        """Add mouse right click menu item.
        Args:
            command (callable): function that will be called after left mouse
                click on title
            title (str): label that will be shown in menu
            icon (str): name of icon stored in application package
            enabled (bool): whether the menu entry is sensitive (default: True)
            is_check (bool): create a check menu item instead of a plain one
            active (bool): initial state of a check menu item (default: False)
            menu (Gtk.Menu): submenu to append the item to instead of the main menu
            data (obj): arbitrary data to associate with the menu entry
        """
if icon:
m_item = Gtk.ImageMenuItem(title)
image = Gtk.Image.new_from_pixbuf(self._get_icon(icon))
m_item.set_image(image)
elif is_check:
m_item = Gtk.CheckMenuItem(title)
m_item.set_active(active)
else:
m_item = Gtk.MenuItem(title)
if command:
m_item.connect('toggled' if is_check else 'activate', command)
m_item.set_sensitive(enabled)
m_item.data = data
if menu:
menu.append(m_item)
else:
self.menu.append(m_item)
return m_item
def add_submenu(self, title):
"""Add a sub menu popup menu."""
submenu = Gtk.Menu()
m_item = Gtk.MenuItem(title)
m_item.set_submenu(submenu)
self.menu.append(m_item)
return submenu
def add_separator(self):
"""Add separator between labels in the popup menu."""
m_item = Gtk.SeparatorMenuItem()
self.menu.append(m_item)
def on_popup_menu_open(self, widget=None, button=None, *args):
"""Some action requested opening the popup menu."""
self.menu.popup(None, None, Gtk.StatusIcon.position_menu,
widget or self.icon, button or 1,
Gtk.get_current_event_time())
def on_popup_menu_close(self, widget=None, button=None, *args):
"""Some action requested closing the popup menu."""
self.menu.popdown()
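# A minimal usage sketch (not part of this module) showing how the class above could be
# reused in another project. 'example.png' is a hypothetical icon name that would have to
# exist in this package's images/ directory, and the quit handler is only illustrative.
if __name__ == '__main__':
    def _quit(menu_item):
        Gtk.main_quit()
    indicator = Indicator('example.png', title='Demo indicator')
    indicator.add_menu_item(title='Status: stopped', enabled=False)
    indicator.add_separator()
    indicator.add_menu_item(command=_quit, title='Quit')
    Gtk.main()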
| 32.278689
| 94
| 0.61935
| 3,704
| 0.940579
| 0
| 0
| 0
| 0
| 0
| 0
| 1,446
| 0.367191
|
a15c583b91868493579d97f1c0cb3471ef7cba0e
| 442
|
py
|
Python
|
myaxf/migrations/0011_minebtns_is_used.py
|
Pyrans/test1806
|
1afc62e09bbebf74521b4b6fdafde8eeaa260ed9
|
[
"Apache-2.0"
] | null | null | null |
myaxf/migrations/0011_minebtns_is_used.py
|
Pyrans/test1806
|
1afc62e09bbebf74521b4b6fdafde8eeaa260ed9
|
[
"Apache-2.0"
] | null | null | null |
myaxf/migrations/0011_minebtns_is_used.py
|
Pyrans/test1806
|
1afc62e09bbebf74521b4b6fdafde8eeaa260ed9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-11-06 01:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myaxf', '0010_minebtns'),
]
operations = [
migrations.AddField(
model_name='minebtns',
name='is_used',
field=models.BooleanField(default=True),
),
]
| 21.047619
| 52
| 0.608597
| 284
| 0.642534
| 0
| 0
| 0
| 0
| 0
| 0
| 112
| 0.253394
|
a15d304cf1b066b2781b604c9736d8b3d3f4ed26
| 3,342
|
py
|
Python
|
components/PyTorch/pytorch-kfp-components/setup.py
|
nostro-im/pipelines
|
39f5b6b74040abbf4b764cbd5b422d7548723d9e
|
[
"Apache-2.0"
] | 2,860
|
2018-05-24T04:55:01.000Z
|
2022-03-31T13:49:56.000Z
|
components/PyTorch/pytorch-kfp-components/setup.py
|
nostro-im/pipelines
|
39f5b6b74040abbf4b764cbd5b422d7548723d9e
|
[
"Apache-2.0"
] | 7,331
|
2018-05-16T09:03:26.000Z
|
2022-03-31T23:22:04.000Z
|
components/PyTorch/pytorch-kfp-components/setup.py
|
nostro-im/pipelines
|
39f5b6b74040abbf4b764cbd5b422d7548723d9e
|
[
"Apache-2.0"
] | 1,359
|
2018-05-15T11:05:41.000Z
|
2022-03-31T09:42:09.000Z
|
#!/usr/bin/env/python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script."""
import importlib
import os
import types
from setuptools import setup, find_packages
def make_required_install_packages():
return [
"pytorch-lightning>=1.4.0",
"torch>=1.7.1",
"torch-model-archiver",
]
def make_required_test_packages():
return make_required_install_packages() + [
"mock>=4.0.0",
"flake8>=3.0.0",
"pylint",
"pytest>=6.0.0",
"wget",
"pandas",
"minio"
]
def make_dependency_links():
return []
def detect_version(base_path):
loader = importlib.machinery.SourceFileLoader(
fullname="version",
path=os.path.join(base_path,
"pytorch_kfp_components/__init__.py"),
)
version = types.ModuleType(loader.name)
loader.exec_module(version)
return version.__version__
if __name__ == "__main__":
relative_directory = os.path.relpath(
os.path.dirname(os.path.abspath(__file__)))
version = detect_version(relative_directory)
setup(
name="pytorch-kfp-components",
version=version,
description="PyTorch Kubeflow Pipeline",
url="https://github.com/kubeflow/pipelines/tree/master/components/PyTorch/pytorch-kfp-components/",
author="The PyTorch Kubeflow Pipeline Components authors",
author_email="pytorch-kfp-components@fb.com",
license="Apache License 2.0",
        extras_require={"tests": make_required_test_packages()},
include_package_data=True,
python_requires=">=3.6",
install_requires=make_required_install_packages(),
dependency_links=make_dependency_links(),
keywords=[
"Kubeflow Pipelines",
"KFP",
"ML workflow",
"PyTorch",
],
classifiers=[
"Development Status :: 4 - Beta",
"Operating System :: Unix",
"Operating System :: MacOS",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
package_dir={
"pytorch_kfp_components":
os.path.join(relative_directory, "pytorch_kfp_components")
},
packages=find_packages(where=relative_directory),
)
| 31.528302
| 107
| 0.635548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,684
| 0.50389
|
a15d6cd6a92c370d9583f2a5012f9737df67a02a
| 10,453
|
py
|
Python
|
generate_pipelines.py
|
phorne-uncharted/d3m-primitives
|
77d900b9dd6ab4b2b330f4e969dabcdc419c73e1
|
[
"MIT"
] | null | null | null |
generate_pipelines.py
|
phorne-uncharted/d3m-primitives
|
77d900b9dd6ab4b2b330f4e969dabcdc419c73e1
|
[
"MIT"
] | null | null | null |
generate_pipelines.py
|
phorne-uncharted/d3m-primitives
|
77d900b9dd6ab4b2b330f4e969dabcdc419c73e1
|
[
"MIT"
] | null | null | null |
"""
Utility to generate all submission pipelines for all primitives.
This script assumes that `generate_annotations.py` has already been run.
"""
import os
import subprocess
import shutil
import fire
from kf_d3m_primitives.data_preprocessing.data_cleaning.data_cleaning_pipeline import DataCleaningPipeline
from kf_d3m_primitives.data_preprocessing.text_summarization.duke_pipeline import DukePipeline
from kf_d3m_primitives.data_preprocessing.geocoding_forward.goat_forward_pipeline import GoatForwardPipeline
from kf_d3m_primitives.data_preprocessing.geocoding_reverse.goat_reverse_pipeline import GoatReversePipeline
from kf_d3m_primitives.data_preprocessing.data_typing.simon_pipeline import SimonPipeline
from kf_d3m_primitives.clustering.spectral_clustering.spectral_clustering_pipeline import SpectralClusteringPipeline
from kf_d3m_primitives.clustering.k_means.storc_pipeline import StorcPipeline
from kf_d3m_primitives.clustering.hdbscan.hdbscan_pipeline import HdbscanPipeline
from kf_d3m_primitives.dimensionality_reduction.tsne.tsne_pipeline import TsnePipeline
from kf_d3m_primitives.feature_selection.pca_features.pca_features_pipeline import PcaFeaturesPipeline
from kf_d3m_primitives.feature_selection.rf_features.rf_features_pipeline import RfFeaturesPipeline
from kf_d3m_primitives.natural_language_processing.sent2vec.sent2vec_pipeline import Sent2VecPipeline
from kf_d3m_primitives.object_detection.retinanet.object_detection_retinanet_pipeline import ObjectDetectionRNPipeline
from kf_d3m_primitives.image_classification.imagenet_transfer_learning.gator_pipeline import GatorPipeline
from kf_d3m_primitives.ts_classification.knn.kanine_pipeline import KaninePipeline
from kf_d3m_primitives.ts_classification.lstm_fcn.lstm_fcn_pipeline import LstmFcnPipeline
from kf_d3m_primitives.ts_forecasting.vector_autoregression.var_pipeline import VarPipeline
from kf_d3m_primitives.ts_forecasting.deep_ar.deepar_pipeline import DeepARPipeline
from kf_d3m_primitives.ts_forecasting.nbeats.nbeats_pipeline import NBEATSPipeline
from kf_d3m_primitives.remote_sensing.classifier.mlp_classifier_pipeline import MlpClassifierPipeline
def generate_pipelines(gpu = False):
gpu_prims = [
"d3m.primitives.classification.inceptionV3_image_feature.Gator",
"d3m.primitives.object_detection.retina_net.ObjectDetectionRN",
"d3m.primitives.time_series_classification.convolutional_neural_net.LSTM_FCN",
"d3m.primitives.feature_extraction.nk_sent2vec.Sent2Vec",
"d3m.primitives.remote_sensing.mlp.MlpClassifier"
]
prims_to_pipelines = {
"d3m.primitives.data_cleaning.column_type_profiler.Simon": [
(SimonPipeline(), ('185_baseball_MIN_METADATA',))
],
"d3m.primitives.data_cleaning.geocoding.Goat_forward": [
(GoatForwardPipeline(), ('LL0_acled_reduced_MIN_METADATA',))
],
"d3m.primitives.data_cleaning.geocoding.Goat_reverse": [
(GoatReversePipeline(), ('LL0_acled_reduced_MIN_METADATA',))
],
"d3m.primitives.feature_extraction.nk_sent2vec.Sent2Vec": [
(Sent2VecPipeline(), ('LL1_TXT_CLS_apple_products_sentiment_MIN_METADATA',))
],
"d3m.primitives.clustering.k_means.Sloth": [
(StorcPipeline(), ('66_chlorineConcentration_MIN_METADATA',))
],
"d3m.primitives.clustering.hdbscan.Hdbscan": [
(HdbscanPipeline(), ('SEMI_1044_eye_movements_MIN_METADATA',))
],
"d3m.primitives.clustering.spectral_graph.SpectralClustering": [
(SpectralClusteringPipeline(), ('SEMI_1044_eye_movements_MIN_METADATA',))
],
"d3m.primitives.dimensionality_reduction.t_distributed_stochastic_neighbor_embedding.Tsne": [
(TsnePipeline(), ('SEMI_1044_eye_movements_MIN_METADATA',))
],
"d3m.primitives.time_series_classification.k_neighbors.Kanine": [
(KaninePipeline(), ('66_chlorineConcentration_MIN_METADATA',))
],
"d3m.primitives.time_series_classification.convolutional_neural_net.LSTM_FCN": [
(LstmFcnPipeline(), (
'66_chlorineConcentration_MIN_METADATA',
"LL1_Adiac_MIN_METADATA",
"LL1_ArrowHead_MIN_METADATA",
"LL1_Cricket_Y_MIN_METADATA",
"LL1_ECG200_MIN_METADATA",
"LL1_ElectricDevices_MIN_METADATA",
"LL1_FISH_MIN_METADATA",
"LL1_FaceFour_MIN_METADATA",
"LL1_HandOutlines_MIN_METADATA",
"LL1_Haptics_MIN_METADATA",
"LL1_ItalyPowerDemand_MIN_METADATA",
"LL1_Meat_MIN_METADATA",
"LL1_OSULeaf_MIN_METADATA",
)),
(LstmFcnPipeline(attention_lstm=True), (
'66_chlorineConcentration_MIN_METADATA',
"LL1_Adiac_MIN_METADATA",
"LL1_ArrowHead_MIN_METADATA",
"LL1_Cricket_Y_MIN_METADATA",
"LL1_ECG200_MIN_METADATA",
"LL1_ElectricDevices_MIN_METADATA",
"LL1_FISH_MIN_METADATA",
"LL1_FaceFour_MIN_METADATA",
"LL1_HandOutlines_MIN_METADATA",
"LL1_Haptics_MIN_METADATA",
"LL1_ItalyPowerDemand_MIN_METADATA",
"LL1_Meat_MIN_METADATA",
"LL1_OSULeaf_MIN_METADATA",
))
],
"d3m.primitives.time_series_forecasting.vector_autoregression.VAR": [
(VarPipeline(), (
'56_sunspots_MIN_METADATA',
'56_sunspots_monthly_MIN_METADATA',
'LL1_736_population_spawn_MIN_METADATA',
'LL1_736_stock_market_MIN_METADATA',
'LL1_terra_canopy_height_long_form_s4_100_MIN_METADATA',
"LL1_terra_canopy_height_long_form_s4_90_MIN_METADATA",
"LL1_terra_canopy_height_long_form_s4_80_MIN_METADATA",
"LL1_terra_canopy_height_long_form_s4_70_MIN_METADATA",
'LL1_terra_leaf_angle_mean_long_form_s4_MIN_METADATA',
'LL1_PHEM_Monthly_Malnutrition_MIN_METADATA',
'LL1_PHEM_weeklyData_malnutrition_MIN_METADATA',
))
],
"d3m.primitives.time_series_forecasting.lstm.DeepAR": [
(DeepARPipeline(prediction_length = 21, context_length = 21), ('56_sunspots_MIN_METADATA',)),
(DeepARPipeline(prediction_length = 38, context_length = 38), ('56_sunspots_monthly_MIN_METADATA',)),
(DeepARPipeline(prediction_length = 60, context_length = 30), ('LL1_736_population_spawn_MIN_METADATA',)),
(DeepARPipeline(prediction_length = 34, context_length = 17), ('LL1_736_stock_market_MIN_METADATA',)),
],
"d3m.primitives.time_series_forecasting.feed_forward_neural_net.NBEATS": [
(NBEATSPipeline(prediction_length = 21), ('56_sunspots_MIN_METADATA',)),
(NBEATSPipeline(prediction_length = 38), ('56_sunspots_monthly_MIN_METADATA',)),
(NBEATSPipeline(prediction_length = 60), ('LL1_736_population_spawn_MIN_METADATA',)),
(NBEATSPipeline(prediction_length = 34), ('LL1_736_stock_market_MIN_METADATA',)),
],
"d3m.primitives.object_detection.retina_net.ObjectDetectionRN": [
(ObjectDetectionRNPipeline(), (
'LL1_tidy_terra_panicle_detection_MIN_METADATA',
'LL1_penn_fudan_pedestrian_MIN_METADATA'
))
],
"d3m.primitives.data_cleaning.data_cleaning.Datacleaning": [
(DataCleaningPipeline(), ('185_baseball_MIN_METADATA',))
],
"d3m.primitives.data_cleaning.text_summarization.Duke": [
(DukePipeline(), ('185_baseball_MIN_METADATA',))
],
"d3m.primitives.feature_selection.pca_features.Pcafeatures": [
(PcaFeaturesPipeline(), ('185_baseball_MIN_METADATA',))
],
"d3m.primitives.feature_selection.rffeatures.Rffeatures": [
(RfFeaturesPipeline(), ('185_baseball_MIN_METADATA',))
],
"d3m.primitives.classification.inceptionV3_image_feature.Gator": [
(GatorPipeline(), (
"124_174_cifar10_MIN_METADATA",
"124_188_usps_MIN_METADATA",
"124_214_coil20_MIN_METADATA",
"uu_101_object_categories_MIN_METADATA",
))
],
"d3m.primitives.remote_sensing.mlp.MlpClassifier": [
(MlpClassifierPipeline(), ('LL1_bigearth_landuse_detection',))
]
}
for primitive, pipelines in prims_to_pipelines.items():
if gpu:
if primitive not in gpu_prims:
continue
else:
if primitive in gpu_prims:
continue
os.chdir(f'/annotations/{primitive}')
os.chdir(os.listdir('.')[0])
if not os.path.isdir('pipelines'):
os.mkdir('pipelines')
else:
[os.remove(f'pipelines/{pipeline}') for pipeline in os.listdir('pipelines')]
if not os.path.isdir('pipeline_runs'):
os.mkdir('pipeline_runs')
else:
[os.remove(f'pipeline_runs/{pipeline_run}') for pipeline_run in os.listdir('pipeline_runs')]
if not os.path.isdir(f'/pipeline_scores/{primitive.split(".")[-1]}'):
os.mkdir(f'/pipeline_scores/{primitive.split(".")[-1]}')
for pipeline, datasets in pipelines:
pipeline.write_pipeline(output_dir = './pipelines')
for dataset in datasets:
print(f'Generating pipeline for {primitive.split(".")[-1]} on {dataset}')
if primitive.split(".")[-1] in ['Duke', 'Sloth']:
pipeline.fit_produce(
dataset,
output_yml_dir = './pipeline_runs',
submission = True
)
else:
if primitive.split(".")[-1] == 'NBEATS':
shutil.rmtree(f'/scratch_dir/nbeats')
pipeline.fit_score(
dataset,
output_yml_dir = './pipeline_runs',
output_score_dir = f'/pipeline_scores/{primitive.split(".")[-1]}',
submission = True
)
os.system('gzip -r pipeline_runs')
if __name__ == '__main__':
fire.Fire(generate_pipelines)
| 50.990244
| 118
| 0.672534
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,322
| 0.41347
|
a16015f7fdd109191a18e2ce3c5cc5cd31b338c6
| 210
|
py
|
Python
|
gorynych/ontologies/gch/edges/basic/__init__.py
|
vurmux/gorynych
|
d721e8cdb61f7c7ee6bc4bd31026605df15f2d9d
|
[
"Apache-2.0"
] | null | null | null |
gorynych/ontologies/gch/edges/basic/__init__.py
|
vurmux/gorynych
|
d721e8cdb61f7c7ee6bc4bd31026605df15f2d9d
|
[
"Apache-2.0"
] | null | null | null |
gorynych/ontologies/gch/edges/basic/__init__.py
|
vurmux/gorynych
|
d721e8cdb61f7c7ee6bc4bd31026605df15f2d9d
|
[
"Apache-2.0"
] | null | null | null |
__all__ = [
"aggregation",
"association",
"composition",
"connection",
"containment",
"dependency",
"includes",
"membership",
"ownership",
"responsibility",
"usage"
]
| 16.153846
| 21
| 0.557143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 132
| 0.628571
|
a162116929e58d2ceb5db3d4712dce3ef830f40a
| 3,851
|
py
|
Python
|
square.py
|
chriswilson1982/black-and-white
|
e275e6f534aa51f12f4545730b627ce280aae8c3
|
[
"MIT"
] | null | null | null |
square.py
|
chriswilson1982/black-and-white
|
e275e6f534aa51f12f4545730b627ce280aae8c3
|
[
"MIT"
] | null | null | null |
square.py
|
chriswilson1982/black-and-white
|
e275e6f534aa51f12f4545730b627ce280aae8c3
|
[
"MIT"
] | 2
|
2020-06-05T04:37:08.000Z
|
2020-09-30T06:15:22.000Z
|
# coding: utf-8
"""Square module.
Represents the squares on the game grid.
"""
from scene import *
from common import *
import sound
class Square (SpriteNode):
"""Represents the squares on the game grid.
Main properties are their row and column (used for path calculation) and state (corresponds to color in the game).
"""
def __init__(self, row, col, position, size, state, color):
self.row = row
self.col = col
self.position = position
self.size = size
self.color = color
self.z_position = 0.2
self.state = state
self.last_state = state
self.press = False
self.star = False
if self.state == 1:
self.color = color1
if self.state == 2:
self.color = color2
def set_color(self):
self.color = all_colors[self.state - 1]
# Find neighbouring white squares
def white_neighbours(self, square_list):
white_neighbours = []
for s in square_list:
            if (((s.row == self.row - 1 and s.col == self.col)
                 or (s.row == self.row + 1 and s.col == self.col)
                 or (s.row == self.row and s.col == self.col - 1)
                 or (s.row == self.row and s.col == self.col + 1))
                    and s.state == 2):
white_neighbours.append(s)
return white_neighbours
# Find squares to toggle when square pressed
def toggle_neighbours(self, squares):
for square in squares:
            if (self.row - 1 <= square.row <= self.row + 1
                    and self.col - 1 <= square.col <= self.col + 1
                    and not (square.row == self.row and square.col == self.col)
                    and square.state in (1, 2)):
square.toggle()
# Square pressed
def pressed(self):
# If power-up 3 active
if self.parent.can_flip:
self.toggle()
sound.play_effect(reds_away)
return
# State saved so power-up 2 can unlock
self.last_state = self.state
self.press = True
self.z_position = 0.3
self.run_action(pressed_action_1)
self.state = 0
self.color = color3
# Bonus star destroyed if star square pressed
if self.star:
self.star = False
self.parent.star_square = None
self.star_icon.run_action(
A.sequence(A.scale_to(0, 0.5), A.remove()))
sound.play_effect(star_away_sound)
self.parent.level_label.text = "Goodbye star!"
else:
sound.play_effect(tap_sound)
# Square toggles between black and white
def toggle(self):
# Ignore if square already pressed
if self.state == 0:
return
if self.rotation == 0:
self.run_action(toggle_action_1)
else:
self.run_action(toggle_action_2)
if self.state == 1:
self.state = 2
self.color = color2
elif self.state == 2:
self.state = 1
self.color = color1
if self.star:
self.go_star()
self.scene.sparkle(self.color, self.position, image='shp:RoundRect', spread=square_size, n=2)
# Creates star icon if this square is the randomly selected star square
def go_star(self):
# Remove star icon first, if it exists
try:
self.star_icon.run_action(A.remove())
        except AttributeError:
            # no star icon has been created yet
            pass
self.star = True
# Star icon depends on square color
if self.state == 1:
tex = Texture('typw:Star')
elif self.state == 2:
tex = Texture('typb:Star')
self.star_icon = SpriteNode(
texture=tex, position=self.position, size=(square_size - 5, square_size - 5))
self.star_icon.z_position = 0.6
self.parent.add_child(self.star_icon)
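# Framework-free sketch of the 4-neighbourhood test used by white_neighbours
# above (illustrative only: it mirrors the same logic on plain ints so it runs
# without the `scene` module). A square is a white neighbour when it sits
# directly above, below, left or right and its state is 2 (white).
def _is_white_neighbour(row, col, other_row, other_col, other_state):
    adjacent = abs(row - other_row) + abs(col - other_col) == 1
    return adjacent and other_state == 2
assert _is_white_neighbour(2, 2, 1, 2, 2) is True    # directly above, white
assert _is_white_neighbour(2, 2, 3, 3, 2) is False   # diagonal, not adjacent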
| 33.780702
| 243
| 0.578811
| 3,714
| 0.964425
| 0
| 0
| 0
| 0
| 0
| 0
| 717
| 0.186185
|
a1635f7424a1cd00dce9eb1d4e2acface083e3bd
| 1,128
|
py
|
Python
|
coocurrence_loader.py
|
miselico/KGlove
|
2bcbce3d14ed5173a319d80bfff95be6486b41e2
|
[
"MIT"
] | 2
|
2021-11-05T09:27:57.000Z
|
2022-02-25T12:33:14.000Z
|
coocurrence_loader.py
|
miselico/KGlove
|
2bcbce3d14ed5173a319d80bfff95be6486b41e2
|
[
"MIT"
] | null | null | null |
coocurrence_loader.py
|
miselico/KGlove
|
2bcbce3d14ed5173a319d80bfff95be6486b41e2
|
[
"MIT"
] | 1
|
2022-02-25T12:37:47.000Z
|
2022-02-25T12:37:47.000Z
|
import pathlib
from struct import unpack
from typing import BinaryIO, List, Optional, Tuple, cast
import numpy as np
import scipy.sparse
def _read_little_endian_crec(file: BinaryIO
) -> Optional[Tuple[int, int, float]]:
le_int = file.read(16)
# https://docs.python.org/3/library/struct.html#format-strings
if len(le_int) == 0:
return None
crec = cast(Tuple[int, int, float], unpack('<iid', le_int))
return crec
def load(cooccurrence_file_content: BinaryIO) -> scipy.sparse.coo_matrix:
row: List[int] = []
column: List[int] = []
data: List[float] = []
    while True:  # readable() stays True for an open stream; stop at EOF via the break below
crec = _read_little_endian_crec(cooccurrence_file_content)
if crec is None:
break
row.append(crec[0])
column.append(crec[1])
data.append(crec[2])
result = scipy.sparse.coo_matrix((data, (row, column)), dtype=np.float64)
return result
if __name__ == "__main__":
p = pathlib.Path("output/cooccurrence_file.bin")
with open(p, 'rb') as file:
m = load(file)
print(m.tocsc())
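# Illustrative round-trip check (not part of the original module): pack two
# little-endian (int32, int32, float64) co-occurrence records in memory and
# load them back. The word ids and counts below are arbitrary examples.
def _demo_roundtrip() -> None:
    import io
    from struct import pack
    raw = pack('<iid', 1, 2, 3.5) + pack('<iid', 3, 4, 0.25)
    matrix = load(io.BytesIO(raw))
    assert matrix.shape == (4, 5)  # inferred from the largest row/column indices
    csr = matrix.tocsr()
    assert csr[1, 2] == 3.5 and csr[3, 4] == 0.25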
| 28.2
| 77
| 0.639184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 112
| 0.099291
|
a163e601ea9b0587f0a7996da2ea54d7b047cc87
| 597
|
py
|
Python
|
api_app/migrations/0001_initial.py
|
DurkinDevelopment/coinbase_api
|
0cea72234d481d09ff906f7bc064cfe16111c785
|
[
"MIT"
] | null | null | null |
api_app/migrations/0001_initial.py
|
DurkinDevelopment/coinbase_api
|
0cea72234d481d09ff906f7bc064cfe16111c785
|
[
"MIT"
] | null | null | null |
api_app/migrations/0001_initial.py
|
DurkinDevelopment/coinbase_api
|
0cea72234d481d09ff906f7bc064cfe16111c785
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.12 on 2022-02-15 02:57
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='SpotPrice',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('currency', models.CharField(max_length=200)),
('amount', models.FloatField()),
('timestamp', models.DateField()),
],
),
]
| 24.875
| 117
| 0.562814
| 503
| 0.842546
| 0
| 0
| 0
| 0
| 0
| 0
| 96
| 0.160804
|
a163f9dace925925161f417c4fc2f6f13d99f9d2
| 924
|
py
|
Python
|
Kalender/views.py
|
RamonvdW/nhb-apps
|
5a9f840bfe066cd964174515c06b806a7b170c69
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-12-22T13:11:12.000Z
|
2021-12-22T13:11:12.000Z
|
Kalender/views.py
|
RamonvdW/nhb-apps
|
5a9f840bfe066cd964174515c06b806a7b170c69
|
[
"BSD-3-Clause-Clear"
] | 9
|
2020-10-28T07:07:05.000Z
|
2021-06-28T20:05:37.000Z
|
Kalender/views.py
|
RamonvdW/nhb-apps
|
5a9f840bfe066cd964174515c06b806a7b170c69
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Ramon van der Winkel.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
from django.views.generic import View
from django.urls import reverse
from django.http import HttpResponseRedirect
from Functie.rol import Rollen, rol_get_huidige
from .view_maand import get_url_huidige_maand
class KalenderLandingPageView(View):
""" Deze pagina is puur voor het doorsturen naar een van de andere pagina's
afhankelijk van de gekozen rol.
"""
@staticmethod
def get(request, *args, **kwargs):
rol_nu = rol_get_huidige(request)
if rol_nu == Rollen.ROL_BB:
url = reverse('Kalender:manager')
elif rol_nu == Rollen.ROL_HWL:
url = reverse('Kalender:vereniging')
else:
url = get_url_huidige_maand()
return HttpResponseRedirect(url)
# end of file
| 26.4
| 79
| 0.683983
| 534
| 0.577922
| 0
| 0
| 365
| 0.395022
| 0
| 0
| 331
| 0.358225
|
a166142b9f7a87deb268c549d8183c79b3298038
| 9,511
|
py
|
Python
|
profile.py
|
giswqs/Depression-filling-1D
|
3c0ed86bbbe6f0b8573212a3efd59375dc7be45e
|
[
"MIT"
] | 1
|
2022-02-27T14:40:00.000Z
|
2022-02-27T14:40:00.000Z
|
profile.py
|
giswqs/Depression-filling-1D
|
3c0ed86bbbe6f0b8573212a3efd59375dc7be45e
|
[
"MIT"
] | null | null | null |
profile.py
|
giswqs/Depression-filling-1D
|
3c0ed86bbbe6f0b8573212a3efd59375dc7be45e
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
# class for depression
class Depression:
def __init__(self, id, width, depth, area, pour_elev, min_elev, points, internal_pts):
self.id = id
self.width = width
self.depth = depth
self.area = area
self.pour_elev = pour_elev
self.min_elev = min_elev
self.points = points
self.internal_pts = internal_pts
# read profile values from CSV
def read_csv(in_csv, header = True, col_index = 1):
with open(in_csv) as f:
lines = f.readlines()
if header:
lines = lines[1:]
values = []
for line in lines:
line = line.strip()
value = line.split(",")[col_index - 1]
values.append(float(value))
return values
def write_csv(in_csv, out_csv, col_name, in_values):
with open(in_csv) as f:
lines = f.readlines()
header = lines[0].strip() + "," + col_name + '\n'
lines.pop(0)
out_lines = []
for index, line in enumerate(lines):
line = line.strip()
line = line + ',' + str(in_values[index]) +'\n'
out_lines.append(line)
with open(out_csv, 'w') as ff:
ff.write(header)
ff.writelines(out_lines)
# check the depression type of a point based on its neighbors
def check_dep_type(value_list, index):
if index == 0:
if value_list[0] <= value_list[1]:
return 'ascending'
else:
return 'descending'
elif index == (len(value_list) - 1):
if value_list[len(value_list) - 2] >= value_list[len(value_list) - 1]:
return 'descending'
else:
return 'ascending'
else:
if (value_list[index] == value_list[index - 1]) and (value_list[index]) == value_list[index + 1]:
return 'flat'
elif (value_list[index] <= value_list[index - 1]) and (value_list[index]) <= value_list[index + 1]:
return 'depression'
elif (value_list[index] > value_list[index - 1]) and (value_list[index] < value_list[index + 1]):
return 'ascending'
elif (value_list[index] > value_list[index + 1]) and (value_list[index] < value_list[index - 1]):
return 'descending'
else:
return 'unknown'
# find forward ascending neighbors
def find_ascending(value_list, index):
ascending_loc = []
cursor = index
while (cursor < (len(value_list) - 1 )) and (value_list[cursor] < value_list[cursor + 1]):
ascending_loc.append(cursor)
cursor = cursor + 1
ascending_loc.append(cursor)
# print(ascending_loc)
return set(ascending_loc)
# find forward descending neighbors
def find_descending(value_list, index):
descending_loc = []
cursor = index
while (cursor < (len(value_list) - 1 )) and (value_list[cursor] > value_list[cursor + 1]):
descending_loc.append(cursor)
cursor = cursor + 1
descending_loc.append(cursor)
return set(descending_loc)
# find backward descending neighbors
def find_descending_backward(value_list, index):
descending_loc = []
cursor = index
while (cursor > 0) and (value_list[cursor] < value_list[cursor - 1]):
descending_loc.append(cursor)
cursor = cursor - 1
descending_loc.append(cursor)
return set(descending_loc[::-1])
# find all points associated with a depression based on one point
def find_single_depression(value_list, index):
dep_loc = []
ascending_loc = list(find_ascending(value_list, index))
# ascending_loc = ascending_loc.sort()
# print(ascending_loc.sort())
descending_loc = list(find_descending_backward(value_list, index))
dep_loc = descending_loc + ascending_loc
return set(dep_loc)
# remove acending edge and descending edge
def process_edges(value_list):
size = len(value_list)
pts_list = set(range(size))
left_edge = find_ascending(value_list, 0)
if len(left_edge) > 0:
for item in left_edge:
pts_list.remove(item)
right_edge = find_descending_backward(value_list, size - 1)
if len(right_edge) > 0:
for item in right_edge:
pts_list.remove(item)
return pts_list
# get depression width, depth, and area
def get_width_depth_area(value_list, pts_set):
min_index = min(pts_set)
max_index = max(pts_set)
left_elev = value_list[min_index]
right_elev = value_list[max_index]
pts_list = list(pts_set)
pts_arr = np.array(value_list)[pts_list]
min_value = np.min(pts_arr)
pour_value = min([left_elev, right_elev])
depth = pour_value - min_value
new_pts_arr = pts_arr[pts_arr <= pour_value]
width = new_pts_arr.size
area = pour_value * new_pts_arr.size - np.sum(new_pts_arr)
new_pts_set = pts_set.copy()
for item in pts_set:
if value_list[item] > pour_value:
new_pts_set.remove(item)
return width, depth, area, pour_value, new_pts_set
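# Tiny worked example for the two helpers above (illustrative only; the
# elevation profile is made up). In [3, 1, 2, 1, 4] the point at index 1 lies
# in a depression spanning indices 0..2 with pour elevation 2, so the filled
# part has width 2, depth 1 and area 1, and only indices 1 and 2 sit at or
# below the pour elevation.
_demo_profile = [3, 1, 2, 1, 4]
assert find_single_depression(_demo_profile, 1) == {0, 1, 2}
_w, _d, _a, _pour, _pts = get_width_depth_area(_demo_profile, {0, 1, 2})
assert _w == 2 and _d == 1 and _a == 1 and _pour == 2 and _pts == {1, 2}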
# find all depressions recursively
def find_depressions(in_values, in_width = 0, in_depth = 0, in_area = 0, dep_list = None):
    # use None instead of a mutable default argument so repeated calls do not share one list
    if dep_list is None:
        dep_list = []
size = len(in_values)
global_set = process_edges(in_values)
num_arr = np.array(in_values)
# dep_list = []
while len(global_set) > 0:
# print("Remaining: {}".format(len(global_set)))
# if len(global_set) == 2:
# print(global_set)
tmp_arr = num_arr[list(global_set)]
min_value = np.min(tmp_arr)
min_candidates = list(np.where(num_arr == min_value)[0])
min_index = min_candidates[0]
if len(min_candidates) > 1:
for item in min_candidates:
items = [item - 1, item, item + 1]
con = all(elem in items for elem in global_set)
if con :
min_index = item
break
else:
ascending_loc_tmp = find_ascending(in_values, item)
descending_loc_tmp = find_descending_backward(in_values, item)
dep_loc_tmp = list(ascending_loc_tmp) + list(descending_loc_tmp)
max_value = np.max(num_arr[dep_loc_tmp])
num_arr[dep_loc_tmp] = max_value
for item in dep_loc_tmp:
if item in global_set:
global_set.remove(item)
min_index = -1
elif len(global_set) < 3:
global_set_tmp = global_set.copy()
for item in global_set_tmp:
global_set.remove(item)
min_index = -1
if min_index != -1:
dep_index = find_single_depression(list(num_arr), min_index)
# print(dep_index)
width, depth, area, pour_elev, dep_tmp_set = get_width_depth_area(in_values, dep_index)
# print(dep_tmp_set)
# if len(dep_tmp_set) == 1:
# print('stop')
# print(pour_elev)
if (width >= in_width) and (depth >= in_depth) and (area > in_area):
print("************************************")
print("depression loc: {}".format(dep_index))
print("min candidates: {}".format(min_candidates))
# print("Pour elevation: {}".format(pour_elev))
print("width = {}; depth = {}; area = {}; pour = {}".format(width, round(depth, 2), round(area, 2), pour_elev))
# dep_list.append(dep_index)
id = len(dep_list) + 1
dep_list.append(Depression(id, width, round(depth, 4), round(area, 4), pour_elev, min_value, dep_index, dep_tmp_set))
for item in dep_index:
if item in global_set:
global_set.remove(item)
# elif len(dep_tmp_set) == 1:
# continue
else:
# print(dep_tmp_set)
for item in dep_tmp_set:
num_arr[item] = pour_elev
global_set.add(item)
# for dep in dep_list:
# print(dep)
print("Number of depressions: {}".format(len(dep_list)))
return dep_list
def fill_depressions(in_values, dep_list):
for dep in dep_list:
points = dep.points
internal_pts = dep.internal_pts
pour_elev = dep.pour_elev
for point in internal_pts:
in_values[point] = pour_elev
return in_values
def get_hierarchy(in_csv, out_dir, width = 0, height = 0, area = 0):
pass
if __name__ == '__main__':
# ************************ change the following parameters if needed ******************************** #
width = 0
height = 0
area = 0
work_dir = os.path.dirname(__file__)
in_csv = os.path.join(work_dir, 'data/profile1.csv')
out_csv = in_csv.replace('.csv', '_level1.csv')
values = read_csv(in_csv, header=True, col_index=4)
size = len(values)
print("Total number of rows: {}".format(size))
dep_type = check_dep_type(values, 557)
# print(dep_type)
dep_pts = find_single_depression(values, index = 1087)
# print(dep_pts)
dep_list = find_depressions(values, in_width = 3, in_depth = 0)
out_values = fill_depressions(values, dep_list)
# print(out_values)
write_csv(in_csv, out_csv, "LEVEL-1", out_values)
# print(get_width_depth_area(values, dep_pts))
# ************************************************************************************************** #
| 33.255245
| 133
| 0.588161
| 346
| 0.036379
| 0
| 0
| 0
| 0
| 0
| 0
| 1,535
| 0.161392
|
a166258a27d4639c261790d1e5d9c74ab19c0e5f
| 4,544
|
py
|
Python
|
data/make_joint_comp_inc_data.py
|
gcunhase/StackedDeBERT
|
82777114fd99cafc6e2a3d760e774f007c563245
|
[
"MIT"
] | 32
|
2020-01-03T09:53:03.000Z
|
2021-09-07T07:23:26.000Z
|
data/make_joint_comp_inc_data.py
|
gcunhase/StackedDeBERT
|
82777114fd99cafc6e2a3d760e774f007c563245
|
[
"MIT"
] | null | null | null |
data/make_joint_comp_inc_data.py
|
gcunhase/StackedDeBERT
|
82777114fd99cafc6e2a3d760e774f007c563245
|
[
"MIT"
] | 6
|
2020-01-21T06:50:21.000Z
|
2021-01-22T08:04:00.000Z
|
import argparse
import os
import csv
import random
from utils import ensure_dir, get_project_path
from collections import defaultdict
# POS-tag for irrelevant tag selection
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
__author__ = "Gwena Cunha"
def write_tsv(intention_dir_path, filename, keys, dict):
file_test = open(intention_dir_path + "/" + filename, 'wt')
dict_writer = csv.writer(file_test, delimiter='\t')
dict_writer.writerow(keys)
r = zip(*dict.values())
for d in r:
dict_writer.writerow(d)
def make_dataset(root_data_dir, complete_data_dir, incomplete_data_dir, results_dir):
"""
:param root_data_dir: directory to save data
:param complete_data_dir: subdirectory with complete data
:param incomplete_data_dir: subdirectory with incomplete data
    :param results_dir: subdirectory to save the joint complete + incomplete data
:return:
"""
print("Making incomplete intention classification dataset...")
complete_data_dir_path = root_data_dir + '/' + complete_data_dir
incomplete_data_dir_path = root_data_dir + '/' + incomplete_data_dir
results_dir_path = root_data_dir + '/' + results_dir
ensure_dir(results_dir_path)
# Traverse all sub-directories
files_dictionary = defaultdict(lambda: [])
for sub_dir in os.walk(complete_data_dir_path):
if len(sub_dir[1]) == 0:
data_name = sub_dir[0].split('/')[-1]
files_dictionary[data_name] = sub_dir[2]
# Open train and test tsv files
for k, v in files_dictionary.items():
save_path = results_dir_path + '/' + k
ensure_dir(save_path)
for comp_v_i, inc_v_i in zip(['test.tsv', 'train.tsv'], ['test_withMissingWords.tsv', 'train_withMissingWords.tsv']):
complete_tsv_file = open(complete_data_dir_path + '/' + k + '/' + comp_v_i, 'r')
incomplete_tsv_file = open(incomplete_data_dir_path + '/' + k + '/' + inc_v_i, 'r')
reader_complete = csv.reader(complete_tsv_file, delimiter='\t')
reader_incomplete = csv.reader(incomplete_tsv_file, delimiter='\t')
sentences, labels, missing_words_arr, targets = [], [], [], []
row_count = 0
for row_comp, row_inc in zip(reader_complete, reader_incomplete):
if row_count != 0:
# Incomplete
sentences.append(row_inc[0])
labels.append(row_inc[1])
missing_words_arr.append(row_inc[2])
targets.append(row_comp[0])
if 'train' in comp_v_i:
# Complete
sentences.append(row_comp[0])
labels.append(row_comp[1])
missing_words_arr.append('')
targets.append(row_comp[0])
row_count += 1
# Shuffle
if 'train' in comp_v_i:
c = list(zip(sentences, labels, missing_words_arr, targets))
random.shuffle(c)
sentences, labels, missing_words_arr, targets = zip(*c)
# Save train, test, val in files in the format (sentence, label)
keys = ['sentence', 'label', 'missing', 'target']
data_dict = {'sentence': sentences, 'label': labels, 'missing': missing_words_arr, 'target': targets}
write_tsv(save_path, comp_v_i, keys, data_dict)
print("Complete + Incomplete intention classification dataset completed")
def init_args():
parser = argparse.ArgumentParser(description="Script to make intention recognition dataset")
parser.add_argument('--root_data_dir', type=str, default=get_project_path() + "/data",
help='Directory to save subdirectories, needs to be an absolute path')
parser.add_argument('--complete_data_dir', type=str, default="complete_data",
help='Subdirectory with complete data')
parser.add_argument('--incomplete_data_dir', type=str, default="incomplete_data_tfidf_lower_0.8_noMissingTag",
help='Subdirectory with incomplete data')
parser.add_argument('--results_dir', type=str, default="comp_with_incomplete_data_tfidf_lower_0.8_noMissingTag",
help='Subdirectory to save Joint Complete and Incomplete data')
return parser.parse_args()
if __name__ == '__main__':
args = init_args()
make_dataset(args.root_data_dir, args.complete_data_dir, args.incomplete_data_dir, args.results_dir)
| 42.867925
| 125
| 0.645026
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,279
| 0.28147
|
a16669ec079300a0633ffd694b38772760885089
| 4,989
|
py
|
Python
|
recipes/models.py
|
JakubKoralewski/django-recipes
|
3794c6a96fb0765e2e3cebfc3968dae88e4f084c
|
[
"MIT"
] | null | null | null |
recipes/models.py
|
JakubKoralewski/django-recipes
|
3794c6a96fb0765e2e3cebfc3968dae88e4f084c
|
[
"MIT"
] | 5
|
2021-03-19T03:49:52.000Z
|
2021-06-10T19:16:05.000Z
|
recipes/models.py
|
JakubKoralewski/django-recipes
|
3794c6a96fb0765e2e3cebfc3968dae88e4f084c
|
[
"MIT"
] | null | null | null |
from typing import List, Dict, Union
from django.db import models
# Create your models here.
# https://en.wikipedia.org/wiki/Cooking_weights_and_measures
class AmountType(models.TextChoices):
GRAMS = ('g', 'grams')
KILOGRAMS = ('kg', 'kilograms')
MILLILITERS = ('ml', 'milliliters')
TABLE_SPOONS = ('tbsp', 'tablespoons')
TEA_SPOONS = ('tsp', 'teaspoons')
COUNT = ('x', 'items')
class Author(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return f'Author: "{self.name}"'
class IngredientType(models.IntegerChoices):
DAIRY_PRODUCT = (0, 'Dairy product')
VEGETABLE = (1, 'Vegetable')
FRUIT = (2, 'Fruit')
MEAT = (3, 'Meat')
FLOUR_LIKE = (4, 'Flour-like')
RICE_LIKE = (5, 'Rice-like')
OTHER = (100, 'Unknown')
class Ingredient(models.Model):
name = models.CharField(max_length=200)
photo = models.URLField(max_length=400)
type = models.IntegerField(choices=IngredientType.choices, default=IngredientType.OTHER)
def __str__(self):
return f'Ingredient: "{self.name}"'
class IngredientToBeAdded:
name: str
amount: Union[float, int]
amount_type: str
photo: str
    type: int  # because the choices for this field are ints
def __str__(self):
return ','.join(self.__dict__.items())
def set_field(self, key: str, val):
key = key.lower()
if key == 'name':
if not isinstance(val, str):
raise Exception('name should be str')
self.name = val
elif key == 'photo':
if not isinstance(val, str):
raise Exception('photo should be str')
self.photo = val
elif key == 'amount':
if not isinstance(val, int) and not isinstance(val, float):
try:
val = float(val)
except:
raise Exception('amount of ingredient should be number')
self.amount = val
elif key == 'amount_type':
if not isinstance(val, str):
raise Exception('amount type should be string')
self.amount_type = val
elif key == 'type':
if not isinstance(val, int):
try:
val = int(val)
except:
raise Exception('type of ingredient should be int')
self.type = val
else:
raise Exception(f'Unknown ingredient field tried to be saved: "{key}" of value: "{val}"')
class StepToBeAdded:
description: str
photo: str
ingredients: List[IngredientToBeAdded]
def __init__(self):
self.ingredients = []
def __str__(self):
return ','.join(self.__dict__.items())
class Recipe(models.Model):
name = models.CharField(max_length=200)
photo = models.URLField(max_length=400, null=True, blank=True)
author = models.ForeignKey(Author, on_delete=models.CASCADE, null=True)
pub_date = models.DateTimeField(auto_now_add=True, editable=False, help_text='Published date')
votes = models.PositiveIntegerField(default=0)
steps = models.PositiveIntegerField(default=0)
@classmethod
def from_form(cls, name: str, photo: str, author: str):
self = Recipe(name=name, photo=photo)
try:
maybe_existing_author = Author.objects.get(name__iexact=author)
self.author = maybe_existing_author
except Author.DoesNotExist:
new_author = Author(name=author)
new_author.save()
self.author = new_author
return self
def add_steps(self, steps: Union[List[StepToBeAdded]]):
if isinstance(steps, list):
for step in steps:
self.add_step(step)
elif isinstance(steps, dict):
for step in steps.values():
self.add_step(step)
else:
raise Exception("invalid type of steps added to recipes")
def add_step(self, step: StepToBeAdded):
if not self.id:
raise Exception('Add the Recipe to the database before inserting steps!')
new_step = StepsOfRecipe(recipe=self, step_amt=self.steps, description=step.description)
new_step.save()
self.steps += 1
self.save()
for ing in step.ingredients:
ingredient: Ingredient
try:
maybe_ingredient = Ingredient.objects.get(name__iexact=ing.name)
ingredient = maybe_ingredient
except Ingredient.DoesNotExist:
new_ingredient = Ingredient(name=ing.name, photo=ing.photo, type=ing.type)
new_ingredient.save()
ingredient = new_ingredient
step_ing = IngredientsOfStep(
step=new_step,
ingredient=ingredient,
amount=ing.amount,
amount_type=ing.amount_type
)
step_ing.save()
def __str__(self):
return f'Recipe: name="{self.name}" author="{self.author.name if self.author else "no author"}"'
class StepsOfRecipe(models.Model):
recipe = models.ForeignKey(Recipe, on_delete=models.CASCADE)
step_amt = models.PositiveIntegerField(default=0)
description = models.TextField()
def __str__(self):
return f'{self.step_amt + 1}-th step of {self.recipe.name}'
class IngredientsOfStep(models.Model):
step = models.ForeignKey(StepsOfRecipe, on_delete=models.CASCADE)
ingredient = models.ForeignKey(Ingredient, on_delete=models.CASCADE)
amount = models.DecimalField(decimal_places=1, max_digits=100)
amount_type = models.CharField(max_length=10, choices=AmountType.choices, default=AmountType.COUNT)
def __str__(self):
return f"Ingredient of {self.ingredient.name}'s {self.step.step_amt + 1}-th step"
| 28.83815
| 100
| 0.71798
| 4,806
| 0.963319
| 0
| 0
| 351
| 0.070355
| 0
| 0
| 907
| 0.1818
|
a166f12db4d713441e75c22cdaa77f074c8a2431
| 835
|
py
|
Python
|
zoneh/conf.py
|
RaminAT/zoneh
|
73c8e66d76cbd0aa51551e21740d88ff439158a9
|
[
"MIT"
] | 8
|
2019-05-27T07:21:51.000Z
|
2021-09-14T21:26:53.000Z
|
zoneh/conf.py
|
RaminAT/zoneh
|
73c8e66d76cbd0aa51551e21740d88ff439158a9
|
[
"MIT"
] | 5
|
2020-04-08T12:10:44.000Z
|
2021-02-11T01:51:41.000Z
|
zoneh/conf.py
|
RaminAT/zoneh
|
73c8e66d76cbd0aa51551e21740d88ff439158a9
|
[
"MIT"
] | 5
|
2020-03-29T17:04:05.000Z
|
2021-09-14T21:26:58.000Z
|
"""Config module."""
import json
import logging
import os
from zoneh.exceptions import ConfigError
_log = logging.getLogger(__name__)
_CONFIG_FILE = 'config.json'
def _load_config():
"""Load telegram and filters configuration from config file."""
if not os.path.isfile(_CONFIG_FILE):
err_msg = f'Cannot find {_CONFIG_FILE} configuration file'
_log.error(err_msg)
raise ConfigError(err_msg)
with open(_CONFIG_FILE, 'r') as fd:
config = fd.read()
try:
config = json.loads(config)
except json.decoder.JSONDecodeError:
err_msg = f'Malformed JSON in {_CONFIG_FILE} configuration file'
_log.error(err_msg)
raise ConfigError(err_msg)
return config
_CONF = _load_config()
def get_config():
"""Return config as singleton."""
return _CONF
| 22.567568
| 72
| 0.68024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 234
| 0.28024
|
a1676b1833d7b48b6064b056da63a6fba24af86a
| 3,629
|
py
|
Python
|
mlogger.py
|
morris178/mqtt-data-logger
|
75e0fbbe0311ecaba8c905df356d6f7d8a0e3615
|
[
"MIT"
] | null | null | null |
mlogger.py
|
morris178/mqtt-data-logger
|
75e0fbbe0311ecaba8c905df356d6f7d8a0e3615
|
[
"MIT"
] | null | null | null |
mlogger.py
|
morris178/mqtt-data-logger
|
75e0fbbe0311ecaba8c905df356d6f7d8a0e3615
|
[
"MIT"
] | null | null | null |
###demo code provided by Steve Cope at www.steves-internet-guide.com
##email steve@steves-internet-guide.com
###Free to use for any purpose
"""
implements data logging class
"""
import time, os, json, logging
###############
class m_logger(object):
"""Class for logging data to a file. You can set the maximim bunber
of messages in a file the default is 1000. When the file is full
a new file is created.Log files are store under a root directoy
and a sub directory that uses the timestamp for the directory name
Log file data is flushed immediately to disk so that data is not lost.
Data can be stored as plain text or in JSON format """
def __init__(self, log_dir="mlogs", log_recs=1000, number_logs=0):
self.log_dir = log_dir
self.log_recs = log_recs
self.number_logs = number_logs
self.count = 0
self.log_dir = self.create_log_dir(self.log_dir)
self.fo = self.get_log_name(self.log_dir, self.count)
self.new_file_flag = 0
self.writecount = 0
self.timenow = time.time()
self.flush_flag = True
self.flush_time = 2 # flush logs to disk every 2 seconds
def __flushlogs(self): # write to disk
self.fo.flush()
# logging.info("flushing logs")
os.fsync(self.fo.fileno())
self.timenow = time.time()
def __del__(self):
if not self.fo.closed:
print("closing log file")
self.fo.close()
def close_file(self):
if not self.fo.closed:
print("closing log file")
self.fo.close()
def create_log_dir(self, log_dir):
"""Function for creating new log directories
using the timestamp for the name"""
self.t = time.localtime(time.time())
self.time_stamp = (str(self.t[1]) + "-" + str(self.t[2]) + "-" +
str(self.t[3]) + "-" + str(self.t[4]))
logging.info("creating sub directory" + str(self.time_stamp))
try:
os.stat(self.log_dir)
except:
os.mkdir(self.log_dir)
self.log_sub_dir = self.log_dir + "/" + self.time_stamp
try:
os.stat(self.log_sub_dir)
except:
os.mkdir(self.log_sub_dir)
return (self.log_sub_dir)
def get_log_name(self, log_dir, count):
"""get log files and directories"""
self.log_numbr = "{0:003d}".format(count)
logging.info("s is" + str(self.log_numbr))
self.file_name = self.log_dir + "/" + "log" + self.log_numbr
logging.info("creating log file " + self.file_name)
f = open(self.file_name, 'w') # clears file if it exists
f.close()
f = open(self.file_name, 'a')
return (f)
def log_json(self, data):
jdata = json.dumps(data) + "\n"
self.log_data(jdata)
def log_data(self, data):
self.data = data
try:
self.fo.write(data)
self.writecount += 1
self.__flushlogs()
if self.writecount >= self.log_recs:
self.count += 1 # counts number of logs
if self.count > self.number_logs and self.number_logs != 0:
logging.info("too many logs: starting from 0")
self.count = 0 # reset
self.fo = self.get_log_name(self.log_dir, self.count)
self.writecount = 0
except BaseException as e:
logging.error("Error on_data: %s" % str(e))
return False
return True
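# Minimal usage sketch (illustrative, not part of the original class); the
# directory name, record limit and payload below are arbitrary examples.
# With log_recs=5 the logger rolls over to a new log file every 5 records.
if __name__ == "__main__":
    demo = m_logger(log_dir="mlogs_demo", log_recs=5, number_logs=2)
    for i in range(12):
        demo.log_json({"seq": i, "payload": "demo"})
    demo.close_file()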
| 36.656566
| 76
| 0.572058
| 3,390
| 0.934142
| 0
| 0
| 0
| 0
| 0
| 0
| 1,041
| 0.286856
|
a16793db9e30c478f5f315f915ced2b2053b7849
| 6,299
|
py
|
Python
|
ptools/lipytools/little_methods.py
|
piteren/ptools_module
|
5117d06d7dea4716b573b93d5feb10137966c373
|
[
"MIT"
] | null | null | null |
ptools/lipytools/little_methods.py
|
piteren/ptools_module
|
5117d06d7dea4716b573b93d5feb10137966c373
|
[
"MIT"
] | null | null | null |
ptools/lipytools/little_methods.py
|
piteren/ptools_module
|
5117d06d7dea4716b573b93d5feb10137966c373
|
[
"MIT"
] | null | null | null |
"""
2018 (c) piteren
small, frequently used helper methods for Python
"""
from collections import OrderedDict
import csv
import inspect
import json
import os
import pickle
import random
import shutil
import string
import time
from typing import List, Callable, Any, Optional
# prepares function parameters dictionary
def get_params(function: Callable):
params_dict = {'without_defaults':[], 'with_defaults':OrderedDict()}
if function:
specs = inspect.getfullargspec(function)
params = specs.args
if not params: params = []
vals = specs.defaults
if not vals: vals = ()
while len(params) > len(vals):
params_dict['without_defaults'].append(params.pop(0))
params_dict['with_defaults'] = {k: v for k,v in zip(params,vals)}
return params_dict
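# Quick illustration (not part of the original module): split a sample
# function's parameters into those without and with default values.
def _demo_fn(a, b, c=3, d='x'): pass
_demo_params = get_params(_demo_fn)
assert _demo_params['without_defaults'] == ['a', 'b']
assert _demo_params['with_defaults'] == {'c': 3, 'd': 'x'}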
# short(compressed) scientific notation for floats
def short_scin(
fl: float,
precision:int= 1):
sh = f'{fl:.{precision}E}'
sh = sh.replace('+0','')
sh = sh.replace('+','')
sh = sh.replace('-0','-')
sh = sh.replace('E','e')
return sh
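# Two quick examples of the compressed notation (illustrative only):
# 123456.0 -> '1.2e5' and 0.000123 -> '1.2e-4'
assert short_scin(123456.0) == '1.2e5'
assert short_scin(0.000123) == '1.2e-4'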
# returns a string from a float, always of the given width
def float_to_str(
num: float,
width: int= 7):
if width < 5: width = 5
scientific_decimals = width-6 if width>6 else 0
ff = f'{num:.{scientific_decimals}E}'
if 1000 > num > 0.0001: ff = str(num)[:width]
if len(ff)<width: ff += '0'*(width-len(ff))
return ff
# *********************************************************************************************** file readers / writers
# ********************************************* for raise_exception=False each reader will return None if file not found
def r_pickle( # pickle reader
file_path,
obj_type= None, # if obj_type is given checks for compatibility with given type
raise_exception= False):
if not os.path.isfile(file_path):
if raise_exception: raise FileNotFoundError(f'file {file_path} not exists!')
return None
# obj = pickle.load(open(file_path, 'rb')) << replaced by:
with open(file_path, 'rb') as file: obj = pickle.load(file)
if obj_type: assert type(obj) is obj_type, f'ERROR: obj from file is not {str(obj_type)} type !!!'
return obj
def w_pickle( # pickle writer
obj,
file_path):
with open(file_path, 'wb') as file:
pickle.dump(obj, file)
def r_json( # json reader
file_path,
raise_exception= False):
if not os.path.isfile(file_path):
if raise_exception: raise FileNotFoundError(f'file {file_path} not exists!')
return None
with open(file_path, 'r', encoding='utf-8') as file:
return json.load(file)
def w_json( # json writer
data: dict,
file_path):
with open(file_path, 'w', encoding='utf-8') as file:
json.dump(data, file, indent=4, ensure_ascii=False)
def r_jsonl( # jsonl reader
file_path,
raise_exception=False):
if not os.path.isfile(file_path):
if raise_exception: raise FileNotFoundError(f'file {file_path} not exists!')
return None
with open(file_path, 'r', encoding='utf-8') as file:
return [json.loads(line) for line in file]
def w_jsonl( # jsonl writer
data: List[dict],
file_path):
with open(file_path, 'w', encoding='utf-8') as file:
for d in data:
json.dump(d, file, ensure_ascii=False)
file.write('\n')
def r_csv( # csv reader
file_path,
raise_exception= False):
if not os.path.isfile(file_path):
if raise_exception: raise FileNotFoundError(f'file {file_path} not exists!')
return None
with open(file_path, newline='') as f:
reader = csv.reader(f)
return [row for row in reader][1:]
# returns timestamp string
def stamp(
year= False,
date= True,
letters: Optional[int]= 3):
random.seed(time.time())
if date:
if year: stp = time.strftime('%y%m%d_%H%M')
else: stp = time.strftime('%m%d_%H%M')
else: stp = ''
if letters:
if date: stp += '_'
stp += ''.join([random.choice(string.ascii_letters) for _ in range(letters)])
return stp
# returns nice string of given list
def list_str(ls: List[Any], limit:Optional[int]=200):
lstr = [str(e) for e in ls]
lstr = '; '.join(lstr)
if limit: lstr = lstr[:limit]
return lstr
# prints nested dict
def print_nested_dict(dc: dict, ind_scale=2, line_limit=200):
tpD = {
dict: 'D',
list: 'L',
tuple: 'T',
str: 'S'}
def __prn_root(root: dict, ind, ind_scale=2, line_limit=line_limit):
spacer = ' ' * ind * ind_scale
for k in sorted(list(root.keys())):
tp = tpD.get(type(root[k]),'O')
ln = len(root[k]) if tp in tpD.values() else ''
exmpl = ''
if tp!='D':
exmpl = str(root[k])
if line_limit:
if len(exmpl)>line_limit: exmpl = f'{exmpl[:line_limit]}..'
exmpl = f' : {exmpl}'
print(f'{spacer}{k} [{tp}.{ln}]{exmpl}')
if type(root[k]) is dict: __prn_root(root[k],ind+1,ind_scale)
__prn_root(dc,ind=0,ind_scale=ind_scale)
# prepares folder, creates or flushes
def prep_folder(
folder_path :str, # folder path
flush_non_empty= False):
if flush_non_empty and os.path.isdir(folder_path): shutil.rmtree(folder_path)
os.makedirs(folder_path, exist_ok=True)
# random <0;1> probability function
def prob(p: float) -> bool:
return random.random() < p
# terminal progress bar
def progress_ (
iteration: float or int, # current iteration
total: float or int, # total iterations
prefix: str= '', # prefix string
suffix: str= '', # suffix string
length: int= 20,
fill: str= '█',
print_end: str= ''):
prog = iteration / total
if prog > 1: prog = 1
filled_length = int(length * prog)
bar = fill * filled_length + '-' * (length - filled_length)
print(f'\r{prefix} |{bar}| {prog*100:.1f}% {suffix}', end = print_end)
if prog == 1: print()
| 30.877451
| 120
| 0.582156
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,508
| 0.239327
|
a16884524638226d0ba06be614706d7a5f91b5dc
| 2,135
|
py
|
Python
|
tests/test.py
|
zephenryus/botw-grass
|
31adaebd69b56c4177bcdaf8e933fee5e8bc8433
|
[
"MIT"
] | 1
|
2020-10-11T07:07:31.000Z
|
2020-10-11T07:07:31.000Z
|
tests/test.py
|
zephenryus/botw-grass
|
31adaebd69b56c4177bcdaf8e933fee5e8bc8433
|
[
"MIT"
] | null | null | null |
tests/test.py
|
zephenryus/botw-grass
|
31adaebd69b56c4177bcdaf8e933fee5e8bc8433
|
[
"MIT"
] | 1
|
2020-10-11T07:07:33.000Z
|
2020-10-11T07:07:33.000Z
|
import filecmp
import hashlib
import json
import grass
def grass_to_json():
"""
    Tests reading a grass file and exporting the data as a JSON file
"""
data = grass.read_grass("assets/5000000000.grass.extm")
print("Saving file output/5000000000.grass.extm.json...")
with open("output/5000000000.grass.extm.json", "w+") as outfile:
out_obj = []
for entry in data:
out_obj.append(entry.__dict__)
outfile.write(json.dumps(out_obj, indent=4, separators=(',', ': ')))
def grass_to_binary_string():
"""
Tests that data is recompiled correctly and matches the original file
"""
data = grass.read_grass("assets/5000000000.grass.extm")
binary_data = grass.compile_grass(data)
hash_md5 = hashlib.md5()
with open("assets/5000000000.grass.extm", "rb") as infile:
for chunk in iter(lambda: infile.read(4096), b""):
hash_md5.update(chunk)
file_hash = hash_md5.hexdigest()
hash_md5 = hashlib.md5()
pos = 0
for chunk in iter(lambda: binary_data[pos:pos + 4096], b""):
pos += 4096
hash_md5.update(chunk)
string_hash = hash_md5.hexdigest()
print("The file and binary string are the same: {0}".format(file_hash == string_hash))
def grass_to_binary_file():
"""
Tests reading data from grass file then writes the same data back as a binary
"""
data = grass.read_grass("assets/5000000000.grass.extm")
grass.write_grass(data, "output/5000000000.grass.extm")
print("The files are the same: {0}".format(
filecmp.cmp("assets/5000000000.grass.extm", "output/5000000000.grass.extm")))
def grass_to_image():
"""
Tests reading data from grass file then generating height and color map images
"""
data = grass.read_grass("assets/5000000000.grass.extm")
grass.generate_height_map(data, 'output/5000000000.grass.extm.height.tiff')
grass.generate_color_map(data, 'output/5000000000.grass.extm.color.tiff')
def main():
grass_to_json()
grass_to_binary_string()
grass_to_binary_file()
grass_to_image()
if __name__ == "__main__":
main()
| 28.092105
| 90
| 0.6726
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 861
| 0.403279
|
a16900fa8a0412a37028d1da77ef8f912a14e56f
| 259
|
py
|
Python
|
Control/control_common.py
|
TomE8/drones
|
c92865556dd3df2d5f5b73589cd48e413bff3a3a
|
[
"MIT"
] | 14
|
2018-10-29T00:52:18.000Z
|
2022-03-23T20:07:11.000Z
|
Control/control_common.py
|
TomE8/drones
|
c92865556dd3df2d5f5b73589cd48e413bff3a3a
|
[
"MIT"
] | 4
|
2020-07-12T05:19:05.000Z
|
2020-09-20T12:40:47.000Z
|
Control/control_common.py
|
TomE8/drones
|
c92865556dd3df2d5f5b73589cd48e413bff3a3a
|
[
"MIT"
] | 2
|
2019-03-08T01:36:47.000Z
|
2019-09-12T04:07:19.000Z
|
class AxisIndex(): #TODO: read this value from config file
LEFT_RIGHT=0
FORWARD_BACKWARDS=1
ROTATE=2
UP_DOWN=3
class ButtonIndex():
TRIGGER = 0
SIDE_BUTTON = 1
HOVERING = 2
EXIT = 10
class ThresHold():
SENDING_TIME = 0.5
| 17.266667
| 58
| 0.648649
| 254
| 0.980695
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 0.150579
|
a16aa7de0e511402c80303f34d1d2b678e7b0256
| 446
|
py
|
Python
|
tests/LayoutTest.py
|
lakhman/restructuredWeb
|
a8aff9f96c63415fdefe6832f923a6d395d4ebdd
|
[
"MIT"
] | 2
|
2021-05-19T15:43:26.000Z
|
2021-05-19T16:07:00.000Z
|
tests/LayoutTest.py
|
lakhman/restructuredWeb
|
a8aff9f96c63415fdefe6832f923a6d395d4ebdd
|
[
"MIT"
] | null | null | null |
tests/LayoutTest.py
|
lakhman/restructuredWeb
|
a8aff9f96c63415fdefe6832f923a6d395d4ebdd
|
[
"MIT"
] | 1
|
2021-05-19T15:43:44.000Z
|
2021-05-19T15:43:44.000Z
|
# -*- coding: utf-8 -*-
from .BaseTest import BaseTest
class LayoutTest(BaseTest):
def test_layout_switch(self):
"""
Test layout switch
"""
self.do_component_fixture_test_with_real_sphinx('layout', 'layout')
def test_layout_multiple(self):
"""
Test multiple layout directives throw an error
"""
self.do_component_fixture_test_with_real_sphinx('layout', 'multiple-error')
| 26.235294
| 83
| 0.650224
| 388
| 0.869955
| 0
| 0
| 0
| 0
| 0
| 0
| 175
| 0.392377
|
a16aadbd9d67147c97cce0ae81ac212da4c01e1c
| 2,472
|
py
|
Python
|
.leetcode/16.3-sum-closest.2.py
|
KuiyuanFu/PythonLeetCode
|
8962df2fa838eb7ae48fa59de272ba55a89756d8
|
[
"MIT"
] | null | null | null |
.leetcode/16.3-sum-closest.2.py
|
KuiyuanFu/PythonLeetCode
|
8962df2fa838eb7ae48fa59de272ba55a89756d8
|
[
"MIT"
] | null | null | null |
.leetcode/16.3-sum-closest.2.py
|
KuiyuanFu/PythonLeetCode
|
8962df2fa838eb7ae48fa59de272ba55a89756d8
|
[
"MIT"
] | null | null | null |
# @lc app=leetcode id=16 lang=python3
#
# [16] 3Sum Closest
#
# https://leetcode.com/problems/3sum-closest/description/
#
# algorithms
# Medium (46.33%)
# Likes: 3080
# Dislikes: 169
# Total Accepted: 570.4K
# Total Submissions: 1.2M
# Testcase Example: '[-1,2,1,-4]\n1'
#
# Given an array nums of n integers and an integer target, find three integers
# in nums such that the sum is closest to target. Return the sum of the three
# integers. You may assume that each input would have exactly one solution.
#
#
# Example 1:
#
#
# Input: nums = [-1,2,1,-4], target = 1
# Output: 2
# Explanation: The sum that is closest to the target is 2. (-1 + 2 + 1 =
# 2).
#
#
#
# Constraints:
#
#
# 3 <= nums.length <= 10^3
# -10^3 <= nums[i] <= 10^3
# -10^4 <= target <= 10^4
#
#
#
# @lc tags=array;two-pointers
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# Given an array, find the sum of three elements that is closest to the target.
# Two-pointer approach: sort the array first, fix the first element, then scan the
# remaining range with two pointers looking for the smallest difference. Because the
# array is sorted, moving the left or right pointer controls whether the remaining
# two-element sum grows or shrinks. Duplicate first elements are skipped to prune the search.
#
# @lc idea=end
# @lc group=two-pointers
# @lc rank=10
# @lc code=start
class Solution:
def threeSumClosest(self, nums: List[int], target: int) -> int:
# dic = {}
# for n in nums:
# if not dic.__contains__(n):
# dic[n] = 1
# elif dic[n] < 3:
# dic[n] += 1
# nums = []
# for i in list(dic.keys()):
# nums += [i]*dic[i]
nums.sort()
s = nums[0] + nums[1] + nums[2]
dif = abs(s - target)
for i in range(len(nums) - 2):
            # skip duplicate values for the first element
if i > 0 and nums[i] == nums[i - 1]:
continue
l = i + 1
r = len(nums) - 1
t = target - nums[i]
while l < r:
if abs(t - nums[l] - nums[r]) < dif:
dif = abs(t - nums[l] - nums[r])
s = nums[i] + nums[l] + nums[r]
                # decide which pointer to move based on the sign of the remaining difference
if t - nums[l] - nums[r] > 0:
l = l + 1
else:
r = r - 1
if dif == 0:
break
return s
pass
# @lc code=end
# @lc main=start
if __name__ == '__main__':
print('Example 1:')
print('Input : ')
print('nums = [-1,2,1,-4], target = 1')
print('Output :')
print(str(Solution().threeSumClosest([-1, 2, 1, -4], 1)))
print('Exception :')
print('2')
print()
pass
# @lc main=end
| 22.071429
| 95
| 0.506068
| 1,088
| 0.398535
| 0
| 0
| 0
| 0
| 0
| 0
| 1,599
| 0.585714
|
a16be12b3f57a68c02b41dfe786a31910f86a92e
| 2,142
|
py
|
Python
|
test/test_functions/test_michalewicz.py
|
carefree0910/botorch
|
c0b252baba8f16a4ea2eb3f99c266fba47418b1f
|
[
"MIT"
] | null | null | null |
test/test_functions/test_michalewicz.py
|
carefree0910/botorch
|
c0b252baba8f16a4ea2eb3f99c266fba47418b1f
|
[
"MIT"
] | null | null | null |
test/test_functions/test_michalewicz.py
|
carefree0910/botorch
|
c0b252baba8f16a4ea2eb3f99c266fba47418b1f
|
[
"MIT"
] | 1
|
2019-05-07T23:53:08.000Z
|
2019-05-07T23:53:08.000Z
|
#! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
from botorch.test_functions.michalewicz import (
GLOBAL_MAXIMIZER,
GLOBAL_MAXIMUM,
neg_michalewicz,
)
class TestNegMichalewicz(unittest.TestCase):
def test_single_eval_neg_michalewicz(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
X = torch.zeros(10, device=device, dtype=dtype)
res = neg_michalewicz(X)
self.assertEqual(res.dtype, dtype)
self.assertEqual(res.device.type, device.type)
self.assertEqual(res.shape, torch.Size())
def test_single_eval_neg_michalewicz_cuda(self):
if torch.cuda.is_available():
self.test_single_eval_neg_michalewicz(cuda=True)
def test_batch_eval_neg_michalewicz(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
X = torch.zeros(2, 10, device=device, dtype=dtype)
res = neg_michalewicz(X)
self.assertEqual(res.dtype, dtype)
self.assertEqual(res.device.type, device.type)
self.assertEqual(res.shape, torch.Size([2]))
def test_batch_eval_neg_michalewicz_cuda(self):
if torch.cuda.is_available():
self.test_batch_eval_neg_michalewicz(cuda=True)
def test_neg_michalewicz_global_maximum(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
X = torch.tensor(
GLOBAL_MAXIMIZER, device=device, dtype=dtype, requires_grad=True
)
res = neg_michalewicz(X)
res.backward()
self.assertAlmostEqual(res.item(), GLOBAL_MAXIMUM, places=4)
self.assertLess(X.grad.abs().max().item(), 1e-3)
def test_neg_michalewicz_global_maximum_cuda(self):
if torch.cuda.is_available():
            self.test_neg_michalewicz_global_maximum(cuda=True)
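For interactive use outside the unittest harness, the function under test can be evaluated directly. The following is an added sketch that assumes the same pre-1.0 botorch version whose test_functions.michalewicz module is imported above:

import math
import torch
from botorch.test_functions.michalewicz import (
    GLOBAL_MAXIMIZER,
    GLOBAL_MAXIMUM,
    neg_michalewicz,
)

X = torch.rand(4, 10) * math.pi          # four random points in the [0, pi]^10 domain
print(neg_michalewicz(X))                # batched evaluation, tensor of shape (4,)
x_star = torch.tensor(GLOBAL_MAXIMIZER)  # known maximizer shipped with the module
print(neg_michalewicz(x_star).item(), 'should be close to', GLOBAL_MAXIMUM)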
| 38.25
| 80
| 0.661531
| 1,898
| 0.886088
| 0
| 0
| 0
| 0
| 0
| 0
| 126
| 0.058824
|
a16cdf1f2057c870dd857dd5ffd7ccfb349decff
| 1,122
|
py
|
Python
|
example_scripts/write_mztab_result.py
|
gavswe/pyqms
|
299cd4d96b78611ebbe43e0ac625909c6a8d8fd9
|
[
"MIT"
] | 23
|
2017-06-28T07:53:42.000Z
|
2022-02-20T02:46:37.000Z
|
example_scripts/write_mztab_result.py
|
gavswe/pyqms
|
299cd4d96b78611ebbe43e0ac625909c6a8d8fd9
|
[
"MIT"
] | 23
|
2019-05-15T18:05:18.000Z
|
2022-01-21T13:27:11.000Z
|
example_scripts/write_mztab_result.py
|
gavswe/pyqms
|
299cd4d96b78611ebbe43e0ac625909c6a8d8fd9
|
[
"MIT"
] | 11
|
2017-06-26T13:22:57.000Z
|
2022-03-31T23:35:14.000Z
|
#!/usr/bin/env python3
# encoding: utf-8
"""
pyQms
-----
Python module for fast and accurate mass spectrometry data quantification
:license: MIT, see LICENSE.txt for more details
Authors:
* Leufken, J.
* Niehues, A.
* Sarin, L.P.
* Hippler, M.
* Leidel, S.A.
* Fufezan, C.
"""
import pickle
import sys
def main(result_pkl=None):
"""
usage:
        ./write_mztab_result.py <Path2ResultPkl>
Will write all results of a result pkl into a .mztab file. Please refer to
Documentation of :doc:`results` for further information.
Note:
        Please note that the output in mzTab format is still in beta stage.
        Since pyQms is a raw quantification tool, some metadata has to be
        passed/set manually by the user.
"""
results_class = pickle.load(open(result_pkl, "rb"))
results_class.write_result_mztab(
output_file_name="{0}_results.mztab".format(result_pkl)
)
if __name__ == "__main__":
if len(sys.argv) < 2:
print(main.__doc__)
else:
main(result_pkl=sys.argv[1])
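Besides the command-line form shown in the docstring, the conversion can be triggered programmatically. The snippet below is an added sketch; the pickle path is a placeholder, not a file shipped with pyQms:

from write_mztab_result import main

# Unpickles the results and calls write_result_mztab, producing
# example_quant_results.pkl_results.mztab in the working directory.
main(result_pkl='example_quant_results.pkl')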
| 21.169811
| 78
| 0.622995
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 798
| 0.71123
|
a16d009cfff8e6fc878e82ac94cf0ba2221a05c0
| 5,516
|
py
|
Python
|
dbservice/dbprovider/MessageDAO.py
|
artyomche9/digest_bot
|
480e9038ac1f42a10a9a333a72b9e38fa9fe8385
|
[
"MIT"
] | 11
|
2019-10-25T12:42:03.000Z
|
2020-04-03T09:43:49.000Z
|
dbservice/dbprovider/MessageDAO.py
|
maybe-hello-world/digestbot
|
480e9038ac1f42a10a9a333a72b9e38fa9fe8385
|
[
"MIT"
] | 13
|
2020-12-12T12:33:55.000Z
|
2021-09-09T15:00:57.000Z
|
dbservice/dbprovider/MessageDAO.py
|
artyomche9/digest_bot
|
480e9038ac1f42a10a9a333a72b9e38fa9fe8385
|
[
"MIT"
] | 5
|
2019-10-06T09:55:24.000Z
|
2019-10-21T16:36:56.000Z
|
from decimal import Decimal
from typing import List, Any
from common.Enums import SortingType
from models import Message
from .engine import db_engine, DBEngine
class MessageDAO:
def __init__(self, engine: DBEngine):
self.engine = engine
@staticmethod
def __make_insert_values_from_messages_array(messages: List[Message]) -> List[tuple]:
return [
(
message.username,
message.text,
Decimal(message.timestamp),
message.reply_count,
message.reply_users_count,
message.reactions_rate,
message.thread_length,
message.channel_id,
)
for message in messages
]
@staticmethod
def __request_messages_to_message_class(request_messages: List[Any]) -> List[Message]:
return [Message(**message) for message in request_messages]
@staticmethod
def __make_link_update_values_from_messages_array(messages: List[Message]) -> List[tuple]:
return [(x.link, Decimal(x.timestamp), x.channel_id) for x in messages]
async def create_messages(self, messages: List[Message]) -> None:
request = f"""
INSERT INTO message (username, text, timestamp, reply_count, reply_users_count,
reactions_rate, thread_length, channel_id)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8);
"""
sequence = self.__make_insert_values_from_messages_array(messages)
await self.engine.make_execute_many(request, sequence)
async def upsert_messages(self, messages: List[Message]) -> None:
request = f"""
INSERT INTO message (username, text, timestamp, reply_count, reply_users_count,
reactions_rate, thread_length, channel_id)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
ON CONFLICT (timestamp, channel_id)
DO UPDATE SET
reply_count = EXCLUDED.reply_count,
reply_users_count = EXCLUDED.reply_users_count,
reactions_rate = EXCLUDED.reactions_rate,
thread_length = EXCLUDED.thread_length;
"""
sequence = self.__make_insert_values_from_messages_array(messages)
await self.engine.make_execute_many(request, sequence)
async def get_messages_without_links(self) -> List[Message]:
request = f"SELECT * FROM message WHERE link IS NULL;"
messages = await self.engine.make_fetch_rows(request)
return self.__request_messages_to_message_class(messages)
async def update_message_links(self, messages: List[Message]) -> None:
request = f" UPDATE message SET link=($1) WHERE timestamp=($2) AND channel_id=($3)"
sequence = self.__make_link_update_values_from_messages_array(messages)
await self.engine.make_execute_many(request, sequence)
async def get_top_messages(
self,
after_ts: str,
user_id: str,
sorting_type: SortingType = SortingType.REPLIES,
top_count: int = 10
) -> List[Message]:
request = f"""
SELECT * FROM message
WHERE timestamp >= $1 AND username NOT IN
(SELECT ignore_username FROM IgnoreList WHERE author_username = $3)
ORDER BY {sorting_type.value} DESC
LIMIT $2;
"""
messages = await self.engine.make_fetch_rows(request, after_ts, top_count, user_id)
return self.__request_messages_to_message_class(messages)
async def get_top_messages_by_channel_id(
self,
channel_id: str,
after_ts: str,
user_id: str,
sorting_type: SortingType = SortingType.REPLIES,
top_count: int = 10,
) -> List[Message]:
request = f"""
SELECT * FROM message
WHERE
channel_id=$1
AND
timestamp >= $2
AND
username NOT IN (SELECT ignore_username FROM IgnoreList WHERE author_username = $4)
ORDER BY {sorting_type.value} DESC
LIMIT $3;
"""
messages = await self.engine.make_fetch_rows(
request, channel_id, after_ts, top_count, user_id
)
return self.__request_messages_to_message_class(messages)
async def get_top_messages_by_preset_name(
self,
preset_name: str,
after_ts: str,
user_id: str,
sorting_type: SortingType = SortingType.REPLIES,
top_count: int = 10,
) -> List[Message]:
request = f"""
WITH presets AS (
SELECT *
FROM preset
WHERE name = $1
AND (username = $2 OR username IS NULL)
ORDER BY username NULLS LAST
LIMIT 1
)
SELECT message.* FROM message
JOIN presets preset
ON message.channel_id=ANY(preset.channel_ids)
WHERE message.timestamp >= $3 AND message.username NOT IN
(SELECT ignore_username FROM IgnoreList WHERE author_username = $2)
ORDER BY {sorting_type.value} DESC
LIMIT $4;
"""
messages = await self.engine.make_fetch_rows(
request, preset_name, user_id, after_ts, top_count
)
return self.__request_messages_to_message_class(messages)
message_dao = MessageDAO(db_engine)
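A minimal sketch of driving the module-level message_dao singleton from an asyncio entry point (added for illustration; the import path, timestamp, and user id are placeholder assumptions, and a configured db_engine connection is required):

import asyncio

from dbprovider.MessageDAO import message_dao  # assumed import path inside dbservice

async def show_top_messages():
    # Uses the default sorting (SortingType.REPLIES) declared above.
    top = await message_dao.get_top_messages(
        after_ts='1600000000.000000',  # placeholder Slack-style timestamp
        user_id='U0000000',            # placeholder requesting user
        top_count=5,
    )
    for message in top:
        print(message.username, message.reply_count)

asyncio.run(show_top_messages())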
| 36.773333
| 99
| 0.603336
| 5,313
| 0.963198
| 0
| 0
| 857
| 0.155366
| 4,308
| 0.781001
| 2,084
| 0.37781
|
a16f85e6fac2fb3f5423a543ab4b85436a1f1301
| 196
|
py
|
Python
|
Chapter09/fuzzing.py
|
firebitsbr/Penetration-Testing-with-Shellcode
|
2d756bccace6b727e050b2010ebf23e08d221fdc
|
[
"MIT"
] | 30
|
2018-05-15T21:45:09.000Z
|
2022-03-23T20:04:25.000Z
|
Chapter09/fuzzing.py
|
naveenselvan/Penetration-Testing-with-Shellcode
|
2d756bccace6b727e050b2010ebf23e08d221fdc
|
[
"MIT"
] | 1
|
2020-10-19T13:03:32.000Z
|
2020-11-24T05:50:17.000Z
|
Chapter09/fuzzing.py
|
naveenselvan/Penetration-Testing-with-Shellcode
|
2d756bccace6b727e050b2010ebf23e08d221fdc
|
[
"MIT"
] | 18
|
2018-02-20T21:21:23.000Z
|
2022-01-26T04:19:28.000Z
|
#!/usr/bin/python
import socket
import sys
junk = 'A'*500
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
connect = s.connect(('192.168.129.128',21))
s.recv(1024)
s.send('USER '+junk+'\r\n')
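The one-shot 500-byte probe above is usually followed by an incremental variant that grows the buffer until the service stops answering, to approximate the crash offset. The sketch below is an added Python 3 version reusing the same lab host and port, which are assumptions about the test environment:

import socket

for length in range(100, 3001, 100):
    junk = b'A' * length
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.settimeout(5)
            s.connect(('192.168.129.128', 21))
            s.recv(1024)
            s.send(b'USER ' + junk + b'\r\n')
            s.recv(1024)
        print('Server handled %d bytes' % length)
    except (socket.timeout, OSError):
        print('No response at %d bytes - possible crash offset' % length)
        break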
| 17.818182
| 50
| 0.69898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 50
| 0.255102
|
a17081dce9dfbf674f07300258797fe7e68a0847
| 1,746
|
py
|
Python
|
017. Letter Combinations of a Phone Number.py
|
youhusky/Facebook_Prepare
|
4045bcb652537711b3680b2aa17204ae73c6bde8
|
[
"MIT"
] | 6
|
2017-10-30T05:35:46.000Z
|
2020-12-15T06:51:52.000Z
|
017. Letter Combinations of a Phone Number.py
|
youhusky/Facebook_Prepare
|
4045bcb652537711b3680b2aa17204ae73c6bde8
|
[
"MIT"
] | 1
|
2017-10-30T04:11:31.000Z
|
2017-10-30T05:46:24.000Z
|
017. Letter Combinations of a Phone Number.py
|
youhusky/Facebook_Prepare
|
4045bcb652537711b3680b2aa17204ae73c6bde8
|
[
"MIT"
] | 2
|
2020-09-03T07:14:02.000Z
|
2021-05-21T19:19:57.000Z
|
# Given a digit string, return all possible letter combinations that the number could represent.
# A mapping of digit to letters (just like on the telephone buttons) is given below.
# Input:Digit string "23"
# Output: ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"].
# DFS backtracking
class Solution(object):
def letterCombinations(self, digits):
"""
        O(4^n) in the worst case (each digit maps to 3 or 4 letters)
:type digits: str
:rtype: List[str]
"""
if not digits:
return []
res = []
dic = {
'2': 'abc',
'3': 'def',
'4': 'ghi',
'5': 'jkl',
'6': 'mno',
'7': 'pqrs',
'8': 'tuv',
'9': 'wxyz'
}
self.dfs(res, '',dic,digits,0)
return res
def dfs(self, res, temp, dic,digits,index):
if len(temp) == len(digits):
res.append(temp)
return
            # Key step: expand the current digit into each of its letters,
            # e.g. digits[index] == '2' generates 'a', 'b', 'c'.
for letter in dic[digits[index]]:
self.dfs(res, temp+letter, dic, digits, index+1)
class Solution2(object):
def letterCombinations(self, password):
if not password:
return []
res = []
dic = {'a':"12", 'c':"34"}
for char in password:
if char not in dic:
dic[char] = char
self.dfs(res, "", dic, password, 0)
return res
def dfs(self, res, temp, dic, password, index):
if index == len(password):
res.append(temp)
return
for letter in dic[password[index]]:
self.dfs(res, temp+letter, dic, password, index+1)
m = Solution2()
print(m.letterCombinations('abc'))
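For comparison, the same expansion can be written without explicit recursion; the helper below is an added sketch using itertools, not part of the original solutions:

from itertools import product

def letter_combinations_iterative(digits):
    # Cartesian product of each digit's letter set, e.g. '23' -> 'abc' x 'def'.
    mapping = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',
               '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}
    if not digits:
        return []
    return [''.join(p) for p in product(*(mapping[d] for d in digits))]

print(letter_combinations_iterative('23'))  # ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']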
| 27.28125
| 96
| 0.4874
| 1,374
| 0.786942
| 0
| 0
| 0
| 0
| 0
| 0
| 507
| 0.290378
|
a172ea5b14e8133a222d02986a593e89323cad7c
| 847
|
py
|
Python
|
FreeBSD/bsd_netstats_poller.py
|
failedrequest/telegraf-plugins
|
9cda0612a912f219fa84724f12af1f428483a37a
|
[
"BSD-2-Clause"
] | null | null | null |
FreeBSD/bsd_netstats_poller.py
|
failedrequest/telegraf-plugins
|
9cda0612a912f219fa84724f12af1f428483a37a
|
[
"BSD-2-Clause"
] | null | null | null |
FreeBSD/bsd_netstats_poller.py
|
failedrequest/telegraf-plugins
|
9cda0612a912f219fa84724f12af1f428483a37a
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
# 3/21/2021
# Updated for python3
# A Simple sysctl to telegraf plugin for freebsd's netstats ip info
from freebsd_sysctl import Sysctl as sysctl
import subprocess as sp
import re
import json
import sys
import pprint as pp
hostname = sysctl("kern.hostname").value
netstat_data = {}
points_netstat = {}
netstat_output = sp.check_output(["netstat", "-s", "-p", "ip", "--libxo", "json", "/dev/null"],universal_newlines=True)
netstat_data = json.loads(netstat_output)
for x in netstat_data["statistics"]:
for k,v in netstat_data["statistics"][x].items():
points_netstat[k] = v
def points_to_influx(points):
field_tags= ",".join(["{k}={v}".format(k=str(x[0]), v=x[1]) for x in list(points_netstat.items())])
print(("bsd_netstat,type=netstat {}").format(field_tags))
points_to_influx(points_netstat)
| 22.289474
| 119
| 0.709563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 248
| 0.292798
|
a1730ed2d00a6babe52f239de2d480281d939967
| 13,395
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/packetinlist_10d8adb40e4e05f4b37904f2c6428ca9.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 20
|
2019-05-07T01:59:14.000Z
|
2022-02-11T05:24:47.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/packetinlist_10d8adb40e4e05f4b37904f2c6428ca9.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 60
|
2019-04-03T18:59:35.000Z
|
2022-02-22T12:05:05.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/packetinlist_10d8adb40e4e05f4b37904f2c6428ca9.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 13
|
2019-05-20T10:48:31.000Z
|
2021-10-06T07:45:44.000Z
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class PacketInList(Base):
"""Openflow Switch PacketIn Configuration
The PacketInList class encapsulates a list of packetInList resources that are managed by the system.
A list of resources can be retrieved from the server using the PacketInList.find() method.
"""
__slots__ = ()
_SDM_NAME = 'packetInList'
_SDM_ATT_MAP = {
'AuxiliaryId': 'auxiliaryId',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'FlowTable': 'flowTable',
'InPort': 'inPort',
'Name': 'name',
'PacketInName': 'packetInName',
'PhysicalInPort': 'physicalInPort',
'SendPacketIn': 'sendPacketIn',
'SwitchName': 'switchName',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(PacketInList, self).__init__(parent, list_op)
@property
def AuxiliaryId(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The identifier for auxiliary connections.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuxiliaryId']))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def FlowTable(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If selected, the Switch looks up for each PacketIn configured in the Flow Table.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FlowTable']))
@property
def InPort(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The Switch Port on which, this Packet has come.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['InPort']))
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def PacketInName(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The description of the packet-in.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PacketInName']))
@property
def PhysicalInPort(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The physical In port value for this PacketIn range. It is the underlying physical port when packet is received on a logical port.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PhysicalInPort']))
@property
def SendPacketIn(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If selected, the Switch starts sending PacketIn messages when the session comes up.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SendPacketIn']))
@property
def SwitchName(self):
# type: () -> str
"""
Returns
-------
- str: Parent Switch Name
"""
return self._get_attribute(self._SDM_ATT_MAP['SwitchName'])
def update(self, Name=None):
# type: (str) -> PacketInList
"""Updates packetInList resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, Name=None):
# type: (str) -> PacketInList
"""Adds a new packetInList resource on the json, only valid with config assistant
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with all currently retrieved packetInList resources using find and the newly added packetInList resources available through an iterator or index
Raises
------
- Exception: if this function is not being used with config assistance
"""
return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, Count=None, DescriptiveName=None, Name=None, SwitchName=None):
# type: (int, str, str, str) -> PacketInList
"""Finds and retrieves packetInList resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve packetInList resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all packetInList resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- SwitchName (str): Parent Switch Name
Returns
-------
- self: This instance with matching packetInList resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of packetInList data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the packetInList resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def SendPause(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the sendPause operation on the server.
Pause Sending PacketIn
sendPause(Arg2=list, async_operation=bool)list
----------------------------------------------
- Arg2 (list(number)): List of PacketIn.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('sendPause', payload=payload, response_object=None)
def SendStart(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the sendStart operation on the server.
Start Sending PacketIn
sendStart(Arg2=list, async_operation=bool)list
----------------------------------------------
- Arg2 (list(number)): List of PacketIn.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('sendStart', payload=payload, response_object=None)
def SendStop(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the sendStop operation on the server.
Stop Sending PacketIn
sendStop(Arg2=list, async_operation=bool)list
---------------------------------------------
- Arg2 (list(number)): List of PacketIn.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('sendStop', payload=payload, response_object=None)
def get_device_ids(self, PortNames=None, AuxiliaryId=None, FlowTable=None, InPort=None, PacketInName=None, PhysicalInPort=None, SendPacketIn=None):
"""Base class infrastructure that gets a list of packetInList device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- AuxiliaryId (str): optional regex of auxiliaryId
- FlowTable (str): optional regex of flowTable
- InPort (str): optional regex of inPort
- PacketInName (str): optional regex of packetInName
- PhysicalInPort (str): optional regex of physicalInPort
- SendPacketIn (str): optional regex of sendPacketIn
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
| 40.468278
| 193
| 0.642255
| 12,161
| 0.907876
| 0
| 0
| 3,618
| 0.270101
| 0
| 0
| 9,597
| 0.716461
|
a173546fb4be8c1b52e29b792d62de5b919bbc8f
| 97
|
py
|
Python
|
Python/Phani.py
|
baroood/Hacktoberfest-2k17
|
87383df4bf705358866a5a4120dd678a3f2acd3e
|
[
"MIT"
] | 28
|
2017-10-04T19:42:26.000Z
|
2021-03-26T04:00:48.000Z
|
Python/Phani.py
|
baroood/Hacktoberfest-2k17
|
87383df4bf705358866a5a4120dd678a3f2acd3e
|
[
"MIT"
] | 375
|
2017-09-28T02:58:37.000Z
|
2019-10-31T09:10:38.000Z
|
Python/Phani.py
|
baroood/Hacktoberfest-2k17
|
87383df4bf705358866a5a4120dd678a3f2acd3e
|
[
"MIT"
] | 519
|
2017-09-28T02:40:29.000Z
|
2021-02-15T08:29:17.000Z
|
a = int(input("Enter the first number"))
b = int(input("Enter the second number"))
print('the sum is', a + b)
| 24.25
| 36
| 0.680412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 61
| 0.628866
|
a1735e027f0563b68478c5ef69b57c79d02303e9
| 1,108
|
py
|
Python
|
servicecatalog_factory/constants_test.py
|
micwha/aws-service-catalog-factory
|
c50a922d64e3d47fd56dbe261d841d81f872f0fb
|
[
"Apache-2.0"
] | null | null | null |
servicecatalog_factory/constants_test.py
|
micwha/aws-service-catalog-factory
|
c50a922d64e3d47fd56dbe261d841d81f872f0fb
|
[
"Apache-2.0"
] | null | null | null |
servicecatalog_factory/constants_test.py
|
micwha/aws-service-catalog-factory
|
c50a922d64e3d47fd56dbe261d841d81f872f0fb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from pytest import fixture
@fixture
def sut():
from servicecatalog_factory import constants
return constants
def test_bootstrap_stack_name(sut):
# setup
expected_result = "servicecatalog-factory"
# execute
# verify
assert sut.BOOTSTRAP_STACK_NAME == expected_result
def test_service_catalog_factory_repo_name(sut):
# setup
expected_result = "ServiceCatalogFactory"
# execute
# verify
assert sut.SERVICE_CATALOG_FACTORY_REPO_NAME == expected_result
def test_non_recoverable_states(sut):
# setup
expected_result = [
"ROLLBACK_COMPLETE",
"CREATE_IN_PROGRESS",
"ROLLBACK_IN_PROGRESS",
"DELETE_IN_PROGRESS",
"UPDATE_IN_PROGRESS",
"UPDATE_COMPLETE_CLEANUP_IN_PROGRESS",
"UPDATE_ROLLBACK_IN_PROGRESS",
"UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS",
"REVIEW_IN_PROGRESS",
]
# execute
# verify
assert sut.NON_RECOVERABLE_STATES == expected_result
| 22.612245
| 73
| 0.712094
| 0
| 0
| 0
| 0
| 90
| 0.081227
| 0
| 0
| 462
| 0.416968
|
a173f091bd6a84a9640f8e5bfa3ab824665803fb
| 1,038
|
py
|
Python
|
django/contrib/contenttypes/tests/models.py
|
benjaoming/django
|
6dbe979b4d9396e1b307c7d27388c97c13beb21c
|
[
"BSD-3-Clause"
] | 2
|
2015-01-21T15:45:07.000Z
|
2015-02-21T02:38:13.000Z
|
env/lib/python2.7/site-packages/django/contrib/contenttypes/tests/models.py
|
luiscarlosgph/nas
|
e5acee61e8bbf12c34785fe971ce7df8dee775d4
|
[
"MIT"
] | 10
|
2019-12-26T17:31:31.000Z
|
2022-03-21T22:17:33.000Z
|
env/lib/python2.7/site-packages/django/contrib/contenttypes/tests/models.py
|
luiscarlosgph/nas
|
e5acee61e8bbf12c34785fe971ce7df8dee775d4
|
[
"MIT"
] | 1
|
2020-05-25T08:55:19.000Z
|
2020-05-25T08:55:19.000Z
|
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.http import urlquote
class ConcreteModel(models.Model):
name = models.CharField(max_length=10)
class ProxyModel(ConcreteModel):
class Meta:
proxy = True
@python_2_unicode_compatible
class FooWithoutUrl(models.Model):
"""
Fake model not defining ``get_absolute_url`` for
ContentTypesTests.test_shortcut_view_without_get_absolute_url()
"""
name = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.name
class FooWithUrl(FooWithoutUrl):
"""
Fake model defining ``get_absolute_url`` for
ContentTypesTests.test_shortcut_view().
"""
def get_absolute_url(self):
return "/users/%s/" % urlquote(self.name)
class FooWithBrokenAbsoluteUrl(FooWithoutUrl):
"""
Fake model defining a ``get_absolute_url`` method containing an error
"""
def get_absolute_url(self):
return "/users/%s/" % self.unknown_field
| 23.590909
| 73
| 0.716763
| 864
| 0.83237
| 0
| 0
| 305
| 0.293834
| 0
| 0
| 345
| 0.33237
|
a174909b1f9a6d386413fccc83ffd4e52629d864
| 75,049
|
py
|
Python
|
tests/unit/utils/test_docker.py
|
springborland/salt
|
bee85e477d57e9a171884e54fefb9a59d0835ed0
|
[
"Apache-2.0"
] | 1
|
2020-04-09T03:25:10.000Z
|
2020-04-09T03:25:10.000Z
|
tests/unit/utils/test_docker.py
|
springborland/salt
|
bee85e477d57e9a171884e54fefb9a59d0835ed0
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/utils/test_docker.py
|
springborland/salt
|
bee85e477d57e9a171884e54fefb9a59d0835ed0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
tests.unit.utils.test_docker
============================
Test the funcs in salt.utils.docker and salt.utils.docker.translate
"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import copy
import functools
import logging
import os
# Import salt libs
import salt.config
import salt.loader
import salt.utils.docker.translate.container
import salt.utils.docker.translate.network
import salt.utils.platform
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
from salt.utils.docker.translate import helpers as translate_helpers
# Import Salt Testing Libs
from tests.support.unit import TestCase
log = logging.getLogger(__name__)
class Assert(object):
def __init__(self, translator):
self.translator = translator
def __call__(self, func):
self.func = func
return functools.wraps(func)(
# pylint: disable=unnecessary-lambda
lambda testcase, *args, **kwargs: self.wrap(testcase, *args, **kwargs)
# pylint: enable=unnecessary-lambda
)
def wrap(self, *args, **kwargs):
raise NotImplementedError
def test_stringlist(self, testcase, name):
alias = self.translator.ALIASES_REVMAP.get(name)
# Using file paths here because "volumes" must be passed through this
# set of assertions and it requires absolute paths.
if salt.utils.platform.is_windows():
data = [r"c:\foo", r"c:\bar", r"c:\baz"]
else:
data = ["/foo", "/bar", "/baz"]
for item in (name, alias):
if item is None:
continue
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, **{item: ",".join(data)}
),
testcase.apply_defaults({name: data}),
)
testcase.assertEqual(
salt.utils.docker.translate_input(self.translator, **{item: data}),
testcase.apply_defaults({name: data}),
)
if name != "volumes":
# Test coercing to string
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, **{item: ["one", 2]}
),
testcase.apply_defaults({name: ["one", "2"]}),
)
if alias is not None:
# Test collision
# sorted() used here because we want to confirm that we discard the
# alias' value and go with the unsorted version.
test_kwargs = {name: data, alias: sorted(data)}
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, ignore_collisions=True, **test_kwargs
),
testcase.apply_defaults({name: test_kwargs[name]}),
)
with testcase.assertRaisesRegex(
CommandExecutionError, "is an alias for.+cannot both be used"
):
salt.utils.docker.translate_input(
self.translator, ignore_collisions=False, **test_kwargs
)
def test_key_value(self, testcase, name, delimiter):
"""
Common logic for key/value pair testing. IP address validation is
turned off here, and must be done separately in the wrapped function.
"""
alias = self.translator.ALIASES_REVMAP.get(name)
expected = {"foo": "bar", "baz": "qux"}
vals = "foo{0}bar,baz{0}qux".format(delimiter)
for item in (name, alias):
if item is None:
continue
for val in (vals, vals.split(",")):
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, validate_ip_addrs=False, **{item: val}
),
testcase.apply_defaults({name: expected}),
)
# Dictionary input
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, validate_ip_addrs=False, **{item: expected}
),
testcase.apply_defaults({name: expected}),
)
# "Dictlist" input from states
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator,
validate_ip_addrs=False,
**{item: [{"foo": "bar"}, {"baz": "qux"}]}
),
testcase.apply_defaults({name: expected}),
)
if alias is not None:
# Test collision
test_kwargs = {name: vals, alias: "hello{0}world".format(delimiter)}
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator,
validate_ip_addrs=False,
ignore_collisions=True,
**test_kwargs
),
testcase.apply_defaults({name: expected}),
)
with testcase.assertRaisesRegex(
CommandExecutionError, "is an alias for.+cannot both be used"
):
salt.utils.docker.translate_input(
self.translator,
validate_ip_addrs=False,
ignore_collisions=False,
**test_kwargs
)
class assert_bool(Assert):
"""
Test a boolean value
"""
def wrap(self, testcase, *args, **kwargs): # pylint: disable=arguments-differ
# Strip off the "test_" from the function name
name = self.func.__name__[5:]
alias = self.translator.ALIASES_REVMAP.get(name)
for item in (name, alias):
if item is None:
continue
testcase.assertEqual(
salt.utils.docker.translate_input(self.translator, **{item: True}),
testcase.apply_defaults({name: True}),
)
# These two are contrived examples, but they will test bool-ifying
# a non-bool value to ensure proper input format.
testcase.assertEqual(
salt.utils.docker.translate_input(self.translator, **{item: "foo"}),
testcase.apply_defaults({name: True}),
)
testcase.assertEqual(
salt.utils.docker.translate_input(self.translator, **{item: 0}),
testcase.apply_defaults({name: False}),
)
if alias is not None:
# Test collision
test_kwargs = {name: True, alias: False}
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, ignore_collisions=True, **test_kwargs
),
testcase.apply_defaults({name: test_kwargs[name]}),
)
with testcase.assertRaisesRegex(
CommandExecutionError, "is an alias for.+cannot both be used"
):
salt.utils.docker.translate_input(
self.translator, ignore_collisions=False, **test_kwargs
)
return self.func(testcase, *args, **kwargs)
class assert_int(Assert):
"""
Test an integer value
"""
def wrap(self, testcase, *args, **kwargs): # pylint: disable=arguments-differ
# Strip off the "test_" from the function name
name = self.func.__name__[5:]
alias = self.translator.ALIASES_REVMAP.get(name)
for item in (name, alias):
if item is None:
continue
for val in (100, "100"):
testcase.assertEqual(
salt.utils.docker.translate_input(self.translator, **{item: val}),
testcase.apply_defaults({name: 100}),
)
# Error case: non-numeric value passed
with testcase.assertRaisesRegex(
CommandExecutionError, "'foo' is not an integer"
):
salt.utils.docker.translate_input(self.translator, **{item: "foo"})
if alias is not None:
# Test collision
test_kwargs = {name: 100, alias: 200}
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, ignore_collisions=True, **test_kwargs
),
testcase.apply_defaults({name: test_kwargs[name]}),
)
with testcase.assertRaisesRegex(
CommandExecutionError, "is an alias for.+cannot both be used"
):
salt.utils.docker.translate_input(
self.translator, ignore_collisions=False, **test_kwargs
)
return self.func(testcase, *args, **kwargs)
class assert_string(Assert):
"""
Test that item is a string or is converted to one
"""
def wrap(self, testcase, *args, **kwargs): # pylint: disable=arguments-differ
# Strip off the "test_" from the function name
name = self.func.__name__[5:]
alias = self.translator.ALIASES_REVMAP.get(name)
# Using file paths here because "working_dir" must be passed through
# this set of assertions and it requires absolute paths.
if salt.utils.platform.is_windows():
data = r"c:\foo"
else:
data = "/foo"
for item in (name, alias):
if item is None:
continue
testcase.assertEqual(
salt.utils.docker.translate_input(self.translator, **{item: data}),
testcase.apply_defaults({name: data}),
)
if name != "working_dir":
# Test coercing to string
testcase.assertEqual(
salt.utils.docker.translate_input(self.translator, **{item: 123}),
testcase.apply_defaults({name: "123"}),
)
if alias is not None:
# Test collision
test_kwargs = {name: data, alias: data}
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, ignore_collisions=True, **test_kwargs
),
testcase.apply_defaults({name: test_kwargs[name]}),
)
with testcase.assertRaisesRegex(
CommandExecutionError, "is an alias for.+cannot both be used"
):
salt.utils.docker.translate_input(
self.translator, ignore_collisions=False, **test_kwargs
)
return self.func(testcase, *args, **kwargs)
class assert_int_or_string(Assert):
"""
Test an integer or string value
"""
def wrap(self, testcase, *args, **kwargs): # pylint: disable=arguments-differ
# Strip off the "test_" from the function name
name = self.func.__name__[5:]
alias = self.translator.ALIASES_REVMAP.get(name)
for item in (name, alias):
if item is None:
continue
testcase.assertEqual(
salt.utils.docker.translate_input(self.translator, **{item: 100}),
testcase.apply_defaults({name: 100}),
)
testcase.assertEqual(
salt.utils.docker.translate_input(self.translator, **{item: "100M"}),
testcase.apply_defaults({name: "100M"}),
)
if alias is not None:
# Test collision
test_kwargs = {name: 100, alias: "100M"}
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, ignore_collisions=True, **test_kwargs
),
testcase.apply_defaults({name: test_kwargs[name]}),
)
with testcase.assertRaisesRegex(
CommandExecutionError, "is an alias for.+cannot both be used"
):
salt.utils.docker.translate_input(
self.translator, ignore_collisions=False, **test_kwargs
)
return self.func(testcase, *args, **kwargs)
class assert_stringlist(Assert):
"""
Test a comma-separated or Python list of strings
"""
def wrap(self, testcase, *args, **kwargs): # pylint: disable=arguments-differ
# Strip off the "test_" from the function name
name = self.func.__name__[5:]
self.test_stringlist(testcase, name)
return self.func(testcase, *args, **kwargs)
class assert_dict(Assert):
"""
Dictionaries should be untouched, dictlists should be repacked and end up
as a single dictionary.
"""
def wrap(self, testcase, *args, **kwargs): # pylint: disable=arguments-differ
# Strip off the "test_" from the function name
name = self.func.__name__[5:]
alias = self.translator.ALIASES_REVMAP.get(name)
expected = {"foo": "bar", "baz": "qux"}
for item in (name, alias):
if item is None:
continue
testcase.assertEqual(
salt.utils.docker.translate_input(self.translator, **{item: expected}),
testcase.apply_defaults({name: expected}),
)
# "Dictlist" input from states
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator,
**{item: [{x: y} for x, y in six.iteritems(expected)]}
),
testcase.apply_defaults({name: expected}),
)
# Error case: non-dictionary input
with testcase.assertRaisesRegex(
CommandExecutionError, "'foo' is not a dictionary"
):
salt.utils.docker.translate_input(self.translator, **{item: "foo"})
if alias is not None:
# Test collision
test_kwargs = {name: "foo", alias: "bar"}
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, ignore_collisions=True, **test_kwargs
),
testcase.apply_defaults({name: test_kwargs[name]}),
)
with testcase.assertRaisesRegex(
CommandExecutionError, "is an alias for.+cannot both be used"
):
salt.utils.docker.translate_input(
self.translator, ignore_collisions=False, **test_kwargs
)
return self.func(testcase, *args, **kwargs)
class assert_cmd(Assert):
"""
Test for a string, or a comma-separated or Python list of strings. This is
different from a stringlist in that we do not do any splitting. This
decorator is used both by the "command" and "entrypoint" arguments.
"""
def wrap(self, testcase, *args, **kwargs): # pylint: disable=arguments-differ
# Strip off the "test_" from the function name
name = self.func.__name__[5:]
alias = self.translator.ALIASES_REVMAP.get(name)
for item in (name, alias):
if item is None:
continue
testcase.assertEqual(
salt.utils.docker.translate_input(self.translator, **{item: "foo bar"}),
testcase.apply_defaults({name: "foo bar"}),
)
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, **{item: ["foo", "bar"]}
),
testcase.apply_defaults({name: ["foo", "bar"]}),
)
# Test coercing to string
testcase.assertEqual(
salt.utils.docker.translate_input(self.translator, **{item: 123}),
testcase.apply_defaults({name: "123"}),
)
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, **{item: ["one", 2]}
),
testcase.apply_defaults({name: ["one", "2"]}),
)
if alias is not None:
# Test collision
test_kwargs = {name: "foo", alias: "bar"}
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, ignore_collisions=True, **test_kwargs
),
testcase.apply_defaults({name: test_kwargs[name]}),
)
with testcase.assertRaisesRegex(
CommandExecutionError, "is an alias for.+cannot both be used"
):
salt.utils.docker.translate_input(
self.translator, ignore_collisions=False, **test_kwargs
)
return self.func(testcase, *args, **kwargs)
class assert_key_colon_value(Assert):
"""
Test a key/value pair with parameters passed as key:value pairs
"""
def wrap(self, testcase, *args, **kwargs): # pylint: disable=arguments-differ
# Strip off the "test_" from the function name
name = self.func.__name__[5:]
self.test_key_value(testcase, name, ":")
return self.func(testcase, *args, **kwargs)
class assert_key_equals_value(Assert):
"""
Test a key/value pair with parameters passed as key=value pairs
"""
def wrap(self, testcase, *args, **kwargs): # pylint: disable=arguments-differ
# Strip off the "test_" from the function name
name = self.func.__name__[5:]
self.test_key_value(testcase, name, "=")
if name == "labels":
self.test_stringlist(testcase, name)
return self.func(testcase, *args, **kwargs)
class assert_labels(Assert):
def wrap(self, testcase, *args, **kwargs): # pylint: disable=arguments-differ
# Strip off the "test_" from the function name
name = self.func.__name__[5:]
alias = self.translator.ALIASES_REVMAP.get(name)
labels = ["foo", "bar=baz", {"hello": "world"}]
expected = {"foo": "", "bar": "baz", "hello": "world"}
for item in (name, alias):
if item is None:
continue
testcase.assertEqual(
salt.utils.docker.translate_input(self.translator, **{item: labels}),
testcase.apply_defaults({name: expected}),
)
# Error case: Passed a mutli-element dict in dictlist
bad_labels = copy.deepcopy(labels)
bad_labels[-1]["bad"] = "input"
with testcase.assertRaisesRegex(
CommandExecutionError, r"Invalid label\(s\)"
):
salt.utils.docker.translate_input(self.translator, **{item: bad_labels})
return self.func(testcase, *args, **kwargs)
class assert_device_rates(Assert):
"""
Tests for device_{read,write}_{bps,iops}. The bps values have a "Rate"
value expressed in bytes/kb/mb/gb, while the iops values have a "Rate"
expressed as a simple integer.
"""
def wrap(self, testcase, *args, **kwargs): # pylint: disable=arguments-differ
# Strip off the "test_" from the function name
name = self.func.__name__[5:]
alias = self.translator.ALIASES_REVMAP.get(name)
for item in (name, alias):
if item is None:
continue
# Error case: Not an absolute path
path = os.path.join("foo", "bar", "baz")
with testcase.assertRaisesRegex(
CommandExecutionError,
"Path '{0}' is not absolute".format(path.replace("\\", "\\\\")),
):
salt.utils.docker.translate_input(
self.translator, **{item: "{0}:1048576".format(path)}
)
if name.endswith("_bps"):
# Both integer bytes and a string providing a shorthand for kb,
# mb, or gb can be used, so we need to test for both.
expected = ({}, [])
vals = "/dev/sda:1048576,/dev/sdb:1048576"
for val in (vals, vals.split(",")):
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, **{item: val}
),
testcase.apply_defaults(
{
name: [
{"Path": "/dev/sda", "Rate": 1048576},
{"Path": "/dev/sdb", "Rate": 1048576},
]
}
),
)
vals = "/dev/sda:1mb,/dev/sdb:5mb"
for val in (vals, vals.split(",")):
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, **{item: val}
),
testcase.apply_defaults(
{
name: [
{"Path": "/dev/sda", "Rate": "1mb"},
{"Path": "/dev/sdb", "Rate": "5mb"},
]
}
),
)
if alias is not None:
# Test collision
test_kwargs = {
name: "/dev/sda:1048576,/dev/sdb:1048576",
alias: "/dev/sda:1mb,/dev/sdb:5mb",
}
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, ignore_collisions=True, **test_kwargs
),
testcase.apply_defaults(
{
name: [
{"Path": "/dev/sda", "Rate": 1048576},
{"Path": "/dev/sdb", "Rate": 1048576},
]
}
),
)
with testcase.assertRaisesRegex(
CommandExecutionError, "is an alias for.+cannot both be used"
):
salt.utils.docker.translate_input(
self.translator, ignore_collisions=False, **test_kwargs
)
else:
# The "Rate" value must be an integer
vals = "/dev/sda:1000,/dev/sdb:500"
for val in (vals, vals.split(",")):
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, **{item: val}
),
testcase.apply_defaults(
{
name: [
{"Path": "/dev/sda", "Rate": 1000},
{"Path": "/dev/sdb", "Rate": 500},
]
}
),
)
# Test non-integer input
expected = (
{},
{item: "Rate '5mb' for path '/dev/sdb' is non-numeric"},
[],
)
vals = "/dev/sda:1000,/dev/sdb:5mb"
for val in (vals, vals.split(",")):
with testcase.assertRaisesRegex(
CommandExecutionError,
"Rate '5mb' for path '/dev/sdb' is non-numeric",
):
salt.utils.docker.translate_input(
self.translator, **{item: val}
)
if alias is not None:
# Test collision
test_kwargs = {
name: "/dev/sda:1000,/dev/sdb:500",
alias: "/dev/sda:888,/dev/sdb:999",
}
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, ignore_collisions=True, **test_kwargs
),
testcase.apply_defaults(
{
name: [
{"Path": "/dev/sda", "Rate": 1000},
{"Path": "/dev/sdb", "Rate": 500},
]
}
),
)
with testcase.assertRaisesRegex(
CommandExecutionError, "is an alias for.+cannot both be used"
):
salt.utils.docker.translate_input(
self.translator, ignore_collisions=False, **test_kwargs
)
return self.func(testcase, *args, **kwargs)
class assert_subnet(Assert):
"""
Test an IPv4 or IPv6 subnet
"""
def wrap(self, testcase, *args, **kwargs): # pylint: disable=arguments-differ
# Strip off the "test_" from the function name
name = self.func.__name__[5:]
alias = self.translator.ALIASES_REVMAP.get(name)
for item in (name, alias):
if item is None:
continue
for val in ("127.0.0.1/32", "::1/128"):
log.debug("Verifying '%s' is a valid subnet", val)
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, validate_ip_addrs=True, **{item: val}
),
testcase.apply_defaults({name: val}),
)
# Error case: invalid subnet caught by validation
for val in (
"127.0.0.1",
"999.999.999.999/24",
"10.0.0.0/33",
"::1",
"feaz::1/128",
"::1/129",
):
log.debug("Verifying '%s' is not a valid subnet", val)
with testcase.assertRaisesRegex(
CommandExecutionError, "'{0}' is not a valid subnet".format(val)
):
salt.utils.docker.translate_input(
self.translator, validate_ip_addrs=True, **{item: val}
)
# This is not valid input but it will test whether or not subnet
# validation happened
val = "foo"
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, validate_ip_addrs=False, **{item: val}
),
testcase.apply_defaults({name: val}),
)
if alias is not None:
# Test collision
test_kwargs = {name: "10.0.0.0/24", alias: "192.168.50.128/25"}
testcase.assertEqual(
salt.utils.docker.translate_input(
self.translator, ignore_collisions=True, **test_kwargs
),
testcase.apply_defaults({name: test_kwargs[name]}),
)
with testcase.assertRaisesRegex(
CommandExecutionError, "is an alias for.+cannot both be used"
):
salt.utils.docker.translate_input(
self.translator, ignore_collisions=False, **test_kwargs
)
return self.func(testcase, *args, **kwargs)
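# ---------------------------------------------------------------------------
# Added illustration (not part of the original salt test module): every Assert
# decorator above ultimately exercises salt.utils.docker.translate_input(). A
# minimal standalone call, assuming the same salt version as this file, looks
# like the sketch below; the translator's DEFAULTS mapping is merged into the
# returned dict, so the exact output depends on that version.
def _translate_input_example():
    return salt.utils.docker.translate_input(
        salt.utils.docker.translate.container,
        mem_limit="512M",           # accepted as an int or a string
        environment=["FOO=bar"],    # repacked into {'FOO': 'bar'}
        validate_ip_addrs=False,
    )
# ---------------------------------------------------------------------------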
class TranslateBase(TestCase):
maxDiff = None
translator = None # Must be overridden in the subclass
def apply_defaults(self, ret, skip_translate=None):
if skip_translate is not True:
defaults = getattr(self.translator, "DEFAULTS", {})
for key, val in six.iteritems(defaults):
if key not in ret:
ret[key] = val
return ret
@staticmethod
def normalize_ports(ret):
"""
When we translate exposed ports, we can end up with a mixture of ints
(representing TCP ports) and tuples (representing UDP ports). Python 2
will sort an iterable containing these mixed types, but Python 3 will
not. This helper is used to munge the ports in the return data so that
the resulting list is sorted in a way that can reliably be compared to
the expected results in the test.
This helper should only be needed for port_bindings and ports.
"""
if "ports" in ret[0]:
tcp_ports = []
udp_ports = []
for item in ret[0]["ports"]:
if isinstance(item, six.integer_types):
tcp_ports.append(item)
else:
udp_ports.append(item)
ret[0]["ports"] = sorted(tcp_ports) + sorted(udp_ports)
return ret
def tearDown(self):
"""
Test skip_translate kwarg
"""
name = self.id().split(".")[-1][5:]
# The below is not valid input for the Docker API, but these
# assertions confirm that we successfully skipped translation.
for val in (True, name, [name]):
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, skip_translate=val, **{name: "foo"}
),
self.apply_defaults({name: "foo"}, skip_translate=val),
)
class TranslateContainerInputTestCase(TranslateBase):
"""
Tests for salt.utils.docker.translate_input(), invoked using
salt.utils.docker.translate.container as the translator module.
"""
translator = salt.utils.docker.translate.container
@staticmethod
def normalize_ports(ret):
"""
When we translate exposed ports, we can end up with a mixture of ints
(representing TCP ports) and tuples (representing UDP ports). Python 2
will sort an iterable containing these mixed types, but Python 3 will
not. This helper is used to munge the ports in the return data so that
the resulting list is sorted in a way that can reliably be compared to
the expected results in the test.
This helper should only be needed for port_bindings and ports.
"""
if "ports" in ret:
tcp_ports = []
udp_ports = []
for item in ret["ports"]:
if isinstance(item, six.integer_types):
tcp_ports.append(item)
else:
udp_ports.append(item)
ret["ports"] = sorted(tcp_ports) + sorted(udp_ports)
return ret
@assert_bool(salt.utils.docker.translate.container)
def test_auto_remove(self):
"""
Should be a bool or converted to one
"""
def test_binds(self):
"""
Test the "binds" kwarg. Any volumes not defined in the "volumes" kwarg
should be added to the results.
"""
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, binds="/srv/www:/var/www:ro", volumes="/testing"
),
{"binds": ["/srv/www:/var/www:ro"], "volumes": ["/testing", "/var/www"]},
)
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, binds=["/srv/www:/var/www:ro"], volumes="/testing"
),
{"binds": ["/srv/www:/var/www:ro"], "volumes": ["/testing", "/var/www"]},
)
self.assertEqual(
salt.utils.docker.translate_input(
self.translator,
binds={"/srv/www": {"bind": "/var/www", "mode": "ro"}},
volumes="/testing",
),
{
"binds": {"/srv/www": {"bind": "/var/www", "mode": "ro"}},
"volumes": ["/testing", "/var/www"],
},
)
@assert_int(salt.utils.docker.translate.container)
def test_blkio_weight(self):
"""
Should be an int or converted to one
"""
def test_blkio_weight_device(self):
"""
Should translate a list of PATH:WEIGHT pairs to a list of dictionaries
with the following format: {'Path': PATH, 'Weight': WEIGHT}
"""
for val in ("/dev/sda:100,/dev/sdb:200", ["/dev/sda:100", "/dev/sdb:200"]):
self.assertEqual(
salt.utils.docker.translate_input(
                    self.translator, blkio_weight_device=val
),
{
"blkio_weight_device": [
{"Path": "/dev/sda", "Weight": 100},
{"Path": "/dev/sdb", "Weight": 200},
]
},
)
# Error cases
with self.assertRaisesRegex(
CommandExecutionError, r"'foo' contains 1 value\(s\) \(expected 2\)"
):
salt.utils.docker.translate_input(
self.translator, blkio_weight_device="foo"
)
with self.assertRaisesRegex(
CommandExecutionError, r"'foo:bar:baz' contains 3 value\(s\) \(expected 2\)"
):
salt.utils.docker.translate_input(
self.translator, blkio_weight_device="foo:bar:baz"
)
with self.assertRaisesRegex(
CommandExecutionError, r"Weight 'foo' for path '/dev/sdb' is not an integer"
):
salt.utils.docker.translate_input(
self.translator, blkio_weight_device=["/dev/sda:100", "/dev/sdb:foo"]
)
@assert_stringlist(salt.utils.docker.translate.container)
def test_cap_add(self):
"""
Should be a list of strings or converted to one
"""
@assert_stringlist(salt.utils.docker.translate.container)
def test_cap_drop(self):
"""
Should be a list of strings or converted to one
"""
@assert_cmd(salt.utils.docker.translate.container)
def test_command(self):
"""
Can either be a string or a comma-separated or Python list of strings.
"""
@assert_string(salt.utils.docker.translate.container)
def test_cpuset_cpus(self):
"""
Should be a string or converted to one
"""
@assert_string(salt.utils.docker.translate.container)
def test_cpuset_mems(self):
"""
Should be a string or converted to one
"""
@assert_int(salt.utils.docker.translate.container)
def test_cpu_group(self):
"""
Should be an int or converted to one
"""
@assert_int(salt.utils.docker.translate.container)
def test_cpu_period(self):
"""
Should be an int or converted to one
"""
@assert_int(salt.utils.docker.translate.container)
def test_cpu_shares(self):
"""
Should be an int or converted to one
"""
@assert_bool(salt.utils.docker.translate.container)
def test_detach(self):
"""
Should be a bool or converted to one
"""
@assert_device_rates(salt.utils.docker.translate.container)
def test_device_read_bps(self):
"""
CLI input is a list of PATH:RATE pairs, but the API expects a list of
dictionaries in the format [{'Path': path, 'Rate': rate}]
"""
@assert_device_rates(salt.utils.docker.translate.container)
def test_device_read_iops(self):
"""
CLI input is a list of PATH:RATE pairs, but the API expects a list of
dictionaries in the format [{'Path': path, 'Rate': rate}]
"""
@assert_device_rates(salt.utils.docker.translate.container)
def test_device_write_bps(self):
"""
CLI input is a list of PATH:RATE pairs, but the API expects a list of
dictionaries in the format [{'Path': path, 'Rate': rate}]
"""
@assert_device_rates(salt.utils.docker.translate.container)
def test_device_write_iops(self):
"""
CLI input is a list of PATH:RATE pairs, but the API expects a list of
dictionaries in the format [{'Path': path, 'Rate': rate}]
"""
@assert_stringlist(salt.utils.docker.translate.container)
def test_devices(self):
"""
Should be a list of strings or converted to one
"""
@assert_stringlist(salt.utils.docker.translate.container)
def test_dns_opt(self):
"""
Should be a list of strings or converted to one
"""
@assert_stringlist(salt.utils.docker.translate.container)
def test_dns_search(self):
"""
Should be a list of strings or converted to one
"""
def test_dns(self):
"""
While this is a stringlist, it also supports IP address validation, so
it can't use the test_stringlist decorator because we need to test both
with and without validation, and it isn't necessary to make all other
stringlist tests also do that same kind of testing.
"""
for val in ("8.8.8.8,8.8.4.4", ["8.8.8.8", "8.8.4.4"]):
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, dns=val, validate_ip_addrs=True,
),
{"dns": ["8.8.8.8", "8.8.4.4"]},
)
# Error case: invalid IP address caught by validation
for val in ("8.8.8.888,8.8.4.4", ["8.8.8.888", "8.8.4.4"]):
with self.assertRaisesRegex(
CommandExecutionError, r"'8.8.8.888' is not a valid IP address"
):
salt.utils.docker.translate_input(
self.translator, dns=val, validate_ip_addrs=True,
)
# This is not valid input but it will test whether or not IP address
# validation happened.
for val in ("foo,bar", ["foo", "bar"]):
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, dns=val, validate_ip_addrs=False,
),
{"dns": ["foo", "bar"]},
)
@assert_string(salt.utils.docker.translate.container)
def test_domainname(self):
"""
        Should be a string or converted to one
"""
@assert_cmd(salt.utils.docker.translate.container)
def test_entrypoint(self):
"""
Can either be a string or a comma-separated or Python list of strings.
"""
@assert_key_equals_value(salt.utils.docker.translate.container)
def test_environment(self):
"""
Can be passed in several formats but must end up as a dictionary
mapping keys to values
"""
def test_extra_hosts(self):
"""
Can be passed as a list of key:value pairs but can't be simply tested
using @assert_key_colon_value since we need to test both with and without
IP address validation.
"""
for val in ("web1:10.9.8.7,web2:10.9.8.8", ["web1:10.9.8.7", "web2:10.9.8.8"]):
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, extra_hosts=val, validate_ip_addrs=True,
),
{"extra_hosts": {"web1": "10.9.8.7", "web2": "10.9.8.8"}},
)
# Error case: invalid IP address caught by validation
for val in (
"web1:10.9.8.299,web2:10.9.8.8",
["web1:10.9.8.299", "web2:10.9.8.8"],
):
with self.assertRaisesRegex(
CommandExecutionError, r"'10.9.8.299' is not a valid IP address"
):
salt.utils.docker.translate_input(
self.translator, extra_hosts=val, validate_ip_addrs=True,
)
# This is not valid input but it will test whether or not IP address
# validation happened.
for val in ("foo:bar,baz:qux", ["foo:bar", "baz:qux"]):
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, extra_hosts=val, validate_ip_addrs=False,
),
{"extra_hosts": {"foo": "bar", "baz": "qux"}},
)
@assert_stringlist(salt.utils.docker.translate.container)
def test_group_add(self):
"""
Should be a list of strings or converted to one
"""
@assert_string(salt.utils.docker.translate.container)
def test_hostname(self):
"""
Should be a string or converted to one
"""
@assert_string(salt.utils.docker.translate.container)
def test_ipc_mode(self):
"""
Should be a string or converted to one
"""
@assert_string(salt.utils.docker.translate.container)
def test_isolation(self):
"""
Should be a string or converted to one
"""
@assert_labels(salt.utils.docker.translate.container)
def test_labels(self):
"""
Can be passed as a list of key=value pairs or a dictionary, and must
ultimately end up as a dictionary.
"""
@assert_key_colon_value(salt.utils.docker.translate.container)
def test_links(self):
"""
Can be passed as a list of key:value pairs or a dictionary, and must
ultimately end up as a dictionary.
"""
def test_log_config(self):
"""
This is a mixture of log_driver and log_opt, which get combined into a
dictionary.
log_driver is a simple string, but log_opt can be passed in several
ways, so we need to test them all.
"""
        for val in (
            "foo=bar,baz=qux",
            ["foo=bar", "baz=qux"],
            [{"foo": "bar"}, {"baz": "qux"}],
            {"foo": "bar", "baz": "qux"},
        ):
            self.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, log_driver="foo", log_opt=val
                ),
                {"log_config": {"Type": "foo", "Config": {"foo": "bar", "baz": "qux"}}},
            )
# Ensure passing either `log_driver` or `log_opt` alone works
self.assertEqual(
salt.utils.docker.translate_input(self.translator, log_driver="foo"),
{"log_config": {"Type": "foo", "Config": {}}},
)
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, log_opt={"foo": "bar", "baz": "qux"}
),
{"log_config": {"Type": "none", "Config": {"foo": "bar", "baz": "qux"}}},
)
@assert_key_equals_value(salt.utils.docker.translate.container)
def test_lxc_conf(self):
"""
Can be passed as a list of key=value pairs or a dictionary, and must
ultimately end up as a dictionary.
"""
@assert_string(salt.utils.docker.translate.container)
def test_mac_address(self):
"""
Should be a string or converted to one
"""
@assert_int_or_string(salt.utils.docker.translate.container)
def test_mem_limit(self):
"""
        Should be an int or a string, or converted to one
"""
@assert_int(salt.utils.docker.translate.container)
def test_mem_swappiness(self):
"""
Should be an int or converted to one
"""
@assert_int_or_string(salt.utils.docker.translate.container)
def test_memswap_limit(self):
"""
        Should be an int or a string, or converted to one
"""
@assert_string(salt.utils.docker.translate.container)
def test_name(self):
"""
Should be a string or converted to one
"""
@assert_bool(salt.utils.docker.translate.container)
def test_network_disabled(self):
"""
Should be a bool or converted to one
"""
@assert_string(salt.utils.docker.translate.container)
def test_network_mode(self):
"""
Should be a string or converted to one
"""
@assert_bool(salt.utils.docker.translate.container)
def test_oom_kill_disable(self):
"""
Should be a bool or converted to one
"""
@assert_int(salt.utils.docker.translate.container)
def test_oom_score_adj(self):
"""
Should be an int or converted to one
"""
@assert_string(salt.utils.docker.translate.container)
def test_pid_mode(self):
"""
Should be a string or converted to one
"""
@assert_int(salt.utils.docker.translate.container)
def test_pids_limit(self):
"""
Should be an int or converted to one
"""
def test_port_bindings(self):
"""
This has several potential formats and can include port ranges. It
needs its own test.
"""
# ip:hostPort:containerPort - Bind a specific IP and port on the host
# to a specific port within the container.
bindings = (
"10.1.2.3:8080:80,10.1.2.3:8888:80,10.4.5.6:3333:3333,"
"10.7.8.9:14505-14506:4505-4506,10.1.2.3:8080:81/udp,"
"10.1.2.3:8888:81/udp,10.4.5.6:3334:3334/udp,"
"10.7.8.9:15505-15506:5505-5506/udp"
)
for val in (bindings, bindings.split(",")):
self.assertEqual(
self.normalize_ports(
salt.utils.docker.translate_input(
self.translator, port_bindings=val,
)
),
{
"port_bindings": {
80: [("10.1.2.3", 8080), ("10.1.2.3", 8888)],
3333: ("10.4.5.6", 3333),
4505: ("10.7.8.9", 14505),
4506: ("10.7.8.9", 14506),
"81/udp": [("10.1.2.3", 8080), ("10.1.2.3", 8888)],
"3334/udp": ("10.4.5.6", 3334),
"5505/udp": ("10.7.8.9", 15505),
"5506/udp": ("10.7.8.9", 15506),
},
"ports": [
80,
3333,
4505,
4506,
(81, "udp"),
(3334, "udp"),
(5505, "udp"),
(5506, "udp"),
],
},
)
# ip::containerPort - Bind a specific IP and an ephemeral port to a
# specific port within the container.
bindings = (
"10.1.2.3::80,10.1.2.3::80,10.4.5.6::3333,10.7.8.9::4505-4506,"
"10.1.2.3::81/udp,10.1.2.3::81/udp,10.4.5.6::3334/udp,"
"10.7.8.9::5505-5506/udp"
)
for val in (bindings, bindings.split(",")):
self.assertEqual(
self.normalize_ports(
salt.utils.docker.translate_input(
self.translator, port_bindings=val,
)
),
{
"port_bindings": {
80: [("10.1.2.3",), ("10.1.2.3",)],
3333: ("10.4.5.6",),
4505: ("10.7.8.9",),
4506: ("10.7.8.9",),
"81/udp": [("10.1.2.3",), ("10.1.2.3",)],
"3334/udp": ("10.4.5.6",),
"5505/udp": ("10.7.8.9",),
"5506/udp": ("10.7.8.9",),
},
"ports": [
80,
3333,
4505,
4506,
(81, "udp"),
(3334, "udp"),
(5505, "udp"),
(5506, "udp"),
],
},
)
# hostPort:containerPort - Bind a specific port on all of the host's
# interfaces to a specific port within the container.
bindings = (
"8080:80,8888:80,3333:3333,14505-14506:4505-4506,8080:81/udp,"
"8888:81/udp,3334:3334/udp,15505-15506:5505-5506/udp"
)
for val in (bindings, bindings.split(",")):
self.assertEqual(
self.normalize_ports(
salt.utils.docker.translate_input(
self.translator, port_bindings=val,
)
),
{
"port_bindings": {
80: [8080, 8888],
3333: 3333,
4505: 14505,
4506: 14506,
"81/udp": [8080, 8888],
"3334/udp": 3334,
"5505/udp": 15505,
"5506/udp": 15506,
},
"ports": [
80,
3333,
4505,
4506,
(81, "udp"),
(3334, "udp"),
(5505, "udp"),
(5506, "udp"),
],
},
)
# containerPort - Bind an ephemeral port on all of the host's
# interfaces to a specific port within the container.
bindings = "80,3333,4505-4506,81/udp,3334/udp,5505-5506/udp"
for val in (bindings, bindings.split(",")):
self.assertEqual(
self.normalize_ports(
salt.utils.docker.translate_input(
self.translator, port_bindings=val,
)
),
{
"port_bindings": {
80: None,
3333: None,
4505: None,
4506: None,
"81/udp": None,
"3334/udp": None,
"5505/udp": None,
"5506/udp": None,
},
"ports": [
80,
3333,
4505,
4506,
(81, "udp"),
(3334, "udp"),
(5505, "udp"),
(5506, "udp"),
],
},
)
# Test a mixture of different types of input
bindings = (
"10.1.2.3:8080:80,10.4.5.6::3333,14505-14506:4505-4506,"
"9999-10001,10.1.2.3:8080:81/udp,10.4.5.6::3334/udp,"
"15505-15506:5505-5506/udp,19999-20001/udp"
)
for val in (bindings, bindings.split(",")):
self.assertEqual(
self.normalize_ports(
salt.utils.docker.translate_input(
self.translator, port_bindings=val,
)
),
{
"port_bindings": {
80: ("10.1.2.3", 8080),
3333: ("10.4.5.6",),
4505: 14505,
4506: 14506,
9999: None,
10000: None,
10001: None,
"81/udp": ("10.1.2.3", 8080),
"3334/udp": ("10.4.5.6",),
"5505/udp": 15505,
"5506/udp": 15506,
"19999/udp": None,
"20000/udp": None,
"20001/udp": None,
},
"ports": [
80,
3333,
4505,
4506,
9999,
10000,
10001,
(81, "udp"),
(3334, "udp"),
(5505, "udp"),
(5506, "udp"),
(19999, "udp"),
(20000, "udp"),
(20001, "udp"),
],
},
)
# Error case: too many items (max 3)
with self.assertRaisesRegex(
CommandExecutionError,
r"'10.1.2.3:8080:80:123' is an invalid port binding "
r"definition \(at most 3 components are allowed, found 4\)",
):
salt.utils.docker.translate_input(
self.translator, port_bindings="10.1.2.3:8080:80:123"
)
# Error case: port range start is greater than end
for val in (
"10.1.2.3:5555-5554:1111-1112",
"10.1.2.3:1111-1112:5555-5554",
"10.1.2.3::5555-5554",
"5555-5554:1111-1112",
"1111-1112:5555-5554",
"5555-5554",
):
with self.assertRaisesRegex(
CommandExecutionError,
r"Start of port range \(5555\) cannot be greater than end "
r"of port range \(5554\)",
):
salt.utils.docker.translate_input(
self.translator, port_bindings=val,
)
# Error case: non-numeric port range
for val in (
"10.1.2.3:foo:1111-1112",
"10.1.2.3:1111-1112:foo",
"10.1.2.3::foo",
"foo:1111-1112",
"1111-1112:foo",
"foo",
):
with self.assertRaisesRegex(
CommandExecutionError, "'foo' is non-numeric or an invalid port range"
):
salt.utils.docker.translate_input(
self.translator, port_bindings=val,
)
        # Error case: mismatched port range
for val in ("10.1.2.3:1111-1113:1111-1112", "1111-1113:1111-1112"):
with self.assertRaisesRegex(
CommandExecutionError,
r"Host port range \(1111-1113\) does not have the same "
r"number of ports as the container port range \(1111-1112\)",
):
salt.utils.docker.translate_input(self.translator, port_bindings=val)
for val in ("10.1.2.3:1111-1112:1111-1113", "1111-1112:1111-1113"):
with self.assertRaisesRegex(
CommandExecutionError,
r"Host port range \(1111-1112\) does not have the same "
r"number of ports as the container port range \(1111-1113\)",
):
salt.utils.docker.translate_input(
self.translator, port_bindings=val,
)
# Error case: empty host port or container port
with self.assertRaisesRegex(
CommandExecutionError, "Empty host port in port binding definition ':1111'"
):
salt.utils.docker.translate_input(self.translator, port_bindings=":1111")
with self.assertRaisesRegex(
CommandExecutionError,
"Empty container port in port binding definition '1111:'",
):
salt.utils.docker.translate_input(self.translator, port_bindings="1111:")
with self.assertRaisesRegex(
CommandExecutionError, "Empty port binding definition found"
):
salt.utils.docker.translate_input(self.translator, port_bindings="")
def test_ports(self):
"""
Ports can be passed as a comma-separated or Python list of port
numbers, with '/tcp' being optional for TCP ports. They must ultimately
be a list of port definitions, in which an integer denotes a TCP port,
and a tuple in the format (port_num, 'udp') denotes a UDP port. Also,
the port numbers must end up as integers. None of the decorators will
suffice so this one must be tested specially.
"""
for val in (
"1111,2222/tcp,3333/udp,4505-4506",
[1111, "2222/tcp", "3333/udp", "4505-4506"],
["1111", "2222/tcp", "3333/udp", "4505-4506"],
):
self.assertEqual(
self.normalize_ports(
salt.utils.docker.translate_input(self.translator, ports=val,)
),
{"ports": [1111, 2222, 4505, 4506, (3333, "udp")]},
)
        # Error case: non-integer and non-string value
for val in (1.0, [1.0]):
with self.assertRaisesRegex(
CommandExecutionError, "'1.0' is not a valid port definition"
):
salt.utils.docker.translate_input(
self.translator, ports=val,
)
# Error case: port range start is greater than end
with self.assertRaisesRegex(
CommandExecutionError,
r"Start of port range \(5555\) cannot be greater than end of "
r"port range \(5554\)",
):
salt.utils.docker.translate_input(
self.translator, ports="5555-5554",
)
@assert_bool(salt.utils.docker.translate.container)
def test_privileged(self):
"""
Should be a bool or converted to one
"""
@assert_bool(salt.utils.docker.translate.container)
def test_publish_all_ports(self):
"""
Should be a bool or converted to one
"""
@assert_bool(salt.utils.docker.translate.container)
def test_read_only(self):
"""
Should be a bool or converted to one
"""
def test_restart_policy(self):
"""
Input is in the format "name[:retry_count]", but the API wants it
in the format {'Name': name, 'MaximumRetryCount': retry_count}
"""
name = "restart_policy"
alias = "restart"
for item in (name, alias):
# Test with retry count
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, **{item: "on-failure:5"}
),
{name: {"Name": "on-failure", "MaximumRetryCount": 5}},
)
# Test without retry count
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, **{item: "on-failure"}
),
{name: {"Name": "on-failure", "MaximumRetryCount": 0}},
)
# Error case: more than one policy passed
with self.assertRaisesRegex(
CommandExecutionError, "Only one policy is permitted"
):
salt.utils.docker.translate_input(
self.translator, **{item: "on-failure,always"}
)
# Test collision
test_kwargs = {name: "on-failure:5", alias: "always"}
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, ignore_collisions=True, **test_kwargs
),
{name: {"Name": "on-failure", "MaximumRetryCount": 5}},
)
with self.assertRaisesRegex(
CommandExecutionError, "'restart' is an alias for 'restart_policy'"
):
salt.utils.docker.translate_input(
self.translator, ignore_collisions=False, **test_kwargs
)
@assert_stringlist(salt.utils.docker.translate.container)
def test_security_opt(self):
"""
Should be a list of strings or converted to one
"""
@assert_int_or_string(salt.utils.docker.translate.container)
def test_shm_size(self):
"""
        Should be an int or a string, or converted to one
"""
@assert_bool(salt.utils.docker.translate.container)
def test_stdin_open(self):
"""
Should be a bool or converted to one
"""
@assert_string(salt.utils.docker.translate.container)
def test_stop_signal(self):
"""
Should be a string or converted to one
"""
@assert_int(salt.utils.docker.translate.container)
def test_stop_timeout(self):
"""
Should be an int or converted to one
"""
@assert_key_equals_value(salt.utils.docker.translate.container)
def test_storage_opt(self):
"""
Can be passed in several formats but must end up as a dictionary
mapping keys to values
"""
@assert_key_equals_value(salt.utils.docker.translate.container)
def test_sysctls(self):
"""
Can be passed in several formats but must end up as a dictionary
mapping keys to values
"""
@assert_dict(salt.utils.docker.translate.container)
def test_tmpfs(self):
"""
Can be passed in several formats but must end up as a dictionary
mapping keys to values
"""
@assert_bool(salt.utils.docker.translate.container)
def test_tty(self):
"""
Should be a bool or converted to one
"""
def test_ulimits(self):
"""
Input is in the format "name=soft_limit[:hard_limit]", but the API
wants it in the format
{'Name': name, 'Soft': soft_limit, 'Hard': hard_limit}
"""
# Test with and without hard limit
ulimits = "nofile=1024:2048,nproc=50"
for val in (ulimits, ulimits.split(",")):
self.assertEqual(
salt.utils.docker.translate_input(self.translator, ulimits=val,),
{
"ulimits": [
{"Name": "nofile", "Soft": 1024, "Hard": 2048},
{"Name": "nproc", "Soft": 50, "Hard": 50},
]
},
)
# Error case: Invalid format
with self.assertRaisesRegex(
CommandExecutionError,
r"Ulimit definition 'nofile:1024:2048' is not in the format "
r"type=soft_limit\[:hard_limit\]",
):
salt.utils.docker.translate_input(
self.translator, ulimits="nofile:1024:2048"
)
# Error case: Invalid format
with self.assertRaisesRegex(
CommandExecutionError,
r"Limit 'nofile=foo:2048' contains non-numeric value\(s\)",
):
salt.utils.docker.translate_input(
self.translator, ulimits="nofile=foo:2048"
)
def test_user(self):
"""
Must be either username (string) or uid (int). An int passed as a
string (e.g. '0') should be converted to an int.
"""
# Username passed as string
self.assertEqual(
salt.utils.docker.translate_input(self.translator, user="foo"),
{"user": "foo"},
)
for val in (0, "0"):
self.assertEqual(
salt.utils.docker.translate_input(self.translator, user=val),
{"user": 0},
)
# Error case: non string/int passed
with self.assertRaisesRegex(
CommandExecutionError, "Value must be a username or uid"
):
salt.utils.docker.translate_input(self.translator, user=["foo"])
# Error case: negative int passed
with self.assertRaisesRegex(CommandExecutionError, "'-1' is an invalid uid"):
salt.utils.docker.translate_input(self.translator, user=-1)
@assert_string(salt.utils.docker.translate.container)
def test_userns_mode(self):
"""
        Should be a string or converted to one
"""
@assert_string(salt.utils.docker.translate.container)
def test_volume_driver(self):
"""
        Should be a string or converted to one
"""
@assert_stringlist(salt.utils.docker.translate.container)
def test_volumes(self):
"""
Should be a list of absolute paths
"""
# Error case: Not an absolute path
path = os.path.join("foo", "bar", "baz")
with self.assertRaisesRegex(
CommandExecutionError,
"'{0}' is not an absolute path".format(path.replace("\\", "\\\\")),
):
salt.utils.docker.translate_input(self.translator, volumes=path)
@assert_stringlist(salt.utils.docker.translate.container)
def test_volumes_from(self):
"""
Should be a list of strings or converted to one
"""
@assert_string(salt.utils.docker.translate.container)
def test_working_dir(self):
"""
Should be a single absolute path
"""
# Error case: Not an absolute path
path = os.path.join("foo", "bar", "baz")
with self.assertRaisesRegex(
CommandExecutionError,
"'{0}' is not an absolute path".format(path.replace("\\", "\\\\")),
):
salt.utils.docker.translate_input(self.translator, working_dir=path)
class TranslateNetworkInputTestCase(TranslateBase):
"""
Tests for salt.utils.docker.translate_input(), invoked using
salt.utils.docker.translate.network as the translator module.
"""
translator = salt.utils.docker.translate.network
ip_addrs = {
True: ("10.1.2.3", "::1"),
False: ("FOO", "0.9.800.1000", "feaz::1", "aj01::feac"),
}
@assert_string(salt.utils.docker.translate.network)
def test_driver(self):
"""
Should be a string or converted to one
"""
@assert_key_equals_value(salt.utils.docker.translate.network)
def test_options(self):
"""
Can be passed in several formats but must end up as a dictionary
mapping keys to values
"""
@assert_dict(salt.utils.docker.translate.network)
def test_ipam(self):
"""
Must be a dict
"""
@assert_bool(salt.utils.docker.translate.network)
def test_check_duplicate(self):
"""
Should be a bool or converted to one
"""
@assert_bool(salt.utils.docker.translate.network)
def test_internal(self):
"""
Should be a bool or converted to one
"""
@assert_labels(salt.utils.docker.translate.network)
def test_labels(self):
"""
Can be passed as a list of key=value pairs or a dictionary, and must
ultimately end up as a dictionary.
"""
@assert_bool(salt.utils.docker.translate.network)
def test_enable_ipv6(self):
"""
Should be a bool or converted to one
"""
@assert_bool(salt.utils.docker.translate.network)
def test_attachable(self):
"""
Should be a bool or converted to one
"""
@assert_bool(salt.utils.docker.translate.network)
def test_ingress(self):
"""
Should be a bool or converted to one
"""
@assert_string(salt.utils.docker.translate.network)
def test_ipam_driver(self):
"""
        Should be a string or converted to one
"""
@assert_key_equals_value(salt.utils.docker.translate.network)
def test_ipam_opts(self):
"""
Can be passed in several formats but must end up as a dictionary
mapping keys to values
"""
    def test_ipam_pools(self):
"""
Must be a list of dictionaries (not a dictlist)
"""
good_pool = {
"subnet": "10.0.0.0/24",
"iprange": "10.0.0.128/25",
"gateway": "10.0.0.254",
"aux_addresses": {
"foo.bar.tld": "10.0.0.20",
"hello.world.tld": "10.0.0.21",
},
}
bad_pools = [
{
"subnet": "10.0.0.0/33",
"iprange": "10.0.0.128/25",
"gateway": "10.0.0.254",
"aux_addresses": {
"foo.bar.tld": "10.0.0.20",
"hello.world.tld": "10.0.0.21",
},
},
{
"subnet": "10.0.0.0/24",
"iprange": "foo/25",
"gateway": "10.0.0.254",
"aux_addresses": {
"foo.bar.tld": "10.0.0.20",
"hello.world.tld": "10.0.0.21",
},
},
{
"subnet": "10.0.0.0/24",
"iprange": "10.0.0.128/25",
"gateway": "10.0.0.256",
"aux_addresses": {
"foo.bar.tld": "10.0.0.20",
"hello.world.tld": "10.0.0.21",
},
},
{
"subnet": "10.0.0.0/24",
"iprange": "10.0.0.128/25",
"gateway": "10.0.0.254",
"aux_addresses": {
"foo.bar.tld": "10.0.0.20",
"hello.world.tld": "999.0.0.21",
},
},
]
self.assertEqual(
salt.utils.docker.translate_input(self.translator, ipam_pools=[good_pool],),
{"ipam_pools": [good_pool]},
)
for bad_pool in bad_pools:
with self.assertRaisesRegex(CommandExecutionError, "not a valid"):
salt.utils.docker.translate_input(
self.translator, ipam_pools=[good_pool, bad_pool]
)
@assert_subnet(salt.utils.docker.translate.network)
def test_subnet(self):
"""
Must be an IPv4 or IPv6 subnet
"""
@assert_subnet(salt.utils.docker.translate.network)
def test_iprange(self):
"""
Must be an IPv4 or IPv6 subnet
"""
def test_gateway(self):
"""
Must be an IPv4 or IPv6 address
"""
for val in self.ip_addrs[True]:
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, validate_ip_addrs=True, gateway=val,
),
self.apply_defaults({"gateway": val}),
)
for val in self.ip_addrs[False]:
with self.assertRaisesRegex(
CommandExecutionError, "'{0}' is not a valid IP address".format(val)
):
salt.utils.docker.translate_input(
self.translator, validate_ip_addrs=True, gateway=val,
)
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, validate_ip_addrs=False, gateway=val,
),
self.apply_defaults(
{
"gateway": val
if isinstance(val, six.string_types)
else six.text_type(val)
}
),
)
@assert_key_equals_value(salt.utils.docker.translate.network)
def test_aux_addresses(self):
"""
Must be a mapping of hostnames to IP addresses
"""
name = "aux_addresses"
alias = "aux_address"
for item in (name, alias):
for val in self.ip_addrs[True]:
addresses = {"foo.bar.tld": val}
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, validate_ip_addrs=True, **{item: addresses}
),
self.apply_defaults({name: addresses}),
)
for val in self.ip_addrs[False]:
addresses = {"foo.bar.tld": val}
with self.assertRaisesRegex(
CommandExecutionError, "'{0}' is not a valid IP address".format(val)
):
salt.utils.docker.translate_input(
self.translator, validate_ip_addrs=True, **{item: addresses}
)
self.assertEqual(
salt.utils.docker.translate_input(
self.translator,
validate_ip_addrs=False,
aux_addresses=addresses,
),
self.apply_defaults({name: addresses}),
)
class DockerTranslateHelperTestCase(TestCase):
"""
Tests for a couple helper functions in salt.utils.docker.translate
"""
def test_get_port_def(self):
"""
Test translation of port definition (1234, '1234/tcp', '1234/udp',
etc.) into the format which docker-py uses (integer for TCP ports,
'port_num/udp' for UDP ports).
"""
# Test TCP port (passed as int, no protocol passed)
self.assertEqual(translate_helpers.get_port_def(2222), 2222)
# Test TCP port (passed as str, no protocol passed)
self.assertEqual(translate_helpers.get_port_def("2222"), 2222)
# Test TCP port (passed as str, with protocol passed)
self.assertEqual(translate_helpers.get_port_def("2222", "tcp"), 2222)
# Test TCP port (proto passed in port_num, with passed proto ignored).
# This is a contrived example as we would never invoke the function in
# this way, but it tests that we are taking the port number from the
# port_num argument and ignoring the passed protocol.
self.assertEqual(translate_helpers.get_port_def("2222/tcp", "udp"), 2222)
# Test UDP port (passed as int)
self.assertEqual(translate_helpers.get_port_def(2222, "udp"), (2222, "udp"))
# Test UDP port (passed as string)
self.assertEqual(translate_helpers.get_port_def("2222", "udp"), (2222, "udp"))
        # Test UDP port (proto passed in port_num)
self.assertEqual(translate_helpers.get_port_def("2222/udp"), (2222, "udp"))
def test_get_port_range(self):
"""
Test extracting the start and end of a port range from a port range
expression (e.g. 4505-4506)
"""
# Passing a single int should return the start and end as the same value
self.assertEqual(translate_helpers.get_port_range(2222), (2222, 2222))
# Same as above but with port number passed as a string
self.assertEqual(translate_helpers.get_port_range("2222"), (2222, 2222))
# Passing a port range
self.assertEqual(translate_helpers.get_port_range("2222-2223"), (2222, 2223))
# Error case: port range start is greater than end
with self.assertRaisesRegex(
ValueError,
r"Start of port range \(2222\) cannot be greater than end of "
r"port range \(2221\)",
):
translate_helpers.get_port_range("2222-2221")
# Error case: non-numeric input
with self.assertRaisesRegex(
ValueError, "'2222-bar' is non-numeric or an invalid port range"
):
translate_helpers.get_port_range("2222-bar")
| 37.134587
| 88
| 0.514371
| 74,245
| 0.989287
| 0
| 0
| 15,746
| 0.20981
| 0
| 0
| 23,282
| 0.310224
|
a174c86a4c793d497c49fdd9127b5aea515b4346
| 400
|
py
|
Python
|
utils/middleware.py
|
DavidRoldan523/elenas_test
|
8b520fae68a275654a42ad761713c9c932d17a76
|
[
"MIT"
] | null | null | null |
utils/middleware.py
|
DavidRoldan523/elenas_test
|
8b520fae68a275654a42ad761713c9c932d17a76
|
[
"MIT"
] | null | null | null |
utils/middleware.py
|
DavidRoldan523/elenas_test
|
8b520fae68a275654a42ad761713c9c932d17a76
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponse
from django.utils.deprecation import MiddlewareMixin
class HealthCheckMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
if request.META["PATH_INFO"] == "/health-check/":
return HttpResponse("ok")
response = self.get_response(request)
return response
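
# Usage sketch (an assumption, not part of this module): the middleware would be
# enabled by adding its dotted path to MIDDLEWARE in the Django settings, e.g.
#
#   MIDDLEWARE = [
#       "utils.middleware.HealthCheckMiddleware",
#       # ... the rest of the middleware stack ...
#   ]
#
# after which a GET to /health-check/ returns a plain "ok" response without
# reaching the URL resolver or any view.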
| 28.571429
| 57
| 0.7
| 308
| 0.77
| 0
| 0
| 0
| 0
| 0
| 0
| 31
| 0.0775
|
a175602faa0357ee58584137efdc7c85d289bf89
| 3,317
|
py
|
Python
|
bot/exts/evergreen/catify.py
|
chincholikarsalil/sir-lancebot
|
05ba3de5c99b30a8eba393da1460fae255373457
|
[
"MIT"
] | null | null | null |
bot/exts/evergreen/catify.py
|
chincholikarsalil/sir-lancebot
|
05ba3de5c99b30a8eba393da1460fae255373457
|
[
"MIT"
] | null | null | null |
bot/exts/evergreen/catify.py
|
chincholikarsalil/sir-lancebot
|
05ba3de5c99b30a8eba393da1460fae255373457
|
[
"MIT"
] | null | null | null |
import random
from contextlib import suppress
from typing import Optional
from discord import AllowedMentions, Embed, Forbidden
from discord.ext import commands
from bot.constants import Cats, Colours, NEGATIVE_REPLIES
from bot.utils import helpers
class Catify(commands.Cog):
"""Cog for the catify command."""
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(aliases=["ᓚᘏᗢify", "ᓚᘏᗢ"])
@commands.cooldown(1, 5, commands.BucketType.user)
async def catify(self, ctx: commands.Context, *, text: Optional[str]) -> None:
"""
        Convert the provided text into a cat-themed sentence by interspersing cats throughout the text.
        If no text is given then the user's nickname is edited.
"""
if not text:
display_name = ctx.author.display_name
if len(display_name) > 26:
embed = Embed(
title=random.choice(NEGATIVE_REPLIES),
description=(
"Your display name is too long to be catified! "
"Please change it to be under 26 characters."
),
color=Colours.soft_red
)
await ctx.send(embed=embed)
return
else:
display_name += f" | {random.choice(Cats.cats)}"
await ctx.send(f"Your catified nickname is: `{display_name}`", allowed_mentions=AllowedMentions.none())
with suppress(Forbidden):
await ctx.author.edit(nick=display_name)
else:
if len(text) >= 1500:
embed = Embed(
title=random.choice(NEGATIVE_REPLIES),
description="Submitted text was too large! Please submit something under 1500 characters.",
color=Colours.soft_red
)
await ctx.send(embed=embed)
return
string_list = text.split()
for index, name in enumerate(string_list):
name = name.lower()
if "cat" in name:
if random.randint(0, 5) == 5:
string_list[index] = name.replace("cat", f"**{random.choice(Cats.cats)}**")
else:
string_list[index] = name.replace("cat", random.choice(Cats.cats))
for element in Cats.cats:
if element in name:
string_list[index] = name.replace(element, "cat")
string_len = len(string_list) // 3 or len(string_list)
for _ in range(random.randint(1, string_len)):
# insert cat at random index
if random.randint(0, 5) == 5:
string_list.insert(random.randint(0, len(string_list)), f"**{random.choice(Cats.cats)}**")
else:
string_list.insert(random.randint(0, len(string_list)), random.choice(Cats.cats))
text = helpers.suppress_links(" ".join(string_list))
await ctx.send(
f">>> {text}",
allowed_mentions=AllowedMentions.none()
)
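
    # Rough illustration (hypothetical, since the real emoticons live in Cats.cats):
    # catifying "my cat is great" could yield something like "my ᓚᘏᗢ is great ᓚᘏᗢ".
    # Every "cat" substring is swapped for a cat emoticon and extra cats are
    # sprinkled in at random positions before the result is sent back.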
def setup(bot: commands.Bot) -> None:
"""Loads the catify cog."""
bot.add_cog(Catify(bot))
| 37.269663
| 119
| 0.545674
| 2,974
| 0.893361
| 0
| 0
| 2,836
| 0.851907
| 2,720
| 0.817062
| 643
| 0.193151
|
a1773cd4561ed64fe6472e04a837e283a5378aa9
| 1,763
|
py
|
Python
|
data/ebmnlp/stream.py
|
bepnye/tf_ner
|
c68b9f489e56e0ec8cfb02b7115d2b07d721ac6f
|
[
"Apache-2.0"
] | null | null | null |
data/ebmnlp/stream.py
|
bepnye/tf_ner
|
c68b9f489e56e0ec8cfb02b7115d2b07d721ac6f
|
[
"Apache-2.0"
] | null | null | null |
data/ebmnlp/stream.py
|
bepnye/tf_ner
|
c68b9f489e56e0ec8cfb02b7115d2b07d721ac6f
|
[
"Apache-2.0"
] | null | null | null |
import os
import data_utils
from pathlib import Path
top_path = Path(os.path.dirname(os.path.abspath(__file__)))
EBM_NLP = Path('/Users/ben/Desktop/ebm_nlp/repo/ebm_nlp_2_00/')
NO_LABEL = '0'
def overwrite_tags(new_tags, tags):
for i, t in enumerate(new_tags):
if t != NO_LABEL:
tags[i] = t
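
# Illustration of the merge semantics: existing labels are only replaced where the
# new sequence carries a real (non-'0') label. For example,
#   overwrite_tags(['0', 'i', 'i'], tags=['p', '0', '0'])
# leaves tags as ['p', 'i', 'i'].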
def get_tags(d):
pmid_tags = {}
for e in ['participants', 'interventions', 'outcomes']:
for a in (EBM_NLP / 'annotations' / 'aggregated' / 'starting_spans' / e / d).glob('*.ann'):
pmid = a.stem.split('.')[0]
tags = a.open().read().split()
tags = [e[0] if t == '1' else NO_LABEL for t in tags]
if pmid not in pmid_tags:
pmid_tags[pmid] = tags
else:
overwrite_tags(tags, pmid_tags[pmid])
return pmid_tags
def get_words(pmids):
return { pmid: (EBM_NLP / 'documents' / '{}.tokens'.format(pmid)).open().read().split() for pmid in pmids }
def get_seqs(tag_d, word_d, keys):
tag_seqs = []
word_seqs = []
for k in keys:
words, tags = data_utils.generate_seqs(word_d[k], tag_d[k])
tag_seqs += tags
word_seqs += words
return word_seqs, tag_seqs
TRAIN_TAG_D = get_tags(Path('train/'))
TRAIN_PMIDS = sorted(TRAIN_TAG_D.keys())
TRAIN_WORD_D = get_words(TRAIN_PMIDS)
TRAIN_WORDS, TRAIN_TAGS = get_seqs(TRAIN_TAG_D, TRAIN_WORD_D, TRAIN_PMIDS)
TEST_TAG_D = get_tags(Path('test/gold/'))
TEST_PMIDS = sorted(TEST_TAG_D.keys())
TEST_WORD_D = get_words(TEST_PMIDS)
TEST_WORDS, TEST_TAGS = get_seqs(TEST_TAG_D, TEST_WORD_D, TEST_PMIDS)
def train_words():
return TRAIN_WORDS
def train_tags():
return TRAIN_TAGS
def test_words():
return TEST_WORDS
def test_tags():
return TEST_TAGS
def word_embeddings():
return ((top_path / '..' / 'embeddings' / 'glove.840B.300d.txt').open(), 300)
| 28.435484
| 109
| 0.683494
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 222
| 0.125922
|
a178917c391e8c7d6cc84a889a8b3efdf43b8cd9
| 16,753
|
py
|
Python
|
Kernels/Research/FFT/config/fft.py
|
WoodData/EndpointAI
|
8e4d145ff45cf5559ab009eba4f423e944dc6975
|
[
"Apache-2.0"
] | 190
|
2020-09-22T02:14:29.000Z
|
2022-03-28T02:35:57.000Z
|
Kernels/Research/FFT/config/fft.py
|
chuancqc/EndpointAI
|
ab67cefeae3c06f1c93f66812bcf988c14e72ff1
|
[
"Apache-2.0"
] | 2
|
2021-08-30T10:06:22.000Z
|
2021-11-05T20:37:58.000Z
|
Kernels/Research/FFT/config/fft.py
|
chuancqc/EndpointAI
|
ab67cefeae3c06f1c93f66812bcf988c14e72ff1
|
[
"Apache-2.0"
] | 80
|
2020-09-13T17:48:56.000Z
|
2022-03-19T10:45:05.000Z
|
#
#
# Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sympy.ntheory import factorint
import numpy as np
from sympy.combinatorics import Permutation
import io
import math
from config.strtools import *
import itertools
import struct
import config.formats
# Conversion of double to fixed-point values
#
# - -0x8000 is represented as 0x8000 in C (int16).
#   So when it is multiplied, it will give the wrong sign for the result
#   of the multiplication, except if DSPE instructions with saturation are used
#   to compute the negate (and we should get 0x7FFF).
#
# So for Cortex-M without the DSP extension, we should try to use 0x8001.
# It is done but not yet tested.
def to_q63(v,dspe):
r = int(round(v * 2**63))
if (r > 0x07FFFFFFFFFFFFFFF):
r = 0x07FFFFFFFFFFFFFFF
if (r < -0x08000000000000000):
if dspe:
r = -0x08000000000000000
else:
r = -0x07FFFFFFFFFFFFFFF
return ("0x%s" % format(struct.unpack('<Q', struct.pack('<q', r))[0],'016X'))
def to_q31(v,dspe):
r = int(round(v * 2**31))
if (r > 0x07FFFFFFF):
r = 0x07FFFFFFF
if (r < -0x080000000):
if dspe:
r = -0x080000000
else:
r = -0x07FFFFFFF
return ("0x%s" % format(struct.unpack('<I', struct.pack('<i', r))[0],'08X'))
def to_q15(v,dspe):
r = int(round(v * 2**15))
if (r > 0x07FFF):
r = 0x07FFF
if (r < -0x08000):
if dspe:
r = -0x08000
else:
r = -0x07FFF
return ("0x%s" % format(struct.unpack('<H', struct.pack('<h', r))[0],'04X'))
def to_q7(v,dspe):
r = int(round(v * 2**7))
if (r > 0x07F):
r = 0x07F
    if (r < -0x080):
if dspe:
r = -0x080
else:
r = -0x07F
return ("0x%s" % format(struct.unpack('<B', struct.pack('<b', r))[0],'02X'))
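
# Small sanity-check sketch for the converters above (illustrative only, not used
# by the generator). It shows the saturation behaviour for values below -1.0 and
# the DSPE / non-DSPE difference discussed in the comment at the top of this file.
def _demo_fixed_point_conversion():
    # 0.5 in Q15 is 0x4000
    assert to_q15(0.5, True) == "0x4000"
    # Values below -1.0 saturate: to -0x8000 with DSPE, to -0x7FFF without it
    assert to_q15(-1.5, True) == "0x8000"
    assert to_q15(-1.5, False) == "0x8001"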
Q7=1
Q15=2
Q31=3
F16=4
F32=5
F64=6
# In the final C++ code, we have a loop for a given radix.
# The input list here has not grouped the factors.
# The list need to be transformed into a list of pair.
# The pair being (radix,exponent)
def groupFactors(factors):
n = 0
current=-1
result=[]
for f in factors:
if f != current:
if current != -1:
result = result + [current,n]
current=f
n=1
else:
n=n+1
result = result + [current,n]
return(result)
# Compute the grouped factors for the FFT length originaln,
# where the only possible radixes are those in the primitiveFactors list.
def getFactors(primitiveFactors,originaln):
factors=[]
length=[]
primitiveFactors.sort(reverse=True)
n = originaln
while (n > 1) and primitiveFactors:
if (n % primitiveFactors[0] == 0):
factors.append(primitiveFactors[0])
n = n // primitiveFactors[0]
else:
primitiveFactors=primitiveFactors[1:]
# When lowest factors are at the beginning (like 2)
# we use a special implementation of the loopcore template
# and it is removing some cycles.
# So, we will get (for instance) 2x8x8x8 instead of 8x8x8x2
factors.reverse()
for f in factors:
originaln = originaln // f
length.append(originaln)
groupedfactors=groupFactors(factors)
return(groupedfactors,factors,length)
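
# Illustrative sketch (not called by the generator): decomposing a 1024-point FFT
# with a hypothetical radix set [2, 3, 4, 5, 8] gives one radix-2 stage followed
# by three radix-8 stages.
def _demo_get_factors():
    grouped, factors, lengths = getFactors([2, 3, 4, 5, 8], 1024)
    assert factors == [2, 8, 8, 8]       # radix of each stage, smallest first
    assert grouped == [2, 1, 8, 3]       # (radix, exponent) pairs, flattened
    assert lengths == [512, 64, 8, 1]    # remaining length after each stage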
# Apply the radix decomposition to compute the input -> output permutation
# computed by the FFT.
def radixReverse(f,n):
a=np.array(range(0,n)).reshape(f)
r = list(range(0,len(f)))
r.reverse()
r = tuple(r)
a = np.transpose(a,r)
return(a.reshape(n))
def radixPermutation(factors,n):
a = radixReverse(factors,n)
tps = []
vectorizable=True
for c in Permutation.from_sequence(a).cyclic_form:
if (len(c)>2):
vectorizable = False
for i in range(len(c)-1,0,-1):
# 2 because those are indexes in an array of complex numbers but
# with a real type.
tps.append([2*c[i], 2*c[i-1]])
return(np.array(tps,dtype=int).flatten(),vectorizable)
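
# Illustrative sketch: for a radix-2 decomposition of an 8-point FFT the index
# permutation computed by radixReverse is the classic bit-reversal order.
def _demo_radix_reverse():
    assert radixReverse((2, 2, 2), 8).tolist() == [0, 4, 2, 6, 1, 5, 3, 7]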
# CFFT Twiddle table
def cfft_twiddle(n):
a=2.0*math.pi*np.linspace(0,n,num=n,endpoint=False)/n
c=np.cos(-a)
s=np.sin(-a)
r = np.empty((c.size + s.size,), dtype=c.dtype)
r[0::2] = c
r[1::2] = s
return(r)
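
# Quick illustrative check of the interleaved (real, imag) layout produced above
# (not used by the generator): for n=4 the twiddles are 1, -j, -1, j.
def _demo_cfft_twiddle():
    assert np.allclose(cfft_twiddle(4), [1, 0, 0, -1, -1, 0, 0, 1], atol=1e-12)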
# RFFT twiddle for the merge and split steps.
def rfft_twiddle(n):
a=2.0j*math.pi*np.linspace(0,n//2,num=n // 2,endpoint=False)/n
z=-1.0j * np.exp(-a)
r = z.view(dtype=np.float64)
return(r)
# Compute the twiddle tables
def twiddle(transform,n):
if transform=="CFFT":
return(cfft_twiddle(n))
if transform=="RFFT":
return(rfft_twiddle(n))
return(None)
NB_ELEMS_PER_LINE=3
# Generate C array content for a given datatype
def printFloat64Array(f,n):
nb=0
for s in n:
print("%.20f, " % s,end="",file=f)
nb = nb + 1
if nb == NB_ELEMS_PER_LINE:
nb=0
print("",file=f)
def printFloat32Array(f,n):
nb=0
for s in n:
print("%.20ff, " % s,end="",file=f)
nb = nb + 1
if nb == NB_ELEMS_PER_LINE:
nb=0
print("",file=f)
def printFloat16Array(f,n):
nb=0
for s in n:
print("%.8ff16, " % s,end="",file=f)
nb = nb + 1
if nb == NB_ELEMS_PER_LINE:
nb=0
print("",file=f)
def printQ31Array(f,mode,n):
DSPE=False
if mode == "DSP":
DSPE=True
nb=0
for s in n:
print(to_q31(s,DSPE) + ", ",end="",file=f)
nb = nb + 1
if nb == NB_ELEMS_PER_LINE:
nb=0
print("",file=f)
def printQ15Array(f,mode,n):
DSPE=False
if mode == "DSP":
DSPE=True
nb=0
for s in n:
print(to_q15(s,DSPE) + ", ",end="",file=f)
nb = nb + 1
if nb == NB_ELEMS_PER_LINE:
nb=0
print("",file=f)
def printQ7Array(f,mode,n):
DSPE=False
if mode == "DSP":
DSPE=True
nb=0
for s in n:
print(to_q7(s,DSPE) + ", ",end="",file=f)
nb = nb + 1
if nb == NB_ELEMS_PER_LINE:
nb=0
print("",file=f)
# Print a C array
# Using the type, dspe mode, name
# (dspe mode determines whether 0x8000 must be generated as 8000 or 8001
# to avoid sign issues when multiplying with the twiddles)
def printArray(f,ctype,mode,name,a):
nbSamples = len(a)
define = "NB_" + name.upper()
n = a.reshape(len(a))
print("__ALIGNED(8) const %s %s[%s]={" % (ctype,name,define),file=f)
if ctype == "float64_t":
printFloat64Array(f,n)
if ctype == "float32_t":
printFloat32Array(f,n)
if ctype == "float16_t":
printFloat16Array(f,n)
if ctype == "Q31":
printQ31Array(f,mode,n)
if ctype == "Q15":
printQ15Array(f,mode,n)
if ctype == "Q7":
printQ7Array(f,mode,n)
print("};",file=f)
# Convert a float value to a given datatype.
def convertToDatatype(r,ctype,mode):
DSPE=False
if mode == "DSP":
DSPE=True
if ctype == "float64_t":
result = "%.20f" % r
if ctype == "float32_t":
result = "%.20ff" % r
if ctype == "float16_t":
result = "%.20ff16" % r
if ctype == "Q31":
result = "Q31(%s)" % to_q31(r,DSPE)
if ctype == "Q15":
result = "Q15(%s)" % to_q15(r,DSPE)
if ctype == "Q7":
result = "Q7(%s)" % to_q7(r,DSPE)
return(result)
def printArrayHeader(f,ctype,name,nbSamples):
define = "NB_" + name.upper()
print("#define %s %d" % (define, nbSamples),file=f)
print("extern __ALIGNED(8) const %s %s[%s];\n" % (ctype,name,define),file=f)
# Print UINT arrays for permutations.
def printUInt32Array(f,name,a):
nbSamples = len(a)
define = "NB_" + name.upper()
n = a.reshape(len(a))
print("__ALIGNED(8) const uint32_t %s[%s]={" % (name,define),file=f)
nb=0
for s in n:
print("%d, " % s,end="",file=f)
nb = nb + 1
if nb == NB_ELEMS_PER_LINE:
nb=0
print("",file=f)
print("};",file=f)
def printUInt16Array(f,name,a):
nbSamples = len(a)
define = "NB_" + name.upper()
n = a.reshape(len(a))
print("__ALIGNED(8) const uint16_t %s[%s]={" % (name,define),file=f)
nb=0
for s in n:
print("%d, " % s,end="",file=f)
nb = nb + 1
if nb == NB_ELEMS_PER_LINE:
nb=0
print("",file=f)
print("};",file=f)
def printUInt32ArrayHeader(f,name,a):
nbSamples = len(a)
define = "NB_" + name.upper()
n = a.reshape(len(a))
print("#define %s %d" % (define, nbSamples),file=f)
print("extern __ALIGNED(8) const uint32_t %s[%s];\n" % (name,define),file=f)
def printUInt16ArrayHeader(f,name,a):
nbSamples = len(a)
define = "NB_" + name.upper()
n = a.reshape(len(a))
print("#define %s %d" % (define, nbSamples),file=f)
print("extern __ALIGNED(8) const uint16_t %s[%s];\n" % (name,define),file=f)
def getCtype(t):
if t == 'f64':
return("float64_t")
if t == 'f32':
return("float32_t")
if t == 'f16':
return("float16_t")
if t == 'q31':
return("Q31")
if t == 'q15':
return("Q15")
if t == 'q7':
return("Q7")
return("void")
# Configuration structures for CFFT and RFFT
cfftconfig = """cfftconfig<%s> config%d={
.normalization=%s,
.nbPerms=%s,
.perms=perm%d,
.nbTwiddle=%s,
.twiddle=twiddle%d,
.nbGroupedFactors=%d,
.nbFactors=%d,
.factors=factors%d,
.lengths=lengths%d,
.format=%d,
.reversalVectorizable=%d
};"""
rfftconfig = """rfftconfig<%s> config%d={
.nbTwiddle=%s,
.twiddle=twiddle%d
};"""
fftconfigHeader = """extern %sconfig<%s> config%d;"""
fftFactorArray = """const uint16_t factors%d[%d]=%s;\n"""
fftLengthArray = """const uint16_t lengths%d[%d]=%s;\n"""
# Description of a permutation
class Perm:
PermID = 0
# Grouped factors and factors.
def getFactors(core,nb,datatype):
_groupedFactors,_factors,_lens=getFactors(core.radix(datatype,nb),nb)
return(_factors)
def __init__(self,core,nb,datatype):
Perm.PermID = Perm.PermID + 1
self._nb=nb
self._id = Perm.PermID
self._radixUsed=set([])
self._groupedFactors,self._factors,self._lens=getFactors(core.radix(datatype,nb),nb)
self._perms = None
self._core=core
self._isvectorizable=False
def permutations(self):
_permFactors=list(itertools.chain(*[self._core.getPermFactor(x) for x in self._factors]))
#print(_permFactors)
self._perms,self._isvectorizable = radixPermutation(_permFactors[::-1],self._nb)
@property
def isVectorizable(self):
return(self._isvectorizable)
@property
def permID(self):
return(self._id)
@property
def perms(self):
if self._perms is not None:
return(self._perms)
else:
self.permutations()
return(self._perms)
@property
def factors(self):
return(self._factors)
@property
def nbGroupedFactors(self):
return(int(len(self._groupedFactors)/2))
@property
def nbFactors(self):
return(len(self._factors))
def writePermHeader(self,h):
printUInt16ArrayHeader(h,"perm%d" % self.permID,self.perms)
def writePermCode(self,c):
printUInt16Array(c,"perm%d" % self.permID,self.perms)
def writeFactorDesc(self,c):
radixList="{%s}" % joinStr([str(x) for x in self._groupedFactors])
lengthList="{%s}" % joinStr([str(x) for x in self._lens])
print(fftFactorArray % (self.permID,2*self.nbGroupedFactors,radixList),file=c);
print(fftLengthArray % (self.permID,len(self._lens),lengthList),file=c);
class Twiddle:
TwiddleId = 0
def __init__(self,transform,nb,datatype,mode):
Twiddle.TwiddleId = Twiddle.TwiddleId + 1
self._id = Twiddle.TwiddleId
self._datatype = datatype
self._nb=nb
self._twiddle = None
self._transform=transform
self._mode=mode
@property
def twiddleID(self):
return(self._id)
@property
def datatype(self):
return(self._datatype)
@property
def samples(self):
if self._twiddle is None:
self._twiddle=twiddle(self._transform,self._nb)
return(self._twiddle)
@property
def nbSamples(self):
return(self._nb)
@property
def nbTwiddles(self):
if self._transform=="RFFT":
return(self._nb // 2)
else:
return(self._nb)
def writeTwidHeader(self,h):
ctype=getCtype(self.datatype)
# Twiddle is a complex array so 2*nbSamples must be used
printArrayHeader(h,ctype,"twiddle%d" % self.twiddleID,2*self.nbTwiddles)
def writeTwidCode(self,c):
ctype=getCtype(self.datatype)
printArray(c,ctype,self._mode,"twiddle%d" % self.twiddleID,self.samples)
class Config:
ConfigID = 0
def __init__(self,transform,twiddle,perms,coreMode):
Config.ConfigID = Config.ConfigID + 1
self._id = Config.ConfigID
self._twiddle=twiddle
self._perms=perms
self._transform=transform
self._coreMode=coreMode
@property
def transform(self):
return(self._transform)
@property
def configID(self):
return(self._id)
@property
def perms(self):
return(self._perms)
@property
def twiddle(self):
return(self._twiddle)
@property
def nbSamples(self):
return(self.twiddle.nbSamples)
def writeConfigHeader(self,c):
ctype=getCtype(self.twiddle.datatype)
print(fftconfigHeader % (self.transform.lower(),ctype,self.configID),file=c)
def writeConfigCode(self,c):
ctype=getCtype(self.twiddle.datatype)
twiddleLen = "NB_" + ("twiddle%d"% self.twiddle.twiddleID).upper()
if self.transform == "RFFT":
print(rfftconfig % (ctype,self.configID,twiddleLen,self.twiddle.twiddleID),file=c)
else:
normfactor = 1.0 / self.twiddle.nbSamples
normFactorStr = convertToDatatype(normfactor,ctype,self._coreMode)
permsLen = "NB_" + ("perm%d"% self.perms.permID).upper()
outputFormat = 0
#print(self.twiddle.datatype)
#print(self.twiddle.nbSamples)
#print(self.perms.factors)
# For fixed point, each stage will change the output format.
            # We need to compute the final format of the FFT
# and record it in the initialization structure
# so that the user can easily know how to recover the
# input format (q31, q15). It is encoded as a shift value.
# The shift to apply to recover the input format
# But applying this shift will saturate the result in general.
if self.twiddle.datatype == "q15" or self.twiddle.datatype == "q31":
for f in self.perms.factors:
#print(f,self.twiddle.datatype,self._coreMode)
                    # The file "formats.py" describes the format of each radix
# and is used to compute the format of the FFT based
# on the decomposition of its length.
#
# Currently (since there is no vector version for fixed point)
# this is not taking into account the format change that may
# be implied by the vectorization in case it may be different
# from the scalar version.
formatForSize = config.formats.formats[f][self._coreMode]
outputFormat += formatForSize[self.twiddle.datatype]
vectorizable=0
if self.perms.isVectorizable:
vectorizable = 1
print(cfftconfig % (ctype,self.configID,normFactorStr,permsLen,self.perms.permID,
twiddleLen,self.twiddle.twiddleID,self.perms.nbGroupedFactors,self.perms.nbFactors,
self.perms.permID,self.perms.permID,outputFormat,vectorizable
),file=c)
| 27.463934
| 98
| 0.595595
| 6,312
| 0.376768
| 0
| 0
| 1,333
| 0.079568
| 0
| 0
| 4,460
| 0.266221
|
a179d95ca52452ffb3320f8150fc8f1ca9d9de24
| 1,275
|
py
|
Python
|
classification/resnetOnnx_inference_dynamicInput.py
|
SahilChachra/Onnx-Deposit
|
6cdf03903639166a43e0c809b67621a1aa2449dd
|
[
"BSD-3-Clause"
] | null | null | null |
classification/resnetOnnx_inference_dynamicInput.py
|
SahilChachra/Onnx-Deposit
|
6cdf03903639166a43e0c809b67621a1aa2449dd
|
[
"BSD-3-Clause"
] | null | null | null |
classification/resnetOnnx_inference_dynamicInput.py
|
SahilChachra/Onnx-Deposit
|
6cdf03903639166a43e0c809b67621a1aa2449dd
|
[
"BSD-3-Clause"
] | null | null | null |
'''
This inference script takes in images of dynamic size
Runs inference in batch
** The images are resized here, although resizing is not required for this script
'''
import onnx
import onnxruntime as ort
import numpy as np
import cv2
from imagenet_classlist import get_class
import os
model_path = 'resnet18.onnx'
model = onnx.load(model_path)
image_path = "../sample_images"
try:
print("Checking model...")
onnx.checker.check_model(model)
onnx.helper.printable_graph(model.graph)
print("Model checked...")
print("Running inference...")
ort_session = ort.InferenceSession(model_path)
img_list = []
for image in os.listdir(image_path):
img = cv2.imread(os.path.join(image_path, image), cv2.IMREAD_COLOR)
img = cv2.resize(img, ((224, 224)))
        img = np.moveaxis(img, -1, 0) # HWC -> CHW; the batch dimension is added when stacking below
        img_list.append(img/255.0) # Normalize the image
    # ONNX Runtime expects a numpy array (float32 for a typical ResNet export), not a Python list
    batch = np.asarray(img_list, dtype=np.float32)
    outputs = ort_session.run(None, {"input": batch})
out = np.array(outputs)
for image_num, image_name in zip(range(out.shape[1]), os.listdir(image_path)):
index = out[0][image_num]
print("Image : {0}, Class : {1}".format(image_name, get_class(np.argmax(index))))
except Exception as e:
print("Exception occured : ", e)
| 28.977273
| 89
| 0.680784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 358
| 0.280784
|
a17a4e7f440bd9450eae4bfedcba472184cfe212
| 3,857
|
py
|
Python
|
demo/Master/TaskMaker.py
|
build2last/JOCC
|
8eedaa923c6444a32e53e03fdd2a85a8031c46f5
|
[
"MIT"
] | null | null | null |
demo/Master/TaskMaker.py
|
build2last/JOCC
|
8eedaa923c6444a32e53e03fdd2a85a8031c46f5
|
[
"MIT"
] | null | null | null |
demo/Master/TaskMaker.py
|
build2last/JOCC
|
8eedaa923c6444a32e53e03fdd2a85a8031c46f5
|
[
"MIT"
] | null | null | null |
# coding:utf-8
import time
import MySQLdb
import conf
import Server
# Another way to load data to MySQL:
# load data infile "C://ProgramData/MySQL/MySQL Server 5.7/Uploads/track_info_url_0_part0.txt" ignore into table develop.task(mid, url);
# doing: load data infile "C://ProgramData/MySQL/MySQL Server 5.7/Uploads/track_info_url_1_part1.txt" ignore into table develop.task(mid, url);
class Master:
def __init__(self):
CREATE_TABLE_SQL = (
"""CREATE TABLE IF NOT EXISTS `task` (
`mid` varchar(50) NOT NULL,
            `status` tinyint(1) NOT NULL DEFAULT '0' COMMENT '0: unassigned 1: assigned, awaiting feedback 2: completed',
`worker` varchar(45) DEFAULT NULL,
`url` varchar(600) NOT NULL,
PRIMARY KEY (`mid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Give out tasks for distributed crawler.';""")
# Create table in MySQL
conn = MySQLdb.connect(host=conf.HOST, user=conf.USER, passwd=conf.PASS, db=conf.DB_NAME, port=conf.DBPORT, charset='utf8')
cursor = conn.cursor()
cursor.execute(CREATE_TABLE_SQL)
conn.commit()
conn.close()
self.mid = set()
self.urls = set()
    def generate_task(self, task_file):
        """Assumes the task file provides correctly formatted tab-separated data."""
with open(task_file) as fr:
while fr:
task = fr.readline().strip().split("\t")
if '' not in task:
yield(task)
else:
continue
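    # Expected task-file layout (an illustration based on the split("\t") above):
    # each line holds a mid and a url separated by a single tab, for example
    #   123456<TAB>http://example.com/track/123456   (hypothetical values)
    # Lines containing an empty field are skipped.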
def load_func(self, conn, items):
        # Advantage: low memory footprint (rows are inserted one at a time)
cursor = conn.cursor()
counter = 0
for item in items:
try:
insert_sql = """insert IGNORE into {table_name} ({column1}, {column2}) VALUES (%s, %s)""".format(table_name="task", column1="mid", column2="url")
cursor.execute(insert_sql, (item[0], item[1]))
counter += 1
conn.commit()
except Exception as e:
print(e)
print("Load %d items success"%counter)
def load_fast(self, conn, items):
        # Known to fail: this bulk-insert variant does not work reliably
cursor = conn.cursor()
insert_sql = """insert into {table_name} ({column1}, {column2}) VALUES (%s, %s)""".format(table_name="task", column1="mid", column2="url")
paras = []
for i in items:
if i[0] not in self.mid and i[1] not in self.urls:
self.mid.add(i[0])
self.urls.add(i[1])
paras.append((i[0], i[1]))
counter = len(paras)
print(counter)
try:
print("inserting")
for index in range(len(paras))[::10000]:
para = paras[index:index+10000]
cursor.executemany(insert_sql, para)
print("Load items success")
except Exception as e:
print(e)
conn.commit()
def load_task(self, task_file):
try:
conn = MySQLdb.connect(host=conf.HOST, user=conf.USER, passwd=conf.PASS, db=conf.DB_NAME, port=conf.DBPORT, charset='utf8')
tasks = self.generate_task(task_file)
self.load_func(conn, tasks)
#self.load_fast(conn, tasks)
except Exception as e:
print(e)
finally:
conn.close()
def main():
master = Master()
task_files = ["track_info_url_5_part0.txt","track_info_url_5_part1.txt","track_info_url_6_part1.txt","track_info_url_8_part0.txt","track_info_url_8_part1.txt","track_info_url_9_part0.txt","track_info_url_9_part1.txt"]
for task_file in task_files:
print("Processing %s"%task_file)
path = r"C:\ProgramData\MySQL\MySQL Server 5.7\Uploads\\" + task_file
master.load_task(path)
if __name__ == '__main__':
tick = time.time()
main()
tock = time.time()
print("Cost %d s"%(tock - tick))
| 38.959596
| 221
| 0.577392
| 2,942
| 0.75494
| 326
| 0.083654
| 0
| 0
| 0
| 0
| 1,392
| 0.357198
|
a17ebf74350b134333915aa09bd51888d3742c03
| 770
|
py
|
Python
|
Inclass/python/sort_order_testing/sort_order.py
|
chenchuw/EC602-Design-by-Software
|
c233c9d08a67abc47235282fedd866d67ccaf4ce
|
[
"MIT"
] | null | null | null |
Inclass/python/sort_order_testing/sort_order.py
|
chenchuw/EC602-Design-by-Software
|
c233c9d08a67abc47235282fedd866d67ccaf4ce
|
[
"MIT"
] | null | null | null |
Inclass/python/sort_order_testing/sort_order.py
|
chenchuw/EC602-Design-by-Software
|
c233c9d08a67abc47235282fedd866d67ccaf4ce
|
[
"MIT"
] | 1
|
2022-01-11T20:23:47.000Z
|
2022-01-11T20:23:47.000Z
|
#!/Users/francischen/opt/anaconda3/bin/python
# Python's sorts are STABLE: on a tie, the original order is preserved.
# sort: key, reverse
q = ['two','twelve','One','3']
# sort q in place; the list is modified and nothing is returned
q.sort()
print(q)
q = ['two','twelve','One','3',"this has lots of t's"]
q.sort(reverse=True)
print(q)
def f(x):
return x.count('t')
q.sort(key = f)
print(q)
q = ['twelve','two','One','3',"this has lots of t's"]
q.sort(key=f)
print(q)
#Multiple sorts
q = ['twelve','two','One','3',"this has lots of t's"]
q.sort()
q.sort(key=f)
# To sort by key 1, then key 2, then key 3:
# sort by 3 first, then by 2, then by 1 (stability keeps the earlier passes)
print(q)
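
# The two sorts above (plain sort, then sort by key f) are equivalent to a single
# sort with a tuple key; a small sketch using the same f as before:
q = ['twelve','two','One','3',"this has lots of t's"]
q.sort(key=lambda x: (f(x), x))
print(q)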
def complicated(x):
return(x.count('t'),len(x),x)
q = ['two','otw','wot','Z','t','tt','longer t']
q.sort(key=complicated)
print(q)
| 18.333333
| 64
| 0.62987
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 457
| 0.593506
|
a17ec4639df7fdbb530566bb66941b664210b137
| 96
|
py
|
Python
|
bhinneka/utils.py
|
kangfend/scrapy-bhinneka
|
a4a6e4ae5295e8bf83b213c1dace9c7de70f128c
|
[
"MIT"
] | 1
|
2016-10-04T10:10:05.000Z
|
2016-10-04T10:10:05.000Z
|
bhinneka/utils.py
|
kangfend/scrapy-bhinneka
|
a4a6e4ae5295e8bf83b213c1dace9c7de70f128c
|
[
"MIT"
] | null | null | null |
bhinneka/utils.py
|
kangfend/scrapy-bhinneka
|
a4a6e4ae5295e8bf83b213c1dace9c7de70f128c
|
[
"MIT"
] | null | null | null |
from bhinneka.settings import BASE_URL
def get_absolute_url(path):
return BASE_URL + path
| 16
| 38
| 0.78125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a17ef045f77adc98f9fc666a8b89d72884c7ebf6
| 287
|
py
|
Python
|
tests/test_vsan/vars.py
|
wardy3/mdssdk
|
393102fab146917a3893b6aa2bd6a0449ad491c5
|
[
"Apache-2.0"
] | 4
|
2020-12-13T20:02:43.000Z
|
2022-02-27T23:36:58.000Z
|
tests/test_vsan/vars.py
|
wardy3/mdssdk
|
393102fab146917a3893b6aa2bd6a0449ad491c5
|
[
"Apache-2.0"
] | 13
|
2020-09-23T07:30:15.000Z
|
2022-03-30T01:12:25.000Z
|
tests/test_vsan/vars.py
|
wardy3/mdssdk
|
393102fab146917a3893b6aa2bd6a0449ad491c5
|
[
"Apache-2.0"
] | 12
|
2020-05-11T09:33:21.000Z
|
2022-03-18T11:11:28.000Z
|
import logging
import random
log = logging.getLogger(__name__)
reserved_id = [4079, 4094]
boundary_id = [0, 4095]
# No need to have end=4094 as there are some in-between vsans reserved for fport-channel-trunk
def get_random_id(start=2, end=400):
return random.randint(start, end)
| 22.076923
| 93
| 0.756098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 93
| 0.324042
|
a17f452cabac62c273c6e040b99703605a01fbfa
| 1,403
|
py
|
Python
|
testing_ideas/try_pymed_package/try_pymed_and_ss_api.py
|
hschilling/data-collection-and-prep
|
b70ab54fd887592bad05d5748f492fc2f9ef0f6f
|
[
"Unlicense"
] | null | null | null |
testing_ideas/try_pymed_package/try_pymed_and_ss_api.py
|
hschilling/data-collection-and-prep
|
b70ab54fd887592bad05d5748f492fc2f9ef0f6f
|
[
"Unlicense"
] | 41
|
2021-01-01T14:01:30.000Z
|
2021-01-27T20:17:21.000Z
|
testing_ideas/try_pymed_package/try_pymed_and_ss_api.py
|
hschilling/data-collection-and-prep
|
b70ab54fd887592bad05d5748f492fc2f9ef0f6f
|
[
"Unlicense"
] | 5
|
2021-02-08T14:19:35.000Z
|
2021-10-19T12:10:55.000Z
|
# Use the pymed package to call the PubMed API to get lots of papers from, in this case, JEB
from pymed import PubMed
import pandas as pd
import requests
_REQUESTS_TIMEOUT = 3.0
df_jeb = pd.DataFrame(columns=['title', 'abstract'])
df_jeb = df_jeb.convert_dtypes()
pubmed = PubMed(tool="MyTool", email="my@email.address")
# query = '("The Journal of experimental biology"[Journal]) AND (("2002/01/01"[Date - Publication] : "3000"[Date - Publication]))'
query = '("The Journal of experimental biology"[Journal]) AND (("2002/01/01"[Date - Publication] : "2018/10/10"[Date - Publication]))'
# results = pubmed.query(query, max_results=10000)
results = pubmed.query(query, max_results=100)
for r in results:
doi = "http://dx.doi.org/" + r.doi if r.doi else ''
df_jeb = df_jeb.append(
{'title': r.title,
'abstract': r.abstract,
'doi': doi,
'pmid': f"https://pubmed.ncbi.nlm.nih.gov/{r.pubmed_id}/",
},
ignore_index=True)
ss_api_url = f'https://api.semanticscholar.org/v1/paper/{r.doi}'
response = requests.get(ss_api_url, timeout=_REQUESTS_TIMEOUT)
ss_api_results = response.json()
print('is open access', ss_api_results['is_open_access'])
if r.title.startswith("Bumb"):
print(response)
print('is open access', ss_api_results['is_open_access'])
df_jeb.to_csv("pubmed_titles_abstracts_doi_pmid_100_only.csv")
| 40.085714
| 134
| 0.68211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 708
| 0.504633
|
a17f65f1db1e9d6fc0255b219c8e7f7acd085081
| 287
|
py
|
Python
|
simple_functions/__init__.py
|
JihaoXin/ci_acse1
|
6ba30368cc2000bb13aab0dc213837d530753612
|
[
"MIT"
] | null | null | null |
simple_functions/__init__.py
|
JihaoXin/ci_acse1
|
6ba30368cc2000bb13aab0dc213837d530753612
|
[
"MIT"
] | null | null | null |
simple_functions/__init__.py
|
JihaoXin/ci_acse1
|
6ba30368cc2000bb13aab0dc213837d530753612
|
[
"MIT"
] | null | null | null |
from .functions1 import my_sum, factorial
from .constants import pi
from .print import myprint
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
pass
| 28.7
| 64
| 0.811847
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 26
| 0.090592
|
a17f75ddc89a6583319e9dcd13c17dded131aa22
| 1,259
|
bzl
|
Python
|
tools/build_defs/native_tools/tool_access.bzl
|
slsyy/rules_foreign_cc
|
34ab7f86a3ab1b2381cb4820d08a1c892f55bf54
|
[
"Apache-2.0"
] | null | null | null |
tools/build_defs/native_tools/tool_access.bzl
|
slsyy/rules_foreign_cc
|
34ab7f86a3ab1b2381cb4820d08a1c892f55bf54
|
[
"Apache-2.0"
] | null | null | null |
tools/build_defs/native_tools/tool_access.bzl
|
slsyy/rules_foreign_cc
|
34ab7f86a3ab1b2381cb4820d08a1c892f55bf54
|
[
"Apache-2.0"
] | null | null | null |
# buildifier: disable=module-docstring
load(":native_tools_toolchain.bzl", "access_tool")
def get_cmake_data(ctx):
return _access_and_expect_label_copied("@rules_foreign_cc//tools/build_defs:cmake_toolchain", ctx, "cmake")
def get_ninja_data(ctx):
return _access_and_expect_label_copied("@rules_foreign_cc//tools/build_defs:ninja_toolchain", ctx, "ninja")
def get_make_data(ctx):
return _access_and_expect_label_copied("@rules_foreign_cc//tools/build_defs:make_toolchain", ctx, "make")
def _access_and_expect_label_copied(toolchain_type_, ctx, tool_name):
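    # Resolve `tool_name` from the given toolchain type. When the toolchain wraps a
    # built target, return the copied executable's path under $EXT_BUILD_ROOT and
    # carry the target along as a dependency; otherwise return the plain path.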
tool_data = access_tool(toolchain_type_, ctx, tool_name)
if tool_data.target:
# This could be made more efficient by changing the
# toolchain to provide the executable as a target
cmd_file = tool_data
for f in tool_data.target.files.to_list():
if f.path.endswith("/" + tool_data.path):
cmd_file = f
break
return struct(
deps = [tool_data.target],
# as the tool will be copied into tools directory
path = "$EXT_BUILD_ROOT/{}".format(cmd_file.path),
)
else:
return struct(
deps = [],
path = tool_data.path,
)
| 38.151515
| 111
| 0.669579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 430
| 0.341541
|
a1813bf8f98dea1f19c9411401522d50224116bd
| 5,805
|
py
|
Python
|
tests/test_model.py
|
jakehadar/py-snake
|
3c19d572afb275768c504c66d331b5727515fd71
|
[
"MIT"
] | null | null | null |
tests/test_model.py
|
jakehadar/py-snake
|
3c19d572afb275768c504c66d331b5727515fd71
|
[
"MIT"
] | null | null | null |
tests/test_model.py
|
jakehadar/py-snake
|
3c19d572afb275768c504c66d331b5727515fd71
|
[
"MIT"
] | 1
|
2021-11-30T10:14:32.000Z
|
2021-11-30T10:14:32.000Z
|
# -*- coding: utf-8 -*-
import sys
import pytest
from snake.common import Frame, Point, BoundaryCollision, SelfCollision
from snake.config import GameConfig
from snake.model import SnakeModel
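# Shared fixtures: a config with solid walls and no random food, and a 10x10 board,
# so every scenario below plays out deterministically.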
@pytest.fixture
def config():
config = GameConfig()
config.solid_walls = True
config.initial_food_count = 0
config.food_increase_interval = 0
return config
@pytest.fixture
def model(config):
"""Initial state (T0)."""
frame = Frame(10, 10)
m = SnakeModel(frame, config)
return m
class TestSnakeModelInitialState:
def test_length(self, model):
assert len(model) == 1
def test_score(self, model):
assert model.score == 0
def test_occupied_locations(self, model):
assert {model.head_location} == set(model.occupied_locations)
def test_empty_locations(self, model):
assert model.head_location not in model.empty_locations
def test_available_food_locations(self, model):
assert model.available_food_locations == model.empty_locations
@pytest.fixture
def model2(model):
"""Initial state (T0) + 3 steps forward, where each spot had food."""
model.face_up()
model.food_locations.append(model.head_location + Point(0, 1))
model.food_locations.append(model.head_location + Point(0, 2))
model.food_locations.append(model.head_location + Point(0, 3))
model.step()
model.step()
model.step()
return model
class TestSnakeEatsAndGrows:
def test_length(self, model2):
assert len(model2) == 4
def test_score(self, model2):
assert model2.score == 3
class TestBoundaryCollision:
def test_raises_scenario_1(self, config):
model = SnakeModel(Frame(3, 3), config)
model.face_up()
with pytest.raises(BoundaryCollision):
model.step()
model.step()
def test_raises_scenario_2(self, config):
model = SnakeModel(Frame(3, 3), config)
model.face_down()
with pytest.raises(BoundaryCollision):
model.step()
model.step()
def test_raises_scenario_3(self, config):
model = SnakeModel(Frame(3, 3), config)
model.face_left()
with pytest.raises(BoundaryCollision):
model.step()
model.step()
def test_raises_scenario_4(self, config):
model = SnakeModel(Frame(3, 3), config)
model.face_right()
with pytest.raises(BoundaryCollision):
model.step()
model.step()
class TestSelfCollision:
def test_valid_scenario_raises(self, model):
"""Snake turns into itself."""
model.face_up()
model.step(should_grow=True)
model.step(should_grow=True)
model.step(should_grow=True)
model.face_right()
model.step()
model.face_down()
model.step()
model.face_left()
with pytest.raises(SelfCollision):
model.step()
# The scenarios below should never raise
def test_scenario_1a(self, model):
model.face_up()
model.step(should_grow=True)
model.face_down()
model.step()
def test_scenario_1b(self, model):
model.face_down()
model.step(should_grow=True)
model.face_up()
model.step()
def test_scenario_1c(self, model):
model.face_left()
model.step(should_grow=True)
model.face_right()
model.step()
def test_scenario_1d(self, model):
model.face_right()
model.step(should_grow=True)
model.face_left()
model.step()
@pytest.mark.skipif(sys.version_info.major == 3, reason='Non-critical test failure from Python2.')
def test_scenario_2a(self, model):
model.face_up()
model.step(should_grow=True)
model.face_left()
model.face_down()
model.step()
@pytest.mark.skipif(sys.version_info.major == 3, reason='Non-critical test failure from Python2.')
def test_scenario_2b(self, model):
model.face_up()
model.step(should_grow=True)
model.face_right()
model.face_down()
model.step()
@pytest.mark.skipif(sys.version_info.major == 3, reason='Non-critical test failure from Python2.')
def test_scenario_3a(self, model):
model.face_down()
model.step(should_grow=True)
model.face_left()
model.face_up()
model.step()
@pytest.mark.skipif(sys.version_info.major == 3, reason='Non-critical test failure from Python2.')
def test_scenario_3b(self, model):
model.face_down()
model.step(should_grow=True)
model.face_right()
model.face_up()
model.step()
@pytest.mark.skipif(sys.version_info.major == 3, reason='Non-critical test failure from Python2.')
def test_scenario_4a(self, model):
model.face_left()
model.step(should_grow=True)
model.face_down()
model.face_right()
model.step()
@pytest.mark.skipif(sys.version_info.major == 3, reason='Non-critical test failure from Python2.')
def test_scenario_4b(self, model):
model.face_left()
model.step(should_grow=True)
model.face_up()
model.face_right()
model.step()
@pytest.mark.skipif(sys.version_info.major == 3, reason='Non-critical test failure from Python2.')
def test_scenario_5a(self, model):
model.face_right()
model.step(should_grow=True)
model.face_down()
model.face_left()
model.step()
@pytest.mark.skipif(sys.version_info.major == 3, reason='Non-critical test failure from Python2.')
def test_scenario_5b(self, model):
model.face_right()
model.step(should_grow=True)
model.face_up()
model.face_left()
model.step()
| 28.880597
| 102
| 0.64186
| 4,880
| 0.840655
| 0
| 0
| 2,887
| 0.49733
| 0
| 0
| 515
| 0.088717
|
a1823c37136cd59bed9a94266ef25fc93fb40d71
| 255
|
py
|
Python
|
gallery/photo/urls.py
|
andyjohn23/django-photo
|
e65ee3ab6fdad3a9d836d32b7f1026efcc728a41
|
[
"MIT"
] | null | null | null |
gallery/photo/urls.py
|
andyjohn23/django-photo
|
e65ee3ab6fdad3a9d836d32b7f1026efcc728a41
|
[
"MIT"
] | null | null | null |
gallery/photo/urls.py
|
andyjohn23/django-photo
|
e65ee3ab6fdad3a9d836d32b7f1026efcc728a41
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
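# Routes: gallery index, per-category listing, and image search.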
urlpatterns = [
path('', views.index, name="index"),
path('category/<category>/', views.CategoryListView.as_view(), name="category"),
path('search/', views.image_search, name='image-search'),
]
| 31.875
| 84
| 0.686275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 64
| 0.25098
|
a182a47e0e9e4e6e3cf93dede6480b43b9da9679
| 381
|
py
|
Python
|
book2/s4_ex2.py
|
Felipe-Tommaselli/Python4everbody_Michigan
|
f4f940c15a4b165b144d14ead79d583bf31b805b
|
[
"MIT"
] | null | null | null |
book2/s4_ex2.py
|
Felipe-Tommaselli/Python4everbody_Michigan
|
f4f940c15a4b165b144d14ead79d583bf31b805b
|
[
"MIT"
] | null | null | null |
book2/s4_ex2.py
|
Felipe-Tommaselli/Python4everbody_Michigan
|
f4f940c15a4b165b144d14ead79d583bf31b805b
|
[
"MIT"
] | null | null | null |
fname = input("Enter file name: ")
if len(fname) < 1: fname = "mbox-short.txt"
f = open(fname)
count = 0
for line in f:
    line = line.rstrip()
    words = line.split()  # a plain name, so the built-in list() is not shadowed
    if words == []: continue
    elif words[0].lower() == 'from':
        count += 1
        print(words[1])
print("There were", count, "lines in the file with From as the first word")
| 25.4
| 75
| 0.564304
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 100
| 0.262467
|
a183121368090836638181c5ae887b713f923588
| 6,358
|
py
|
Python
|
fedsimul/models/mnist/mclr.py
|
cshjin/fedsimul
|
1e2b9a9d9034fbc679dfaff059c42dea5642971d
|
[
"MIT"
] | 11
|
2021-05-07T01:28:26.000Z
|
2022-03-10T08:23:16.000Z
|
fedsimul/models/mnist/mclr.py
|
cshjin/fedsimul
|
1e2b9a9d9034fbc679dfaff059c42dea5642971d
|
[
"MIT"
] | 2
|
2021-08-13T10:12:13.000Z
|
2021-08-31T02:03:20.000Z
|
fedsimul/models/mnist/mclr.py
|
cshjin/fedsimul
|
1e2b9a9d9034fbc679dfaff059c42dea5642971d
|
[
"MIT"
] | 1
|
2021-06-08T07:23:22.000Z
|
2021-06-08T07:23:22.000Z
|
import numpy as np
import tensorflow as tf
from tqdm import trange
from fedsimul.utils.model_utils import batch_data
from fedsimul.utils.tf_utils import graph_size
from fedsimul.utils.tf_utils import process_grad
class Model(object):
'''
    TensorFlow model for the MNIST dataset using multi-class logistic regression.
Images are 28px by 28px.
'''
def __init__(self, num_classes, optimizer, gpu_id=0, seed=1):
""" Initialize the learner.
Args:
num_classes: int
optimizer: tf.train.Optimizer
gpu_id: int, default 0
seed: int, default 1
"""
# params
self.num_classes = num_classes
# create computation graph
self.graph = tf.Graph()
with self.graph.as_default():
tf.set_random_seed(123 + seed)
_created = self.create_model(optimizer)
self.features = _created[0]
self.labels = _created[1]
self.train_op = _created[2]
self.grads = _created[3]
self.eval_metric_ops = _created[4]
self.loss = _created[5]
self.saver = tf.train.Saver()
# set the gpu resources
gpu_options = tf.compat.v1.GPUOptions(visible_device_list="{}".format(gpu_id), allow_growth=True)
config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
self.sess = tf.Session(graph=self.graph, config=config)
# self.sess = tf.Session(graph=self.graph)
# REVIEW: find memory footprint and compute cost of the model
self.size = graph_size(self.graph)
with self.graph.as_default():
self.sess.run(tf.global_variables_initializer())
metadata = tf.RunMetadata()
opts = tf.profiler.ProfileOptionBuilder.float_operation()
self.flops = tf.profiler.profile(self.graph, run_meta=metadata, cmd='scope', options=opts).total_float_ops
def create_model(self, optimizer):
""" Model function for Logistic Regression.
Args:
optimizer: tf.train.Optimizer
Returns:
tuple: (features, labels, train_op, grads, eval_metric_ops, loss)
"""
features = tf.placeholder(tf.float32, shape=[None, 784], name='features')
labels = tf.placeholder(tf.int64, shape=[None, ], name='labels')
logits = tf.layers.dense(inputs=features,
units=self.num_classes,
kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
predictions = {
"classes": tf.argmax(input=logits, axis=1),
"probabilities": tf.nn.softmax(logits, name="softmax_tensor")
}
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
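        # Keep per-variable gradients separately so callers can inspect or report
        # them (see get_gradients) in addition to applying them via train_op.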
grads_and_vars = optimizer.compute_gradients(loss)
grads, _ = zip(*grads_and_vars)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=tf.train.get_global_step())
eval_metric_ops = tf.count_nonzero(tf.equal(labels, predictions["classes"]))
return features, labels, train_op, grads, eval_metric_ops, loss
def set_params(self, latest_params=None, momentum=False, gamma=0.9):
""" Set parameters from server
Args:
latest_params: list
list of tf.Variables
momentum: boolean
gamma: float
TODO: update variable with its local variable and the value from
latest_params
TODO: DO NOT set_params from the global, instead, use the global gradient to update
"""
if latest_params is not None:
with self.graph.as_default():
# previous gradient
all_vars = tf.trainable_variables()
for variable, value in zip(all_vars, latest_params):
if momentum:
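                        # Blend the current local value with the incoming global value
                        # (exponential moving average controlled by gamma).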
curr_val = self.sess.run(variable)
new_val = gamma * curr_val + (1 - gamma) * value
# TODO: use `assign` function instead of `load`
variable.load(new_val, self.sess)
else:
variable.load(value, self.sess)
def get_params(self):
""" Get model parameters.
Returns:
model_params: list
list of tf.Variables
"""
with self.graph.as_default():
model_params = self.sess.run(tf.trainable_variables())
return model_params
def get_gradients(self, data, model_len):
""" Access gradients of a given dataset.
Args:
data: dict
model_len: int
Returns:
num_samples: int
grads: tuple
"""
grads = np.zeros(model_len)
num_samples = len(data['y'])
with self.graph.as_default():
model_grads = self.sess.run(self.grads, feed_dict={self.features: data['x'],
self.labels: data['y']})
grads = process_grad(model_grads)
return num_samples, grads
def solve_inner(self, data, num_epochs=1, batch_size=32):
'''Solves local optimization problem.
Args:
data: dict with format {'x':[], 'y':[]}
num_epochs: int
batch_size: int
Returns:
soln: list
comp: float
'''
for _ in trange(num_epochs, desc='Epoch: ', leave=False, ncols=120):
for X, y in batch_data(data, batch_size):
with self.graph.as_default():
self.sess.run(self.train_op, feed_dict={self.features: X, self.labels: y})
soln = self.get_params()
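        # Estimated local computation: samples processed (whole batches only)
        # times the profiled FLOPs of the graph.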
comp = num_epochs * (len(data['y']) // batch_size) * batch_size * self.flops
return soln, comp
def test(self, data):
'''
Args:
data: dict of the form {'x': [], 'y': []}
Returns:
tot_correct: int
loss: float
'''
with self.graph.as_default():
tot_correct, loss = self.sess.run([self.eval_metric_ops, self.loss],
feed_dict={self.features: data['x'], self.labels: data['y']})
return tot_correct, loss
def close(self):
self.sess.close()
| 35.920904
| 118
| 0.574394
| 6,141
| 0.96587
| 0
| 0
| 0
| 0
| 0
| 0
| 1,936
| 0.304498
|