hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a62f8914f4df3b79cc0145fecbfc98cc209f25e4 | 2,536 | py | Python | syncopy/nwanalysis/granger.py | kajal5888/syncopy | f7d49808a09ff65eec64cda1cfb4c87a012e0c2b | [
"BSD-3-Clause"
] | null | null | null | syncopy/nwanalysis/granger.py | kajal5888/syncopy | f7d49808a09ff65eec64cda1cfb4c87a012e0c2b | [
"BSD-3-Clause"
] | null | null | null | syncopy/nwanalysis/granger.py | kajal5888/syncopy | f7d49808a09ff65eec64cda1cfb4c87a012e0c2b | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Implementation of Granger-Geweke causality
#
#
# Builtin/3rd party package imports
import numpy as np
def granger(CSD, Hfunc, Sigma):
"""
Computes the pairwise Granger-Geweke causalities
for all (non-symmetric!) channel combinations
according to Equation 8 in [1]_.
The transfer functions `Hfunc` and noise covariance
`Sigma` are expected to have been already computed.
Parameters
----------
CSD : (nFreq, N, N) :class:`numpy.ndarray`
Complex cross spectra for all channel combinations ``i,j``
`N` corresponds to number of input channels.
Hfunc : (nFreq, N, N) :class:`numpy.ndarray`
Spectral transfer functions for all channel combinations ``i,j``
Sigma : (N, N) :class:`numpy.ndarray`
The noise covariances
Returns
-------
Granger : (nFreq, N, N) :class:`numpy.ndarray`
Spectral Granger-Geweke causality between all channel
combinations. Directionality follows array
notation: causality from ``i -> j`` is ``Granger[:,i,j]``,
causality from ``j -> i`` is ``Granger[:,j,i]``
See also
--------
wilson_sf : :func:`~syncopy.connectivity.wilson_sf.wilson_sf
Spectral matrix factorization that yields the
transfer functions and noise covariances
from a cross spectral density.
Notes
-----
.. [1] Dhamala, Mukeshwar, Govindan Rangarajan, and Mingzhou Ding.
"Estimating Granger causality from Fourier and wavelet transforms
of time series data." Physical review letters 100.1 (2008): 018701.
"""
nChannels = CSD.shape[1]
auto_spectra = CSD.transpose(1, 2, 0).diagonal()
auto_spectra = np.abs(auto_spectra) # auto-spectra are real
# we need the stacked auto-spectra of the form (nChannel=3):
# S_11 S_22 S_33
# Smat(f) = S_11 S_22 S_33
# S_11 S_22 S_33
Smat = auto_spectra[:, None, :] * np.ones(nChannels)[:, None]
# Granger i->j needs H_ji entry
Hmat = np.abs(Hfunc.transpose(0, 2, 1))**2
# Granger i->j needs Sigma_ji entry
SigmaJI = np.abs(Sigma.T)
# imag part should be 0
auto_cov = np.abs(Sigma.diagonal())
# same stacking as for the auto spectra (without freq axis)
SigmaII = auto_cov[None, :] * np.ones(nChannels)[:, None]
# the denominator
denom = SigmaII.T - SigmaJI**2 / SigmaII
denom = Smat - denom * Hmat
# linear causality i -> j
Granger = np.log(Smat / denom)
return Granger
| 31.7 | 75 | 0.633675 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,916 | 0.755521 |
a632c715f3c468a9c0bbb7af69ade32291a51b6d | 365 | py | Python | sailfish/kernel/__init__.py | macfadyen/sailfish | 44752a6769a2a7566a90dd9c8df21d4e2c49d720 | [
"MIT"
] | 9 | 2021-06-29T15:43:58.000Z | 2022-03-20T10:13:26.000Z | sailfish/kernel/__init__.py | macfadyen/sailfish | 44752a6769a2a7566a90dd9c8df21d4e2c49d720 | [
"MIT"
] | 1 | 2021-07-14T02:24:53.000Z | 2021-07-14T02:24:53.000Z | sailfish/kernel/__init__.py | macfadyen/sailfish | 44752a6769a2a7566a90dd9c8df21d4e2c49d720 | [
"MIT"
] | 8 | 2021-06-09T09:11:15.000Z | 2021-11-02T20:25:27.000Z | """
A Python module to facilitate JIT-compiled CPU-GPU agnostic compute kernels.
Kernel libraries are collections of functions written in C code that can be
compiled for CPU execution using a normal C compiler via the CFFI module, or
for GPU execution using a CUDA or ROCm compiler via cupy.
"""
from . import library
from . import parse_api
from . import system
| 30.416667 | 76 | 0.786301 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 296 | 0.810959 |
a63318a3fdd93456eccbc73216923e5d8710b3ba | 2,574 | py | Python | spring_cloud/commons/client/loadbalancer/supplier/base.py | haribo0915/Spring-Cloud-in-Python | 0bcd7093869c797df14428bf2d1b0a779f96e573 | [
"Apache-2.0"
] | 5 | 2020-10-06T09:48:23.000Z | 2020-10-07T13:19:46.000Z | spring_cloud/commons/client/loadbalancer/supplier/base.py | haribo0915/Spring-Cloud-in-Python | 0bcd7093869c797df14428bf2d1b0a779f96e573 | [
"Apache-2.0"
] | 5 | 2020-10-05T09:57:01.000Z | 2020-10-12T19:52:48.000Z | spring_cloud/commons/client/loadbalancer/supplier/base.py | haribo0915/Spring-Cloud-in-Python | 0bcd7093869c797df14428bf2d1b0a779f96e573 | [
"Apache-2.0"
] | 8 | 2020-10-05T06:34:49.000Z | 2020-10-07T13:19:46.000Z | # -*- coding: utf-8 -*-
"""
Since the load-balancer is responsible for choosing one instance
per service request from a list of instances. We need a ServiceInstanceListSupplier for
each service to decouple the source of the instances from load-balancers.
"""
# standard library
from abc import ABC, abstractmethod
from typing import List
# scip plugin
from spring_cloud.commons.client import ServiceInstance
from spring_cloud.commons.client.discovery import DiscoveryClient
__author__ = "Waterball (johnny850807@gmail.com)"
__license__ = "Apache 2.0"
class ServiceInstanceListSupplier(ABC):
    """
    Non-reactive source of the instances belonging to one service.

    (Spring Cloud implements this supplier reactively: there the supplier
    returns an Observable broadcasting the instances on every change.
    Adopting reactive programming here may be considered in the future.)
    """

    @property
    @abstractmethod
    def service_id(self) -> str:
        """
        :return: (str) the service's id
        """

    @abstractmethod
    def get(self, request=None) -> List[ServiceInstance]:
        """
        :param request (opt) TODO not sure will we need this,
                this extension was designed by spring-cloud.
        :return: (*ServiceInstance) a list of instances
        """
class FixedServiceInstanceListSupplier(ServiceInstanceListSupplier):
    """
    Supplier backed by a fixed list of instances handed over at
    construction time (i.e. the list never changes afterwards).
    """

    def __init__(self, service_id: str, instances: List[ServiceInstance]):
        """
        :param service_id: (str)
        :param instances: (*ServiceInstance)
        """
        self._service_id = service_id
        self._instances = instances

    @property
    def service_id(self) -> str:
        return self._service_id

    def get(self, request=None) -> List[ServiceInstance]:
        return self._instances
class DiscoveryClientServiceInstanceListSupplier(ServiceInstanceListSupplier):
    """
    Adapter that answers instance queries by delegating them to a
    DiscoveryClient.
    """

    def __init__(self, service_id: str, discovery_client: DiscoveryClient):
        """
        :param service_id: (str)
        :param discovery_client: (DiscoveryClient)
        """
        self.__service_id = service_id
        self.__delegate = discovery_client

    def get(self, request=None) -> List[ServiceInstance]:
        return self.__delegate.get_instances(self.service_id)

    @property
    def service_id(self) -> str:
        return self.__service_id
| 29.930233 | 87 | 0.685315 | 2,012 | 0.781663 | 0 | 0 | 585 | 0.227273 | 0 | 0 | 1,258 | 0.488733 |
a6340747773815299adf15817934a33c2b1bc670 | 314 | py | Python | Chapter 07/Chap07_Example7.71.py | Anancha/Programming-Techniques-using-Python | e80c329d2a27383909d358741a5cab03cb22fd8b | [
"MIT"
] | null | null | null | Chapter 07/Chap07_Example7.71.py | Anancha/Programming-Techniques-using-Python | e80c329d2a27383909d358741a5cab03cb22fd8b | [
"MIT"
] | null | null | null | Chapter 07/Chap07_Example7.71.py | Anancha/Programming-Techniques-using-Python | e80c329d2a27383909d358741a5cab03cb22fd8b | [
"MIT"
] | null | null | null | myl1 = []
num = int(input("Enter the number of elements: "))
for loop in range(num):
myl1.append(input(f"Enter element at index {loop} : "))
print(myl1)
print(type(myl1))
myt1 = tuple(myl1)
print(myt1)
print(type(myt1))
print("The elements of tuple object are: ")
for loop in myt1:
print(loop) | 20.933333 | 61 | 0.656051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.328025 |
a6341371a2c86884d45f4514cc575e4cd83574f3 | 2,655 | py | Python | pytplot/tplot_math/split_vec.py | xnchu/PyTplot | 65ad594ea216d56f0acf8886d8cf3b14f48ed333 | [
"MIT"
] | 12 | 2018-02-21T21:29:52.000Z | 2022-01-23T19:49:38.000Z | pytplot/tplot_math/split_vec.py | xnchu/PyTplot | 65ad594ea216d56f0acf8886d8cf3b14f48ed333 | [
"MIT"
] | 128 | 2018-07-25T22:38:44.000Z | 2022-01-17T06:21:58.000Z | pytplot/tplot_math/split_vec.py | xnchu/PyTplot | 65ad594ea216d56f0acf8886d8cf3b14f48ed333 | [
"MIT"
] | 25 | 2018-01-30T20:18:07.000Z | 2021-12-25T05:07:30.000Z | import pytplot
import numpy as np
def split_vec(tvar, new_name=None, columns='all', suffix=None):
    """
    Splits up 2D data into many 1D tplot variables.

    .. note::
        This analysis routine assumes the data is no more than 2 dimensions.
        If there are more, they may become flattened!

    Parameters:
        tvar : str
            Name of tplot variable to split up
        new_name : str, optional
            Base name for the new tplot variables; a per-column suffix is
            appended.  Defaults to ``tvar``.
        columns : list of ints, optional
            The specific column numbers to grab from the data.  An entry may
            also be a two-element list ``[start, end]`` denoting an inclusive
            range of columns merged into one variable.  The default is to
            split all columns.
        suffix : list of str, optional
            One suffix per column.  Defaults to ``["_x", "_y", "_z"]`` for
            3 columns, otherwise ``["_0", "_1", ...]``.

    Returns:
        List of the names of the created tplot variables, or None on error.

    Examples:
        >>> pytplot.store_data('b', data={'x':[2,5,8,11,14,17,20], 'y':[[1,1,1,1,1,1],[2,2,5,4,1,1],[100,100,3,50,1,1],[4,4,8,58,1,1],[5,5,9,21,1,1],[6,6,2,2,1,1],[7,7,1,6,1,1]]})
        >>> pytplot.tplot_math.split_vec('b', columns=[0,[1,3],4])
    """

    # Make sure the tvar is found
    if tvar not in pytplot.data_quants:
        print(f"Error: {tvar} not found in memory.")
        return

    # Give a default to the new name
    if new_name is None:
        new_name = tvar

    # Gather data from the tvar
    alldata = pytplot.get_data(tvar)
    time = alldata[0]
    data = alldata[1]
    dim = data.shape

    # If already size one, simply return
    if len(dim) == 1:
        return [tvar]

    vec_length = dim[1]

    # Determine what the suffix list will be
    if suffix is not None:
        if vec_length > len(suffix):
            # Bugfix: previously this only printed the error and fell
            # through, hitting an IndexError below.  Abort instead.
            print(f"split_vec error: number of columns ({vec_length}) is greater than the number of suffix entered")
            return
    else:
        if vec_length == 3:
            suffix = ["_x", "_y", "_z"]
        else:
            suffix = ["_" + str(i) for i in range(vec_length)]

    created_variables = []

    # grab column data
    if columns == 'all':
        columns = range(vec_length)

    for i in columns:
        # an entry may be an inclusive [start, end] range of columns
        if isinstance(i, list):
            range_start = i[0]
            range_end = i[1]
        else:
            range_start = i
            range_end = i
        split_col = list(range(range_start, range_end + 1))

        # Bugfix: index the suffix list by the first column number;
        # ``suffix[i]`` raised a TypeError whenever ``i`` was a
        # [start, end] list (as in the documented example).
        split_name = new_name + suffix[range_start]
        created_variables.append(split_name)

        data_for_tplot = {'x': time, 'y': data[:, split_col].squeeze()}
        if not pytplot.store_data(split_name, data=data_for_tplot):
            raise Exception(f"Failed to store {split_name} in pytplot.")

    return created_variables
| 30.170455 | 179 | 0.588324 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,332 | 0.501695 |
a634a7d59658a71a40ac1013ecebacf7bdf8218a | 967 | py | Python | packages/artfx/mayaLib/tests/maya_test.py | Soulayrol/Pipeline | d0a2c834c3772198f0ca5f0ba6ea8b5e41a419e7 | [
"MIT"
] | null | null | null | packages/artfx/mayaLib/tests/maya_test.py | Soulayrol/Pipeline | d0a2c834c3772198f0ca5f0ba6ea8b5e41a419e7 | [
"MIT"
] | null | null | null | packages/artfx/mayaLib/tests/maya_test.py | Soulayrol/Pipeline | d0a2c834c3772198f0ca5f0ba6ea8b5e41a419e7 | [
"MIT"
] | null | null | null | import os
import sys
import maya.standalone
import mayaLib
print("=" * 30)
print("This is mayaLib package test")
print("=" * 30)
print("Initializing maya standalone ...")
maya.standalone.initialize(name="python")
# Create engine
maya_engine = mayaLib.MayaEngine()
print("Engine : " + str(maya_engine))
# Get engine path
print("Current file location : " + str(maya_engine.get_file_path()))
# Save
maya_engine_scene = os.path.join(os.path.join(os.environ["USERPROFILE"]), "Desktop", "test.ma")
maya_engine.save(maya_engine_scene)
print("Current file location after save : " + maya_engine.get_file_path())
# Open as
maya_engine.open_as(maya_engine.get_file_path())
print("Open as ")
print("Current file location after open as : " + maya_engine.get_file_path())
# Open
maya_engine.open(maya_engine_scene)
print("Current file location after open : " + maya_engine.get_file_path())
print("Uninitialized maya standalone ...")
maya.standalone.uninitialize()
sys.exit(0)
| 28.441176 | 95 | 0.75181 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 358 | 0.370217 |
a6353e0352eb39b16f50a9f0e0699f0ab72b2a30 | 756 | py | Python | 22. Generate Parentheses/solution1.py | sunshot/LeetCode | 8f6503201831055f1d49ed3abb25be44a13ec317 | [
"MIT"
] | null | null | null | 22. Generate Parentheses/solution1.py | sunshot/LeetCode | 8f6503201831055f1d49ed3abb25be44a13ec317 | [
"MIT"
] | null | null | null | 22. Generate Parentheses/solution1.py | sunshot/LeetCode | 8f6503201831055f1d49ed3abb25be44a13ec317 | [
"MIT"
] | null | null | null | from typing import List
class Solution:
    def generateParenthesis(self, n: int) -> List[str]:
        """Return all combinations of ``n`` pairs of well-formed parentheses.

        Every valid sequence decomposes uniquely as ``(A)B`` where ``A``
        holds ``i`` pairs and ``B`` holds ``n - 1 - i`` pairs, so we recurse
        on both parts for every split point ``i``.

        The hard-coded branches for ``n == 1`` and ``n == 2`` were removed:
        they were redundant, since the recursion yields exactly the same
        lists (in the same order) from the ``n == 0`` base case alone.

        :param n: number of parenthesis pairs (n >= 0)
        :return: list of all valid strings; for n == 0 this is ``['']``
        """
        if n == 0:
            return ['']
        ans = []
        for i in range(n):
            for left in self.generateParenthesis(i):
                for right in self.generateParenthesis(n - 1 - i):
                    ans.append('({}){}'.format(left, right))
        return ans
if __name__ == '__main__':
    # Quick demo: print every valid combination for n = 3 and n = 4.
    solution = Solution()
    for n in (3, 4):
        ans = solution.generateParenthesis(n)
        print(ans)
a6359dcbaf8a3e9343cefe0d8af4e3a9e190144b | 6,094 | py | Python | examples/openmdao.examples.mdao/openmdao/examples/mdao/sellar_BLISS.py | swryan/OpenMDAO-Framework | f50d60e1a8cadac7fe03d26ffad5fb660b2a15ec | [
"Apache-2.0"
] | null | null | null | examples/openmdao.examples.mdao/openmdao/examples/mdao/sellar_BLISS.py | swryan/OpenMDAO-Framework | f50d60e1a8cadac7fe03d26ffad5fb660b2a15ec | [
"Apache-2.0"
] | null | null | null | examples/openmdao.examples.mdao/openmdao/examples/mdao/sellar_BLISS.py | swryan/OpenMDAO-Framework | f50d60e1a8cadac7fe03d26ffad5fb660b2a15ec | [
"Apache-2.0"
] | null | null | null | """
Solution of the Sellar analytical problem using classic BLISS.
(Bi-Level Integrated System Synthesis)
MDA solved with a Broyden solver.
Global sensitivity calculated by finite-differencing the MDA-coupled
system. The MDA should be replaced with solution of the GSE to fully
match the original Sobiesky-Agte implementation.
"""
from openmdao.main.api import Assembly, SequentialWorkflow
from openmdao.lib.datatypes.api import Float, Array
from openmdao.lib.differentiators.finite_difference import FiniteDifference
from openmdao.lib.drivers.api import CONMINdriver, BroydenSolver, \
SensitivityDriver, FixedPointIterator
from openmdao.lib.optproblems import sellar
class SellarBLISS(Assembly):
    """ Optimization of the Sellar problem using the BLISS algorithm
    Disciplines coupled with FixedPointIterator.
    """
    # Top-level copies of the design variables; the FixedPointIterator
    # constraints below keep them synchronized with dis1.z1/z2 and dis1.x1.
    z_store = Array([0,0],dtype=Float)
    x1_store = Float(0.0)
    def configure(self):
        """ Creates a new Assembly with this problem
        Optimal Design at (1.9776, 0, 0)
        Optimal Objective = 3.18339"""
        # Disciplines
        self.add('dis1', sellar.Discipline1())
        self.add('dis2', sellar.Discipline2())
        # Objective/constraint expressions shared by several drivers below.
        objective = '(dis1.x1)**2 + dis1.z2 + dis1.y1 + exp(-dis2.y2)'
        constraint1 = 'dis1.y1 > 3.16'
        constraint2 = 'dis2.y2 < 24.0'
        # Top level is Fixed-Point Iteration
        self.add('driver', FixedPointIterator())
        self.driver.add_parameter('dis1.x1', low= 0.0, high=10.0, start=1.0)
        self.driver.add_parameter(['dis1.z1','dis2.z1'], low=-10.0, high=10.0, start=5.0)
        self.driver.add_parameter(['dis1.z2','dis2.z2'], low= 0.0, high=10.0,start=2.0)
        self.driver.add_constraint('x1_store = dis1.x1')
        self.driver.add_constraint('z_store[0] = dis1.z1')
        self.driver.add_constraint('z_store[1] = dis1.z2')
        self.driver.max_iteration = 50
        self.driver.tolerance = .001
        # Multidisciplinary Analysis: Broyden solver closes the y1/y2
        # coupling loop between the two disciplines.
        self.add('mda', BroydenSolver())
        self.mda.add_parameter('dis1.y2', low=-9.e99, high=9.e99,start=0.0)
        self.mda.add_constraint('dis2.y2 = dis1.y2')
        self.mda.add_parameter('dis2.y1', low=-9.e99, high=9.e99,start=3.16)
        self.mda.add_constraint('dis2.y1 = dis1.y1')
        # Discipline 1 Sensitivity Analysis (finite differences over x1)
        self.add('sa_dis1', SensitivityDriver())
        self.sa_dis1.workflow.add(['dis1'])
        self.sa_dis1.add_parameter('dis1.x1', low= 0.0, high=10.0, fd_step=.001)
        self.sa_dis1.add_constraint(constraint1)
        self.sa_dis1.add_constraint(constraint2)
        self.sa_dis1.add_objective(objective, name='obj')
        self.sa_dis1.differentiator = FiniteDifference()
        self.sa_dis1.default_stepsize = 1.0e-6
        # Discipline 2 Sensitivity Analysis
        # dis2 has no local parameter, so there is no need to treat it as
        # a subsystem.
        # System Level Sensitivity Analysis
        # Note, we cheat here and run an MDA instead of solving the
        # GSE equations. Have to put this on the TODO list.
        self.add('ssa', SensitivityDriver())
        self.ssa.workflow.add(['mda'])
        self.ssa.add_parameter(['dis1.z1','dis2.z1'], low=-10.0, high=10.0)
        self.ssa.add_parameter(['dis1.z2','dis2.z2'], low= 0.0, high=10.0)
        self.ssa.add_constraint(constraint1)
        self.ssa.add_constraint(constraint2)
        self.ssa.add_objective(objective, name='obj')
        self.ssa.differentiator = FiniteDifference()
        self.ssa.default_stepsize = 1.0e-6
        # Discipline Optimization
        # (Only discipline1 has an optimization input)
        # Optimizes the linearized objective in x1_store around dis1.x1.
        self.add('bbopt1', CONMINdriver())
        self.bbopt1.add_parameter('x1_store', low=0.0, high=10.0, start=1.0)
        self.bbopt1.add_objective('sa_dis1.F[0] + sa_dis1.dF[0][0]*(x1_store-dis1.x1)')
        self.bbopt1.add_constraint('sa_dis1.G[0] + sa_dis1.dG[0][0]*(x1_store-dis1.x1) < 0')
        # this one is technically unnecessary
        self.bbopt1.add_constraint('sa_dis1.G[1] + sa_dis1.dG[1][0]*(x1_store-dis1.x1) < 0')
        # trust-region style move limits on the step in x1
        self.bbopt1.add_constraint('(x1_store-dis1.x1)<.5')
        self.bbopt1.add_constraint('(x1_store-dis1.x1)>-.5')
        self.bbopt1.iprint = 0
        self.bbopt1.linobj = True
        # Global Optimization over the shared design variables z1, z2,
        # again on a linearization with move limits.
        self.add('sysopt', CONMINdriver())
        self.sysopt.add_parameter('z_store[0]', low=-10.0, high=10.0, start=5.0)
        self.sysopt.add_parameter('z_store[1]', low=0.0, high=10.0, start=2.0)
        self.sysopt.add_objective('ssa.F[0]+ ssa.dF[0][0]*(z_store[0]-dis1.z1) + ssa.dF[0][1]*(z_store[1]-dis1.z2)')
        self.sysopt.add_constraint('ssa.G[0] + ssa.dG[0][0]*(z_store[0]-dis1.z1) + ssa.dG[0][1]*(z_store[1]-dis1.z2) < 0')
        self.sysopt.add_constraint('ssa.G[1] + ssa.dG[1][0]*(z_store[0]-dis1.z1) + ssa.dG[1][1]*(z_store[1]-dis1.z2) < 0')
        self.sysopt.add_constraint('z_store[0]-dis1.z1<.5')
        self.sysopt.add_constraint('z_store[0]-dis1.z1>-.5')
        self.sysopt.add_constraint('z_store[1]-dis1.z2<.5')
        self.sysopt.add_constraint('z_store[1]-dis1.z2>-.5')
        self.sysopt.iprint = 0
        self.sysopt.linobj = True
        # Order matters: sensitivities first, then the two optimizations.
        self.driver.workflow = SequentialWorkflow()
        self.driver.workflow.add(['ssa', 'sa_dis1', 'bbopt1', 'sysopt'])
# NOTE: this file uses Python 2 print statements.
if __name__ == "__main__": # pragma: no cover
    import time
    import math
    # Build the assembly, run the BLISS iteration, and report the result.
    prob = SellarBLISS()
    prob.name = "top"
    tt = time.time()
    prob.run()
    print "\n"
    print "Minimum found at (%f, %f, %f)" % (prob.dis1.z1, \
                                             prob.dis1.z2, \
                                             prob.dis1.x1)
    # NOTE(review): "Couping" looks like a typo for "Coupling" in this
    # printed message (runtime string, left unchanged here).
    print "Couping vars: %f, %f" % (prob.dis1.y1, prob.dis2.y2)
    print "Minimum objective: ", (prob.dis1.x1)**2 + prob.dis1.z2 + prob.dis1.y1 + math.exp(-prob.dis2.y2)
    print "Elapsed time: ", time.time()-tt, "seconds"
a636ae28679d3b242e5baf775599edb114840da0 | 13,743 | py | Python | simulation/aws-robomaker-sample-application-deepracer/simulation_ws/src/sagemaker_rl_agent/markov/rollout_worker.py | Lacan82/deepracer | 4503480cf80993f1e94cec8d26d783d6b2121cd8 | [
"Apache-2.0"
] | 16 | 2019-12-24T06:46:31.000Z | 2022-03-31T00:13:39.000Z | simulation/aws-robomaker-sample-application-deepracer/simulation_ws/src/sagemaker_rl_agent/markov/rollout_worker.py | Lacan82/deepracer | 4503480cf80993f1e94cec8d26d783d6b2121cd8 | [
"Apache-2.0"
] | 4 | 2019-11-02T16:19:14.000Z | 2019-11-02T21:31:30.000Z | simulation/aws-robomaker-sample-application-deepracer/simulation_ws/src/sagemaker_rl_agent/markov/rollout_worker.py | Lacan82/deepracer | 4503480cf80993f1e94cec8d26d783d6b2121cd8 | [
"Apache-2.0"
] | 5 | 2020-02-11T22:13:07.000Z | 2020-12-15T16:46:15.000Z | """
this rollout worker:
- restores a model from disk
- evaluates a predefined number of episodes
- contributes them to a distributed memory
- exits
"""
import argparse
import json
import math
import os
import sys
import time
import logging
import traceback
import markov.deepracer_memory as deepracer_memory
from google.protobuf import text_format
from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState
import markov
import markov.defaults as defaults
from markov.s3_boto_data_store import S3BotoDataStore, S3BotoDataStoreParameters
from markov.s3_client import SageS3Client
from markov.utils import load_model_metadata
from rl_coach.base_parameters import TaskParameters, DistributedCoachSynchronizationType
from rl_coach.core_types import RunPhase, EnvironmentEpisodes, EnvironmentSteps
from rl_coach.data_stores.data_store import DataStoreParameters, SyncFiles
from rl_coach.logger import screen
from rl_coach.memories.backend.redis import RedisPubSubMemoryBackendParameters
from rl_coach.utils import short_dynamic_import
from markov import utils
logger = utils.Logger(__name__, logging.INFO).get_logger()
from gym.envs.registration import register
from gym.envs.registration import make
CUSTOM_FILES_PATH = "./custom_files"
if not os.path.exists(CUSTOM_FILES_PATH):
os.makedirs(CUSTOM_FILES_PATH)
# Q: specify alternative distributed memory, or should this go in the preset?
# A: preset must define distributed memory to be used. we aren't going to take
# a non-distributed preset and automatically distribute it.
def has_checkpoint(checkpoint_dir):
    """
    Return True when ``checkpoint_dir`` exists, is non-empty, and
    contains a file named "checkpoint"; False otherwise.
    """
    if not os.path.isdir(checkpoint_dir):
        return False
    if not os.listdir(checkpoint_dir):
        return False
    return os.path.isfile(os.path.join(checkpoint_dir, "checkpoint"))
def wait_for_checkpoint(checkpoint_dir, data_store=None, timeout=10):
    """
    block until there is a checkpoint in checkpoint_dir

    Polls up to ``timeout`` times, sleeping 10 seconds between attempts,
    so the total wait is roughly ``timeout * 10`` seconds.
    NOTE(review): the failure message below reports the raw ``timeout``
    value as "seconds", which understates the actual wait -- confirm the
    intended units.

    :param checkpoint_dir: directory watched for a "checkpoint" file
    :param data_store: optional store; when given, ``load_from_store()``
        is called before each poll to pull fresh files down
    :param timeout: number of poll attempts (each 10 s apart)
    :raises ValueError: if no checkpoint appears after all attempts
    """
    for i in range(timeout):
        if data_store:
            data_store.load_from_store()
        if has_checkpoint(checkpoint_dir):
            return
        time.sleep(10)
    # one last time
    if has_checkpoint(checkpoint_dir):
        return
    utils.json_format_logger("checkpoint never found in {}, Waited {} seconds. Job failed!".format(checkpoint_dir, timeout),
                **utils.build_system_error_dict(utils.SIMAPP_SIMULATION_WORKER_EXCEPTION, utils.SIMAPP_EVENT_ERROR_CODE_503))
    traceback.print_exc()
    raise ValueError((
        'Waited {timeout} seconds, but checkpoint never found in '
        '{checkpoint_dir}'
    ).format(
        timeout=timeout,
        checkpoint_dir=checkpoint_dir,
    ))
def get_latest_checkpoint(checkpoint_dir):
    """
    Return the step number of the latest checkpoint recorded in the
    TensorFlow "checkpoint" state file inside ``checkpoint_dir`` (the
    integer prefix before "_Step" in ``model_checkpoint_path``), or
    None when no state file exists.

    Fixes: the state file handle was previously opened without ever
    being closed (bare ``open(...).read()``); it is now read inside a
    ``with`` block.  The missing-file fall-through is made an explicit
    ``return None``.
    """
    state_path = os.path.join(checkpoint_dir, 'checkpoint')
    if not os.path.exists(state_path):
        return None
    ckpt = CheckpointState()
    with open(state_path, 'r') as state_file:
        text_format.Merge(state_file.read(), ckpt)
    rel_path = ckpt.model_checkpoint_path
    return int(rel_path.split('_Step')[0])
def download_customer_reward_function(s3_client, reward_file_s3_key):
    """
    Download the customer's reward function from S3 into
    ``CUSTOM_FILES_PATH/customer_reward_function.py``.

    On download failure the error is logged and the whole process exits
    with status 1 -- the simulation cannot proceed without it.

    :param s3_client: SageS3Client used for the download
    :param reward_file_s3_key: S3 key of the reward function file
    """
    reward_function_local_path = os.path.join(CUSTOM_FILES_PATH, "customer_reward_function.py")
    success_reward_function_download = s3_client.download_file(s3_key=reward_file_s3_key,
                                                               local_path=reward_function_local_path)
    if not success_reward_function_download:
        utils.json_format_logger("Could not download the customer reward function file. Job failed!",
                 **utils.build_system_error_dict(utils.SIMAPP_SIMULATION_WORKER_EXCEPTION, utils.SIMAPP_EVENT_ERROR_CODE_503))
        traceback.print_exc()
        sys.exit(1)
def download_custom_files_if_present(s3_client, s3_prefix):
    """
    Attempt to fetch the optional custom environment and preset files
    from S3 into CUSTOM_FILES_PATH.

    :param s3_client: SageS3Client used for the downloads
    :param s3_prefix: S3 key prefix under which the custom files live
    :return: tuple of booleans (preset_downloaded, environment_downloaded)
    """
    env_s3_key = os.path.normpath(s3_prefix + "/environments/deepracer_racetrack_env.py")
    env_local_path = os.path.join(CUSTOM_FILES_PATH, "deepracer_racetrack_env.py")
    success_environment_download = s3_client.download_file(s3_key=env_s3_key,
                                                           local_path=env_local_path)
    preset_s3_key = os.path.normpath(s3_prefix + "/presets/preset.py")
    preset_local_path = os.path.join(CUSTOM_FILES_PATH, "preset.py")
    success_preset_download = s3_client.download_file(s3_key=preset_s3_key,
                                                      local_path=preset_local_path)
    return success_preset_download, success_environment_download
def should_stop(checkpoint_dir):
    """
    Return True (after logging a goodbye message) when the trainer has
    dropped its "finished" marker file into ``checkpoint_dir``.
    """
    finished_marker = os.path.join(checkpoint_dir, SyncFiles.FINISHED.value)
    if not os.path.exists(finished_marker):
        return False
    logger.info("Received termination signal from trainer. Goodbye.")
    return True
def rollout_worker(graph_manager, checkpoint_dir, data_store, num_workers, memory_backend_params):
    """
    wait for first checkpoint then perform rollouts using the model

    :param graph_manager: rl_coach graph manager driving the agent(s)
    :param checkpoint_dir: directory where model checkpoints appear
    :param data_store: store used to pull fresh checkpoints from S3
    :param num_workers: size of the worker pool (playing steps are split
        evenly across workers)
    :param memory_backend_params: Redis pub/sub parameters used to push
        collected experience back to the trainer
    """
    wait_for_checkpoint(checkpoint_dir, data_store)
    task_parameters = TaskParameters()
    task_parameters.__dict__['checkpoint_restore_dir'] = checkpoint_dir
    graph_manager.create_graph(task_parameters)
    graph_manager.reset_internal_state()
    # Replace every agent's memory backend so collected episodes are
    # published to the distributed (Redis-backed) rollout memory.
    for level in graph_manager.level_managers:
        for agent in level.agents.values():
            agent.memory.memory_backend = deepracer_memory.DeepRacerRolloutBackEnd(memory_backend_params,
                                                graph_manager.agent_params.algorithm.num_consecutive_playing_steps)
    with graph_manager.phase_context(RunPhase.TRAIN):
        last_checkpoint = 0
        # Each worker plays its share of the configured playing steps.
        act_steps = math.ceil((graph_manager.agent_params.algorithm.num_consecutive_playing_steps.num_steps) / num_workers)
        for i in range(int(graph_manager.improve_steps.num_steps/act_steps)):
            # Stop as soon as the trainer signals completion.
            if should_stop(checkpoint_dir):
                break
            try:
                # This will only work for DeepRacerRacetrackEnv enviroments
                graph_manager.top_level_manager.environment.env.env.set_allow_servo_step_signals(True)
            except Exception as ex:
                utils.json_format_logger("Method not defined in enviroment class: {}".format(ex),
                           **utils.build_system_error_dict(utils.SIMAPP_SIMULATION_WORKER_EXCEPTION, utils.SIMAPP_EVENT_ERROR_CODE_500))
            # Act for the configured unit (steps vs whole episodes).
            if type(graph_manager.agent_params.algorithm.num_consecutive_playing_steps) == EnvironmentSteps:
                graph_manager.act(EnvironmentSteps(num_steps=act_steps), wait_for_full_episodes=graph_manager.agent_params.algorithm.act_for_full_episodes)
            elif type(graph_manager.agent_params.algorithm.num_consecutive_playing_steps) == EnvironmentEpisodes:
                graph_manager.act(EnvironmentEpisodes(num_steps=act_steps))
            try:
                # This will only work for DeepRacerRacetrackEnv enviroments
                graph_manager.top_level_manager.environment.env.env.set_allow_servo_step_signals(False)
                graph_manager.top_level_manager.environment.env.env.stop_car()
            except Exception as ex:
                utils.json_format_logger("Method not defined in enviroment class: {}".format(ex),
                           **utils.build_system_error_dict(utils.SIMAPP_SIMULATION_WORKER_EXCEPTION, utils.SIMAPP_EVENT_ERROR_CODE_500))
            # SYNC mode: block until the next checkpoint is available.
            if graph_manager.agent_params.algorithm.distributed_coach_synchronization_type == DistributedCoachSynchronizationType.SYNC:
                data_store.load_from_store(expected_checkpoint_number=last_checkpoint+1)
                last_checkpoint = get_latest_checkpoint(checkpoint_dir)
                graph_manager.restore_checkpoint()
            # ASYNC mode: pick up a newer checkpoint only if one exists.
            if graph_manager.agent_params.algorithm.distributed_coach_synchronization_type == DistributedCoachSynchronizationType.ASYNC:
                new_checkpoint = get_latest_checkpoint(checkpoint_dir)
                if new_checkpoint > last_checkpoint:
                    graph_manager.restore_checkpoint()
                    last_checkpoint = new_checkpoint
def main():
    """
    Entry point for the rollout worker process.

    Parses CLI/environment configuration, downloads the reward function,
    hyperparameters and optional custom preset/environment files from S3,
    registers the gym environment, wires up the Redis memory backend and
    the S3 data store, then hands control to :func:`rollout_worker`.
    """
    screen.set_use_colors(False)
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--checkpoint_dir',
                        help='(string) Path to a folder containing a checkpoint to restore the model from.',
                        type=str,
                        default='./checkpoint')
    parser.add_argument('--s3_bucket',
                        help='(string) S3 bucket',
                        type=str,
                        default=os.environ.get("SAGEMAKER_SHARED_S3_BUCKET", "gsaur-test"))
    parser.add_argument('--s3_prefix',
                        help='(string) S3 prefix',
                        type=str,
                        default=os.environ.get("SAGEMAKER_SHARED_S3_PREFIX", "sagemaker"))
    parser.add_argument('--num-workers',
                        help="(int) The number of workers started in this pool",
                        type=int,
                        default=1)
    # NOTE(review): --redis_ip and --redis_port are parsed but not used
    # below -- the IP comes from s3_client.get_ip() and the backend port
    # is hard-wired to 6379.
    parser.add_argument('-r', '--redis_ip',
                        help="(string) IP or host for the redis server",
                        default='localhost',
                        type=str)
    parser.add_argument('-rp', '--redis_port',
                        help="(int) Port of the redis server",
                        default=6379,
                        type=int)
    parser.add_argument('--aws_region',
                        help='(string) AWS region',
                        type=str,
                        default=os.environ.get("APP_REGION", "us-east-1"))
    parser.add_argument('--reward_file_s3_key',
                        help='(string) Reward File S3 Key',
                        type=str,
                        default=os.environ.get("REWARD_FILE_S3_KEY", None))
    parser.add_argument('--model_metadata_s3_key',
                        help='(string) Model Metadata File S3 Key',
                        type=str,
                        default=os.environ.get("MODEL_METADATA_FILE_S3_KEY", None))
    args = parser.parse_args()
    s3_client = SageS3Client(bucket=args.s3_bucket, s3_prefix=args.s3_prefix, aws_region=args.aws_region)
    logger.info("S3 bucket: %s" % args.s3_bucket)
    logger.info("S3 prefix: %s" % args.s3_prefix)
    # Load the model metadata
    model_metadata_local_path = os.path.join(CUSTOM_FILES_PATH, 'model_metadata.json')
    load_model_metadata(s3_client, args.model_metadata_s3_key, model_metadata_local_path)
    # Download reward function (mandatory: abort the job without it)
    if not args.reward_file_s3_key:
        utils.json_format_logger("Customer reward S3 key not supplied for s3 bucket {} prefix {}. Job failed!".format(args.s3_bucket, args.s3_prefix),
                 **utils.build_system_error_dict(utils.SIMAPP_SIMULATION_WORKER_EXCEPTION, utils.SIMAPP_EVENT_ERROR_CODE_503))
        traceback.print_exc()
        sys.exit(1)
    download_customer_reward_function(s3_client, args.reward_file_s3_key)
    # Register the gym enviroment, this will give clients the ability to creat the enviroment object
    register(id=defaults.ENV_ID, entry_point=defaults.ENTRY_POINT,
             max_episode_steps=defaults.MAX_STEPS, reward_threshold=defaults.THRESHOLD)
    redis_ip = s3_client.get_ip()
    logger.info("Received IP from SageMaker successfully: %s" % redis_ip)
    # Download hyperparameters from SageMaker (optional; fall back to {})
    hyperparameters_file_success = False
    hyperparams_s3_key = os.path.normpath(args.s3_prefix + "/ip/hyperparameters.json")
    hyperparameters_file_success = s3_client.download_file(s3_key=hyperparams_s3_key,
                                                           local_path="hyperparameters.json")
    sm_hyperparams_dict = {}
    if hyperparameters_file_success:
        logger.info("Received Sagemaker hyperparameters successfully!")
        with open("hyperparameters.json") as fp:
            sm_hyperparams_dict = json.load(fp)
    else:
        logger.info("SageMaker hyperparameters not found.")
    # Prefer a customer-supplied preset; otherwise build the default graph.
    preset_file_success, _ = download_custom_files_if_present(s3_client, args.s3_prefix)
    if preset_file_success:
        preset_location = os.path.join(CUSTOM_FILES_PATH, "preset.py")
        preset_location += ":graph_manager"
        graph_manager = short_dynamic_import(preset_location, ignore_module_case=True)
        logger.info("Using custom preset file!")
    else:
        from markov.sagemaker_graph_manager import get_graph_manager
        graph_manager, _ = get_graph_manager(**sm_hyperparams_dict)
    memory_backend_params = RedisPubSubMemoryBackendParameters(redis_address=redis_ip,
                                                               redis_port=6379,
                                                               run_type='worker',
                                                               channel=args.s3_prefix)
    ds_params_instance = S3BotoDataStoreParameters(bucket_name=args.s3_bucket,
                                                   checkpoint_dir=args.checkpoint_dir, aws_region=args.aws_region,
                                                   s3_folder=args.s3_prefix)
    data_store = S3BotoDataStore(ds_params_instance)
    data_store.graph_manager = graph_manager
    graph_manager.data_store = data_store
    rollout_worker(
        graph_manager=graph_manager,
        checkpoint_dir=args.checkpoint_dir,
        data_store=data_store,
        num_workers=args.num_workers,
        memory_backend_params = memory_backend_params
    )
# Script entry point: run the rollout worker's main() when invoked directly.
if __name__ == '__main__':
    main()
| 45.207237 | 155 | 0.677727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,601 | 0.18926 |
a638619d448044911a4ff3d2febd7839d0d6bf87 | 10,197 | py | Python | odin/bay/vi/losses.py | tirkarthi/odin-ai | 7900bef82ad8801d0c73880330d5b24d9ff7cd06 | [
"MIT"
] | null | null | null | odin/bay/vi/losses.py | tirkarthi/odin-ai | 7900bef82ad8801d0c73880330d5b24d9ff7cd06 | [
"MIT"
] | null | null | null | odin/bay/vi/losses.py | tirkarthi/odin-ai | 7900bef82ad8801d0c73880330d5b24d9ff7cd06 | [
"MIT"
] | null | null | null | import inspect
from typing import Callable, List, Union
import numpy as np
import tensorflow as tf
from odin.bay.helpers import kl_divergence
from tensorflow import Tensor
from tensorflow_probability.python.distributions import Distribution, Normal
from typing_extensions import Literal
__all__ = [
'disentangled_inferred_prior_loss',
'total_correlation',
'pairwise_distances',
'gaussian_kernel',
'maximum_mean_discrepancy',
]
# ===========================================================================
# Helper
# ===========================================================================
def get_divergence(name: str) -> Callable[[Distribution, Distribution], Tensor]:
  """Look up a divergence function by its short name.
  Recognized names (case-insensitive, surrounding whitespace ignored):
  'dip', 'tc', 'mmd' and 'kl'.
  Raises:
    ValueError: if `name` does not match any registered divergence.
  """
  registry = {'dip': disentangled_inferred_prior_loss,
              'tc': total_correlation,
              'mmd': maximum_mean_discrepancy,
              'kl': kl_divergence}
  key = str(name).strip().lower()
  if key not in registry:
    raise ValueError(
        "Cannot find divergence with name: '%s', all available are: %s" %
        (key, ', '.join(registry.keys())))
  return registry[key]
# ===========================================================================
# Losses
# ===========================================================================
def disentangled_inferred_prior_loss(qZ_X: Distribution,
                                     only_mean: bool = False,
                                     lambda_offdiag: float = 2.,
                                     lambda_diag: float = 1.) -> Tensor:
  r""" Disentangled inferred prior (DIP) matches the covariance of the prior
  distributions with the inferred prior
  Uses `cov(z_mean) = E[z_mean*z_mean^T] - E[z_mean]E[z_mean]^T`.
  Arguments:
    qZ_X : `tensorflow_probability.Distribution`
    only_mean : A Boolean. If `True`, applying DIP constraint only on the
      mean of latents `Cov[E(z)]` (i.e. type 'i'),
      otherwise, `E[Cov(z)] + Cov[E(z)]` (i.e. type 'ii')
    lambda_offdiag : A Scalar. Weight for penalizing the off-diagonal part of
      covariance matrix.
    lambda_diag : A Scalar. Weight for penalizing the diagonal.
  Returns:
    A scalar DIP regularization loss tensor.
  Reference:
    Kumar, A., Sattigeri, P., Balakrishnan, A., 2018. Variational Inference of
      Disentangled Latent Concepts from Unlabeled Observations.
      arXiv:1711.00848 [cs, stat].
    Github code https://github.com/IBM/AIX360
    Github code https://github.com/google-research/disentanglement_lib
  """
  z_mean = qZ_X.mean()
  shape = z_mean.shape
  if len(shape) > 2:
    # flatten any leading sample dimensions into the batch axis:
    # [sample_shape * batch_size, zdim]
    z_mean = tf.reshape(
        z_mean, (tf.cast(tf.reduce_prod(shape[:-1]), tf.int32),) + shape[-1:])
  # E[z_mean * z_mean^T], averaged over the batch -> [zdim, zdim]
  expectation_z_mean_z_mean_t = tf.reduce_mean(tf.expand_dims(z_mean, 2) *
                                               tf.expand_dims(z_mean, 1),
                                               axis=0)
  expectation_z_mean = tf.reduce_mean(z_mean, axis=0)
  # cov_zmean [zdim, zdim] = E[mm^T] - E[m]E[m]^T (covariance of the means)
  cov_zmean = tf.subtract(
      expectation_z_mean_z_mean_t,
      tf.expand_dims(expectation_z_mean, 1) *
      tf.expand_dims(expectation_z_mean, 0))
  # Eq(5)
  if only_mean:
    # DIP type 'i': constrain Cov[E(z)] only
    z_cov = cov_zmean
  else:
    # DIP type 'ii': add the expected (diagonal) posterior covariance E[Cov(z)]
    z_var = qZ_X.variance()
    if len(shape) > 2:
      z_var = tf.reshape(
          z_var, (tf.cast(tf.reduce_prod(shape[:-1]), tf.int32),) + shape[-1:])
    # mean_zcov [zdim, zdim]
    mean_zcov = tf.reduce_mean(tf.linalg.diag(z_var), axis=0)
    z_cov = cov_zmean + mean_zcov
  # Eq(6) and Eq(7): push off-diagonal entries to 0 and diagonal entries to 1
  # z_cov [sample_shape, zdim, zdim]
  # z_cov_diag [sample_shape, zdim]
  # z_cov_offdiag [sample_shape, zdim, zdim]
  z_cov_diag = tf.linalg.diag_part(z_cov)
  z_cov_offdiag = z_cov - tf.linalg.diag(z_cov_diag)
  return lambda_offdiag * tf.reduce_sum(z_cov_offdiag ** 2) + \
      lambda_diag * tf.reduce_sum((z_cov_diag - 1.) ** 2)
def total_correlation(z_samples: Tensor, qZ_X: Distribution) -> Tensor:
  r"""Estimate of total correlation using Gaussian distribution on a batch.
  We need to compute the expectation over a batch of:
  `E_j [log(q(z(x_j))) - log(prod_l q(z(x_j)_l))]`
  We ignore the constants as they do not matter for the minimization.
  The constant should be equal to
  `(num_latents - 1) * log(batch_size * dataset_size)`
  If `alpha = gamma = 1`, Eq(4) can be written as `ELBO + (1 - beta) * TC`.
  (i.e. `(1. - beta) * total_correlation(z_sampled, qZ_X)`)
  Arguments:
    z_samples: [batch_size, num_latents]-tensor with sampled representation.
    qZ_X: a `Distribution` whose `mean()` and `stddev()` give the encoder
      posterior parameters, each of shape [batch_size, num_latents].
  Note:
    This involve calculating pair-wise distance, memory complexity up to
    `O(n*n*d)`.
  Returns:
    Total correlation estimated on a batch.
  Reference:
    Chen, R.T.Q., Li, X., Grosse, R., Duvenaud, D., 2019. Isolating Sources of
      Disentanglement in Variational Autoencoders. arXiv:1802.04942 [cs, stat].
    Github code https://github.com/google-research/disentanglement_lib
  """
  # Broadcast the per-example Gaussians against every sample in the batch.
  gaus = Normal(loc=tf.expand_dims(qZ_X.mean(), 0),
                scale=tf.expand_dims(qZ_X.stddev(), 0))
  # Compute log(q(z(x_j)|x_i)) for every sample in the batch, which is a
  # tensor of size [batch_size, batch_size, num_latents]. In the following
  # comments, [batch_size, batch_size, num_latents] are indexed by [j, i, l].
  log_qz_prob = gaus.log_prob(tf.expand_dims(z_samples, 1))
  # Compute log prod_l p(z(x_j)_l) = sum_l(log(sum_i(q(z(z_j)_l|x_i)))
  # + constant) for each sample in the batch, which is a vector of size
  # [batch_size,].
  log_qz_product = tf.reduce_sum(tf.reduce_logsumexp(log_qz_prob,
                                                     axis=1,
                                                     keepdims=False),
                                 axis=1,
                                 keepdims=False)
  # Compute log(q(z(x_j))) as log(sum_i(q(z(x_j)|x_i))) + constant =
  # log(sum_i(prod_l q(z(x_j)_l|x_i))) + constant.
  log_qz = tf.reduce_logsumexp(tf.reduce_sum(log_qz_prob,
                                             axis=2,
                                             keepdims=False),
                               axis=1,
                               keepdims=False)
  return tf.reduce_mean(log_qz - log_qz_product)
# ===========================================================================
# Maximum-mean discrepancy
# ===========================================================================
def pairwise_distances(x: Tensor, y: Tensor, keepdims: bool = True) -> Tensor:
  r"""
  Arguments:
    x : a Tensor batch_shape1 + (dim,)
    y : a Tensor batch_shape2 + (dim,)
    keepdims : a Boolean. If True, reshape the output to keep the batch_shape1
      and batch_shape2, otherwise, return flattened output.
  Return:
    distance : a Tensor (batch_shape1, batch_shape2, dim).
      Pairwise distances for each row in x and y
  NOTE(review): despite the name, this returns elementwise differences
  `x_i - y_j` along the feature axis, not reduced norms; callers
  (e.g. the kernel functions below) reduce over the last axis themselves.
  """
  shape_x = tf.shape(x)
  shape_y = tf.shape(y)
  tf.assert_equal(shape_x[-1], shape_y[-1],
                  "The last dimension of x and y must be equal")
  feat_dim = shape_x[-1]
  # reshape to 2-D so broadcasting below is always (n1, 1, dim) - (n2, dim)
  x = tf.cond(tf.rank(x) > 2, lambda: tf.reshape(x, (-1, feat_dim)), lambda: x)
  y = tf.cond(tf.rank(y) > 2, lambda: tf.reshape(y, (-1, feat_dim)), lambda: y)
  # pairwise difference via broadcasting
  x = tf.expand_dims(x, axis=1)
  d = x - y
  # reshape back to the original batch shapes
  if keepdims:
    d = tf.reshape(d,
                   tf.concat([shape_x[:-1], shape_y[:-1], (feat_dim,)], axis=0))
  return d
def gaussian_kernel(x, y, sigma=None):
  r""" Gaussian radial basis function
  Arguments:
    x : a Tensor [num_samples, num_features]
    y : a Tensor [num_samples, num_features]
    sigma : a Scalar which denote the width of the Gaussian in the kernel.
      When `None`, the bandwidth defaults to `gamma = 1 / num_features`.
  Reference:
    Radial basis function kernel :
      https://en.wikipedia.org/wiki/Radial_basis_function_kernel
  """
  diff = pairwise_distances(x, y, keepdims=False)
  if sigma is None:
    # default bandwidth: gamma = 1 / feature dimension
    gamma = 1. / tf.cast(tf.shape(x)[-1], dtype=diff.dtype)
  else:
    # gamma = 1 / (2 * sigma^2)
    sigma = tf.convert_to_tensor(sigma, dtype=diff.dtype)
    gamma = 1. / (2. * tf.square(sigma))
  # squared L2 norm of each pairwise difference
  sq_dist = tf.reduce_sum(tf.square(diff), axis=-1)
  return tf.reduce_sum(tf.math.exp(-tf.expand_dims(sq_dist, axis=-1) * gamma),
                       axis=-1)
def linear_kernel(x, y):
  """Absolute value of the summed pairwise differences between `x` and `y`."""
  diff = pairwise_distances(x, y, keepdims=False)
  return tf.math.abs(tf.reduce_sum(diff, axis=-1))
def polynomial_kernel(x, y, d=2):
  """Polynomial kernel of degree `d` -- not implemented yet.
  Kept as a placeholder so `maximum_mean_discrepancy(kernel='polynomial')`
  fails loudly instead of silently computing something else.
  Raises:
    NotImplementedError: always.
  """
  # The original body contained an unreachable pairwise_distances() call
  # after the raise; it has been removed.
  raise NotImplementedError()
def maximum_mean_discrepancy(
    qZ: Distribution,
    pZ: Distribution,
    q_sample_shape: Union[int, List[int]] = (),
    p_sample_shape: Union[int, List[int]] = 100,
    kernel: Literal['gaussian', 'linear', 'polynomial'] = 'gaussian') -> Tensor:
  r"""Empirical squared maximum mean discrepancy between `qZ` and `pZ`.
  MMD is a distance-measure between distributions p(X) and q(Y), defined as
  the squared distance between their embeddings in a reproducing kernel
  Hilbert space. Given n examples from p(X) and m samples from q(Y), the
  empirical estimate is:
    MMD^2(P, Q) = || \E{\phi(x)} - \E{\phi(y)} ||^2
                = \E{ K(x, x) } + \E{ K(y, y) } - 2 \E{ K(x, y) }
  Arguments:
    qZ, pZ : `tensorflow_probability.Distribution` instances.
    q_sample_shape : sample shape for the posterior; `None` reuses the
      samples already drawn from `qZ`.
    p_sample_shape : sample shape for the prior.
    kernel : one of 'gaussian', 'linear', 'polynomial'.
  Reference:
    Gretton, A., Borgwardt, K., Rasch, M.J., Scholkopf, B., Smola, A.J., 2008.
      "A Kernel Method for the Two-Sample Problem". arXiv:0805.2368 [cs].
  """
  assert isinstance(
      qZ, Distribution
  ), 'qZ must be instance of tensorflow_probability.Distribution'
  assert isinstance(
      pZ, Distribution
  ), 'pZ must be instance of tensorflow_probability.Distribution'
  # draw the samples from both distributions
  if q_sample_shape is None:  # reuse sampled examples
    q_samples = tf.convert_to_tensor(qZ)
  else:
    q_samples = qZ.sample(q_sample_shape)
  p_samples = pZ.sample(p_sample_shape)
  # resolve the kernel function by name
  kernel_map = {'gaussian': gaussian_kernel,
                'linear': linear_kernel,
                'polynomial': polynomial_kernel}
  if kernel not in kernel_map:
    raise NotImplementedError("No support for kernel: '%s'" % kernel)
  kernel_fn = kernel_map[kernel]
  k_qq = kernel_fn(q_samples, q_samples)
  k_pp = kernel_fn(p_samples, p_samples)
  k_qp = kernel_fn(q_samples, p_samples)
  return tf.reduce_mean(k_qq) + tf.reduce_mean(k_pp) - 2 * tf.reduce_mean(k_qp)
| 37.488971 | 80 | 0.611749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,199 | 0.509856 |
a638f3615cf3c6c6cc824b89b01d099d467d9d75 | 558 | py | Python | scraper/settings.py | Red-Pheonix/test_scraping | f89d81a90af84444552a5bcc28dee2946ca2e58e | [
"MIT"
] | null | null | null | scraper/settings.py | Red-Pheonix/test_scraping | f89d81a90af84444552a5bcc28dee2946ca2e58e | [
"MIT"
] | null | null | null | scraper/settings.py | Red-Pheonix/test_scraping | f89d81a90af84444552a5bcc28dee2946ca2e58e | [
"MIT"
] | null | null | null | # Scrapy settings for scraper project
# Name Scrapy uses to identify this bot (also the default log name).
BOT_NAME = 'scraper'
# Packages where Scrapy discovers existing and newly generated spiders.
SPIDER_MODULES = ['scraper.spiders']
NEWSPIDER_MODULE = 'scraper.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# NOTE(review): impersonating Googlebot may violate target-site policies;
# consider a project-specific user agent.
USER_AGENT = 'Googlebot-News'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# MONGO URI for accessing MongoDB (left blank; filled per deployment)
MONGO_URI = ""
MONGO_DATABASE = ""
# sqlite database location (left blank; filled per deployment)
SQLITE_DB = ""
# pipelines are disabled by default; uncomment an entry to enable it
# (the number is the pipeline's execution order, lower runs first)
ITEM_PIPELINES = {
    #'scraper.pipelines.SQLitePipeline': 300,
    #'scraper.pipelines.MongoPipeline': 600,
}
| 21.461538 | 80 | 0.747312 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 380 | 0.681004 |
a63c831ec9710b64344fb7dfa36c7cec29b0cae2 | 358 | py | Python | Intern/variables2.py | AalsiCodeMan/Notebook-Ex | fd1cf8beddf26f6dd94f476f4c308b9057cc5ac7 | [
"Unlicense"
] | 1 | 2020-06-28T12:35:55.000Z | 2020-06-28T12:35:55.000Z | Intern/variables2.py | AalsiCodeMan/Notebook-Ex | fd1cf8beddf26f6dd94f476f4c308b9057cc5ac7 | [
"Unlicense"
] | 1 | 2021-10-02T05:33:51.000Z | 2021-10-02T05:34:02.000Z | Intern/variables2.py | AalsiCodeMan/Notebook-Ex | fd1cf8beddf26f6dd94f476f4c308b9057cc5ac7 | [
"Unlicense"
] | 3 | 2020-10-17T08:19:02.000Z | 2021-10-11T12:33:18.000Z | ## Data Categorisation
'''
Data categorisation demo:
1) Whole Number (Ints) - 100, 1000, -450, 999
2) Real Numbers (Floats) - 33.33, 44.01, -1000.033
3) String - "Bangalore", "India", "Raj", "abc123"
4) Boolean - True, False
Variables in python are dynamic in nature
'''
# Rebind the same name to values of different types to demonstrate
# Python's dynamic typing; output matches printing each value in turn.
for a in (10, 10.33, 'New Jersey'):
    print(a)
    print(type(a))
| 14.916667 | 50 | 0.636872 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 258 | 0.72067 |
a63cff634a2350a5c9d905417fc77ffe0cbb8d60 | 833 | py | Python | migrations/versions/b0c12eb8ae59_initial_migration.py | edumorris/pomodoro | cde372be1d5c37dd8221ebd40b684d07fbb472b5 | [
"MIT"
] | 1 | 2022-01-10T14:48:16.000Z | 2022-01-10T14:48:16.000Z | migrations/versions/b0c12eb8ae59_initial_migration.py | edumorris/pomodoro | cde372be1d5c37dd8221ebd40b684d07fbb472b5 | [
"MIT"
] | null | null | null | migrations/versions/b0c12eb8ae59_initial_migration.py | edumorris/pomodoro | cde372be1d5c37dd8221ebd40b684d07fbb472b5 | [
"MIT"
] | null | null | null | """Initial Migration
Revision ID: b0c12eb8ae59
Revises: 7cb850d9441c
Create Date: 2020-07-15 11:44:46.190193
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b0c12eb8ae59'  # this migration's unique id
down_revision = '7cb850d9441c'  # migration applied immediately before this one
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add `password_hash` to `users`, drop `pass_secure`.

    Note: any data stored in `pass_secure` is discarded by the drop.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('password_hash', sa.String(length=255), nullable=True))
    op.drop_column('users', 'pass_secure')
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: restore `pass_secure`, drop `password_hash`.

    Note: any data stored in `password_hash` is discarded by the drop.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('pass_secure', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
    op.drop_column('users', 'password_hash')
    # ### end Alembic commands ###
| 26.870968 | 112 | 0.704682 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 448 | 0.537815 |
a63d1ac18dbb8521b652e406baf4c3ebd14d1bd4 | 1,281 | py | Python | PycharmProjects/PythonExercicios/ex045.py | RodrigoMASRamos/Projects.py | ed15981b320914c9667305dcd5fb5b7906fd9b00 | [
"MIT"
] | null | null | null | PycharmProjects/PythonExercicios/ex045.py | RodrigoMASRamos/Projects.py | ed15981b320914c9667305dcd5fb5b7906fd9b00 | [
"MIT"
] | null | null | null | PycharmProjects/PythonExercicios/ex045.py | RodrigoMASRamos/Projects.py | ed15981b320914c9667305dcd5fb5b7906fd9b00 | [
"MIT"
] | null | null | null | # Exercício Python #045 - GAME: Pedra Papel e Tesoura
# GAME: Rock, Paper, Scissors (JOKENPO) played against the computer,
# with colorized terminal output.
#
# BUG FIX: the computer's pick used to be stored WITH ANSI colour codes
# (e.g. '\033[1;35mPEDRA\033[m'), so comparisons like PCc == 'PEDRA' and
# PCc == Uc could never match and the player always "won". The move is now
# stored as a plain name and colourized only when printed.
from random import choice
from random import randint  # kept from the original solution (unused)
from time import sleep
# Colourized display form for each plain move name.
CORES = {'PEDRA': '\033[1;35mPEDRA\033[m',
         'PAPEL': '\033[1;31mPAPEL\033[m',
         'TESOURA': '\033[1;36mTESOURA\033[m'}
print('\033[1;31mATENÇÃO! ESTE É UM JOGO ALTAMENTE PERIGOSO ONDE NÃO HÁ CHANCES DE VITÓRIA PARA VOCÊ!\033[m')
Uc = input('\033[0;30mMe diga, \033[1;34mó grande jogador, \033[0;30mvocê escolhe \033[1;35mPEDRA, \033[1;31mPAPEL, '
           '\033[0;30mou \033[1;36mTESOURA? ').strip().upper()
PC = ['PEDRA', 'PAPEL', 'TESOURA']  # plain names; use CORES[...] for display
PCc = choice(PC)
sleep(0.5)
print('JO')
sleep(1)
print('KEN')
sleep(1)
print('PO!')
# Computer wins when its move beats the player's move.
if PCc == 'PEDRA' and Uc == 'TESOURA' or PCc == 'TESOURA' and Uc == 'PAPEL' or PCc == 'PAPEL' and Uc == 'PEDRA':
    print(f'\033[1;31mHAHAHA! Eu venci! \033[0;30mEu escolhi \033[m{CORES[PCc]} \033[0;30me você \033[m{Uc}\033[0;30m!')
elif PCc == Uc:
    print(f'\033[1;33mEMPATE! Vamos jogar novamente! Eu escolhi \033[m{CORES[PCc]} \033[0;30me você \033[m{Uc}')
else:
    # Player wins (also reached for unrecognized input, as before).
    print(f'\033[0;34mT-T Infelizmente,\033[1;32mvocê venceu... \033[0;30mEu escolhi \033[m{CORES[PCc]}, \033[0;30me você '
          f'escolheu \033[m{Uc}\033[0;30m...\033[m')
a63d1b744946d4b341dc8b6cce819d4426122992 | 6,209 | py | Python | im2txt/losses.py | wangheda/ImageCaption-UnderFitting | ca98807d3a35a40a35446678d7e7b43242767a63 | [
"Apache-2.0"
] | 8 | 2018-05-08T12:29:56.000Z | 2021-04-07T03:11:32.000Z | im2txt/losses.py | wangheda/ImageCaption-UnderFitting | ca98807d3a35a40a35446678d7e7b43242767a63 | [
"Apache-2.0"
] | null | null | null | im2txt/losses.py | wangheda/ImageCaption-UnderFitting | ca98807d3a35a40a35446678d7e7b43242767a63 | [
"Apache-2.0"
] | 4 | 2017-12-22T02:10:38.000Z | 2021-04-07T03:12:24.000Z | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides definitions for non-regularized training or test losses."""
import sys
import numpy as np
import tensorflow as tf
from tensorflow import flags
import tf_cider
FLAGS = flags.FLAGS
LOG_TENSOR = True
def log_tensor(name, g=None, l=None):
  """Print expression `name` and its value to stderr (Python 2).

  Args:
    name: string, a Python expression naming the tensor/value to log.
    g, l: optional globals/locals dicts in which to evaluate `name`;
      when both are None, the caller's frame is used.
  """
  if LOG_TENSOR:
    if g is None and l is None:
      # BUG FIX: this branch used eval(name, {"self": self}), but `self`
      # is undefined here and always raised NameError. Evaluate in the
      # caller's namespace instead.
      frame = sys._getframe(1)
      print >> sys.stderr, name, eval(name, frame.f_globals, frame.f_locals)
    else:
      print >> sys.stderr, name, eval(name, g, l)
class BaseLoss(object):
  """Abstract base class for loss functions; subclasses override
  `calculate_loss`."""

  def calculate_loss(self, unused_predictions, unused_labels, **unused_params):
    """Calculates the average loss of the examples in a mini-batch.

    Args:
      unused_predictions: a 2-d tensor of prediction scores; each row is a
        sample in the mini-batch, each column a class.
      unused_labels: a 2-d tensor of labels with the same shape as
        `unused_predictions`, valued in the range 0 to 1.
      unused_params: loss specific parameters.

    Returns:
      A scalar loss tensor.

    Raises:
      NotImplementedError: always; subclasses must provide the computation.
    """
    raise NotImplementedError()
class CrossEntropyLoss(BaseLoss):
  """Sigmoid cross-entropy loss, optionally weighted per example."""

  def calculate_loss(self, predictions, labels, weights=None,
                     epsilon=1e-9, **unused_params):
    """Mean sigmoid cross-entropy of `predictions` (logits) vs `labels`.

    BUG FIX: `epsilon` was used below but never defined, raising NameError
    whenever `weights` was supplied. It is now a keyword parameter with the
    same default as SparseSoftmaxCrossEntropyLoss, keeping the call
    signature backward compatible.
    """
    loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
                                                   logits=predictions)
    if weights is not None:
      # Weighted mean; epsilon guards against a zero weight sum.
      loss = tf.div(tf.reduce_sum(loss * weights),
                    tf.reduce_sum(weights) + epsilon)
    else:
      loss = tf.reduce_mean(loss)
    return loss
class SparseSoftmaxCrossEntropyLoss(BaseLoss):
  """Softmax cross-entropy against integer class labels, optionally
  weighted per example."""

  def calculate_loss(self, predictions, labels, weights=None,
                     epsilon=1e-9, **unused_params):
    """Mean (or weighted mean) sparse softmax cross-entropy."""
    per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=predictions)
    if weights is None:
      return tf.reduce_mean(per_example)
    # Weighted mean; epsilon guards against a zero weight sum.
    return tf.div(tf.reduce_sum(per_example * weights),
                  tf.reduce_sum(weights) + epsilon)
class SelfCriticalLoss(BaseLoss):
  """Self-critical RL loss: the CIDEr score of a greedily decoded caption
  serves as the baseline reward for sampled captions."""
  def __init__(self):
    # Shared CIDEr scorer used for both greedy and sampled captions.
    self.cider_scorer = tf_cider.CiderScorer()
  def calculate_loss(self,
                     target_caption_words,
                     target_caption_lengths,
                     greedy_caption_words,
                     greedy_caption_lengths,
                     sample_caption_words,
                     sample_caption_lengths,
                     sample_caption_logits,
                     epsilon=1e-9, **unused_params):
    """Compute the self-critical policy-gradient loss.

    Args:
      target_caption_words/lengths: ground-truth captions and lengths.
      greedy_caption_words/lengths: captions decoded greedily (baseline).
      sample_caption_words/lengths: captions sampled from the model.
      sample_caption_logits: per-step vocabulary logits for the sampled
        captions, shape [batch_size, max_sample_length, vocab].
      epsilon: small constant guarding the log of gathered probabilities.

    Returns:
      Scalar `rl_loss` tensor.
    """
    cider_scorer = self.cider_scorer
    log_tensor("greedy_caption_words", l=locals())
    log_tensor("greedy_caption_lengths", l=locals())
    log_tensor("sample_caption_logits", l=locals())
    log_tensor("sample_caption_words", l=locals())
    log_tensor("sample_caption_lengths", l=locals())
    log_tensor("target_caption_words", l=locals())
    log_tensor("target_caption_lengths", l=locals())
    greedy_score = cider_scorer.score(greedy_caption_words,
                                      greedy_caption_lengths,
                                      target_caption_words,
                                      target_caption_lengths)
    sample_score = cider_scorer.score(sample_caption_words,
                                      sample_caption_lengths,
                                      target_caption_words,
                                      target_caption_lengths)
    tf.summary.scalar("losses/average_greedy_score", tf.reduce_mean(greedy_score))
    tf.summary.scalar("losses/average_sample_score", tf.reduce_mean(sample_score))
    tf.summary.histogram("losses/greedy_score", greedy_score)
    tf.summary.histogram("losses/sample_score", sample_score)
    tf.summary.histogram("losses/greedy_caption_lengths", greedy_caption_lengths)
    tf.summary.histogram("losses/sample_caption_lengths", sample_caption_lengths)
    # reward < 0 when the sampled caption beats the greedy baseline, so
    # minimizing reward * logprob raises the probability of such samples.
    # No gradient flows through the reward itself.
    reward = greedy_score - sample_score
    reward = tf.stop_gradient(reward)
    # extract the logprobs of each word in sample_captions
    sample_probs = tf.nn.softmax(sample_caption_logits)
    batch_size, max_sample_length, _ = sample_probs.get_shape().as_list()
    # Build [batch, step, word] gather indices to pick each sampled word's
    # probability out of the softmax output.
    sample_batch_index = tf.tile(tf.reshape(tf.range(0, batch_size),
                                            shape=[batch_size,1]),
                                 multiples=[1, max_sample_length])
    sample_seq_index = tf.tile(tf.reshape(tf.range(0, max_sample_length),
                                          shape=[1, max_sample_length]),
                               multiples=[batch_size, 1])
    sample_gather_index = tf.stack([sample_batch_index,
                                    sample_seq_index,
                                    sample_caption_words], axis=2)
    sample_caption_logprobs = tf.log(tf.gather_nd(sample_probs, sample_gather_index) + epsilon)
    tf.summary.histogram("losses/sample_caption_logprobs", sample_caption_logprobs)
    # Mask out padding positions beyond each caption's length.
    sample_caption_mask = tf.sequence_mask(sample_caption_lengths,
                                           maxlen=max_sample_length)
    sample_caption_mask = tf.cast(sample_caption_mask, dtype=tf.float32)
    rl_loss = tf.expand_dims(reward, 1) * sample_caption_logprobs
    rl_loss = tf.div(tf.reduce_sum(rl_loss * sample_caption_mask),
                     tf.reduce_sum(sample_caption_mask),
                     name="rl_loss")
    tf.summary.scalar("losses/rl_loss", rl_loss)
    log_tensor("reward", l=locals())
    log_tensor("rl_loss", l=locals())
    return rl_loss
| 40.848684 | 95 | 0.64326 | 5,181 | 0.834434 | 0 | 0 | 0 | 0 | 0 | 0 | 1,701 | 0.273957 |
a63f4ba25cb878a66fcd7170945ef4034691b3de | 7,811 | py | Python | datasets/hope_edi/hope_edi.py | WojciechKusa/datasets | 1406a04c3e911cec2680d8bc513653e0cafcaaa4 | [
"Apache-2.0"
] | 10,608 | 2020-09-10T15:47:50.000Z | 2022-03-31T22:51:47.000Z | datasets/hope_edi/hope_edi.py | WojciechKusa/datasets | 1406a04c3e911cec2680d8bc513653e0cafcaaa4 | [
"Apache-2.0"
] | 2,396 | 2020-09-10T14:55:31.000Z | 2022-03-31T19:41:04.000Z | datasets/hope_edi/hope_edi.py | WojciechKusa/datasets | 1406a04c3e911cec2680d8bc513653e0cafcaaa4 | [
"Apache-2.0"
] | 1,530 | 2020-09-10T21:43:10.000Z | 2022-03-31T01:59:12.000Z | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hope Speech dataset for Equality, Diversity and Inclusion (HopeEDI)"""
import csv
import datasets
_HOMEPAGE = "https://competitions.codalab.org/competitions/27653#learn_the_details"
_CITATION = """\
@inproceedings{chakravarthi-2020-hopeedi,
title = "{H}ope{EDI}: A Multilingual Hope Speech Detection Dataset for Equality, Diversity, and Inclusion",
author = "Chakravarthi, Bharathi Raja",
booktitle = "Proceedings of the Third Workshop on Computational Modeling of People's Opinions, Personality, and Emotion's in Social Media",
month = dec,
year = "2020",
address = "Barcelona, Spain (Online)",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.peoples-1.5",
pages = "41--53",
abstract = "Over the past few years, systems have been developed to control online content and eliminate abusive, offensive or hate speech content. However, people in power sometimes misuse this form of censorship to obstruct the democratic right of freedom of speech. Therefore, it is imperative that research should take a positive reinforcement approach towards online content that is encouraging, positive and supportive contents. Until now, most studies have focused on solving this problem of negativity in the English language, though the problem is much more than just harmful content. Furthermore, it is multilingual as well. Thus, we have constructed a Hope Speech dataset for Equality, Diversity and Inclusion (HopeEDI) containing user-generated comments from the social media platform YouTube with 28,451, 20,198 and 10,705 comments in English, Tamil and Malayalam, respectively, manually labelled as containing hope speech or not. To our knowledge, this is the first research of its kind to annotate hope speech for equality, diversity and inclusion in a multilingual setting. We determined that the inter-annotator agreement of our dataset using Krippendorff{'}s alpha. Further, we created several baselines to benchmark the resulting dataset and the results have been expressed using precision, recall and F1-score. The dataset is publicly available for the research community. We hope that this resource will spur further research on encouraging inclusive and responsive speech that reinforces positiveness.",
}
"""
# Short human-readable summary surfaced in DatasetInfo.
_DESCRIPTION = """\
A Hope Speech dataset for Equality, Diversity and Inclusion (HopeEDI) containing user-generated comments from the social media platform YouTube with 28,451, 20,198 and 10,705 comments in English, Tamil and Malayalam, respectively, manually labelled as containing hope speech or not.
"""
# Licence string surfaced in DatasetInfo.
_LICENSE = "Creative Commons Attribution 4.0 International Licence"
# Per-language Google Drive download links for the train/validation splits;
# keyed by the BuilderConfig name selected at load time.
_URLs = {
    "english": {
        "TRAIN_DOWNLOAD_URL": "https://drive.google.com/u/0/uc?id=1ydsOTvBZXKqcRvXawOuePrJ99slOEbkk&export=download",
        "VALIDATION_DOWNLOAD_URL": "https://drive.google.com/u/0/uc?id=1pvpPA97kybx5IyotR9HNuqP4T5ktEtr4&export=download",
    },
    "tamil": {
        "TRAIN_DOWNLOAD_URL": "https://drive.google.com/u/0/uc?id=1R1jR4DcH2UEaM1ZwDSRHdfTGvkCNu6NW&export=download",
        "VALIDATION_DOWNLOAD_URL": "https://drive.google.com/u/0/uc?id=1cTaA6OCZUaepl5D-utPw2ZmbonPcw52v&export=download",
    },
    "malayalam": {
        "TRAIN_DOWNLOAD_URL": "https://drive.google.com/u/0/uc?id=1wxwqnWGRzwvc_-ugRoFX8BPgpO3Q7sch&export=download",
        "VALIDATION_DOWNLOAD_URL": "https://drive.google.com/u/0/uc?id=1uZ0U9VJQEUPQItPpTJKXH8u_6jXppvJ1&export=download",
    },
}
class HopeEdi(datasets.GeneratorBasedBuilder):
    """HopeEDI dataset: hope-speech classification in English, Tamil and Malayalam."""

    VERSION = datasets.Version("1.0.0")

    # One configuration per language; the config name selects the download
    # URLs in _URLs and the label vocabulary in _info().
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="english", version=VERSION, description="This part of my dataset covers English dataset"
        ),
        datasets.BuilderConfig(
            name="tamil", version=VERSION, description="This part of my dataset covers Tamil dataset"
        ),
        datasets.BuilderConfig(
            # BUG FIX: this description previously said "Tamil" (copy-paste error).
            name="malayalam", version=VERSION, description="This part of my dataset covers Malayalam dataset"
        ),
    ]

    def _info(self):
        """Return DatasetInfo; the label set depends on the selected language."""
        if self.config.name == "english":
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(names=["Hope_speech", "Non_hope_speech", "not-English"]),
                }
            )
        elif self.config.name == "tamil":
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(names=["Hope_speech", "Non_hope_speech", "not-Tamil"]),
                }
            )
        else:  # self.config.name == "malayalam"
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(names=["Hope_speech", "Non_hope_speech", "not-malayalam"]),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # No canonical (input, target) tuple is declared for this dataset.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-language files and return train/validation splits."""
        my_urls = _URLs[self.config.name]
        train_path = dl_manager.download_and_extract(my_urls["TRAIN_DOWNLOAD_URL"])
        validation_path = dl_manager.download_and_extract(my_urls["VALIDATION_DOWNLOAD_URL"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": train_path,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": validation_path,
                    "split": "validation",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (id, example) pairs from a tab-separated file.

        Each row is expected to carry three fields; only the first two
        (text, label) are emitted. `split` is unused but part of the
        gen_kwargs contract above.
        """
        with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(
                csv_file, quotechar='"', delimiter="\t", quoting=csv.QUOTE_NONE, skipinitialspace=False
            )
            for id_, row in enumerate(csv_reader):
                text, label, _ = row
                yield id_, {"text": text, "label": label}
| 49.125786 | 1,525 | 0.673921 | 3,696 | 0.473179 | 453 | 0.057995 | 0 | 0 | 0 | 0 | 5,047 | 0.64614 |
a641a9f31d2309805597aee4c338bb8ecb70c725 | 5,310 | py | Python | code_legacy/PostfixLogSummary.py | rhymeswithmogul/starttls-everywhere | cdc6cce552d44e17a3178eba987ae5d2b6a22e75 | [
"Apache-2.0"
] | 339 | 2015-01-01T06:19:34.000Z | 2021-12-10T17:24:52.000Z | code_legacy/PostfixLogSummary.py | rhymeswithmogul/starttls-everywhere | cdc6cce552d44e17a3178eba987ae5d2b6a22e75 | [
"Apache-2.0"
] | 95 | 2015-06-07T21:26:16.000Z | 2021-09-28T13:11:00.000Z | code_legacy/PostfixLogSummary.py | rhymeswithmogul/starttls-everywhere | cdc6cce552d44e17a3178eba987ae5d2b6a22e75 | [
"Apache-2.0"
] | 50 | 2015-03-18T17:41:32.000Z | 2021-03-19T07:44:54.000Z | #!/usr/bin/env python
import argparse
import collections
import os
import re
import sys
import time
import Config
TIME_FORMAT = "%b %d %H:%M:%S"
# TODO: There's more to be learned from postfix logs! Here's one sample
# observed during failures from the sender vagrant vm:
# Jun 6 00:21:31 precise32 postfix/smtpd[3648]: connect from localhost[127.0.0.1]
# Jun 6 00:21:34 precise32 postfix/smtpd[3648]: lost connection after STARTTLS from localhost[127.0.0.1]
# Jun 6 00:21:34 precise32 postfix/smtpd[3648]: disconnect from localhost[127.0.0.1]
# Jun 6 00:21:56 precise32 postfix/master[3001]: reload -- version 2.9.6, configuration /etc/postfix
# Jun 6 00:22:01 precise32 postfix/pickup[3674]: AF3B6480475: uid=0 from=<root>
# Jun 6 00:22:01 precise32 postfix/cleanup[3680]: AF3B6480475: message-id=<20140606002201.AF3B6480475@sender.example.com>
# Jun 6 00:22:01 precise32 postfix/qmgr[3673]: AF3B6480475: from=<root@sender.example.com>, size=576, nrcpt=1 (queue active)
# Jun 6 00:22:01 precise32 postfix/smtp[3682]: SSL_connect error to valid-example-recipient.com[192.168.33.7]:25: -1
# Jun 6 00:22:01 precise32 postfix/smtp[3682]: warning: TLS library problem: 3682:error:140740BF:SSL routines:SSL23_CLIENT_HELLO:no protocols available:s23_clnt.c:381:
# Jun 6 00:22:01 precise32 postfix/smtp[3682]: AF3B6480475: to=<vagrant@valid-example-recipient.com>, relay=valid-example-recipient.com[192.168.33.7]:25, delay=0.06, delays=0.03/0.03/0/0, dsn=4.7.5, status=deferred (Cannot start TLS: handshake failure)
#
# Also:
# Oct 10 19:12:13 sender postfix/smtp[1711]: 62D3F481249: to=<vagrant@valid-example-recipient.com>, relay=valid-example-recipient.com[192.168.33.7]:25, delay=0.07, delays=0.03/0.01/0.03/0, dsn=4.7.4, status=deferred (TLS is required, but was not offered by host valid-example-recipient.com[192.168.33.7])
def get_counts(input, config, earliest_timestamp):
    """Parse postfix log lines and tally TLS connection outcomes.

    Args:
        input: iterable of postfix log lines (e.g. an open file or sys.stdin).
        config: policy configuration; must provide get_mx_to_domain_policy_map()
            and get_address_domains().
        earliest_timestamp: entries with a timestamp older than this
            struct_time are skipped (already processed by a previous run).

    Returns:
        Tuple (counts, tls_deferred, seen_trusted, timestamp) where counts
        maps domain -> validation level -> line count, tls_deferred maps MX
        hostname -> count of TLS-related deferrals, seen_trusted is True if
        any Trusted/Verified connection was observed, and timestamp is the
        struct_time of the last line processed (0 if no lines were read).
    """
    seen_trusted = False
    counts = collections.defaultdict(lambda: collections.defaultdict(int))
    tls_deferred = collections.defaultdict(int)
    # Log lines for when a message is deferred for a TLS-related reason.
    deferred_re = re.compile("relay=([^[ ]*).* status=deferred.*TLS")
    # Log lines for when a TLS connection was successfully established. These
    # can indicate the difference between Untrusted, Trusted, and Verified
    # certs. Typical line looks like:
    # Jun 12 06:24:14 sender postfix/smtp[9045]: Untrusted TLS connection established to valid-example-recipient.com[192.168.33.7]:25: TLSv1.1 with cipher AECDH-AES256-SHA (256/256 bits)
    # ([^[]*) <--- any group of characters that is not "["
    connected_re = re.compile("([A-Za-z]+) TLS connection established to ([^[]*)")
    mx_to_domain_mapping = config.get_mx_to_domain_policy_map()
    timestamp = 0
    # BUG FIX: iterate over the passed-in stream rather than sys.stdin
    # directly, so the function honors its 'input' argument.
    for line in input:
        # The syslog timestamp is the first 15 characters of each line.
        timestamp = time.strptime(line[0:15], TIME_FORMAT)
        if timestamp < earliest_timestamp:
            continue
        deferred = deferred_re.search(line)
        connected = connected_re.search(line)
        if connected:
            validation = connected.group(1)
            mx_hostname = connected.group(2).lower()
            if validation == "Trusted" or validation == "Verified":
                seen_trusted = True
            address_domains = config.get_address_domains(mx_hostname, mx_to_domain_mapping)
            if address_domains:
                domains_str = [a.domain for a in address_domains]
                d = ', '.join(domains_str)
                counts[d][validation] += 1
                counts[d]["all"] += 1
        elif deferred:
            mx_hostname = deferred.group(1).lower()
            tls_deferred[mx_hostname] += 1
    return (counts, tls_deferred, seen_trusted, timestamp)
def print_summary(counts):
for mx_hostname, validations in counts.items():
for validation, validation_count in validations.items():
if validation == "all":
continue
print mx_hostname, validation, validation_count / validations["all"], "of", validations["all"]
if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser(description='Detect delivery problems'
        ' in Postfix log files that may be caused by security policies')
    # -c: quiet "cron" mode; only alert-worthy findings are printed.
    arg_parser.add_argument('-c', action="store_true", dest="cron", default=False)
    arg_parser.add_argument("policy_file", nargs='?',
        default=os.path.join("examples", "starttls-everywhere.json"),
        help="STARTTLS Everywhere policy file")
    args = arg_parser.parse_args()
    config = Config.Config()
    config.load_from_json_file(args.policy_file)
    # Remember where the previous run stopped so repeated invocations (e.g.
    # from cron) do not re-count old log lines.
    last_timestamp_processed = 0
    timestamp_file = '/tmp/starttls-everywhere-last-timestamp-processed.txt'
    if os.path.isfile(timestamp_file):
        last_timestamp_processed = time.strptime(open(timestamp_file).read(), TIME_FORMAT)
    # Log lines are read from stdin; get_counts skips entries older than the
    # remembered timestamp.
    (counts, tls_deferred, seen_trusted, latest_timestamp) = get_counts(sys.stdin, config, last_timestamp_processed)
    with open(timestamp_file, "w") as f:
        f.write(time.strftime(TIME_FORMAT, latest_timestamp))
    # If not running in cron, print an overall summary of log lines seen from known hosts.
    if not args.cron:
        print_summary(counts)
    if not seen_trusted:
        print 'No Trusted connections seen! Probably need to install a CAfile.'
    if len(tls_deferred) > 0:
        print "Some mail was deferred due to TLS problems:"
        for (k, v) in tls_deferred.iteritems():
            print "%s: %s" % (k, v)
| 50.571429 | 304 | 0.728814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,851 | 0.536911 |
a641d2aa2cfe97b524c9e4fc4f5456576d4c7c5c | 663 | py | Python | provarme_dashboard/migrations/0006_auto_20190623_1914.py | arferreira/dropazul_app | f341da5f2bcccd2c1f40fad00c6e5d77bba4c6f3 | [
"MIT"
] | null | null | null | provarme_dashboard/migrations/0006_auto_20190623_1914.py | arferreira/dropazul_app | f341da5f2bcccd2c1f40fad00c6e5d77bba4c6f3 | [
"MIT"
] | 9 | 2020-06-05T23:49:20.000Z | 2022-01-13T01:43:03.000Z | provarme_dashboard/migrations/0006_auto_20190623_1914.py | arferreira/dropazul_app | f341da5f2bcccd2c1f40fad00c6e5d77bba4c6f3 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.5 on 2019-06-23 19:14
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.5 (see header above); removes the
    # address-related columns from the Devolution model.

    dependencies = [
        ('provarme_dashboard', '0005_devolution_traffic'),
    ]

    operations = [
        # Drop the four now-unused address fields from Devolution.
        migrations.RemoveField(
            model_name='devolution',
            name='address',
        ),
        migrations.RemoveField(
            model_name='devolution',
            name='city',
        ),
        migrations.RemoveField(
            model_name='devolution',
            name='state',
        ),
        migrations.RemoveField(
            model_name='devolution',
            name='zipcode',
        ),
    ]
| 22.1 | 58 | 0.544495 | 578 | 0.871795 | 0 | 0 | 0 | 0 | 0 | 0 | 171 | 0.257919 |
a642467e773fb1feed9e2df7720878963e67a766 | 601 | py | Python | enemy_bot/enemy_bot_level8/burger_war/scripts/dummyArReader.py | kenkenjlab/burger_war_kit | 9332494aa1212805330c01cacc389776a8a354b6 | [
"BSD-3-Clause"
] | 1 | 2021-09-29T14:54:17.000Z | 2021-09-29T14:54:17.000Z | enemy_bot/enemy_bot_level8/burger_war/scripts/dummyArReader.py | kenkenjlab/burger_war_kit | 9332494aa1212805330c01cacc389776a8a354b6 | [
"BSD-3-Clause"
] | 3 | 2021-02-11T04:58:58.000Z | 2021-02-23T10:26:47.000Z | enemy_bot/enemy_bot_level8/burger_war/scripts/dummyArReader.py | kenkenjlab/burger_war_kit | 9332494aa1212805330c01cacc389776a8a354b6 | [
"BSD-3-Clause"
] | 1 | 2021-02-19T02:06:20.000Z | 2021-02-19T02:06:20.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This is dummy ar marker reader node.
mainly for judge server test.
by Takuya Yamaguhi.
'''
from time import sleep
import rospy
from std_msgs.msg import String
if __name__ == "__main__":
    rospy.init_node("qr_reader")
    # publish qr_val
    qr_val_pub = rospy.Publisher('target_id', String, queue_size=10)
    # Fixed set of dummy marker ids to cycle through for judge-server testing.
    sample_id_list = ['0001', '0002', '0003', '0004',
                      '0005', '0006', '0007', '0008']
    # Publish each dummy id every 3 seconds until the node is shut down.
    while not rospy.is_shutdown():
        for qr_id in sample_id_list:
            qr_val_pub.publish(qr_id)
            sleep(3)
| 25.041667 | 68 | 0.628952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.391015 |
a643184953f977a187e7f1cbf609b9b4da054af6 | 1,834 | py | Python | morph_net/framework/op_handler_decorator_test.py | nmoezzi/morph-net | fb25044ac06fc6e3b681911fc3dffe65a2b6a0a4 | [
"Apache-2.0"
] | 1 | 2019-04-25T08:23:52.000Z | 2019-04-25T08:23:52.000Z | morph_net/framework/op_handler_decorator_test.py | psyhicborg/morph-net | 0fb096d8d3b33eda9ab86c700cb6c07c9dbf10ee | [
"Apache-2.0"
] | null | null | null | morph_net/framework/op_handler_decorator_test.py | psyhicborg/morph-net | 0fb096d8d3b33eda9ab86c700cb6c07c9dbf10ee | [
"Apache-2.0"
] | 1 | 2019-04-26T14:50:13.000Z | 2019-04-26T14:50:13.000Z | """Tests for morph_net.framework.op_regularizer_decorator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from morph_net.framework import conv2d_source_op_handler
from morph_net.framework import generic_regularizers
from morph_net.framework import op_handler_decorator
from morph_net.framework import op_regularizer_manager as orm
import numpy as np
import tensorflow as tf
class DummyDecorator(generic_regularizers.OpRegularizer):
    """Toy OpRegularizer decorator that scales the regularization by 0.5."""

    # Factor applied to the wrapped regularization vector.
    _SCALE = 0.5

    def __init__(self, regularizer_object):
        """Wraps an existing OpRegularizer.

        Args:
            regularizer_object: OpRegularizer whose vectors are decorated.
        """
        self._regularization_vector = (
            regularizer_object.regularization_vector * self._SCALE)
        self._alive_vector = regularizer_object.alive_vector

    @property
    def regularization_vector(self):
        """Wrapped regularization vector, scaled by 0.5."""
        return self._regularization_vector

    @property
    def alive_vector(self):
        """Alive vector of the wrapped regularizer, passed through unchanged."""
        return self._alive_vector
class OpHandlerDecoratorTest(tf.test.TestCase):
    """Test class for OpHandlerDecorator."""

    def testOpHandlerDecorator(self):
        # Build a small conv graph whose output op the handler can regularize.
        image = tf.constant(0.0, shape=[1, 17, 19, 3])
        kernel = tf.ones([5, 5, 3, 3])
        output = tf.nn.conv2d(image, kernel, strides=[1, 1, 1, 1], padding='SAME')

        # Wrap the Conv2D source handler so every regularizer it creates is
        # passed through DummyDecorator (which halves the vector).
        decorated_op_handler = op_handler_decorator.OpHandlerDecorator(
            conv2d_source_op_handler.Conv2DSourceOpHandler(1e-3, 0), DummyDecorator)
        op_slice = orm.OpSlice(output.op, orm.Slice(0, 3))
        regularizer = decorated_op_handler.create_regularizer(op_slice)

        # The decorator halves the regularization vector but must leave the
        # alive vector untouched.
        self.assertAllClose(0.5 * np.ones(3), regularizer.regularization_vector)
        self.assertAllClose(np.ones(3), regularizer.alive_vector)
if __name__ == '__main__':
tf.test.main()
| 30.065574 | 80 | 0.767176 | 1,341 | 0.731189 | 0 | 0 | 148 | 0.080698 | 0 | 0 | 349 | 0.190294 |
a643fa6b9b973e3eaf970f5c1bcf721c67f1b002 | 141 | py | Python | code/chokudai_S002_g_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | 3 | 2019-08-16T16:55:48.000Z | 2021-04-11T10:21:40.000Z | code/chokudai_S002_g_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | code/chokudai_S002_g_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | from functools import reduce
from fractions import gcd
# Read the number of pairs, then print gcd(a, b) for each input pair.
n = int(input())
for i in range(n):
    a, b = map(int, input().split())
    # NOTE(review): fractions.gcd was removed in Python 3.9; math.gcd is the
    # modern replacement (the import lives above this block).
    print(gcd(a, b))
a6447a0836186c91e88667a84455256438771191 | 765 | py | Python | simple_clinic/cli.py | sebanie15/simple_clinic | 4dc942b0549ee6397a0e89dd7aa03eb8580b4a5a | [
"MIT"
] | null | null | null | simple_clinic/cli.py | sebanie15/simple_clinic | 4dc942b0549ee6397a0e89dd7aa03eb8580b4a5a | [
"MIT"
] | null | null | null | simple_clinic/cli.py | sebanie15/simple_clinic | 4dc942b0549ee6397a0e89dd7aa03eb8580b4a5a | [
"MIT"
] | null | null | null | """Console script for simple_clinic."""
import sys
import click
class ActiveDoctor(object):
    """Mutable holder for the currently selected doctor's id.

    Shared between click commands via ``make_pass_decorator``; a fresh
    instance starts with id 0 (no doctor selected).
    """

    def __init__(self):
        self.id = 0
active = click.make_pass_decorator(ActiveDoctor, ensure=True)
@click.group()
@click.option('--id', type=int, help='')
@active
def cli(active, id):
    """Console script for simple_clinic."""
    # Store the --id option on the shared ActiveDoctor context object so
    # subcommands in the same invocation can read it.
    active.id = id
    return 0
@cli.command()
@active
def show_activated(active):
    # Echo the id currently stored on the shared ActiveDoctor context.
    click.echo(f'Activated = {active.id}')
    # click.echo(f'activated : {activated}')
@cli.command()
@click.option('--set_id', type=int)
@active
def set_activated(active, set_id):
    # NOTE(review): this mutates the per-invocation context object only; the
    # value is not persisted anywhere, so it has no effect beyond this call.
    active.id = set_id
@cli.command()
@active
def print_test(active):
    # Debug helper: print the active id with plain print() instead of
    # click.echo().
    print(active.id)
if __name__ == "__main__":
sys.exit(cli()) # pragma: no cover
| 16.630435 | 61 | 0.667974 | 72 | 0.094118 | 0 | 0 | 481 | 0.628758 | 0 | 0 | 190 | 0.248366 |
a644ae4c04f7d11053b8e2b79110bf1aa7213f69 | 1,882 | py | Python | tests/script/test_p2pk.py | meherett/btmhdw | 6929750edb7747a9937806272127c98db86e4c98 | [
"MIT"
] | 3 | 2019-06-02T06:31:06.000Z | 2019-06-16T20:46:38.000Z | tests/script/test_p2pk.py | meherett/btmhdw | 6929750edb7747a9937806272127c98db86e4c98 | [
"MIT"
] | 3 | 2020-09-10T04:40:58.000Z | 2021-06-25T15:38:35.000Z | tests/script/test_p2pk.py | meherett/btmhdw | 6929750edb7747a9937806272127c98db86e4c98 | [
"MIT"
] | 1 | 2020-08-11T07:48:19.000Z | 2020-08-11T07:48:19.000Z | #!/usr/bin/env python3
import json
import os
from pybytom.script import (
get_public_key_hash, get_p2pkh_program, get_p2wpkh_program, get_p2wpkh_address
)
# Test Values
# Load the shared fixture file (tests/values.json) once at import time; the
# parsed dict is exposed to the test below as the module-level name "_".
base_path = os.path.dirname(__file__)
file_path = os.path.abspath(os.path.join(base_path, "..", "values.json"))
values = open(file_path, "r")
_ = json.loads(values.read())
values.close()
def test_p2pk():
    """Verify P2PK hashing, locking programs and addresses against fixtures.

    The fixture dict ``_`` is loaded from values.json at module level.
    """
    assert get_public_key_hash(
        public_key=_["script"]["p2pk"]["public_key"]
    ) == _["script"]["p2pk"]["public_key_hash"]

    assert get_p2pkh_program(
        public_key_hash=_["script"]["p2pk"]["public_key_hash"]
    ) == _["script"]["p2pk"]["program"]["p2pkh"]
    assert get_p2wpkh_program(
        public_key_hash=_["script"]["p2pk"]["public_key_hash"]
    ) == _["script"]["p2pk"]["program"]["p2wpkh"]

    # Addresses for every network, first without vapor ...
    assert get_p2wpkh_address(
        public_key_hash=_["script"]["p2pk"]["public_key_hash"], network="mainnet", vapor=False
    ) == _["script"]["p2pk"]["address"]["mainnet"]
    assert get_p2wpkh_address(
        public_key_hash=_["script"]["p2pk"]["public_key_hash"], network="solonet", vapor=False
    ) == _["script"]["p2pk"]["address"]["solonet"]
    assert get_p2wpkh_address(
        public_key_hash=_["script"]["p2pk"]["public_key_hash"], network="testnet", vapor=False
    ) == _["script"]["p2pk"]["address"]["testnet"]

    # ... and then with vapor=True.
    assert get_p2wpkh_address(
        public_key_hash=_["script"]["p2pk"]["public_key_hash"], network="mainnet", vapor=True
    ) == _["script"]["p2pk"]["vapor_address"]["mainnet"]
    assert get_p2wpkh_address(
        public_key_hash=_["script"]["p2pk"]["public_key_hash"], network="solonet", vapor=True
    ) == _["script"]["p2pk"]["vapor_address"]["solonet"]
    assert get_p2wpkh_address(
        public_key_hash=_["script"]["p2pk"]["public_key_hash"], network="testnet", vapor=True
    ) == _["script"]["p2pk"]["vapor_address"]["testnet"]
| 37.64 | 94 | 0.658874 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 685 | 0.363974 |
a644c4a8df7832e2212b7e472ca9077666b87f3c | 5,040 | py | Python | src/GradeHelpUtil.py | blackpan2/PyGrade | b8f3ed8f1e13226b34a05311f809696225c1be2e | [
"Apache-2.0"
] | 1 | 2016-04-18T17:10:46.000Z | 2016-04-18T17:10:46.000Z | src/GradeHelpUtil.py | blackpan2/PyGrade | b8f3ed8f1e13226b34a05311f809696225c1be2e | [
"Apache-2.0"
] | null | null | null | src/GradeHelpUtil.py | blackpan2/PyGrade | b8f3ed8f1e13226b34a05311f809696225c1be2e | [
"Apache-2.0"
] | null | null | null | import os
import shutil
import subprocess
from Colorify import red, cyan
from DiffJob import prepare, diff, student_output
__author__ = 'George Herde'
def cd_into_assignment(top_level, student, config):
    """Change into the student's assignment directory, prompting the grader
    for an alternative folder if the configured one is missing.

    Returns:
        True once the working directory has been changed, False if the
        grader declines to pick an alternative (enters 'None').
    """
    # Tries to move into the assignment folder (as set in the config.ini) within the student's directory
    if os.path.exists("{}/{}/{}".format(top_level, student, config.dir)):
        os.chdir("{}/{}/{}".format(top_level, student, config.dir))
        return True
    else:
        # Provides a list of folders in the student's directory to use an an alternative
        alternate_dir = [d for d in os.listdir(os.getcwd()) if os.path.isdir(d) and not d.startswith(".")]
        print("{} Not found.\nAlternate folders:".format(config.dir))
        print_array_of_strings(alternate_dir)
        target_dir = input("Choose an alternative(or 'None'): ")
        if target_dir == 'None':
            return False
        else:
            # Move into the chosen directory
            os.chdir("{}/{}/{}".format(top_level, student, target_dir))
            # Prints the name of the current folder back to the user
            print("\nUsing Directory: {}".format(os.path.relpath(os.getcwd(), start=os.getcwd() + "/..")))
            return True
def yes_no_question(question_text, y_default=True):
    """Ask a yes/no question on stdin and return the answer as a bool.

    Args:
        question_text: prompt displayed to the user.
        y_default: when True an empty answer counts as "yes", otherwise an
            empty answer counts as "no".

    Returns:
        True for a yes answer, False for a no answer.
    """
    # BUG FIX: the original used substring tests such as 'user_input in "y"',
    # so a full word like "yes" or "no" never matched and fell through to the
    # wrong branch (typing "yes" returned False and "no" returned True).
    if y_default:
        user_input = input("{} (Y/n)\n".format(question_text)).strip().lower()
        return user_input in ("", "y", "yes")
    user_input = input("{} (y/N)\n".format(question_text)).strip().lower()
    return user_input not in ("", "n", "no")
def print_array_of_strings(array):
    """Sort *array* in place and print it as one comma-separated line.

    A no-op for an empty list (the original raised IndexError on array[-1]).
    Note: the caller's list is mutated by the in-place sort, matching the
    original behavior.
    """
    if not array:
        return
    array.sort()
    print(", ".join(array))
def move_support_files(config, config_location, destination):
    """Copy configured support and grading files into the student repo.

    Support files are only copied when missing from the destination (with a
    warning); grading files are always (re)copied so the grader's current
    versions are used.
    """
    if config.support_files is not None:
        for item in config.support_files:
            if not os.path.exists("{}/{}".format(destination, item)):
                print(red("Supporting file missing {} into student repository".format(item)))
                shutil.copy("{}/{}".format(config_location, item), "{}/{}".format(destination, item))
    if config.grading_files is not None:
        for item in config.grading_files:
            print("Copying grading file: {} into student repository".format(item))
            shutil.copy("{}/{}".format(config_location, item), "{}/{}".format(destination, item))
def execute_testing(test_type, job_list, config_location=None):
    """Route the job list to the runner matching *test_type*.

    "diff" jobs additionally require config_location; unknown test types are
    silently ignored, matching the original dispatch behavior.
    """
    if test_type == "diff":
        if config_location is not None:
            execute_diff(job_list, config_location)
    elif test_type == "unit":
        execute_unit(job_list)
    elif test_type == "bash":
        execute_bash(job_list)
def execute_diff(job_list, config_location):
    """Run each diff job in order, pausing for confirmation between jobs.

    The original duplicated the loop body to avoid prompting after the final
    job; this version keeps a single body and skips the last prompt. It also
    no longer raises IndexError on an empty job list.
    """
    last_index = len(job_list) - 1
    for index, job in enumerate(job_list):
        print("\n{}".format(cyan(job.__str__())))
        print("-------------------------------------------------------------")
        prepare(job, config_location, os.getcwd())
        student_output(job)
        diff(job)
        # Pause between jobs, but not after the final one.
        if index < last_index:
            input("Continue to next Diff job...")
def execute_unit(job_list):
    """Run each unit-test job in order, pausing for confirmation between jobs.

    The original duplicated the loop body to avoid prompting after the final
    job; this version keeps a single body and skips the last prompt. It also
    no longer raises IndexError on an empty job list.
    """
    last_index = len(job_list) - 1
    for index, job in enumerate(job_list):
        print("\n{}".format(cyan(job.__str__())))
        print(cyan("-------------------------------------------------------------"))
        job.run()
        # Pause between jobs, but not after the final one.
        if index < last_index:
            input("Continue to next Unit Test job...")
def execute_bash(job_list):
    """Run each bash job in order, pausing for confirmation between jobs.

    CONSISTENCY FIX: the original printed ``job.name`` (instead of str(job))
    for the final job only; the header is now str(job) for every job, in line
    with execute_diff and execute_unit. The duplicated loop body is folded
    into one, which also avoids IndexError on an empty job list.
    """
    last_index = len(job_list) - 1
    for index, job in enumerate(job_list):
        print("\n{}".format(cyan(job.__str__())))
        print(cyan("-------------------------------------------------------------"))
        job.run()
        # Pause between jobs, but not after the final one.
        if index < last_index:
            print("\n")
            input("Continue to next Bash job...")
def view_source(config):
    """Open the required and support source files in vim, five tabs at a time.

    Args:
        config: assignment configuration providing ``required_files`` and
            (optionally) ``support_files``. The sentinel "*All*" in
            required_files expands to every file in the current directory.
    """
    # View Source Files
    vim_files = []
    for v_file in config.required_files:
        if v_file == "*All*":
            vim_files.extend(os.listdir(os.getcwd()))
        else:
            vim_files.append(v_file)
    if config.support_files is not None:
        vim_files.extend(config.support_files)
    print("Files opened: {}".format(vim_files))

    # Launch vim on successive batches of five files. The original rebuilt
    # the argument list by hand and could spawn vim with no file arguments
    # when the list was empty.
    batch_size = 5
    for start in range(0, len(vim_files), batch_size):
        batch = [str(f) for f in vim_files[start:start + batch_size]]
        subprocess.Popen(["vim", "-p"] + batch).communicate()
| 36.521739 | 106 | 0.557341 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,196 | 0.237302 |
a6453769ac7b8628be28e32dc698e710b23b78b5 | 1,823 | py | Python | src/SigasiProjectCreator/createSigasiProjectFromListOfFiles.py | mderveeuw-si/SigasiProjectCreator | a079eb47650185fb476d76880755589c1e7cb73b | [
"BSD-3-Clause"
] | 11 | 2015-09-16T16:29:08.000Z | 2020-10-10T17:17:07.000Z | src/SigasiProjectCreator/createSigasiProjectFromListOfFiles.py | mderveeuw-si/SigasiProjectCreator | a079eb47650185fb476d76880755589c1e7cb73b | [
"BSD-3-Clause"
] | 21 | 2016-06-11T18:27:25.000Z | 2022-01-04T10:52:11.000Z | src/SigasiProjectCreator/createSigasiProjectFromListOfFiles.py | mderveeuw-si/SigasiProjectCreator | a079eb47650185fb476d76880755589c1e7cb73b | [
"BSD-3-Clause"
] | 11 | 2016-06-11T15:16:12.000Z | 2022-03-24T20:30:23.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2008-2017 Sigasi
:license: BSD, see LICENSE for more details.
"""
import os
from SigasiProjectCreator.ArgsAndFileParser import ArgsAndFileParser
from SigasiProjectCreator.Creator import SigasiProjectCreator
from SigasiProjectCreator import VhdlVersion
usage = """usage: %prog project-name hdl-file hdl-file...
this script creates a sigasi project in the current working directory:
* adds one linked folder to the project that points to the common
folder of all listed hdl-files
* unmaps all hdl-files in the common folder, except the listed files.
These files are mapped to the 'work' library
example: %prog MyProjectName foo.vhdl bar.sv
"""
def main():
    """Create a Sigasi project for the HDL files given on the command line.

    The first positional argument is the project name; the remaining
    arguments are HDL files. Only the listed files are mapped to the 'work'
    library; everything else under their common folder is unmapped.
    """
    parser = ArgsAndFileParser(usage)
    args = parser.parse_args(2)
    project_name = args[0]
    hdl_files = args[1:]
    destination = os.getcwd()

    # Find common directory of the hdl files
    abs_paths = [os.path.abspath(x) for x in hdl_files]
    folder = os.path.dirname(os.path.commonprefix([p + os.path.sep for p in abs_paths]))

    sigasi_project_file_creator = SigasiProjectCreator(project_name, VhdlVersion.NINETY_THREE)

    # Create Project File and add a link the common source folder
    folder_name = os.path.basename(os.path.normpath(folder))
    sigasi_project_file_creator.add_link(folder_name, folder, True)

    # Create Library Mapping File
    # Unmap everything except the list of files (map those to work)
    sigasi_project_file_creator.unmap("/")
    for path in abs_paths:
        relative_file_path = os.path.relpath(path, folder)
        sigasi_project_file_creator.add_mapping(folder_name + "/" + relative_file_path, "work")

    sigasi_project_file_creator.write(destination)
if __name__ == '__main__':
main()
| 35.057692 | 95 | 0.724081 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 770 | 0.422381 |
a645385a232a6b62393f9ca56ec107d5d9401d70 | 3,944 | py | Python | imagepypelines_image/Resize.py | jmaggio14/imagepypelines_image | a93a74b476033904bae055ba1ac7c32614e601b6 | [
"MIT"
] | 1 | 2020-03-15T23:56:16.000Z | 2020-03-15T23:56:16.000Z | imagepypelines_image/Resize.py | jmaggio14/imagepypelines_image | a93a74b476033904bae055ba1ac7c32614e601b6 | [
"MIT"
] | null | null | null | imagepypelines_image/Resize.py | jmaggio14/imagepypelines_image | a93a74b476033904bae055ba1ac7c32614e601b6 | [
"MIT"
] | null | null | null | from .util import dtype_type_check,\
interpolation_type_check,\
channel_type_check,\
get_cv2_interp_type
from .imports import import_opencv
from .blocks import ImageBlock
cv2 = import_opencv()
import numpy as np
import imagepypelines as ip
class Resize(ImageBlock):
    """Resizes images to absolute or proportional target dimensions.

    Attributes:
        w_scale_type(str): type of scaling used for image width, either
            "proportional" or "absolute"
        h_scale_type(str): type of scaling used for image height, either
            "proportional" or "absolute"
        h_param(int,float): vertical scale or absolute height to resize
            image to
        w_param(int,float): horizontal scale or absolute width to resize
            image to
        interp(str): interpolation type for resizing. One of
            'nearest', 'linear', 'area', 'cubic', 'lanczos4'

    Default Enforcement:
        1) image
            type: np.ndarray
            shapes: [(None,None,None),(None,None)]
            notes: image must be ordered [height,width,channels]

    Batch Size:
        "each"
    """
    def __init__(self, h=None, w=None, scale_h=None, scale_w=None, interp='nearest'):
        """Instantiates the object

        Args:
            w(None,int): width to scale image to, must be None if scale_w is
                defined
            h(None,int): height to scale image to, must be None if scale_h is
                defined
            scale_h(None,float): vertical scale for the image, must be None
                if 'h' is defined
            scale_w(None,float): horizontal scale for the image, must be None
                if 'w' is defined
            interp(str): interpolation type for image scaling, must be one of:
                'nearest', 'linear', 'area', 'cubic', 'lanczos4'

        Raises:
            ValueError: if neither or both of (h, scale_h) or (w, scale_w)
                are defined
        """
        super().__init__(order="HWC")

        # make sure either h or scale_h is defined
        if (h is None) and (scale_h is None):
            raise ValueError("'h' or 'scale_h' must be defined")
        # make sure either w or scale_w is defined
        if (w is None) and (scale_w is None):
            raise ValueError("'w' or 'scale_w' must be defined")
        # make sure only h or scale_h is defined
        if (not h is None) and (not scale_h is None):
            raise ValueError("only 'h' or 'scale_h' can be defined")
        # make sure only w or scale_w is defined
        if (not w is None) and (not scale_w is None):
            raise ValueError("only 'w' or 'scale_w' can be defined")

        # set w instance variables
        if w is None:
            self.w_scale_type = 'proportional'
            self.w_param = scale_w
        else:
            self.w_scale_type = 'absolute'
            self.w_param = w

        # set h instance variables
        if h is None:
            self.h_scale_type = 'proportional'
            self.h_param = scale_h
        else:
            self.h_scale_type = 'absolute'
            self.h_param = h

        self.__cv2_interp = get_cv2_interp_type(interp)
        self.interp = interp

        self.enforce('image', np.ndarray, [(None,None,None),(None,None)])

    def process(self, image):
        """Resizes the image to the specified dimensions

        Args:
            image(np.ndarray): image to resize, must be shaped
                [height,width,channels]

        Returns:
            np.ndarray: resized image
        """
        # get h dimension; int() because cv2.resize requires integer sizes
        # (round(x, 0) alone returns a float in Python 3)
        if self.h_scale_type == "proportional":
            new_h = int(round(self.h_param * image.shape[0], 0))
        else:
            new_h = self.h_param

        # get w dimension
        if self.w_scale_type == "proportional":
            new_w = int(round(self.w_param * image.shape[1], 0))
        else:
            new_w = self.w_param

        # BUG FIX: the original called the non-existent cv2.rezize, which
        # raised AttributeError on every call.
        return cv2.resize(image, (new_w, new_h), interpolation=self.__cv2_interp)
# END
| 31.552 | 85 | 0.580629 | 3,640 | 0.922921 | 0 | 0 | 0 | 0 | 0 | 0 | 2,184 | 0.553753 |
a645baf6910431ca3700072fb0f62f39e11af65b | 458 | py | Python | csvorm/relations.py | AppexX/python-csvorm | ef7819a756f2736c30c9404282f8f3b7524f7570 | [
"Apache-2.0"
] | 2 | 2018-07-24T08:10:30.000Z | 2021-12-16T17:39:02.000Z | csvorm/relations.py | AppexX/python-csvorm | ef7819a756f2736c30c9404282f8f3b7524f7570 | [
"Apache-2.0"
] | null | null | null | csvorm/relations.py | AppexX/python-csvorm | ef7819a756f2736c30c9404282f8f3b7524f7570 | [
"Apache-2.0"
] | 2 | 2018-07-23T05:20:08.000Z | 2018-09-17T11:32:21.000Z | class RelationType(object):
ONE_TO_MANY = "one_to_many"
ONE_TO_ONE = "one_to_one"
class Relation(object):
    """Base class tying a relation to the model class it resolves to."""

    def __init__(self, cls):
        # Model class whose .get() is used by subclasses for lookups.
        self.cls = cls
class HasOne(Relation):
    """One-to-one relation: resolves a single related record by id."""

    def get(self, id):
        # Delegate the lookup straight to the related model class.
        return self.cls.get(id=id)
class HasMany(Relation):
    """One-to-many relation: resolves records from a comma-separated id list."""

    def get(self, id):
        # Each token in the comma-separated id string yields a batch of
        # records from the related model; collect them into one list.
        records = []
        for token in id.split(","):
            records.extend(self.cls.get(id=token.strip()))
        return records
| 18.32 | 53 | 0.582969 | 448 | 0.978166 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.061135 |
a64954da5abc2b618672c110a9d724b19813f598 | 9,145 | py | Python | benchmarks/compare_with_others.py | ProLoD/icontract-hypothesis | fe6c12a7395807d78880c2bbead48580fe8a1cff | [
"MIT"
] | 57 | 2021-01-14T12:01:19.000Z | 2022-03-02T10:54:43.000Z | benchmarks/compare_with_others.py | ProLoD/icontract-hypothesis | fe6c12a7395807d78880c2bbead48580fe8a1cff | [
"MIT"
] | 7 | 2021-02-15T16:28:55.000Z | 2021-07-23T10:58:21.000Z | benchmarks/compare_with_others.py | ProLoD/icontract-hypothesis | fe6c12a7395807d78880c2bbead48580fe8a1cff | [
"MIT"
] | 2 | 2021-01-21T05:35:58.000Z | 2021-04-02T08:28:06.000Z | #!/usr/bin/env python3
"""Benchmark icontract against deal when used together with hypothesis."""
import os
import sys
import timeit
from typing import List
import deal
import dpcontracts
import hypothesis
import hypothesis.extra.dpcontracts
import hypothesis.strategies
import icontract
import tabulate
import icontract_hypothesis
def benchmark_icontract_assume_preconditions(arg_count: int = 1) -> None:
    """Benchmark the Hypothesis testing with icontract and rejection sampling."""
    count = 0
    # Each branch defines an identical dummy function with 1-3 positive-int
    # preconditions; invalid samples are rejected via assume_preconditions.
    if arg_count == 1:

        @icontract.require(lambda a: a > 0)
        def some_func(a: int) -> None:
            nonlocal count
            count += 1
            pass

        assume_preconditions = icontract_hypothesis.make_assume_preconditions(some_func)

        @hypothesis.settings(
            suppress_health_check=(hypothesis.HealthCheck.filter_too_much,)
        )
        @hypothesis.given(a=hypothesis.strategies.integers())
        def execute(a: int) -> None:
            assume_preconditions(a)
            some_func(a)

    elif arg_count == 2:

        @icontract.require(lambda a: a > 0)
        @icontract.require(lambda b: b > 0)
        def some_func(a: int, b: int) -> None:
            nonlocal count
            count += 1
            pass

        assume_preconditions = icontract_hypothesis.make_assume_preconditions(some_func)

        @hypothesis.settings(
            suppress_health_check=(hypothesis.HealthCheck.filter_too_much,)
        )
        @hypothesis.given(
            a=hypothesis.strategies.integers(), b=hypothesis.strategies.integers()
        )
        def execute(a: int, b: int) -> None:
            assume_preconditions(a=a, b=b)
            some_func(a, b)

    elif arg_count == 3:

        @icontract.require(lambda a: a > 0)
        @icontract.require(lambda b: b > 0)
        @icontract.require(lambda c: c > 0)
        def some_func(a: int, b: int, c: int) -> None:
            nonlocal count
            count += 1
            pass

        assume_preconditions = icontract_hypothesis.make_assume_preconditions(some_func)

        @hypothesis.settings(
            suppress_health_check=(hypothesis.HealthCheck.filter_too_much,)
        )
        @hypothesis.given(
            a=hypothesis.strategies.integers(),
            b=hypothesis.strategies.integers(),
            c=hypothesis.strategies.integers(),
        )
        def execute(a: int, b: int, c: int) -> None:
            assume_preconditions(a=a, b=b, c=c)
            some_func(a, b, c)

    else:
        raise NotImplementedError("arg_count {}".format(arg_count))

    execute()

    # Assert the count of function executions for fair tests
    assert count == 100
def benchmark_icontract_inferred_strategy(arg_count: int = 1) -> None:
    """Benchmark the Hypothesis testing with icontract and inferred search strategies."""
    count = 0
    if arg_count == 1:

        @icontract.require(lambda a: a > 0)
        def some_func(a: int) -> None:
            nonlocal count
            count += 1
            pass

    elif arg_count == 2:

        @icontract.require(lambda a: a > 0)
        @icontract.require(lambda b: b > 0)
        def some_func(a: int, b: int) -> None:
            nonlocal count
            count += 1
            pass

    elif arg_count == 3:

        @icontract.require(lambda a: a > 0)
        @icontract.require(lambda b: b > 0)
        @icontract.require(lambda c: c > 0)
        def some_func(a: int, b: int, c: int) -> None:
            nonlocal count
            count += 1
            pass

    else:
        raise NotImplementedError("arg_count {}".format(arg_count))

    # Search strategies are inferred from the preconditions here, so no
    # rejection sampling is needed.
    icontract_hypothesis.test_with_inferred_strategy(some_func)

    # Assert the count of function executions for fair tests
    assert count == 100
def benchmark_dpcontracts(arg_count: int = 1) -> None:
    """Benchmark the Hypothesis testing with dpcontracts."""
    count = 0
    if arg_count == 1:

        @dpcontracts.require("some dummy contract", lambda args: args.a > 0)
        def some_func(a: int) -> None:
            nonlocal count
            count += 1
            pass

        @hypothesis.settings(
            suppress_health_check=(hypothesis.HealthCheck.filter_too_much,)
        )
        @hypothesis.given(a=hypothesis.strategies.integers())
        def execute(a: int) -> None:
            # fulfill() turns violated preconditions into rejected samples.
            hypothesis.extra.dpcontracts.fulfill(some_func)(a)

    elif arg_count == 2:

        @dpcontracts.require("some dummy contract", lambda args: args.a > 0)
        @dpcontracts.require("some dummy contract", lambda args: args.b > 0)
        def some_func(a: int, b: int) -> None:
            nonlocal count
            count += 1
            pass

        @hypothesis.settings(
            suppress_health_check=(hypothesis.HealthCheck.filter_too_much,)
        )
        @hypothesis.given(
            a=hypothesis.strategies.integers(), b=hypothesis.strategies.integers()
        )
        def execute(a: int, b: int) -> None:
            hypothesis.extra.dpcontracts.fulfill(some_func)(a, b)

    elif arg_count == 3:

        @dpcontracts.require("some dummy contract", lambda args: args.a > 0)
        @dpcontracts.require("some dummy contract", lambda args: args.b > 0)
        @dpcontracts.require("some dummy contract", lambda args: args.c > 0)
        def some_func(a: int, b: int, c: int) -> None:
            nonlocal count
            count += 1
            pass

        @hypothesis.settings(
            suppress_health_check=(hypothesis.HealthCheck.filter_too_much,)
        )
        @hypothesis.given(
            a=hypothesis.strategies.integers(),
            b=hypothesis.strategies.integers(),
            c=hypothesis.strategies.integers(),
        )
        def execute(a: int, b: int, c: int) -> None:
            hypothesis.extra.dpcontracts.fulfill(some_func)(a, b, c)

    else:
        raise NotImplementedError("arg_count {}".format(arg_count))

    execute()

    # Assert the count of function executions for fair tests
    assert count == 100
def benchmark_deal(arg_count: int = 1) -> None:
    """Benchmark the Hypothesis testing with deal."""
    count = 0
    # deal.cases drives the Hypothesis generation itself, so each branch both
    # defines the dummy function and immediately runs its 100 cases.
    if arg_count == 1:

        @deal.pre(lambda _: _.a > 0)
        def some_func(a: int) -> None:
            nonlocal count
            count += 1
            pass

        for case in deal.cases(some_func, count=100):
            case()

    elif arg_count == 2:

        @deal.pre(lambda _: _.a > 0)
        @deal.pre(lambda _: _.b > 0)
        def some_func(a: int, b: int) -> None:
            nonlocal count
            count += 1
            pass

        for case in deal.cases(some_func, count=100):
            case()

    elif arg_count == 3:

        @deal.pre(lambda _: _.a > 0)
        @deal.pre(lambda _: _.b > 0)
        @deal.pre(lambda _: _.c > 0)
        def some_func(a: int, b: int, c: int) -> None:
            nonlocal count
            count += 1
            pass

        for case in deal.cases(some_func, count=100):
            case()

    else:
        raise NotImplementedError("arg_count {}".format(arg_count))

    assert count == 100
def writeln_utf8(text: str = "") -> None:
    """Write *text* plus a platform line separator to STDOUT as UTF-8 bytes.

    ``print()`` is avoided on purpose: its output encoding follows the
    locale's preferred encoding, which is not reliably UTF-8 on Windows.
    See: https://stackoverflow.com/questions/31469707/changing-the-locale-preferred-encoding-in-python-3-in-windows
    """
    stream = sys.stdout.buffer
    payload = "".join((text, os.linesep))
    stream.write(payload.encode("utf-8"))
def measure_functions() -> None:
    """Time every benchmark for 1-3 arguments and print reST comparison tables."""
    # yapf: disable
    funcs = [
        'benchmark_icontract_inferred_strategy',
        'benchmark_icontract_assume_preconditions',
        'benchmark_dpcontracts',
        'benchmark_deal',
    ]
    # yapf: enable

    durations = [0.0] * len(funcs)

    number = 10

    for arg_count in [1, 2, 3]:
        # Time each benchmark; timeit imports it from __main__, so this
        # function must run from this script's main guard.
        for i, func in enumerate(funcs):
            duration = timeit.timeit(
                "{}(arg_count={})".format(func, arg_count),
                setup="from __main__ import {}".format(func),
                number=number,
            )
            durations[i] = duration

        table = []  # type: List[List[str]]

        # Relative times are against the first entry (inferred strategy).
        for func, duration in zip(funcs, durations):
            # yapf: disable
            table.append([
                '`{}`'.format(func),
                '{:.2f} s'.format(duration),
                '{:.2f} ms'.format(duration * 1000 / number),
                '{:.0f}%'.format(duration * 100 / durations[0])
            ])
            # yapf: enable

        # yapf: disable
        table_str = tabulate.tabulate(
            table,
            headers=['Case', 'Total time', 'Time per run', 'Relative time per run'],
            colalign=('left', 'right', 'right', 'right'),
            tablefmt='rst')
        # yapf: enable

        writeln_utf8()
        writeln_utf8("Argument count: {}".format(arg_count))
        writeln_utf8()
        writeln_utf8(table_str)
if __name__ == "__main__":
writeln_utf8("Benchmarking Hypothesis testing:")
writeln_utf8("")
measure_functions()
| 29.310897 | 115 | 0.584254 | 0 | 0 | 0 | 0 | 4,519 | 0.49415 | 0 | 0 | 1,489 | 0.162821 |
a64a38e4de8c4fb4cf2ac3e9a3fade9d7fe3f5d7 | 1,898 | py | Python | experiments/plot.py | henrytseng/srcnn | 1745987e0722c7b03fd870b793f10f627719d25f | [
"MIT"
] | 125 | 2017-04-12T10:21:27.000Z | 2022-03-18T07:06:35.000Z | experiments/plot.py | henrytseng/srcnn | 1745987e0722c7b03fd870b793f10f627719d25f | [
"MIT"
] | 9 | 2017-03-09T19:55:39.000Z | 2018-02-09T22:10:55.000Z | experiments/plot.py | henrytseng/srcnn | 1745987e0722c7b03fd870b793f10f627719d25f | [
"MIT"
] | 66 | 2017-03-09T08:32:11.000Z | 2022-02-09T08:20:35.000Z | from pathlib import Path
import matplotlib.pyplot as plt
import pandas as pd
# Directory that receives every generated figure.
results_dir = Path('results')
results_dir.mkdir(exist_ok=True)

# Performance plot: PSNR vs. run time, one figure per (scale, test set).
for scale in [3, 4]:
    for test_set in ['Set5', 'Set14']:
        time = []   # average run time per model (seconds)
        psnr = []   # average PSNR per model (dB)
        model = []  # model label derived from the results directory name
        for save_dir in sorted(Path('.').glob(f'*-sc{scale}')):
            if 'bicubic' not in save_dir.stem:
                model += [save_dir.stem.rsplit('-', 1)[0].upper()]
                metrics_file = save_dir / f'test/{test_set}/metrics.csv'
                metrics = pd.read_csv(str(metrics_file), index_col='name')
                # NOTE(review): attribute access assumes metrics.csv has
                # 'time' and 'psnr' columns and an 'average' row — confirm
                # against the script that writes metrics.csv.
                time += [metrics.time.average]
                psnr += [metrics.psnr.average]
        plt.figure()
        plt.semilogx(time, psnr, '.')
        plt.grid(True, which='both')
        # Annotate each point with its model name (shortened for 'NS' variants).
        for x, y, s in zip(time, psnr, model):
            if 'NS' in s:
                s = s.split('-')[1]
            plt.text(x, y, s)
        plt.xlabel('Run time (sec)')
        plt.ylabel('PSNR (dB)')
        plt.title(f'Scale {scale} on {test_set}')
        plt.savefig(str(results_dir / f'performance-sc{scale}-{test_set}.png'))
        plt.close()

# History plot: validation PSNR over training epochs, one figure per scale.
for scale in [3, 4]:
    plt.figure()
    for save_dir in sorted(Path('.').glob(f'*-sc{scale}')):
        if 'bicubic' not in save_dir.stem:
            model = save_dir.stem.rsplit('-', 1)[0].upper()
            history_file = save_dir / f'train/history.csv'
            history = pd.read_csv(str(history_file))
            plt.plot(history.epoch, history.val_psnr, label=model, alpha=0.8)
    plt.legend()
    plt.xlabel('Epochs')
    plt.ylabel('Average test PSNR (dB)')
    # Save the full-range figure first, then a zoomed-in variant of the
    # same figure with scale-specific axis limits.
    plt.savefig(str(results_dir / f'history-sc{scale}.png'))
    plt.xlim(0, 500)
    if scale == 3:
        plt.ylim(31.5, 34.5)
    if scale == 4:
        plt.ylim(29, 32)
    plt.savefig(str(results_dir / f'history-sc{scale}-zoom.png'))
    plt.close()
| 33.892857 | 79 | 0.558483 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 365 | 0.192308 |
a64bf21d0735e0fc79835922c07c262f127237c8 | 585 | py | Python | main.py | deepkick/Visualization-of-the-number-of-Covid19-infected-people-by-Python | cbb4a4f1cde15eee7b27005604023c3e31da7348 | [
"MIT"
] | null | null | null | main.py | deepkick/Visualization-of-the-number-of-Covid19-infected-people-by-Python | cbb4a4f1cde15eee7b27005604023c3e31da7348 | [
"MIT"
] | null | null | null | main.py | deepkick/Visualization-of-the-number-of-Covid19-infected-people-by-Python | cbb4a4f1cde15eee7b27005604023c3e31da7348 | [
"MIT"
] | null | null | null | import tkinter
import translate
from translate import translate
def btn_click():
    """Translate the English text in txt_1 and show the result in txt_2."""
    result = str(translate(txt_1.get()))
    # Clear the previous result first: insert(0, ...) alone prepends onto
    # whatever is already in the Entry, so a second click would show the new
    # translation concatenated with the stale one.
    txt_2.delete(0, tkinter.END)
    txt_2.insert(0, result)
# Build the main window
tki = tkinter.Tk()
tki.geometry('300x300')
tki.title('翻訳機')  # window title: "Translator"
# Labels
lbl_1 = tkinter.Label(text='英文:')  # "English text:"
lbl_1.place(x=30, y=70)
lbl_2 = tkinter.Label(text='日本文:')  # "Japanese text:"
lbl_2.place(x=30, y=100)
# Text boxes
txt_1 = tkinter.Entry(width=20)  # input: English source text
txt_1.place(x=90, y=70)
txt_2 = tkinter.Entry(width=20)  # output: translated text
txt_2.place(x=90, y=100)
# Button
btn = tkinter.Button(tki, text='翻訳', command=btn_click)  # "Translate"
btn.place(x=140, y=170)
# Enter the Tk event loop (blocks until the window is closed)
tki.mainloop()
a64c022309cbe0a220144817328b319ba96f7547 | 15,665 | py | Python | tests/commands/test_cloud.py | pm3310/sagify | 79de19e938414a4d0de687e1d3d443711314d9d2 | [
"MIT"
] | 3 | 2019-06-10T18:34:42.000Z | 2019-10-17T13:51:54.000Z | tests/commands/test_cloud.py | pm3310/sagify | 79de19e938414a4d0de687e1d3d443711314d9d2 | [
"MIT"
] | null | null | null | tests/commands/test_cloud.py | pm3310/sagify | 79de19e938414a4d0de687e1d3d443711314d9d2 | [
"MIT"
] | 2 | 2019-10-17T13:52:10.000Z | 2021-08-21T07:49:50.000Z | try:
from unittest.mock import patch
except ImportError:
from mock import patch
from click.testing import CliRunner
import sagify
from sagify.config.config import Config
from sagify.__main__ import cli
class TestUploadData(object):
    """CLI tests for `sagify cloud upload-data` with the SageMaker client mocked out."""

    def test_upload_data_happy_case(self):
        """Default sagify dir: input/output locations are forwarded to SageMakerClient."""
        runner = CliRunner()
        with patch(
            'sagify.commands.initialize._get_local_aws_profiles',
            return_value=['default', 'sagify']
        ):
            with patch.object(
                sagify.config.config.ConfigManager,
                'get_config',
                lambda _: Config(
                    image_name='sagemaker-img', aws_profile='sagify', aws_region='us-east-1'
                )
            ):
                with patch(
                    'sagify.sagemaker.sagemaker.SageMakerClient'
                ) as mocked_sage_maker_client:
                    instance = mocked_sage_maker_client.return_value
                    instance.upload_data.return_value = 's3://path-to-data/data/'
                    with runner.isolated_filesystem():
                        runner.invoke(cli=cli, args=['init'], input='my_app\n1\n2\nus-east-1\n')
                        result = runner.invoke(
                            cli=cli,
                            args=[
                                'cloud', 'upload-data',
                                '-i', 'input_data/',
                                '-s', 's3://path-to-data'
                            ]
                        )
                        instance.upload_data.assert_called_with('input_data/', 's3://path-to-data')
                        assert result.exit_code == 0

    def test_upload_data_with_dir_arg_happy_case(self):
        """An explicit `-d` sagify dir (created via `init -d`) is honoured."""
        runner = CliRunner()
        with patch(
            'sagify.commands.initialize._get_local_aws_profiles',
            return_value=['default', 'sagify']
        ):
            with patch.object(
                sagify.config.config.ConfigManager,
                'get_config',
                lambda _: Config(
                    image_name='sagemaker-img', aws_profile='sagify', aws_region='us-east-1'
                )
            ):
                with patch(
                    'sagify.sagemaker.sagemaker.SageMakerClient'
                ) as mocked_sage_maker_client:
                    instance = mocked_sage_maker_client.return_value
                    instance.upload_data.return_value = 's3://path-to-data/data/'
                    with runner.isolated_filesystem():
                        runner.invoke(
                            cli=cli, args=['init', '-d', 'src/'], input='my_app\n1\n2\nus-east-1\n'
                        )
                        result = runner.invoke(
                            cli=cli,
                            args=[
                                'cloud', 'upload-data',
                                '-d',
                                'src/',
                                '-i', 'input_data/',
                                '-s', 's3://path-to-data'
                            ]
                        )
                        instance.upload_data.assert_called_with('input_data/', 's3://path-to-data')
                        assert result.exit_code == 0

    def test_upload_data_with_invalid_dir_arg_should_fail(self):
        """A non-existent `-d` dir aborts the command before any upload happens.

        Renamed from ``..._happy_case`` (copy-paste misnomer): this test
        exercises the failure path, not a happy case.
        """
        runner = CliRunner()
        with patch(
            'sagify.commands.initialize._get_local_aws_profiles',
            return_value=['default', 'sagify']
        ):
            with patch.object(
                sagify.config.config.ConfigManager,
                'get_config',
                lambda _: Config(
                    image_name='sagemaker-img', aws_profile='sagify', aws_region='us-east-1'
                )
            ):
                with patch(
                    'sagify.sagemaker.sagemaker.SageMakerClient'
                ) as mocked_sage_maker_client:
                    instance = mocked_sage_maker_client.return_value
                    instance.upload_data.return_value = 's3://path-to-data/data/'
                    with runner.isolated_filesystem():
                        runner.invoke(
                            cli=cli, args=['init', '-d', 'src/'], input='my_app\n1\n2\nus-east-1\n'
                        )
                        result = runner.invoke(
                            cli=cli,
                            args=[
                                'cloud', 'upload-data',
                                '-d',
                                'invalid_dir/',
                                '-i', 'input_data/',
                                '-s', 's3://path-to-data'
                            ]
                        )
                        assert instance.upload_data.call_count == 0
                        assert result.exit_code == -1
class TestTrain(object):
    """CLI tests for `sagify cloud train` with the SageMaker client mocked out."""

    def test_train_happy_case(self):
        """Default sagify dir: training kwargs are forwarded to SageMakerClient.train."""
        runner = CliRunner()
        with patch(
            'sagify.commands.initialize._get_local_aws_profiles',
            return_value=['default', 'sagify']
        ):
            with patch.object(
                sagify.config.config.ConfigManager,
                'get_config',
                lambda _: Config(
                    image_name='sagemaker-img', aws_profile='sagify', aws_region='us-east-1'
                )
            ):
                with patch(
                    'sagify.sagemaker.sagemaker.SageMakerClient'
                ) as mocked_sage_maker_client:
                    instance = mocked_sage_maker_client.return_value
                    with runner.isolated_filesystem():
                        runner.invoke(cli=cli, args=['init'], input='my_app\n1\n2\nus-east-1\n')
                        result = runner.invoke(
                            cli=cli,
                            args=[
                                'cloud', 'train',
                                '-i', 's3://bucket/input',
                                '-o', 's3://bucket/output',
                                '-e', 'ml.c4.2xlarge'
                            ]
                        )
                        assert instance.train.call_count == 1
                        instance.train.assert_called_with(
                            image_name='sagemaker-img',
                            input_s3_data_location='s3://bucket/input',
                            train_instance_count=1,
                            train_instance_type='ml.c4.2xlarge',
                            train_volume_size=30,
                            train_max_run=24 * 60 * 60,
                            output_path='s3://bucket/output',
                            hyperparameters=None
                        )
                        assert result.exit_code == 0

    def test_train_with_dir_arg_happy_case(self):
        """An explicit `-d` sagify dir (created via `init -d`) is honoured."""
        runner = CliRunner()
        with patch(
            'sagify.commands.initialize._get_local_aws_profiles',
            return_value=['default', 'sagify']
        ):
            with patch.object(
                sagify.config.config.ConfigManager,
                'get_config',
                lambda _: Config(
                    image_name='sagemaker-img', aws_profile='sagify', aws_region='us-east-1'
                )
            ):
                with patch(
                    'sagify.sagemaker.sagemaker.SageMakerClient'
                ) as mocked_sage_maker_client:
                    instance = mocked_sage_maker_client.return_value
                    with runner.isolated_filesystem():
                        runner.invoke(
                            cli=cli, args=['init', '-d', 'src/'], input='my_app\n1\n2\nus-east-1\n'
                        )
                        result = runner.invoke(
                            cli=cli,
                            args=[
                                'cloud', 'train',
                                '-d',
                                'src/',
                                '-i', 's3://bucket/input',
                                '-o', 's3://bucket/output',
                                '-e', 'ml.c4.2xlarge'
                            ]
                        )
                        assert instance.train.call_count == 1
                        instance.train.assert_called_with(
                            image_name='sagemaker-img',
                            input_s3_data_location='s3://bucket/input',
                            train_instance_count=1,
                            train_instance_type='ml.c4.2xlarge',
                            train_volume_size=30,
                            train_max_run=24 * 60 * 60,
                            output_path='s3://bucket/output',
                            hyperparameters=None
                        )
                        assert result.exit_code == 0

    def test_train_with_invalid_dir_arg_should_fail(self):
        """A non-existent `-d` dir aborts the command before training starts.

        Renamed from ``..._happy_case`` (copy-paste misnomer): this test
        exercises the failure path, not a happy case.
        """
        runner = CliRunner()
        with patch(
            'sagify.commands.initialize._get_local_aws_profiles',
            return_value=['default', 'sagify']
        ):
            with patch.object(
                sagify.config.config.ConfigManager,
                'get_config',
                lambda _: Config(
                    image_name='sagemaker-img', aws_profile='sagify', aws_region='us-east-1'
                )
            ):
                with patch(
                    'sagify.sagemaker.sagemaker.SageMakerClient'
                ) as mocked_sage_maker_client:
                    instance = mocked_sage_maker_client.return_value
                    with runner.isolated_filesystem():
                        runner.invoke(
                            cli=cli, args=['init', '-d', 'src/'], input='my_app\n1\n2\nus-east-1\n'
                        )
                        result = runner.invoke(
                            cli=cli,
                            args=[
                                'cloud', 'train',
                                '-d',
                                'invalid_dir/',
                                '-i', 's3://bucket/input',
                                '-o', 's3://bucket/output',
                                '-e', 'ml.c4.2xlarge'
                            ]
                        )
                        assert not instance.train.called
                        assert result.exit_code == -1
class TestDeploy(object):
    """CLI tests for `sagify cloud deploy` with the SageMaker client mocked out.

    All three methods were named ``test_train_*`` — a copy-paste artefact from
    :class:`TestTrain` — although they exercise ``deploy``; they are renamed
    accordingly (test discovery only requires the ``test_`` prefix).
    """

    def test_deploy_happy_case(self):
        """Default sagify dir: deploy kwargs are forwarded to SageMakerClient.deploy."""
        runner = CliRunner()
        with patch(
            'sagify.commands.initialize._get_local_aws_profiles',
            return_value=['default', 'sagify']
        ):
            with patch.object(
                sagify.config.config.ConfigManager,
                'get_config',
                lambda _: Config(
                    image_name='sagemaker-img', aws_profile='sagify', aws_region='us-east-1'
                )
            ):
                with patch(
                    'sagify.sagemaker.sagemaker.SageMakerClient'
                ) as mocked_sage_maker_client:
                    instance = mocked_sage_maker_client.return_value
                    with runner.isolated_filesystem():
                        runner.invoke(cli=cli, args=['init'], input='my_app\n1\n2\nus-east-1\n')
                        result = runner.invoke(
                            cli=cli,
                            args=[
                                'cloud', 'deploy',
                                '-m', 's3://bucket/model/location/model.tar.gz',
                                '-n', '2',
                                '-e', 'ml.c4.2xlarge'
                            ]
                        )
                        assert instance.deploy.call_count == 1
                        instance.deploy.assert_called_with(
                            image_name='sagemaker-img',
                            s3_model_location='s3://bucket/model/location/model.tar.gz',
                            train_instance_count=2,
                            train_instance_type='ml.c4.2xlarge'
                        )
                        assert result.exit_code == 0

    def test_deploy_with_dir_arg_happy_case(self):
        """An explicit `-d` sagify dir (created via `init -d`) is honoured."""
        runner = CliRunner()
        with patch(
            'sagify.commands.initialize._get_local_aws_profiles',
            return_value=['default', 'sagify']
        ):
            with patch.object(
                sagify.config.config.ConfigManager,
                'get_config',
                lambda _: Config(
                    image_name='sagemaker-img', aws_profile='sagify', aws_region='us-east-1'
                )
            ):
                with patch(
                    'sagify.sagemaker.sagemaker.SageMakerClient'
                ) as mocked_sage_maker_client:
                    instance = mocked_sage_maker_client.return_value
                    with runner.isolated_filesystem():
                        runner.invoke(
                            cli=cli, args=['init', '-d', 'src/'], input='my_app\n1\n2\nus-east-1\n'
                        )
                        result = runner.invoke(
                            cli=cli,
                            args=[
                                'cloud', 'deploy',
                                '-d',
                                'src/',
                                '-m', 's3://bucket/model/location/model.tar.gz',
                                '-n', '2',
                                '-e', 'ml.c4.2xlarge'
                            ]
                        )
                        assert instance.deploy.call_count == 1
                        instance.deploy.assert_called_with(
                            image_name='sagemaker-img',
                            s3_model_location='s3://bucket/model/location/model.tar.gz',
                            train_instance_count=2,
                            train_instance_type='ml.c4.2xlarge'
                        )
                        assert result.exit_code == 0

    def test_deploy_with_invalid_dir_arg_should_fail(self):
        """A non-existent `-d` dir aborts the command before any deployment."""
        runner = CliRunner()
        with patch(
            'sagify.commands.initialize._get_local_aws_profiles',
            return_value=['default', 'sagify']
        ):
            with patch.object(
                sagify.config.config.ConfigManager,
                'get_config',
                lambda _: Config(
                    image_name='sagemaker-img', aws_profile='sagify', aws_region='us-east-1'
                )
            ):
                with patch(
                    'sagify.sagemaker.sagemaker.SageMakerClient'
                ) as mocked_sage_maker_client:
                    instance = mocked_sage_maker_client.return_value
                    with runner.isolated_filesystem():
                        runner.invoke(
                            cli=cli, args=['init', '-d', 'src/'], input='my_app\n1\n2\nus-east-1\n'
                        )
                        result = runner.invoke(
                            cli=cli,
                            args=[
                                'cloud', 'deploy',
                                '-d',
                                'invalid_dir/',
                                '-m', 's3://bucket/model/location/model.tar.gz',
                                '-n', '2',
                                '-e', 'ml.c4.2xlarge'
                            ]
                        )
                        assert not instance.deploy.called
                        assert result.exit_code == -1
| 41.662234 | 99 | 0.418449 | 15,444 | 0.985892 | 0 | 0 | 0 | 0 | 0 | 0 | 2,969 | 0.189531 |
a64dc18b9557bde696478c8692e131bbdb9c3b18 | 2,300 | py | Python | Matplotlib.py | claw0ed/DataSci | 0d058e5756f5702aabbcfacee958182ae061dd94 | [
"BSD-2-Clause"
] | null | null | null | Matplotlib.py | claw0ed/DataSci | 0d058e5756f5702aabbcfacee958182ae061dd94 | [
"BSD-2-Clause"
] | null | null | null | Matplotlib.py | claw0ed/DataSci | 0d058e5756f5702aabbcfacee958182ae061dd94 | [
"BSD-2-Clause"
] | null | null | null | # Matplotlib
# 파이썬 데이터과학 관련 시각화 페키지
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#%matplotlib inline # 주피터 노트북에서 show() 호출없이도
# 그래프를 그릴수 있게 해 줌
# data = np.arange(10)
# plt.plot(data)
# plt.show()
# 산점도 - 100의 표준정규분포 난수 생성
list = []
for i in range(100): # 0 ~ 99
x = np.random.normal(0,1) # 표준정규분포 난수
y = x + 0.1 + 0.2 + np.random.normal(0,1)
list.append([x, y])
print(list)
x_data = [ v[0] for v in list ] # v= [x, ]
y_data = [ v[1] for v in list ] # v= [, y]
plt.plot(x_data, y_data, 'ro')
plt.show()
# 성적데이터 읽어오기
df = pd.read_excel('c:/Java/sungjuk.xlsx')
#총점, 평균 계산후 df 에 추가
subj = ['국어', '영어', '수학', '과학']
df['총점'] = df[subj].sum(axis=1)
df['평균'] = df['총점'] / len(subj)
df.sort_values(['평균'], ascending=[False]) # 평균으로 정렬
import matplotlib as mpl
mpl.rc('font', family='Malgun Gothic') # 그래프 한글 설정
sj = df.sort_values(['평균'], ascending=[False])
sj.index = sj['이름']
sj['평균'].plot(kind='bar', figsize=(8,4))
# 성적 비교 - 어느 반이 잘했나?
ban1 = df[df['반'] == 1]
ban2 = df[df['반'] == 2]
ban1_mean = ban1['총점'].sum() / (6 * 4)
ban2_mean = ban2['총점'].sum() / (6 * 4)
print(ban1_mean, ban2_mean) # 79.042 vs 77.125
# 두집단 간의 평균운 유의미하게 차이 나는것인가? (t검증)
# p-value 값이 0.005 이하일때 - 차이가 난다고 할 수 있음
import scipy.stats as stats
result = stats.ttest_ind(ban1['평균'], ban2['평균'])
print(result) # pvalue=0.755583336185639
# 그럼, 과목별 평균은 차이가 나는가? (t검증)
for sub in subj:
print(sub, stats.ttest_ind(ban1[sub], ban2[sub]))
# 국어 pvalue=0.031982494983816424
# 영어 pvalue=0.5518533781528807
# 수학 pvalue=0.1654958420079056
# 과학 pvalue=0.0014931977711732465
# 전체 성적데이터에 대한 그래프 출력
sj[subj].plot(kind='bar', figsize=(10,6))
# 과목별 점수 분포 - 박스수염 그래프 작성
df[subj].boxplot(return_type='axes')
# 일반, 이반 과목별 점수 분포
ban1[subj].boxplot(return_type='axes') # 일반
ban2[subj].boxplot(return_type='axes') # 이반
# 과목별 상관관계 - '수학:과학' 와 '국어:영어'
df.plot(kind='scatter', x='수학', y='과학')
print( stats.pearsonr( df['수학'], df['과학'] ) ) # 피어슨 상관계수
# 0.5632890597067751(상관계수), 0.05650580486155532(p검증값)
# 과목별 상관관계 - '수학:과학' 와 '국어:영어'
df.plot(kind='scatter', x='국어', y='영어')
print( stats.pearsonr( df['국어'], df['영어'] ) ) # 피어슨 상관계수
# 0.10566562777973997(상관계수), 0.7437959551857836(p검증값)
| 25.274725 | 57 | 0.604348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,681 | 0.57138 |
a64ea2878c9e329d7ac84eb7694cf09c5df4ae99 | 10,875 | py | Python | pmaf/sequence/_sequence/_nucleotide.py | mmtechslv/PhyloMAF | bab43dd4a4d2812951b1fdf4f1abb83edb79ea88 | [
"BSD-3-Clause"
] | 1 | 2021-07-02T06:24:17.000Z | 2021-07-02T06:24:17.000Z | pmaf/sequence/_sequence/_nucleotide.py | mmtechslv/PhyloMAF | bab43dd4a4d2812951b1fdf4f1abb83edb79ea88 | [
"BSD-3-Clause"
] | 1 | 2021-06-28T12:02:46.000Z | 2021-06-28T12:02:46.000Z | pmaf/sequence/_sequence/_nucleotide.py | mmtechslv/PhyloMAF | bab43dd4a4d2812951b1fdf4f1abb83edb79ea88 | [
"BSD-3-Clause"
] | null | null | null | import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.internal.io._seq import SequenceIO
from pmaf.sequence._shared import (
validate_seq_mode,
mode_as_str,
mode_as_skbio,
sniff_mode,
)
from pmaf.sequence._metakit import NucleotideMetabase
from skbio.sequence import GrammaredSequence, DNA, RNA, Protein
from io import StringIO, IOBase
import copy
from numpy import isscalar
from typing import Union, Optional, Sequence, Any
class Nucleotide(NucleotideMetabase):
    """Class that represent single nucleotide sequence."""

    def __init__(
        self,
        sequence: Union[GrammaredSequence, str],
        name: Optional[str] = None,
        metadata: Optional[dict] = None,
        mode: str = "DNA",
        **kwargs: Any
    ):
        """Constructor for the :class:`.Nucleotide`.

        Parameters
        ----------
        sequence
            Sequence data
        name
            Name of the sequence instance
        metadata
            Metadata of the sequence instance
        mode
            Sequence type/mode of the new instance # TODO: Validation currently passes "protein" fix it.
        kwargs
            Compatibility
        """
        if name is None or isscalar(name):
            tmp_name = name
        else:
            raise TypeError("`name` can be any scalar or None")
        if isinstance(metadata, dict):
            tmp_metadata = metadata
        elif metadata is None:
            tmp_metadata = {}
        else:
            raise TypeError("`metadata` can be dict or None")
        # Resolve the mode: an explicit `mode` wins; otherwise derive it from
        # a skbio sequence object, or leave it None to be sniffed below.
        if mode is not None:
            if validate_seq_mode(mode):
                tmp_mode = mode.lower()
            else:
                raise ValueError("`mode` is invalid.")
        else:
            if isinstance(sequence, GrammaredSequence):
                tmp_mode = mode_as_str(type(sequence))
            else:
                tmp_mode = None
        if isinstance(sequence, GrammaredSequence):
            tmp_sequence_str = str(sequence).upper()
            # Explicit `metadata` entries take precedence over skbio metadata.
            tmp_metadata = {**sequence.metadata, **tmp_metadata}
            if tmp_name is None:
                tmp_name = sequence.metadata.get("id", None)
        elif isinstance(sequence, str):
            tmp_sequence_str = sequence.upper()
        else:
            raise TypeError("`sequence` has invalid type.")
        if tmp_mode is None:
            tmp_skbio_type = sniff_mode(tmp_sequence_str)
        else:
            tmp_skbio_type = mode_as_skbio(tmp_mode)
        self.__sequence = tmp_skbio_type(tmp_sequence_str)
        self.__mode = mode_as_str(tmp_skbio_type)
        self.__skbio_mode = tmp_skbio_type
        if tmp_name is not None:
            self.__sequence.metadata["id"] = tmp_name
        self.__metadata = tmp_metadata
        self.__name = tmp_name
        # `buckled` is an internal kwarg used when rebuilding instances.
        self.__buckled = bool(kwargs.get("buckled", None))

    def __repr__(self):
        class_name = self.__class__.__name__
        name = self.__name if self.__name is not None else "N/A"
        length = len(self.__sequence)
        metadata_state = "Present" if len(self.__metadata) > 0 else "N/A"
        mode = self.__mode.upper() if self.__mode is not None else "N/A"
        repr_str = "<{}:[{}], Name:[{}], Mode:[{}], Metadata:[{}]>".format(
            class_name, length, name, mode, metadata_state
        )
        return repr_str

    def buckle_by_uid(self, uid: str) -> dict:
        """Buckle sequences based on unique identifier `uid`. Sequence are
        usually buckled prior to alignment in order to not loose sequence
        specific metadata during alignment process.

        Parameters
        ----------
        uid
            Unique identifier string.

        Returns
        -------
        Packed metadata of current nucleotide instance for backup.
        """
        if not self.__buckled:
            packed_metadata = {
                "master-metadata": self.__metadata,
                "__name": self.__name,
            }
            self.__name = uid
            self.__sequence.metadata["id"] = uid
            self.__buckled = True
            return packed_metadata
        else:
            raise RuntimeError("Nucleotide instance is already buckled.")

    def unbuckle_uid(self) -> str:
        """Retrieve unique identifier assigned during buckling.

        Returns
        -------
        If instance is buckled the return the `uid`. Otherwise raise error.
        """
        if self.__buckled:
            return self.__sequence.metadata["id"]
        else:
            raise RuntimeError("Nucleotide instance is not buckled.")

    def restore_buckle(self, buckled_pack: dict) -> None:
        """Restore the buckle using packed metadata `buckle_pack`

        Parameters
        ----------
        buckled_pack
            Packed metadata backed up during bucking process

        Returns
        -------
        None if success otherwise error.
        """
        # NOTE(review): __buckled is intentionally left True after a restore
        # in the original code — confirm whether it should be reset to False.
        if self.__buckled:
            if isinstance(buckled_pack, dict):
                if len(buckled_pack) > 0:
                    self.__name = buckled_pack["__name"]
                    self.__sequence.metadata["id"] = buckled_pack["__name"]
                    self.__metadata.update(buckled_pack["master-metadata"])
                else:
                    # Bug fix: these exceptions were constructed but never
                    # raised, so invalid packs were silently ignored.
                    raise ValueError("`buckled_pack` is empty.")
            else:
                raise TypeError("`buckled_pack` has invalid type.")
        else:
            raise RuntimeError("Nucleotide instance is not buckled.")

    def get_string_as(self, format: str = "fasta", **kwargs: Any) -> str:
        """Get string of the sequence.

        Parameters
        ----------
        format
            Format of the string to retrieve
        kwargs
            Compatibility

        Returns
        -------
        Formatted sequence data as string
        """
        with StringIO() as tmp_buffer_io:
            self.__write_by_handle(tmp_buffer_io, format=format, **kwargs)
            return tmp_buffer_io.getvalue()

    def write(
        self, file: Union[str, IOBase], format: str = "fasta", **kwargs: Any
    ) -> None:
        """Write sequence data to the file.

        Parameters
        ----------
        file
            File path or IO stream to write into
        format
            Format of the output file
        kwargs
            Compatibility
        """
        self.__write_by_handle(file, format=format, **kwargs)

    def __write_by_handle(self, file, format, mode="w", **kwargs):
        """Write into the IO handler (or a path, which is opened here)."""
        self.__sequence.metadata = self.__metadata
        self.__sequence.metadata["id"] = self.__name
        opened_here = False  # close the handle only if this method opened it
        if isinstance(file, IOBase):
            if file.writable():
                if mode[0] == "a":
                    file.seek(0, 2)
                elif mode[0] == "w":
                    file.seek(0, 0)
                else:
                    raise ValueError("`mode` has invalid value.")
            else:
                raise ValueError("`file` must be writable.")
            tmp_file = file
        elif isinstance(file, str):
            tmp_file = open(file, mode)
            opened_here = True
        else:
            raise ValueError("`file` is invalid.")
        try:
            # Buffer through StringIO to compensate a skbio bug: the skbio
            # writer does not recognize the mode kwarg properly.
            with StringIO() as tmp_io:
                self.__sequence.write(tmp_io, format=format, **kwargs)
                tmp_io.seek(0, 0)
                tmp_file.write(tmp_io.read())
        finally:
            if opened_here:
                tmp_file.close()  # bug fix: path-opened handles were leaked
            self.__sequence.metadata = {"id": self.__name}

    def complement(self):
        """Return the sequence complement as new instance."""
        seq_complement = str(self.__sequence.complement())
        return type(self)(
            seq_complement, name=self.__name, metadata=self.__metadata, mode=self.__mode
        )

    def copy(self):
        """Copy of the current instance."""
        return copy.deepcopy(self)

    @classmethod
    def read(
        cls,
        file: Any,
        name: Optional[str] = None,
        metadata: Optional[dict] = None,
        mode: str = "DNA",
        **kwargs: Any
    ) -> "Nucleotide":
        """Factory class that reads the sequence data.

        Parameters
        ----------
        file
            Unspecified data for sequence.
            Can be file path, IO stream, string, etc.
        name
            Name of the sequence instance
        metadata
            Metadata for the sequence instance
        mode
            Sequence type/mode can be 'DNA' or 'RNA'
        kwargs
            Compatibility

        Returns
        -------
        Return new instance of :class:`.Nucleotide`
        """
        # (fixed: `**kwargs: any` annotated the builtin function `any`
        # instead of `typing.Any`)
        if isinstance(name, (str, int, type(None))):
            tmp_name = name
        else:
            raise TypeError("`name` can be str, int or None")
        if isinstance(metadata, dict):
            tmp_metadata = metadata
        elif metadata is None:
            tmp_metadata = {}
        else:
            raise TypeError("`metadata` can be dict or None")
        seq_gen = SequenceIO(file, upper=True).pull_parser(
            parser="simple", id=True, description=True, sequence=True
        )
        tmp_sequence_str = ""
        for sid, desc, seq_str in seq_gen:
            if len(tmp_sequence_str) == 0:
                tmp_sequence_str = seq_str
                if "description" not in tmp_metadata.keys():
                    tmp_metadata.update({"description": desc})
                if tmp_name is None:
                    tmp_name = sid
            else:
                raise ValueError(
                    "`sequence` must contain only one sequence. For _multiple sequence reads use MultiSequence."
                )
        return cls(
            tmp_sequence_str, name=tmp_name, metadata=tmp_metadata, mode=mode, **kwargs
        )

    @property
    def skbio(self) -> GrammaredSequence:
        """The :mod:`skbio` representation of the sequence as
        :class:`skbio.sequence.GrammaredSequence`"""
        return self.__sequence

    @property
    def text(self) -> str:
        """Sequence as string."""
        return str(self.__sequence)

    @property
    def metadata(self) -> dict:
        """Sequence instance metadata."""
        return self.__metadata

    @property
    def mode(self) -> str:
        """Sequence instance mode/type."""
        return self.__mode

    @property
    def skbio_mode(self) -> Union[DNA, RNA, Protein]:
        """The :mod:`skbio` mode of the sequence."""
        return self.__skbio_mode

    @property
    def length(self) -> int:
        """Length of the sequence."""
        return len(self.__sequence)

    @property
    def name(self) -> str:
        """Name of the sequence instance."""
        return self.__name

    @property
    def is_buckled(self) -> bool:
        """Is sequence instance is buckled or not."""
        return self.__buckled
| 32.954545 | 127 | 0.564046 | 10,402 | 0.956506 | 0 | 0 | 2,922 | 0.26869 | 0 | 0 | 3,679 | 0.338299 |
a64f51f75f428531eb9d4006ba45d2b3d4ab67ec | 554 | py | Python | src/main.py | faheem77/FASTAPI-on-Scrapped-Data | aeabe5dcb0106ed0c5b4287a9f812461e878606e | [
"MIT"
] | 4 | 2021-05-02T15:46:15.000Z | 2021-07-20T20:17:03.000Z | src/main.py | faheem77/FASTAPI-on-Scrapped-Data | aeabe5dcb0106ed0c5b4287a9f812461e878606e | [
"MIT"
] | null | null | null | src/main.py | faheem77/FASTAPI-on-Scrapped-Data | aeabe5dcb0106ed0c5b4287a9f812461e878606e | [
"MIT"
] | 5 | 2021-04-23T00:23:25.000Z | 2022-02-27T19:05:35.000Z | from fastapi import FastAPI, Response
import events_service as _service

app = FastAPI()


@app.get("/")
async def root():
    """Health-check / landing route."""
    return {"message": "hello world"}


@app.get("/events")
async def events():
    """Return every event known to the service layer."""
    return _service.get_all_events()


# Bug fix: this static path MUST be registered before the dynamic
# '/events/{month}' route below. FastAPI matches path operations in
# declaration order, so with the previous ordering a request to
# /events/today was captured by events_month(month='today') and this
# handler was unreachable.
@app.get("/events/today")
async def today():
    """Return today's events."""
    return _service.todays_events()


@app.get("/events/{month}")
async def events_month(month: str):
    """Return all events for the given month."""
    return _service.month_events(month)


@app.get("/events/{month}/{day}")
async def events_of_day(month: str, day: int):
    """Return the events for a specific month/day."""
    return _service.day_events(month, day)
a64f8541cde5ab3f951254ac3406eb262db0a22b | 4,117 | py | Python | rollservice/tests/test_dice_seq.py | stallmanifold/pnpdr | 49c84845e0a702e87f954135db070727cc77c2cc | [
"Apache-2.0",
"MIT"
] | null | null | null | rollservice/tests/test_dice_seq.py | stallmanifold/pnpdr | 49c84845e0a702e87f954135db070727cc77c2cc | [
"Apache-2.0",
"MIT"
] | null | null | null | rollservice/tests/test_dice_seq.py | stallmanifold/pnpdr | 49c84845e0a702e87f954135db070727cc77c2cc | [
"Apache-2.0",
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from rollservice.models import DiceSequence
import rest_framework.test as rf_test
import rest_framework.status as status
import rest_framework.reverse as reverse
import hypothesis.extra.django
import hypothesis.strategies as strategies
import unittest
class DiceSeqStrategies:
    """Hypothesis strategies for generating dice-sequence fixtures and request URLs."""

    # Non-empty list of standard RPG die sizes.
    dice_rolls = strategies.lists(
        elements=strategies.sampled_from([4, 6, 8, 10, 12, 20, 100]),
        min_size=1
    )

    # Fixed owner account used by the test database setup.
    user = strategies.just(dict(
        username='dungeon_master',
        email='dungeon_master@testserver.local',
        password='password123'
    ))

    @strategies.composite
    def seq_name(draw):
        """Generate a human-readable sequence name like 'Roll 7'."""
        seq_number = draw(strategies.integers(min_value=1))
        return f'Roll {seq_number}'

    @strategies.composite
    def dice_sequence(draw, seq_name=seq_name(), dice_rolls=dice_rolls):
        """Generate a dict with a random name and a random list of die sizes.

        The strategy-valued defaults are bound once at class-creation time,
        which is the intended hypothesis idiom here.
        """
        seq_name = draw(seq_name)
        dice_sequence = draw(dice_rolls)
        return dict(
            seq_name=seq_name,
            dice_sequence=dice_sequence
        )

    dice_sequence_list = strategies.lists(elements=dice_sequence(), min_size=1)

    @strategies.composite
    def existing_uuid(draw, queryset):
        """Pick the uuid of a random existing row from `queryset` (len evaluated at draw time)."""
        max_value = len(queryset) - 1
        index = draw(strategies.integers(min_value=0, max_value=max_value))
        return queryset[index].uuid

    non_existing_uuid = strategies.uuids()  # random uuid4s, overwhelmingly absent from the DB
    invalid_uuid = strategies.text(max_size=100)  # arbitrary text that is not a valid UUID

    @strategies.composite
    def existing_uuid_url(draw, queryset):
        """Build a detail URL for a uuid that exists in `queryset`."""
        max_value = len(queryset) - 1
        index = draw(strategies.integers(min_value=0, max_value=max_value))
        uuid = queryset[index].uuid
        url = reverse.reverse('dice-seq-by-uuid', args=[uuid])
        return url

    @strategies.composite
    def non_existing_uuid_url(draw, queryset, non_existing_uuid=non_existing_uuid):
        """Build a detail URL for a syntactically valid but absent uuid."""
        uuid = draw(non_existing_uuid)
        url = reverse.reverse('dice-seq-by-uuid', args=[uuid])
        return url

    @strategies.composite
    def invalid_uuid_url(draw, invalid_uuid=invalid_uuid):
        """Build a detail URL manually, since reverse() would reject a malformed uuid."""
        uuid = draw(invalid_uuid)
        url_root = reverse.reverse('dice-seq')
        url = url_root + '/by_uuid/' + uuid + '/'
        return url
class DiceSequenceByUUIDTests(hypothesis.extra.django.TestCase):
    """Property-based API tests for the dice-seq by-uuid endpoint."""

    @classmethod
    def setUpTestData(cls):
        # NOTE(review): .example() outside an interactive session is
        # discouraged by hypothesis — confirm this is intentional for fixtures.
        sequences = DiceSeqStrategies.dice_sequence_list.example()
        new_user = DiceSeqStrategies.user.example()
        owner = User.objects.create(**new_user)
        for sequence in sequences:
            dice_sequence = DiceSequence.objects.create(seq_name=sequence['seq_name'], owner=owner)
            dice_sequence.sequence.set(sequence['dice_sequence'])

    # Lazy queryset: it is only evaluated when a strategy draws from it,
    # i.e. after setUpTestData has populated the test database.
    queryset = DiceSequence.objects.all()
    client_class = rf_test.APIClient

    @hypothesis.given(DiceSeqStrategies.existing_uuid_url(queryset=queryset))
    def test_dice_seq_by_uuid_GET_with_existing_uuid_should_return_OK(self, url):
        """An existing uuid resolves to 200 OK."""
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    @hypothesis.given(DiceSeqStrategies.non_existing_uuid_url(queryset=queryset))
    def test_dice_seq_by_uuid_GET_with_non_existing_uuid_should_return_NOT_FOUND(self, url):
        """A well-formed but unknown uuid resolves to 404."""
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    @hypothesis.given(DiceSeqStrategies.invalid_uuid_url())
    def test_dice_seq_by_uuid_GET_with_invalid_uuid_should_return_BAD_REQUEST(self, url):
        """A malformed uuid yields 404 or 400 depending on URL resolution."""
        response = self.client.get(url)
        self.assertIn(response.status_code, [status.HTTP_404_NOT_FOUND, status.HTTP_400_BAD_REQUEST])

    @hypothesis.given(strategies.one_of([
        DiceSeqStrategies.existing_uuid_url(queryset=queryset),
        DiceSeqStrategies.non_existing_uuid_url(queryset=queryset),
        DiceSeqStrategies.invalid_uuid_url(),
    ]))
    def test_dice_seq_by_uuid_GET_idempotent(self, url):
        """GET is idempotent: two identical requests yield the same status."""
        response1 = self.client.get(url)
        response2 = self.client.get(url)
        self.assertEqual(response1.status_code, response2.status_code)
| 32.674603 | 101 | 0.704396 | 3,805 | 0.924217 | 0 | 0 | 3,082 | 0.748603 | 0 | 0 | 167 | 0.040564 |
a64f99499c110d1b06a6b4695b5246480f98a472 | 1,002 | py | Python | tests/unit/test_metrics.py | chryssa-zrv/UA_COMET | 527e7c86bd0a0d8ff90efda58e820108a5666b92 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_metrics.py | chryssa-zrv/UA_COMET | 527e7c86bd0a0d8ff90efda58e820108a5666b92 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_metrics.py | chryssa-zrv/UA_COMET | 527e7c86bd0a0d8ff90efda58e820108a5666b92 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
import numpy as np
import torch
from comet.metrics import RegressionReport, WMTKendall
class TestMetrics(unittest.TestCase):
    """Unit tests for the comet.metrics correlation/ranking metrics."""

    def test_regression_report(self):
        """RegressionReport.compute returns pearson/kendall/spearman tensors.

        Values are compared after rounding to 4 decimals to tolerate
        float32 noise.
        """
        report = RegressionReport()
        a = np.array([0, 0, 0, 1, 1, 1, 1])
        b = np.arange(7)
        expected = {
            "pearson": torch.tensor(0.8660254, dtype=torch.float32),
            "kendall": torch.tensor(0.7559289, dtype=torch.float32),
            "spearman": torch.tensor(0.866025, dtype=torch.float32),
        }
        result = report.compute(a, b)
        self.assertDictEqual(
            {k: round(v.item(), 4) for k, v in result.items()},
            {k: round(v.item(), 4) for k, v in expected.items()},
        )

    def test_wmt_kendall(self):
        """WMT Kendall tau on fully anti-correlated scores: (1-2)/(1+2)."""
        metric = WMTKendall()
        pos = torch.tensor([0, 0.5, 1])
        neg = torch.tensor([1, 0.5, 0])
        expected = (1 - 2) / (1 + 2)
        self.assertEqual(metric.compute(pos, neg), expected)
| 28.628571 | 68 | 0.572854 | 870 | 0.868263 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.050898 |
a6502bc8d3efa4a62ed9b5abd448b7a3fdc28b1d | 12,184 | py | Python | models/edhoc/draftedhoc-20200301/oracle.py | hoheinzollern/EDHOC-Verification | b62bb5192021b9cee52845943ba0c1999cb84119 | [
"MIT"
] | null | null | null | models/edhoc/draftedhoc-20200301/oracle.py | hoheinzollern/EDHOC-Verification | b62bb5192021b9cee52845943ba0c1999cb84119 | [
"MIT"
] | null | null | null | models/edhoc/draftedhoc-20200301/oracle.py | hoheinzollern/EDHOC-Verification | b62bb5192021b9cee52845943ba0c1999cb84119 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import sys, re
from functools import reduce
# Flip to True for verbose goal-ranking output while debugging the oracle.
DEBUG = False
#DEBUG = True

# Put prios between 0 and 100. Above 100 is for default strategy
MAXNPRIO = 200  # highest priority level; 0 is lowest prio
FALLBACKPRIO = MAXNPRIO  # presumably the level for goals matching no rule — confirm in genPrios
prios = [(i, []) for i in range(MAXNPRIO + 1)]  # one (level, [goal numbers]) bucket per level
def outputPrios(goalLines, lemma):
    """Print every prioritized goal number, one per line, in ascending bucket-index order.

    `goalLines` and `lemma` are accepted for interface parity with the other
    output helpers but are not used here.
    """
    lines = []
    for _level, bucket in prios:
        for goal_no in bucket:
            lines.append(str(goal_no) + "\n")
    print("".join(lines))
def dumpPrios(goalLines, lemma):
    """Debug dump: print each assigned goal number together with its priority level."""
    print("Prios:")
    for level, bucket in prios:
        for goal_no in bucket:
            print(f" > level:{level}, goalNo:{goal_no}")
def prioritize(goalNumber, prio, goalLine):
    """Record `goalNumber` in the bucket for `prio` (most recently added first)."""
    bucket = prios[prio][1]
    bucket.insert(0, goalNumber)
    if DEBUG:
        # Collapse whitespace so the goal fits on one debug line.
        collapsed = re.sub(r"\s+", " ", goalLine)
        print(f"goalNo:{goalNumber} prio:{prio} goal:{collapsed}")
def genPrios(goalLines, lemma):
    """Rank Tamarin proof goals according to the lemma family being proved.

    Each goal line is matched against a lemma-specific cascade of regular
    expressions and filed into a priority bucket via prioritize() (higher
    bucket = solved earlier). An unrecognized lemma name terminates the
    oracle so Tamarin falls back to its default strategy.

    NOTE(review): the regex literals use backslash escapes inside non-raw
    strings; Python >= 3.12 warns about these. Converting them to r"..."
    raw strings would be behavior-preserving.
    """
    # Prioritize splitEqs over new instances
    # splitEqs = False
    # splitEqsLine = -1
    # for i in range(len(goalLines)):
    #     if re.match(".*splitEqs.*", goalLines[i]):
    #         splitEqs = True
    #         splitEqsLine = i
    for line in goalLines:
        # The goal number is everything before the first ':'.
        goal = line.split(':')[0]
        if "sanity" in lemma:
            if DEBUG:
                print("MATCHING Sanity LEMMA: {}".format(lemma))
            if re.match(".*SKRev.*", line) or \
                    re.match(".*Completed.*", line):
                prioritize(goal, 90, line)
            elif re.match(".*StR.*", line) or \
                    re.match(".*StI.*", line):
                prioritize(goal, 80, line)
            elif re.match(".*KU\( 'g'\^~xx \).*", line) or \
                    re.match(".*KU\( 'g'\^~yy \).*", line) or \
                    re.match(".*KU\( ~xx.*", line) or \
                    re.match(".*KU\( ~yy.*", line) or \
                    re.match(".*~~>.*", line) or \
                    re.match(".*=.*=.*", line):
                prioritize(goal, 70, line)
            elif re.match(".*LTK_.*", line):
                prioritize(goal, 67, line)
            elif re.match(".*aead.*", line):
                prioritize(goal, 65, line)
            elif re.match(".*KU\( sign.*", line) or \
                    re.match(".*KU\( extr.*", line) or \
                    re.match(".*KU\( expa.*", line):
                prioritize(goal, 60, line)
            elif re.match(".*KU\( h\(.*", line):
                # BUGFIX: this identical elif appeared twice in a row in the
                # original; the unreachable duplicate has been removed.
                prioritize(goal, 55, line)
            else:
                prioritize(goal, 50, line)
        elif "authImplicit" in lemma:  # "authGIYImplicitAuthGuarantee" in lemma: # Special for imp agree
            if DEBUG:
                print("MATCHING Auth LEMMA: {}".format(lemma))
            if re.match(".*: !KU\( ~xx \).*", line) or \
                    re.match(".*: !KU\( ~yy \).*", line) or \
                    re.match(".*: !KU\( ~xx\.. \).*", line) or \
                    re.match(".*: !KU\( ~yy\.. \).*", line) or \
                    re.match(".*~~>.*", line) or \
                    re.match(".*: !KU\( 'g'\^~xx \).*", line) or \
                    re.match(".*: !KU\( 'g'\^~xx\.. \).*", line) or \
                    re.match(".*: !KU\( 'g'\^~yy \).*", line) or \
                    re.match(".*: !KU\( 'g'\^~yy\.. \).*", line) or \
                    re.match(".*: !KU\( ~ltk \).*", line) or \
                    re.match(".*: !KU\( ~ltk\.. \).*", line) or \
                    re.match(".*: !KU\( pk\(~ltk\) \).*", line) or \
                    re.match(".*: !KU\( 'g'\^~ltk \).*", line) or \
                    re.match(".*: !KU\( 'g'\^~ltk\.. \).*", line) or \
                    re.match(".*: !LTK_SIG\(.*", line) or \
                    re.match(".*: !LTK_STAT\(.*", line) or \
                    re.match(".*: !PK_SIG\(.*", line) or \
                    re.match(".*: !PK_STAT\(.*", line) or \
                    re.match(".*: StI._.*", line) or \
                    re.match(".*: StR._.*", line) or \
                    re.match(".*ExpRunning.*", line):
                prioritize(goal, 97, line)
            elif re.match(".*KU\( 'g'\^\(~yy.*\*~ltk.*", line) or \
                    re.match(".*KU\( 'g'\^\(~xx.*\*~ltk.*", line):
                prioritize(goal, 93, line)
            elif re.match(".*KU\( 'g'\^\(~xx.*\*~yy.*", line) or \
                    re.match(".*KU\( 'g'\^\(~yy.*\*~xx.*", line):
                prioritize(goal, 90, line)
            elif re.match(".*KU\( extr.*", line) or \
                    re.match(".*KU\( expa.*", line):
                prioritize(goal, 80, line)
            elif re.match(".*LTKRev.*", line) or \
                    re.match(".*sign.*", line) or \
                    re.match(".*aead.*", line):
                prioritize(goal, 70, line)
            elif re.match(".*KU\( h\(.*", line):
                prioritize(goal, 60, line)
            elif re.match(".*KU\( \(.V⊕.*", line):
                prioritize(goal, 40, line)
            else:
                prioritize(goal, 50, line)
        elif "auth" in lemma:
            if DEBUG:
                print("MATCHING Auth LEMMA: {}".format(lemma))
            if re.match(".*KU\( ~ltk.*", line) or \
                    re.match(".*KU\( ~xx.*", line) or \
                    re.match(".*KU\( ~yy.*", line):
                prioritize(goal, 98, line)
            elif re.match(".*: !KU\( ~xx \).*", line) or \
                    re.match(".*: !KU\( ~yy \).*", line) or \
                    re.match(".*: !KU\( ~xx\.. \).*", line) or \
                    re.match(".*: !KU\( ~yy\.. \).*", line) or \
                    re.match(".*~~>.*", line) or \
                    re.match(".*: !KU\( 'g'\^~xx \).*", line) or \
                    re.match(".*: !KU\( 'g'\^~xx\.. \).*", line) or \
                    re.match(".*: !KU\( 'g'\^~yy \).*", line) or \
                    re.match(".*: !KU\( 'g'\^~yy\.. \).*", line) or \
                    re.match(".*: !KU\( ~ltk \).*", line) or \
                    re.match(".*: !KU\( ~ltk\.. \).*", line) or \
                    re.match(".*: !KU\( pk\(~ltk\) \).*", line) or \
                    re.match(".*: !KU\( 'g'\^~ltk \).*", line) or \
                    re.match(".*: !KU\( 'g'\^~ltk\.. \).*", line) or \
                    re.match(".*: !LTK_SIG\(.*", line) or \
                    re.match(".*: !LTK_STAT\(.*", line) or \
                    re.match(".*: !PK_SIG\(.*", line) or \
                    re.match(".*: !PK_STAT\(.*", line) or \
                    re.match(".*: StI._.*", line) or \
                    re.match(".*: StR._.*", line) or \
                    re.match(".*ExpRunning.*", line):
                prioritize(goal, 90, line)
            elif \
                    re.match(".*aead.*", line) or \
                    re.match(".*KU\( expa.*", line) or \
                    re.match(".*KU\( extr.*", line):
                prioritize(goal, 87, line)
            elif re.match(".*KU\( 'g'\^~ltk.*\).*", line) or \
                    re.match(".*KU\( 'g'\^\(~ltk.*\).*", line) or \
                    re.match(".*Helper.*", line) or \
                    re.match(".*~~>.*", line) or \
                    re.match(".*=.*=.*", line):
                prioritize(goal, 85, line)
            elif re.match(".*KU\( 'g'\^\(~yy.*\*~ltk.*", line) or \
                    re.match(".*KU\( 'g'\^\(~xx.*\*~ltk.*", line):
                prioritize(goal, 80, line)
            elif re.match(".*KU\( 'g'\^\(~xx.*\*~yy.*", line) or \
                    re.match(".*KU\( 'g'\^\(~yy.*\*~xx.*", line):
                prioritize(goal, 75, line)
            elif re.match(".*LTKRev.*", line) or \
                    re.match(".*sign.*", line) or \
                    re.match(".*splitEqs.*", line) or \
                    re.match(".*StI.*", line) or \
                    re.match(".*StR.*", line):
                prioritize(goal, 70, line)
            elif re.match(".*KU\( h\(.*", line):
                prioritize(goal, 60, line)
            elif re.match(".*KU\( \(.V⊕.*", line):
                prioritize(goal, 40, line)
            else:
                prioritize(goal, 50, line)
        elif "AEAD" in lemma:
            if DEBUG:
                print("MATCHING AEAD LEMMA: {}".format(lemma))
            if re.match(".*: !KU\( ~xx \).*", line) or \
                    re.match(".*: !KU\( ~yy \).*", line) or \
                    re.match(".*: !KU\( ~xx\.. \).*", line) or \
                    re.match(".*: !KU\( ~yy\.. \).*", line) or \
                    re.match(".*: !KU\( ~ltk \).*", line) or \
                    re.match(".*: !KU\( ~ltk\.. \).*", line) or \
                    re.match(".*: !KU\( ~AD_3.*", line):
                prioritize(goal, 90, line)
            elif \
                    re.match(".*KU\( 'g'\^\(~xx\*~yy\).*", line) or \
                    re.match(".*KU\( 'g'\^\(~ltk\*~yy\).*", line) or \
                    re.match(".*KU\( 'g'\^\(~ltk\*~xx\).*", line):
                prioritize(goal, 80, line)
            elif \
                    re.match(".*: !KU\( *aead\(.*", line) or \
                    re.match(".*: !KU\( *expa.*", line) or \
                    re.match(".*: !KU\( *extr.*", line):
                # BUGFIX: in the original the 'extr' pattern sat *inside* the
                # branch body as `re.match(...) or prioritize(...)`, so the
                # short-circuit skipped prioritization exactly when 'extr'
                # matched. The pattern now belongs to the condition and
                # prioritize() always runs for this branch.
                prioritize(goal, 70, line)
            elif \
                    re.match(".*last.*", line) or \
                    re.match(".*: !KU\( pk\(~ltk\) \).*", line) or \
                    re.match(".*: !KU\( 'g'\^~ltk \).*", line) or \
                    re.match(".*: !KU\( 'g'\^~ltk\.. \).*", line) or \
                    re.match(".*: !LTK_SIG\(.*", line) or \
                    re.match(".*: !LTK_STAT\(.*", line) or \
                    re.match(".*: !PK_SIG\(.*", line) or \
                    re.match(".*: !PK_STAT\(.*", line) or \
                    re.match(".*: St.*", line):
                prioritize(goal, 60, line)
            else:
                prioritize(goal, 50, line)
        elif "secrecy" in lemma:
            if DEBUG:
                print("MATCHING Secrecy LEMMA: {}".format(lemma))
            if re.match(".*KU\( ~ltk.*", line) or \
                    re.match(".*KU\( 'g'\^\(~ltk\*.*\).*", line):
                prioritize(goal, 97, line)
            elif re.match(".*KU\( ~xx.*", line) or \
                    re.match(".*KU\( ~yy.*", line) or \
                    re.match(".*Helper.*", line) or \
                    re.match(".*~~>.*", line) or \
                    re.match(".*=.*=.*", line):
                prioritize(goal, 95, line)
            elif re.match(".*KU\( 'g'\^\(~xx\*~yy\).*", line) or \
                    re.match(".*KU\( 'g'\^\(~yy\*~xx\).*", line):
                prioritize(goal, 90, line)
            elif \
                    re.match(".*KU\( expa.*", line) or \
                    re.match(".*KU\( extr.*", line):
                prioritize(goal, 80, line)
            elif re.match(".*LTKRev.*", line) or \
                    re.match(".*sign.*", line) or \
                    re.match(".*StI.*", line) or \
                    re.match(".*StR.*", line) or \
                    re.match(".*aead.*", line):
                prioritize(goal, 70, line)
            elif re.match(".*KU\( h\(.*", line):
                prioritize(goal, 60, line)
            elif re.match(".*KU\( \(.V⊕.*", line):
                prioritize(goal, 40, line)
            else:
                prioritize(goal, 50, line)
        else:
            if DEBUG:
                print("NO MATCH FOR LEMMA: {}".format(lemma))
            # Unknown lemma: bail out so Tamarin uses its default strategy.
            exit(0)
def echoOracle(goalLines, lemma):
    """Fallback ranking: file every goal into the lowest bucket, unchanged."""
    for entry in goalLines:
        goalNo = entry.split(':')[0]
        prioritize(goalNo, 0, entry)
def testMatch(pattern, tamarinString):
if re.match(pattern, tamarinString):
print("Matches!")
else:
print("Don't match!")
if __name__ == "__main__":
if sys.argv[1] == "testMatch":
if len(sys.argv) != 4:
print("usage: oracle.py testMatch pattern tamarinString")
sys.exit(1)
testMatch(sys.argv[2], sys.argv[3])
sys.exit(0)
goalLines = sys.stdin.readlines()
lemma = sys.argv[1]
genPrios(goalLines, lemma)
#echoOracle(goalLines, lemma)
# We want 0 to be lowest prio, so reverse all level-lists and the list itself
prios = [(p[0], p[1][::-1]) for p in prios][::-1]
outputPrios(goalLines, lemma)
#dumpPrios(goalLines, lemma)
| 43.514286 | 105 | 0.403234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,380 | 0.277276 |
a65188af292deab5b04d0b97830086fab5d0df1e | 1,370 | py | Python | examples/models/train_relgan.py | DANISHFAYAZNAJAR/nalp | 8a7d8b7cb13dfc755a72d0770bf81ba9bc6ddb35 | [
"Apache-2.0"
] | 18 | 2019-02-01T19:13:54.000Z | 2022-03-14T04:42:28.000Z | examples/models/train_relgan.py | DANISHFAYAZNAJAR/nalp | 8a7d8b7cb13dfc755a72d0770bf81ba9bc6ddb35 | [
"Apache-2.0"
] | 2 | 2022-01-11T14:43:00.000Z | 2022-01-19T16:00:20.000Z | examples/models/train_relgan.py | DANISHFAYAZNAJAR/nalp | 8a7d8b7cb13dfc755a72d0770bf81ba9bc6ddb35 | [
"Apache-2.0"
] | 7 | 2019-03-28T23:17:31.000Z | 2022-01-11T10:58:38.000Z | import tensorflow as tf
from nalp.corpus import TextCorpus
from nalp.datasets import LanguageModelingDataset
from nalp.encoders import IntegerEncoder
from nalp.models import RelGAN
# Creating a character TextCorpus from file
corpus = TextCorpus(from_file='data/text/chapter1_harry.txt', corpus_type='char')

# Creating an IntegerEncoder, learning encoding and encoding tokens
encoder = IntegerEncoder()
encoder.learn(corpus.vocab_index, corpus.index_vocab)
encoded_tokens = encoder.encode(corpus.tokens)

# Creating Language Modeling Dataset
dataset = LanguageModelingDataset(encoded_tokens, max_contiguous_pad_length=10, batch_size=64)

# Creating the RelGAN
relgan = RelGAN(encoder=encoder, vocab_size=corpus.vocab_size, max_length=10,
                embedding_size=256, n_slots=5, n_heads=5, head_size=25, n_blocks=1, n_layers=3,
                n_filters=(64, 128, 256), filters_size=(3, 5, 5), dropout_rate=0.25, tau=5)

# Compiling the RelGAN (separate optimizers for pre-training,
# discriminator and generator)
relgan.compile(pre_optimizer=tf.optimizers.Adam(learning_rate=0.01),
               d_optimizer=tf.optimizers.Adam(learning_rate=0.0001),
               g_optimizer=tf.optimizers.Adam(learning_rate=0.0001))

# Pre-fitting the RelGAN
relgan.pre_fit(dataset.batches, epochs=200)

# Fitting the RelGAN
relgan.fit(dataset.batches, epochs=50)

# Saving RelGAN weights
relgan.save_weights('trained/relgan', save_format='tf')
| 37.027027 | 95 | 0.772993 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 311 | 0.227007 |
a654337a734e79739f69d3d53dd0928fa3930eaa | 10,448 | py | Python | portReporter.py | XXXTheInternXXX/portReporter | 40bf983379a6f21c4c9c7143cab61a8b9a024665 | [
"MIT"
] | null | null | null | portReporter.py | XXXTheInternXXX/portReporter | 40bf983379a6f21c4c9c7143cab61a8b9a024665 | [
"MIT"
] | null | null | null | portReporter.py | XXXTheInternXXX/portReporter | 40bf983379a6f21c4c9c7143cab61a8b9a024665 | [
"MIT"
] | null | null | null | # Scan IP for open SQL port (Default Port)
# Run Whois on the IP found
# Scan all the ports and show which ones are left open
# If no open ports, then move on
import os
import sys
# Raw packet crafting needs root; bail out early otherwise.
amIRoot = os.getuid()
if not amIRoot == 0:
    print ("Run me as root plz UwU. I need to send mor packetz.")
    sys.exit(0)

import logging
# Silence scapy's own runtime chatter before importing it.
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
import random
from random import randint
import whois

#Arrays for logging
openPorts = []
filteredPorts = []
closedPorts = []
#Dictionary for WHOIS data
whoisDict = {}

# Log to both the console (basicConfig) and botLog.log with the same format.
logging.basicConfig(format='[%(asctime)s] - %(name)s - %(levelname)s - %(message)s]', datefmt='%m-%d-%Y %H:%M:%S')
logger = logging.getLogger('portReporter')
logger.setLevel(logging.INFO)
fh = logging.FileHandler('botLog.log')
fh.setLevel(logging.INFO)
formatter = logging.Formatter('[%(asctime)s] - %(name)s - %(levelname)s - %(message)s]', datefmt='%m-%d-%Y %H:%M:%S')
fh.setFormatter(formatter)
logger.addHandler(fh)

# NOTE(review): `time` is only in scope via scapy's star-import above —
# consider an explicit `import time`.
hostFileTime = time.strftime("%m-%d-%Y %H:%M:%S")
def scriptRestart():
    """Replace the current process with a fresh run of this same script."""
    logger.info("[*] Restarting script...")
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
def ipGenerator():
    """Return a random, publicly routable IPv4 address as a dotted string.

    Keeps drawing random addresses until one falls outside the reserved or
    private ranges (0/8, 10/8, 127/8, 169.254/16, 172.16/12, 192.168/16 and
    the 100.64/10 carrier-grade NAT block), logs it, and returns it.
    """
    # BUGFIX: the original recursed on a reserved address but never
    # *returned* the recursive call's value, so it could return None.
    # A loop also avoids unbounded recursion depth.
    while True:
        targetIp = ".".join([str(random.randint(0, 255)) for _ in range(4)])
        octets = [int(o) for o in targetIp.split(".")]
        first, second = octets[0], octets[1]
        if first in (0, 10, 127):
            continue  # "this" network, RFC1918 10/8, loopback
        if first == 172 and 16 <= second <= 31:
            # BUGFIX: the original tested `x[1] in str(range(16, 32))`,
            # i.e. substring membership in the literal text
            # "range(16, 32)" — not a numeric range check.
            continue  # RFC1918 172.16/12
        if first == 192 and second == 168:
            continue  # RFC1918 192.168/16
        if first == 169 and second == 254:
            continue  # link-local
        if first == 100 and 64 <= second <= 127:
            # BUGFIX: same str(range(...)) substring bug as above.
            continue  # RFC6598 carrier-grade NAT 100.64/10
        logger.info("[+] Generated the IP %s", targetIp)
        return targetIp
def scan4Ports():
    """SYN-scan nmap's top-1000 TCP ports against the module-level `target`.

    Classifies each port as OPEN / FILTERED / CLOSED from the SYN-scan
    responses, appending to the module-level result lists; restarts the
    whole script if nothing is open, otherwise hands off to the WHOIS step.
    """
    # Check for filtering, use the best source port, and then restart if it is filtered / closed, if it is open it will continue
    # Scan for common ports and check their status
    logger.info("[+] Scanning for common ports against the IP {0}".format(target))
    # Set X as 0 for working with arrays
    x = 0
    #Set a random ephemeral port
    src_port = RandShort()
    # Common ports (Top 1000) used by NMAP
    commonPorts=[1,3,4,6,7,9,13,17,19,20,21,22,23,24,25,26,30,32,33,37,42,43,49,53,70,79,80,81,82,83,84,85,88,89,90,99,100,106,109,110,111,113,119,125,135,139,143,144,146,161,163,179,199,211,212,222,254,255,256,259,264,280,301,306,311,340,366,389,406,407,416,417,425,427,443,444,445,458,464,465,481,497,500,512,513,514,515,524,541,543,544,545,548,554,555,563,587,593,616,617,625,631,636,646,648,666,667,668,683,687,691,700,705,711,714,720,722,726,749,765,777,783,787,800,801,808,843,873,880,888,898,900,901,902,903,911,912,981,987,990,992,993,995,999,1000,1001,1002,1007,1009,1010,1011,1021,1022,1023,1024,1025,1026,1027,1028,1029,1030,1031,1032,1033,1034,1035,1036,1037,1038,1039,1040,1041,1042,1043,1044,1045,1046,1047,1048,1049,1050,1051,1052,1053,1054,1055,1056,1057,1058,1059,1060,1061,1062,1063,1064,1065,1066,1067,1068,1069,1070,1071,1072,1073,1074,1075,1076,1077,1078,1079,1080,1081,1082,1083,1084,1085,1086,1087,1088,1089,1090,1091,1092,1093,1094,1095,1096,1097,1098,1099,1100,1102,1104,1105,1106,1107,1108,1110,1111,1112,1113,1114,1117,1119,1121,1122,1123,1124,1126,1130,1131,1132,1137,1138,1141,1145,1147,1148,1149,1151,1152,1154,1163,1164,1165,1166,1169,1174,1175,1183,1185,1186,1187,1192,1198,1199,1201,1213,1216,1217,1218,1233,1234,1236,1244,1247,1248,1259,1271,1272,1277,1287,1296,1300,1301,1309,1310,1311,1322,1328,1334,1352,1417,1433,1434,1443,1455,1461,1494,1500,1501,1503,1521,1524,1533,1556,1580,1583,1594,1600,1641,1658,1666,1687,1688,1700,1717,1718,1719,1720,1721,1723,1755,1761,1782,1783,1801,1805,1812,1839,1840,1862,1863,1864,1875,1900,1914,1935,1947,1971,1972,1974,1984,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2013,2020,2021,2022,2030,2033,2034,2035,2038,2040,2041,2042,2043,2045,2046,2047,2048,2049,2065,2068,2099,2100,2103,2105,2106,2107,2111,2119,2121,2126,2135,2144,2160,2161,2170,2179,2190,2191,2196,2200,2222,2251,2260,2288,2301,2323,2366,2381,2382,2383,2393,2394,2399,2401,2492,2500,2522,2525,2557,2601,2602,2604,2605,2607,2608,2638,2701,2702,2710,2717,2718,2725,2800,2809,2811,2869,2875,2909,2910,2920,2967,2968,2998,3000,3001,3003,3005,3006,3007,3011,3013,3017,3030,3031,3052,3071,3077,3128,3168,3211,3221,3260,3261,3268,3269,3283,3300,3301,3306,3322,3323,3324,3325,3333,3351,3367,3369,3370,3371,3372,3389,3390,3404,3476,3493,3517,3527,3546,3551,3580,3659,3689,3690,3703,3737,3766,3784,3800,3801,3809,3814,3826,3827,3828,3851,3869,3871,3878,3880,3889,3905,3914,3918,3920,3945,3971,3986,3995,3998,4000,4001,4002,4003,4004,4005,4006,4045,4111,4125,4126,4129,4224,4242,4279,4321,4343,4443,4444,4445,4446,4449,4550,4567,4662,4848,4899,4900,4998,5000,5001,5002,5003,5004,5009,5030,5033,5050,5051,5054,5060,5061,5080,5087,5100,5101,5102,5120,5190,5200,5214,5221,5222,5225,5226,5269,5280,5298,5357,5405,5414,5431,5432,5440,5500,5510,5544,5550,5555,5560,5566,5631,5633,5666,5678,5679,5718,5730,5800,5801,5802,5810,5811,5815,5822,5825,5850,5859,5862,5877,5900,5901,5902,5903,5904,5906,5907,5910,5911,5915,5922,5925,5950,5952,5959,5960,5961,5962,5963,5987,5988,5989,5998,5999,6000,6001,6002,6003,6004,6005,6006,6007,6009,6025,6059,6100,6101,6106,6112,6123,6129,6156,6346,6389,6502,6510,6543,6547,6565,6566,6567,6580,6646,6666,6667,6668,6669,6689,6692,6699,6779,6788,6789,6792,6839,6881,6901,6969,7000,7001,7002,7004,7007,7019,7025,7070,7100,7103,7106,7200,7201,7402,7435,7443,7496,7512,7625,7627,7676,7741,7777,7778,7800,7911,7920,7921,7937,7938,7999,8000,8001,8002,8007,8008,8009,8010,8011,8021,8022,8031,8042,8045,8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8093,8099,8100,8180,8181,8192,8193,8194,8200,8222,8254,8290,8291,8292,8300,8333,8383,8400,8402,8443,8500,8600,8649,8651,8652,8654,8701,8800,8873,8888,8899,8994,9000,9001,9002,9003,9009,9010,9011,9040,9050,9071,9080,9081,9090,9091,9099,9100,9101,9102,9103,9110,9111,9200,9207,9220,9290,9415,9418,9485,9500,9502,9503,9535,9575,9593,9594,9595,9618,9666,9876,9877,9878,9898,9900,9917,9929,9943,9944,9968,9998,9999,10000,10001,10002,10003,10004,10009,10010,10012,10024,10025,10082,10180,10215,10243,10566,10616,10617,10621,10626,10628,10629,10778,11110,11111,11967,12000,12174,12265,12345,13456,13722,13782,13783,14000,14238,14441,14442,15000,15002,15003,15004,15660,15742,16000,16001,16012,16016,16018,16080,16113,16992,16993,17877,17988,18040,18101,18988,19101,19283,19315,19350,19780,19801,19842,20000,20005,20031,20221,20222,20828,21571,22939,23502,24444,24800,25734,25735,26214,27000,27352,27353,27355,27356,27715,28201,30000,30718,30951,31038,31337,32768,32769,32770,32771,32772,32773,32774,32775,32776,32777,32778,32779,32780,32781,32782,32783,32784,32785,33354,33899,34571,34572,34573,35500,38292,40193,40911,41511,42510,44176,44442,44443,44501,45100,48080,49152,49153,49154,49155,49156,49157,49158,49159,49160,49161,49163,49165,49167,49175,49176,49400,49999,50000,50001,50002,50003,50006,50300,50389,50500,50636,50800,51103,51493,52673,52822,52848,52869,54045,54328,55055,55056,55555,55600,56737,56738,57294,57797,58080,60020,60443,61532,61900,62078,63331,64623,64680,65000,65129,65389]
    # So that we dont get riddled with messages from Scapy
    conf.verb = 0
    # Running a loop to check a port, report its status, and then add 1 to the loop to move on to the next port
    # A way to make this faster would be to adjust the timeout for the scans but it doesn't really matter
    while x < len(commonPorts):
        dst_port=commonPorts[x]
        try:
            # SYN probe; sr1 returns the first answer, or None on timeout.
            stealth_scan_resp = sr1(IP(dst=target)/TCP(sport=src_port,dport=dst_port,flags="S"),timeout=1.25)
            # NOTE(review): str(type(None)) is "<class 'NoneType'>" on
            # Python 3 ("<type ...>" is the Python 2 spelling), so this
            # FILTERED branch only matches under Python 2 — confirm the
            # intended runtime.
            if(str(type(stealth_scan_resp))=="<type 'NoneType'>"):
                logger.info("[*] Port {0} is showing as FILTERED for {1}".format(dst_port,target))
                filteredPorts.append(dst_port)
            elif(stealth_scan_resp.haslayer(TCP)):
                if(stealth_scan_resp.getlayer(TCP).flags == 0x12):
                    # SYN/ACK received: port is open; reply with RST.
                    send_rst = sr(IP(dst=target)/TCP(sport=src_port,dport=dst_port,flags="R"),timeout=1.25)
                    logger.info("[+] Port {0} is showing as OPEN for {1}".format(dst_port,target))
                    openPorts.append(dst_port)
                elif (stealth_scan_resp.getlayer(TCP).flags == 0x14):
                    # RST/ACK received: port is closed.
                    logger.info("[-] Port {0} is showing as CLOSED for {1}".format(dst_port,target))
                    closedPorts.append(dst_port)
            elif(stealth_scan_resp.haslayer(ICMP)):
                # ICMP destination-unreachable codes that imply filtering.
                if(int(stealth_scan_resp.getlayer(ICMP).type)==3 and int(stealth_scan_resp.getlayer(ICMP).code) in [1,2,3,9,10,13]):
                    logger.info("[*] Port {0} is showing as FILTERED for {1}".format(dst_port,target))
                    filteredPorts.append(dst_port)
            else:
                logger.info("[-] Port {0} is showing as CLOSED for {1}".format(dst_port,target))
                closedPorts.append(dst_port)
        except AttributeError:
            # e.g. sr1 timed out (None) and a layer accessor was called on it.
            logger.info("[-] Port {0} is showing as CLOSED for {1}".format(dst_port,target))
            closedPorts.append(dst_port)
            x += 1
            continue
        x += 1
    if len(openPorts) == 0:
        logger.info("[+] No OPEN ports found, restarting...")
        scriptRestart()
    else:
        runWhoisOnTarget()
def runWhoisOnTarget():
    """Run a WHOIS lookup for the module-level `target`, then parseDict().

    On KeyboardInterrupt the script stops; on any other WHOIS failure the
    whole script restarts with a fresh random target.
    """
    try:
        logger.info("[+] Starting WHOIS on target {0}".format(target))
        whoisCommand = whois.whois(target)
        # If whois.whois() returned without raising, the lookup succeeded.
        # The original set a whoisCheck flag that was always True at its
        # `if`, making the else branch unreachable; the dead code is gone.
        logger.info("[+] Scanned domain successfully")
        whoisDict["targetEmail"] = whoisCommand.emails
        parseDict()
    except KeyboardInterrupt:
        logger.info("""[*] STOPPED BY USER""")
        exit()
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # raised by nested calls are not silently swallowed.
        logger.info("[-] The IP {0} has failed to resolve ***EXPIRED***".format(target))
        scriptRestart()
def parseDict():
    """Append a scan report for `target` to results.txt, then log it."""
    targetEmail = "Email: " + str(whoisDict["targetEmail"])
    # `report` instead of `file` (which shadowed the builtin name).
    with open('results.txt', 'a+') as report:
        report.write('----Report for {0}----\n'.format(target))
        report.write("Open Ports: " + str(openPorts) + "\n")
        report.write("Filtered Ports: " + str(filteredPorts) + "\n")
        report.write("Closed Ports: " + str(closedPorts) + "\n")
        # Reuse the prepared line; the original built targetEmail and then
        # never used it, rebuilding the identical string here.
        report.write(targetEmail + "\n")
        report.write('----END OF REPORT----\n')
    logData()
def logData():
    """Emit the finished scan report to the logger."""
    logger.info("[*] ----Report for {0}----".format(target))
    for label, value in (
        ("Open Ports", openPorts),
        ("Filtered Ports", filteredPorts),
        ("Closed Ports", closedPorts),
        ("Email", whoisDict["targetEmail"]),
    ):
        logger.info("[*] " + label + ": " + str(value))
    logger.info("[*] ----END OF REPORT----")
# Entry point: pick a random public IP and scan it (the script re-execs
# itself via scriptRestart() until a host with open ports is found).
target = ipGenerator()
scan4Ports()
a6554e28fae19027e74ff048a0c85dcdc8783d73 | 689 | py | Python | setup.py | thomasms/filecompare | 393af84939689481da27460cccb52040e6171e01 | [
"MIT"
] | null | null | null | setup.py | thomasms/filecompare | 393af84939689481da27460cccb52040e6171e01 | [
"MIT"
] | null | null | null | setup.py | thomasms/filecompare | 393af84939689481da27460cccb52040e6171e01 | [
"MIT"
] | null | null | null | from setuptools import setup
# Packaging metadata for the `filecompare` distribution (text/JSON file
# comparison utilities); installs the docompare.py command-line script.
setup(name='filecompare',
      version='0.1',
      description='A package for comparing text and JSON files.',
      url='https://github.com/thomasms/filecompare',
      author='Thomas Stainer',
      author_email='stainer.tom+github@gmail.com',
      license='MIT',
      packages=[
          'filecompare',
          'filecompare.compare',
          'filecompare.tools',
          'filecompare.utils'
      ],
      install_requires=[],
      python_requires='>=3',
      scripts=['filecompare/tools/docompare.py'],
      setup_requires=['pytest-runner'],
      test_suite='tests.testsuite',
      tests_require=['pytest'],
      zip_safe=False)
| 28.708333 | 65 | 0.608128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 305 | 0.442671 |
a65a378d0980f12b970ed55c7de79ac039724e55 | 48 | py | Python | python/testData/keywordCompletion/finallyInExcept.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2018-12-29T09:53:39.000Z | 2018-12-29T09:53:42.000Z | python/testData/keywordCompletion/finallyInExcept.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/keywordCompletion/finallyInExcept.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | try:
a = 1
except:
a = 2
fina<caret> | 9.6 | 15 | 0.458333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a65aacd8f80b65ceab62275e2519a06afa40e428 | 2,453 | py | Python | tests/test_text/test_text.py | mateusz-obszanski/my-python-utils | d04801d14edbd95b5e6b5171ac8fd6acaf4d0554 | [
"MIT"
] | null | null | null | tests/test_text/test_text.py | mateusz-obszanski/my-python-utils | d04801d14edbd95b5e6b5171ac8fd6acaf4d0554 | [
"MIT"
] | null | null | null | tests/test_text/test_text.py | mateusz-obszanski/my-python-utils | d04801d14edbd95b5e6b5171ac8fd6acaf4d0554 | [
"MIT"
] | null | null | null | from text import longest_common_substring
from text._utils import suffix_array
import itertools
class HelperTestMixin:
    """Shared verification helper for suffix-array tests.

    author: Anonta (https://stackoverflow.com/users/5798361/anonta)
    source: https://stackoverflow.com/questions/51456472/python-fastest-algorithm-to-get-the-most-common-prefix-out-of-a-list-of-strings/51457611
    """

    def suffix_verify(self, text, step=16):
        """Check suffix-array invariants of suffix_array() on `text`.

        Verifies the suffix array is a permutation of all start positions,
        and that each consecutive suffix pair shares exactly the recorded
        LCP prefix and is strictly ordered right after it.
        """
        tx = text
        sa, _, lcp = suffix_array(text=tx, _step=step)
        assert set(sa) == set(range(len(tx)))
        for i0, i1, h in zip(sa[:-1], sa[1:], lcp[1:]):
            assert tx[i1 : i1 + h] == tx[i0 : i0 + h]  # shared LCP prefix
            assert tx[i1 + h : i1 + h + 1] > tx[i0 + h : i0 + h + 1]  # strict order after it
            assert max(i0, i1) <= len(tx) - h
        # The original carried a vacuous flag (`ok = True` ... `assert ok == True`)
        # that could never fail; it has been removed.
class TestSuffixArray(HelperTestMixin):
    """
    author: Anonta (https://stackoverflow.com/users/5798361/anonta)
    source: https://stackoverflow.com/questions/51456472/python-fastest-algorithm-to-get-the-most-common-prefix-out-of-a-list-of-strings/51457611
    """

    def test_16(self):
        # 'a' < 'ana' < 'anana' < 'banana' < 'na' < 'nana'
        expect = ([5, 3, 1, 0, 4, 2], [3, 2, 5, 1, 4, 0], [0, 1, 3, 0, 0, 2])
        assert suffix_array(text="banana", _step=16) == expect

    def test_1(self):
        # Same expected triple as test_16: _step must not change results.
        expect = ([5, 3, 1, 0, 4, 2], [3, 2, 5, 1, 4, 0], [0, 1, 3, 0, 0, 2])
        assert suffix_array(text="banana", _step=1) == expect

    def test_mini(self):
        # Degenerate inputs: empty string and runs of one character.
        assert suffix_array(text="", _step=1) == ([], [], [])
        assert suffix_array(text="a", _step=1) == ([0], [0], [0])
        assert suffix_array(text="aa", _step=1) == ([1, 0], [1, 0], [0, 1])
        assert suffix_array(text="aaa", _step=1) == ([2, 1, 0], [2, 1, 0], [0, 1, 2])

    def test_example(self):
        # Invariant check via HelperTestMixin.suffix_verify.
        self.suffix_verify("abracadabra")

    def test_cartesian(self):
        """Test all combinations of alphabet "ABC" up to length 6 characters"""
        for size in range(7):
            for cartesian in itertools.product(*(size * ["ABC"])):
                text = "".join(cartesian)
                self.suffix_verify(text, 1)

    def test_lcp(self):
        # Longest common substring with all of its occurrence positions.
        expect = {"ana": [1, 3]}
        assert longest_common_substring("banana") == expect
        expect = {" s": [3, 21], "no": [0, 13], "o ": [5, 20, 38]}
        assert longest_common_substring("not so Agamemnon, who spoke fiercely to ") == expect
a65b97afb97c000a1a2b770c26078287c8034b28 | 472 | py | Python | 201409/3.py | L-LYR/csp-sol | 6c0aec82d4704dc8b53886fe1f72e5088d6eab6d | [
"MIT"
] | null | null | null | 201409/3.py | L-LYR/csp-sol | 6c0aec82d4704dc8b53886fe1f72e5088d6eab6d | [
"MIT"
] | null | null | null | 201409/3.py | L-LYR/csp-sol | 6c0aec82d4704dc8b53886fe1f72e5088d6eab6d | [
"MIT"
] | null | null | null | # Time: 03/18/21
# Author: HammerLi
# Tags: [Simulation]
# Title: String matching
# Content:
# Given a pattern string and several lines of text, print every line in
# which the pattern occurs. The program must also support a
# case-sensitivity option: when the option is on, upper- and lower-case
# forms of a letter count as different characters; when it is off, they
# count as the same character.

tar = input()                # pattern to search for
strict = bool(int(input()))  # 1 = case-sensitive, 0 = case-insensitive
n = int(input())             # number of text lines to scan
for i in range(0, n):
    src = input()
    if strict:
        if src.find(tar) != -1:
            print(src)
    else:
        # Case-insensitive: lower-case both sides before searching.
        if src.lower().find(tar.lower()) != -1:
            print(src)
# Trivial in Python 3 — straightforward simulation is enough.
a66116aa53fbe883f4e06b84d6c1d21984e8b026 | 826 | py | Python | manage.py | Vanzct/xp | 75794c283f8680cac84edf0e184e8d1fdaed2b9c | [
"MIT"
] | null | null | null | manage.py | Vanzct/xp | 75794c283f8680cac84edf0e184e8d1fdaed2b9c | [
"MIT"
] | null | null | null | manage.py | Vanzct/xp | 75794c283f8680cac84edf0e184e8d1fdaed2b9c | [
"MIT"
] | null | null | null | # coding=utf-8
# NOTE: Python 2 script (print statement, flask.ext.* namespace imports).
__author__ = 'Van'
import os
import sys

from flask.ext.script import Manager, Shell
# from flask.ext.migrate import Migrate, MigrateCommand

# Make the project root importable regardless of the working directory.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from app import create_app

# Config mode comes from the environment, defaulting to 'default'.
mode = os.getenv('APP_CONFIG_MODE') or 'default'
if mode:
    mode = mode.lower()
print 'current config mode %s' % mode

app = create_app(mode)
manager = Manager(app)
# manager.add_command("shell", Shell(make_context=make_shell_context))
# manager.add_command('db', MigrateCommand)
@manager.command
def test():
    """Run the unit tests."""
    import unittest
    suite = unittest.TestLoader().discover('tests')
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
if __name__ == '__main__':
    # Development entry point: run the Flask dev server directly
    # (bypasses the Manager CLI, which stays commented out below).
    app.debug = True
    app.run(host='0.0.0.0', port=5001)
    # manager.run()
| 23.6 | 70 | 0.713075 | 0 | 0 | 0 | 0 | 183 | 0.22155 | 0 | 0 | 304 | 0.368039 |
a66166246703189297b327b36845062d1af73e37 | 29,651 | py | Python | btclib/tests/test_dh.py | giubby84/btclib | 0dd7e4e8ca43451a03b577fd7ec95715a1a21711 | [
"MIT"
] | null | null | null | btclib/tests/test_dh.py | giubby84/btclib | 0dd7e4e8ca43451a03b577fd7ec95715a1a21711 | [
"MIT"
] | null | null | null | btclib/tests/test_dh.py | giubby84/btclib | 0dd7e4e8ca43451a03b577fd7ec95715a1a21711 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (C) 2017-2020 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"Tests for `btclib.dh` module."
from hashlib import sha1, sha224, sha256, sha384, sha512
import pytest
from btclib import dsa
from btclib.curve import CURVES, mult
from btclib.dh import ansi_x9_63_kdf, diffie_hellman
from btclib.secpoint import bytes_from_point
def test_ecdh() -> None:
    """End-to-end ECDH: shared point, ANSI X9.63 KDF, diffie_hellman wrapper."""
    ec = CURVES["secp256k1"]
    hf = sha256

    alice_prv, alice_pub = dsa.gen_keys()
    bob_prv, bob_pub = dsa.gen_keys()

    # Each party multiplies its private key by the peer's public key;
    # both computations must land on the same curve point.
    secret_point = mult(alice_prv, bob_pub)
    assert secret_point == mult(bob_prv, alice_pub)
    assert secret_point == mult(alice_prv * bob_prv, ec.G)

    # Only the x coordinate feeds the KDF (hashing removes weak bits).
    z = secret_point[0].to_bytes(ec.psize, "big")

    hsize = hf().digest_size
    for size in (hsize - 1, hsize, hsize + 1):
        for shared_info in (None, b"deadbeef"):
            shared_key = ansi_x9_63_kdf(z, size, hf, shared_info)
            assert len(shared_key) == size
            assert shared_key == diffie_hellman(
                alice_prv, bob_pub, size, shared_info, ec, hf
            )
            assert shared_key == diffie_hellman(
                bob_prv, alice_pub, size, shared_info, ec, hf
            )

    # Requesting more than hsize * (2^32 - 1) bytes must be rejected.
    too_big = hsize * (2 ** 32 - 1) + 1
    with pytest.raises(ValueError, match="cannot derive a key larger than "):
        ansi_x9_63_kdf(z, too_big, hf, None)
def test_gec_2() -> None:
    """GEC 2: Test Vectors for SEC 1, section 4.1

    http://read.pudn.com/downloads168/doc/772358/TestVectorsforSEC%201-gec2.pdf
    """

    # 4.1.1 — scheme setup: curve secp160r1 with SHA-1
    ec = CURVES["secp160r1"]
    hf = sha1

    # 4.1.2 — party U's key pair (dU private, QU public)
    dU = 971761939728640320549601132085879836204587084162
    assert dU == 0xAA374FFC3CE144E6B073307972CB6D57B2A4E982
    QU = mult(dU, ec.G, ec)
    assert QU == (
        466448783855397898016055842232266600516272889280,
        1110706324081757720403272427311003102474457754220,
    )
    assert (
        bytes_from_point(QU, ec).hex() == "0251b4496fecc406ed0e75a24a3c03206251419dc0"
    )

    # 4.1.3 — party V's key pair (dV private, QV public)
    dV = 399525573676508631577122671218044116107572676710
    assert dV == 0x45FB58A92A17AD4B15101C66E74F277E2B460866
    QV = mult(dV, ec.G, ec)
    assert QV == (
        420773078745784176406965940076771545932416607676,
        221937774842090227911893783570676792435918278531,
    )
    assert (
        bytes_from_point(QV, ec).hex() == "0349b41e0e9c0369c2328739d90f63d56707c6e5bc"
    )

    # expected results: shared field element and 20-byte derived key
    z_exp = 1155982782519895915997745984453282631351432623114
    assert z_exp == 0xCA7C0F8C3FFA87A96E1B74AC8E6AF594347BB40A
    size = 20

    # 4.1.4 — U's side: z = x coordinate of dU * QV
    z, _ = mult(dU, QV, ec)  # x coordinate only
    assert z == z_exp
    keyingdata = ansi_x9_63_kdf(z.to_bytes(ec.psize, "big"), size, hf, None)
    assert keyingdata.hex() == "744ab703f5bc082e59185f6d049d2d367db245c2"

    # 4.1.5 — V's side: z = x coordinate of dV * QU (must agree with U)
    z, _ = mult(dV, QU, ec)  # x coordinate only
    assert z == z_exp
    keyingdata = ansi_x9_63_kdf(z.to_bytes(ec.psize, "big"), size, hf, None)
    assert keyingdata.hex() == "744ab703f5bc082e59185f6d049d2d367db245c2"
def test_capv() -> None:
    """Component testing of the Cryptographic Algorithm Validation Program.

    Runs the NIST CAVP ANS X9.63-2001 KDF vectors against ansi_x9_63_kdf.
    https://csrc.nist.gov/projects/cryptographic-algorithm-validation-program/component-testing
    https://csrc.nist.gov/CSRC/media/Projects/Cryptographic-Algorithm-Validation-Program/documents/components/800-135testvectors/ansx963_2001.zip
    """
    # Each vector: (hash function, key length in BITS, shared secret Z as hex,
    # optional SharedInfo as hex (or None), expected derived key as hex).
    # fmt: off
    test_vectors = [
        (sha1, 128, "1c7d7b5f0597b03d06a018466ed1a93e30ed4b04dc64ccdd", None, "bf71dffd8f4d99223936beb46fee8ccc"),
        (sha1, 128, "5ed096510e3fcf782ceea98e9737993e2b21370f6cda2ab1", None, "ec3e224446bfd7b3be1df404104af953"),
        (sha1, 128, "9fb06aa8dd20e947c9216359630e588b6cd522dd71865ab0", None, "a1f9cef361c26fb9280f582851ecd5f2"),
        (sha1, 128, "613411bedfba26cbddec4fd68c3ae2c40a2255ae0f5c46ee", None, "d8106c1ee5e7be18fa2e3550459e24f7"),
        (sha1, 128, "445776ec51f2c9aae125dd6d6832210eee69249c4c7ad2db", None, "96f1cac19f41a8ce5f5bdd84856b89ba"),
        (sha1, 128, "1c3a4b420de31f5092e0568847d8ba9f84376ccfe5224c19", None, "5c2e39b7571111ba6cad54b63abd3536"),
        (sha1, 128, "0147fee06dd9918cd1654132227313b104bf99b1ad1f1c46", None, "098758b7ed8dac02a5991411b76b3d2c"),
        (sha1, 128, "50ee47d625dcb6a6196c148d452e99bb0a1cf1fa82cdc3a9", None, "9e2a45a4a8984a563f5776ee7ebfd5c6"),
        (sha1, 128, "ea2c79dc2ef00afa448cb8d390998d5a18f27f5d888e472c", None, "c5d126d15ca3d358ee78db4c1ba0df44"),
        (sha1, 128, "424d414d4b63c7cafe05d4d8bf8b6ce4438eb329a650354f", None, "a5370056ae13f6270490ded98b08c68b"),
        (sha1, 1024, "fd17198b89ab39c4ab5d7cca363b82f9fd7e23c3984dc8a2", "856a53f3e36a26bbc5792879f307cce2", "6e5fad865cb4a51c95209b16df0cc490bc2c9064405c5bccd4ee4832a531fbe7f10cb79e2eab6ab1149fbd5a23cfdabc41242269c9df22f628c4424333855b64e95e2d4fb8469c669f17176c07d103376b10b384ec5763d8b8c610409f19aca8eb31f9d85cc61a8d6d4a03d03e5a506b78d6847e93d295ee548c65afedd2efec"),
        (sha1, 1024, "6e1373b2dd31b74b638e86988eb9e918d0c96f46cd5b3a92", "19743dfab297303399c4197c4346ee3a", "57ef215679ca589af756ad2208761fd26fc828da6ebb28bfdd9bc8028d264b3a5c6f6d2dd3de7e1d914e99cb6522e233c26d9ab51e3d27ff532785889a553e44538a085b900cb9209849350df7183e3b0ba73077e42b9c5a769b843e25ef507b9c5ed88d54302e71e16f986a1b20d93948d61f208eff1741e5b7aa490734bde8"),
        (sha1, 1024, "b195b8c3c7bb7ceba50ea27c3c2e364559e1fe3578aa715e", "d5b35fd5f49cc116c019029b08c85ef1", "ba02df6edf41d941703e407572820310eb9db401d71c91f392bc18039e2fb5b250df267f3cdc244313b6c016f247e65cf3006270806495189e97015bbb0b3774b9b147303be32c41b8878ca57a6a4768675688a61ec859e3d4bcef4c5ec97eb654591879c85207a21f5dac6f51e1133bcb08c518817fd6c249011e44af678b50"),
        (sha1, 1024, "6858a77d2b9db2281238103faf6829bfcb631b9d936b127a", "332a4693ddf068f331b1cf9db9ef6a73", "e1ce87a741be93af506bd2a49a8b88cabd5f7ab370de3a0d943d5a10b3deb4088bf7f26d863915cb9d5cbf491c816a570bb021adb7348355b942d6551e8f783475d4f448514f92190d380bf31535eb1af49779eed6f2ffe7f6aee4e0095e8e7a3505cad3ca531b12d51cb5ee742cb46fddcb0740c8ef7e9c208b38f780f98e3c"),
        (sha1, 1024, "c94d3dac0f574192b54e254c211336eec808bd84caf986a8", "8f6d41388cc7f75da870ed81caa645dc", "4b6ba78ed72a11eac83048df87caa92ebcb0ea0f3d4ed3124c6193e806d2cb12862a0dab34c0b1ebe873526dd9c354ed0491f71b00f425988e74276f288f966d7bc12dd6346fa073137dc03365591642c876c93b870e0df8cfecac587a6e8718f980aa8d625e4183dadaba8990e958a0849bbd6a7524fb7e6f7ae0963284ae71"),
        (sha1, 1024, "6abda986108e8a5134057f679850dcf088ea3c43658996ab", "a5bb20b7e8196838e40239b08737f481", "8e2bf5df0c82175e165745ce45808e0006665c0bc44a46b9ece98d2335a06aeaadbc0194437529303627d01488793f9797b343a2c20114715e5fdbfe04b58190d9721857aa00524ec817dc9f479142906119f72e05a6bc01e6c17b74f5ce597de61400939d640aea23831531e42e6d92fbf0b29e4ce6b9656e59d2356dc54a50"),
        (sha1, 1024, "74c1ff417476636d3fa4ee48f5eab876d661d67128348db6", "4f34bd9a38b57dba2b5a4e97c99eb4c2", "65f81d448eaa84c53a3261d4a8894ee38c7b1cdbe8f0118fe9140093323795fd8bdde40ae27d18dfe37207b295d70e0c92dc9e63980f2b3ec0ecd6a5e908aa319dbb0ca1a9e275d32a479f86e6ab3102c380efec1d22ab4c6e21b045ef7ed75b35e7b357065857deec39580850b3881645bf42a3d903fb9ede4c04a6887c382e"),
        (sha1, 1024, "a655d8b0f737061eaa5a692dcc1c92a19b3103c876ccab31", "e7796de6ae29ce6f8e1f4eb8a81d1727", "583918aa2f85ccb17c624266afae509909a9be9121453a526aaf6cc87f903122dcdc14bafde13e2b878f270e1f86f569ab15e12a227c843d361fd8230e465453d5f3b5fb32b3175ba2e8e4aa473c3792f57485f6b022bece57651f7bbe95f1bfb9d7bb9ce712eb30233972dfb6258620822e496305bef740115312e808db039a"),
        (sha1, 1024, "3f6d287da6237895c4ed10dd5c4fbb5fe08eaac5bb314c7b", "0151cb9a7944494ce88eed12b05a3aaf", "d52041563204a69dc1f6f72d9b12e40d4efa35be050b2a677ae43717fa51ab21c75f9853fc701d9270ed2e8e493e15453cc98c0cb7ab07b3b23aa9e241eb3dcc8e401328e86df4c5b83256738782605271f52b17434eff72a1a3b4f45c4a52bb493f9cfd0e9bfd8decd86ce844c0888221abbc08e827cbbba12618ca39f54f1c"),
        (sha1, 1024, "9a71e94ab0b17f0b219fa95ac061d553a4639e89539b023b", "e274ffac839cf3c16266c058627e63fc", "974c1ca2ed816208862029ade68cee9a2877e3e6c7f320dfe6336c3d5ebf57d4fbd40766fe42cca6a91f7092542857560de5fec7813e79e0412fa773adb3a4e43fc13b7e899a6c5acad0848d6156087d0431509dadb55469cac922565bca451505c4f18fe97f9ab71016fc4e641d016bcba34aa6ae7c1e3acfe08b5fd95aa484"),
        (sha224, 128, "9ba3226ba0fca6bd5ddaef5b8d763a4d3303bc258d90468c", None, "15ccbd7d6b8f918335799b3920e69c1f"),
        (sha224, 128, "fc87aaa2d23ebabdb912c153d3a675da556a57df0699e479", None, "e22c69198766563bf0cbc07628eff5f7"),
        (sha224, 128, "f557b1ba1162cdc06cd531d5376a6575cad3e3b0f1508cc0", None, "35183315fba3ffb68a97b1eb5c052021"),
        (sha224, 128, "daa88161947e99d50e0400a79fa70b13e0d0a578f38d7fa0", None, "c76ea452168ae2ae6f4b78c695e2ac76"),
        (sha224, 128, "30b694d1454a10bdd5993da1a5c466e0821bf426ad7b8b40", None, "bafc2a0a75b9bfbdf1356a60a7937aa8"),
        (sha224, 128, "79bf9d93badd5a3eff9ab4c30c44b1985f5e7266e246e777", None, "f3a3c2ed92eebdc35b403843fdb8cd97"),
        (sha224, 128, "79496976705d6edea6fe1d7113263ce1eff221020c89db0b", None, "27cb9631cbb1b4f86aee8c2cf1718be0"),
        (sha224, 128, "2adc3b158cb63d7afa417c5d832b01bfc0aa34ceb35411ca", None, "e648b78032930639e5c210d798203f98"),
        (sha224, 128, "c8ae013dbfa53e9806d21b4deb7e761dbc515f2249afcdb2", None, "44c96abaca4ac9373b58c5af34880dbc"),
        (sha224, 128, "9f562ec0869dce142d378909b3610b02108b158719b573f9", None, "252c6dcaf650d92c09c8dae4dc0934cf"),
        (sha224, 1024, "da67a73072d521a8272c69023573012ddf9b46bff65b3900", "727997aed53e78f74b1d66743a4ea4d2", "dfc3126c5eebf9a58d89730e8d8ff7cc772592f28c10b349b437d9d068698a22e532eae975dfaf9c5c6a9f2935eafb05353013c253444e61f07bc9ddd15948e614bdc7e445ba3b1893f42f87f18fb352d49956009a642c362d45410b43a9ab376e9261210739174759511d1f9e52f6ec73dfed446dbafaf7fd1a57113abc2e8d"),
        (sha224, 1024, "0aab8fffc75e03810fefe1d1f170e8fb860d3880b2206944", "d318ac8eb3c51d8e8e88b8297f79ff26", "9bb6698705db9646ed360a8247396efc92c3450bfaa177c07459dfa8cc108cc8eb98c1e92e8257443463f531c01518fe8d4355784a7df2eaef16908d91104fdc917950b3816146f24a6845a5adad248dda41fcf611954f4de41f357c48f48910a48a1f26b9eff1434b9138848d4b03f05ab6d928c6b9a1b9ba8081405ec45c5f"),
        (sha224, 1024, "ccd2f983a0462b12762392bb02f66ffc44da3155111518f6", "9f90a5a197f316275e4376c262f83345", "9b2c47c1edb54b01e6f26236299262270bb82b3de85f744756c1d811f5db1c95dae1484cfab9119b0f75161efbf3a8a69b5f663b7b484bea7009c53e020e8aa009fe8616de2c932bd41d3d2783ee488c024eda2806f0ef324d16a9a95370c5d9ea277fba8a9d23a2a3051524bccbdcabb62e3550170900da7cf403736fb41823"),
        (sha224, 1024, "384f91ff8495828524e558fbb5acbd1e8b0ac597d8dd8efa", "a389ee5959381ab6a7240ab3322a2c8b", "3ef814c4724372a48b05c6d2cdddef4b57c2cce711860429ab14d87df79a5ee97fbcc8db83f6bc8ad08deceb3e4c09a87691bdffe79791edb409d3af1121750acb9b4a35f76cfb96a707faf4c5a3a455f80637e162202a55d10ac977cea4e62df1536493c6e51f40f7ed76bc38071e192d33018381fcfe8655fe2d82f2052208"),
        (sha224, 1024, "0fea3ff05fdc02af194a4502c4f8968ea696589666e3e5a1", "8f1736597687a0e50f9795f5ce4a794b", "3276318049fb0f809e3eb919e628dde6c8a661147d68a843a0217d49066711652a77956a86eec57d56d62dd9f41149d815fa46416157a6793cc2e0bbaf7de75b78fd532e296064525406781229e6cf657bcfedb110fb6889d9c5d0fce5ae5d9129941f238db5f6de160b15d11bb01b42498a79c8b714ece7a6c50fc5919da383"),
        (sha224, 1024, "c425bb77c93b59bade4f0fade4f58a61ac3540a186b806ce", "46a7e50d6e084eaf34f997edd0e71324", "603cbf3606c22368c7dcb03c0ff22f94c4e7190af58715e8a630d48dd48acbb2eb72ad2e596c1373dcfd76b36e24461a3c6eb70d5a13217db5fa706fe7cb0004d6eb6b41ef87964262f3f71f588c1506e575051490c78cf1c87c495a31049b42f165cd468c2de294d840ee79f0d8a27ba5985fa37eddc14ccce7ed56a1cc73fb"),
        (sha224, 1024, "f5e674ecf26fcb110cbf6617ca81645552c95787e42b59b8", "791c6a02432eeb4e9e09d1666d80edb5", "e03f4a184cfd06361b87eecfa8277ed3bd5d176bb6a1ed7fbe0f1cb7432f394cbf3ec94bd64c275f2dd40531693c2d8c82c4f57057c29d6ca38551490ec66ad7f650a3aa7528fa3bfcb6dd5455cf2158254b7d3284cb91e2154d0042af7b38fb58268196865bdcac6326ef3ae4fa2a38f4844c716518506b6cd2b032681dc851"),
        (sha224, 1024, "e5036244d705de12354c712df9e9b45282fd7969b479601b", "2fd1ad5b6b5a6606ca8bbe1fdf651b37", "f7b412e63aa9fab0435f64ab9f5a6c90d924bf2057ecb311529ed761f7ef939bd765d38e9eadbc8d16667ac3751c3111a932f815bb00af80a78139a05b3ecf3c7074f4b17e81188b49c91b9bf681066d0a6c62561489f1b660a6a9626b23355cbe189bf4a7cf8667608b582dced3ce883b9cef9b2e01667b2e894d80599d2555"),
        (sha224, 1024, "34a8b50ddfe5643d8eb284cf817074955fe85251cc40c116", "79b1b79134f4bc2247bab4d401441f66", "69bea882176d4475bd68f6b040482da6c5287be9e9a773e1a4c70c7dcc16fec975b05c589886d0f67f69103a668c4f23908b9261b6cf81b6ebf2c24693e32d2814483a471a8e70e33e9c1fef5d1714fc1a2a55a22b9ea14868eff726da3c113dce79df3413129dfca11e331df57cc127094eff6b41b8e6e92b5bc7a8ad6679a1"),
        (sha224, 1024, "295bebb724f5bd120c97690d034487e60398fbed6facca88", "1a45c3460cf33d23209aa90a3c4ca708", "e72d4748fbc36b163efe655d19a0aca946baf35cbbfe4c9a69b81597348c53740fda2ece02baa6f7a9f2b64195c09840e4c2d1e11a229243e3014c7cfcbca5afb1a209af6955b3ef1234f1c45ad458bcfa458041eceff639756a2d81a2bfa64687df82a791f96f9441e9f72b5a11c4246acdb75f176c5a89bec7ad36da651f5c"),
        (sha256, 128, "96c05619d56c328ab95fe84b18264b08725b85e33fd34f08", None, "443024c3dae66b95e6f5670601558f71"),
        (sha256, 128, "96f600b73ad6ac5629577eced51743dd2c24c21b1ac83ee4", None, "b6295162a7804f5667ba9070f82fa522"),
        (sha256, 128, "de4ec3f6b2e9b7b5b6160acd5363c1b1f250e17ee731dbd6", None, "c8df626d5caaabf8a1b2a3f9061d2420"),
        (sha256, 128, "d38bdbe5c4fc164cdd967f63c04fe07b60cde881c246438c", None, "5e674db971bac20a80bad0d4514dc484"),
        (sha256, 128, "693937e6e8e89606df311048a59c4ab83e62c56d692e05ce", None, "5c3016128b7ee53a4d3b14c344b4db09"),
        (sha256, 128, "be91c4f176b067f465244742a9df72ca921a6acf462739a4", None, "41476c80696df4e87fb83e55524b89ce"),
        (sha256, 128, "1d5b0ad85bc7859ada93dd5ccaf9536761f3c1a49a42f642", None, "650192990bfcaf7366f536aa89f27dbc"),
        (sha256, 128, "265c33d66b341c3f5ae2497a4eff1bed1cd3e549095bb32a", None, "0066528a1bd57cd92bd619e60b605f1e"),
        (sha256, 128, "03213ad997fdd6921c9ffb440db597a5d867d9d232dd2e99", None, "5a00bd1c812c579507314b491e4e1dfc"),
        (sha256, 128, "3ede6083cd256016f820b69ea0dcd09f57cdab011a80bb6e", None, "026454370775578e3b4a3e09e97a67d2"),
        (sha256, 1024, "22518b10e70f2a3f243810ae3254139efbee04aa57c7af7d", "75eef81aa3041e33b80971203d2c0c52", "c498af77161cc59f2962b9a713e2b215152d139766ce34a776df11866a69bf2e52a13d9c7c6fc878c50c5ea0bc7b00e0da2447cfd874f6cf92f30d0097111485500c90c3af8b487872d04685d14c8d1dc8d7fa08beb0ce0ababc11f0bd496269142d43525a78e5bc79a17f59676a5706dc54d54d4d1f0bd7e386128ec26afc21"),
        (sha256, 1024, "7e335afa4b31d772c0635c7b0e06f26fcd781df947d2990a", "d65a4812733f8cdbcdfb4b2f4c191d87", "c0bd9e38a8f9de14c2acd35b2f3410c6988cf02400543631e0d6a4c1d030365acbf398115e51aaddebdc9590664210f9aa9fed770d4c57edeafa0b8c14f93300865251218c262d63dadc47dfa0e0284826793985137e0a544ec80abf2fdf5ab90bdaea66204012efe34971dc431d625cd9a329b8217cc8fd0d9f02b13f2f6b0b"),
        (sha256, 1024, "f148942fe6acdcd55d9196f9115b78f068da9b163a380fcf", "6d2748de2b48bb21fd9d1be67c0c68af", "6f61dcc517aa6a563dcadeabe1741637d9a6b093b68f19eb4311e0e7cc5ce704274331526ad3e3e0c8172ff2d92f7f07463bb4043e459ad4ed9ddffb9cc8690536b07379ba4aa8204ca25ec68c0d3639362fddf6648bcd2ce9334f091bd0167b7d38c771f632596599ef61ae0a93131b76c80d34e4926d26659ed57db7ba7555"),
        (sha256, 1024, "fd4413d60953a7f9358492046109f61253ceef3c0e362ba0", "824d7da4bc94b95259326160bf9c73a4", "1825f49839ae8238c8c51fdd19dddc46d309288545e56e29e31712fd19e91e5a3aeee277085acd7c055eb50ab028bbb9218477aeb58a5e0a130433b2124a5c3098a77434a873b43bd0fec8297057ece049430d37f8f0daa222e15287e0796434e7cf32293c14fc3a92c55a1c842b4c857dd918819c7635482225fe91a3751eba"),
        (sha256, 1024, "f365fe5360336c30a0b865785e3162d05d834596bb4034d0", "0530781d7d765d0d9a82b154eec78c3c", "92227b24b58da94b2803f6e7d0a8aab27e7c90a5e09afaecf136c3bab618104a694820178870c10b2933771aab6dedc893688122fffc5378f0eb178ed03bac4bfd3d7999f97c39aed64eeadb6801206b0f75cbd70ef96ae8f7c69b4947c1808ffc9ca589047803038d6310006924b934e8f3c1a15a59d99755a9a4e528daa201"),
        (sha256, 1024, "65989811f490718caa70d9bdca753f6c5bd44e4d7b7a0c98", "264a09349830c51726ca8918ae079e4a", "f5f6ef377871830807c741560a955542dcedb662784c3e87fba06bff83db0d9753b92a540e5c86acfe4a80e7657109ee3178879748d967635a0122dbf37d3158c2d214c3dcba8cc29d6292250f51a3b698280744f81040275e9a8b6ee5c9b0307db176364868deade3becc0711c1fb9028c79abad086459c3843f804db928c49"),
        (sha256, 1024, "9d598818649fc81b8c59f60dfd41784790c971eefcff6419", "435f06ac33386eaf3af9042d70b93b08", "970845c707dafb8699fa26b9f6c181f358ebed337f9504b04b515c9f01db12dd4965e65e8750af575c0934527183ccbe8e243f26398906089c11bc8a8f69bedbbcf651c19c219b5bd0dc1829931cc6994d71f0000b7e42b1b994aa332b4a0bc506cde8723cd8da879826c585ae12fafb3f3daf5784007006878f4ebc4eda7db2"),
        (sha256, 1024, "4f9c0a5c03c8c3a23f06847d0e1f86f7df8da47bf3ccde99", "45672212c5af77d7eb5c90c38e125b52", "80fd7658118370a7d790d708ddafe6e7a5ba22caaacbf46e73fce6d6e1516a465d8264b75b5286067ac57863949aae984dc00653bf151930b398d7f5478c7b954565c584c8ad36fe59692781f2398d71e0234cff09d3c175d86a6c7c0f1e387eda55da8300caee4173ad7ff74b2effd723defc20060fa69f92b8af858a87a4f7"),
        (sha256, 1024, "1980d2966d59ccbbf89f7fe9a5943da886f232ac02ee69ce", "c8af6665439efbbee8660701681d54ce", "2120434e863d1df7b9748a3cbc73d2680ede19437a13230a9dc4ef692feb5197afd4e9275d6ed00e1ff3a0fd026dc8a2adefc90bf0e8656912849094d7a515bf45dda69e574bf33211255dd78bfc2b83434f1e0f7795d468dd09c4ed88b691b3fb9ce876161b2f26b41614ff05228b3402f0d1f3044c2c3f9f7136c7aca53356"),
        (sha256, 1024, "0eaabe1f7ab668ccf171547d8d08f6f2e06bc5e5f32d521c", "e4e98a7d346906518305de3798959070", "b90a0069ad42b964e96d392e0f13c39e43203371b1eba48f7c41fbfd83df7505d564ce4bf0cf8d956d2a1e9aee6308471d22f70aedd19b24566974f54db2849a79528c9e3f5d4f93c2f6f0862311fca14a2df91635d112fbb05dcd7c0ee72a6d8e713216bc8777596244f724e4046ba134f9a811f8f504ee67b1683041690921"),
        (sha384, 128, "d8554db1b392cd55c3fe957bed76af09c13ac2a9392f88f6", None, "671a46aada145162f8ddf1ca586a1cda"),
        (sha384, 128, "070265bd04222fc1dcb67182fa797166eaa18a2a1e1a6c0f", None, "522d79f65430350cec5c59c014e1a2cd"),
        (sha384, 128, "4e7ef0743a0a14fe21eaa9cbcec68581e75a616c76814c61", None, "4ac7317e0f82ff9256f1584a24661446"),
        (sha384, 128, "8952079916141dca1ce53d0d221269db0130f99270129ea3", None, "5910e2945753e0d0a0d60afd54815a3b"),
        (sha384, 128, "646e92b7bf5e747bb7ba5afbe6d2028bb93147be73fcec60", None, "ec2c0633e51c78880bee00e63d40d103"),
        (sha384, 128, "cd09e15099aec9baa47bb343d156afe8e0cd33f8dbf104be", None, "f72c76cc83bf273c7e5129d1706e3330"),
        (sha384, 128, "bfd00866e7a7e147fd98e1defed9fa1ab32d3e785a3f3436", None, "10c4874e47a1032cb9307dd4b4cad9f9"),
        (sha384, 128, "f07d1c1d8d3435c9477303c87ae19a0b8acf890c11b19794", None, "ecc66ccf0bcfaa644787203178647091"),
        (sha384, 128, "eeb2e06aad13b543746a9e5411066d4ef5717bc753eee1a0", None, "2d750acfa410f23e6993747536aaee9e"),
        (sha384, 128, "ba3ef5d54aadb1824dd974edf1748d76b7b13d26e83fa9f9", None, "55182a2abb9dc1d79d64b09c4c4666ee"),
        (sha384, 1024, "c051fd22539c9de791d6c43a854b8f80a6bf70190050854a", "1317504aa34759bb4c931e3b78201945", "cf6a84434734ac6949e1d7976743277be789906908ad3ca3a8923da7f476abbeb574306d7243031a85566914bfd247d2519c479953d9d55b6b831e56260806c39af21b74e3ecf470e3bd8332791c8a23c13352514fdef00c2d1a408ba31b2d3f9fdcb373895484649a645d1845eec91b5bfdc5ad28c7824984482002dd4a8677"),
        (sha384, 1024, "2c9436cd85df982911df60d54f2d41d81660cdb37e457daf", "6b5910575296437a75c04371c8623cf6", "6efb45067e00024beaa9fa763ef2c701527cd9eb5697f7f77475f2d36495058e3558893006ab0169e9a9f78481f6f06e9b005413856af89cd764beba0fff6ed4a077ffd36f966b633e058793320febf52b937554539096838873171933c2b7f864000be1b3a01ad6c4e66c3190bbfc90d7deb31e8857cf272cdd2caea730839e"),
        (sha384, 1024, "04bac3eccc8730c441c12f050168643c3581c046067eb930", "6f75d4e7ec627f047589c588d20a8ae0", "64be249badec07779df8c40e3a75ebe7296f4c853e8c596d208f6c9cc7b41b75db28aa31a9199eabb750c28804739cbdabf81f2b9579c0e0bb3dbab77a0315ce1f7d4cad83e2cbd4258f132f3ccbe522da73ba0b389b6963d227c3aa61dbdde64517cd05599596dd9e73b85e0deede8a822821b4a27403116919f40f75cc7c42"),
        (sha384, 1024, "684ac84d726909080f8d6bd89d8a744ced207c5bdb4bf866", "ae59a73e8b3c3b59f01fec8e7efadef9", "e312c7c168c69e3c0e0894c7a4b561cf8e38c3dfcbc90c8934edb8b16f7031cf595a093d6289a01fd977c0bf216c04edaa21230e82bd0f066a60180174df85482dd6353111da24bf979422e3fb7b34720310075abba72c5f0ac6bfd7c6af331532ce7b1d3b9628ab4502614f9e324177ad33f7257a4c1efcecefb83f446242e1"),
        (sha384, 1024, "74a215aa43a7f59fac674d220c852e91a30e7ad05b1b7223", "8bd8cc5c429502d5ed0da3fe706a52d4", "3d836e700d223a088647eb9a323f7b7b19ad071818141182e216cd9644396b01d6b3d3e1fc2cefa2794bf7d9d27f10b0716ae3ec100e171cb6188c5a23da1b7500879b014b4878455b17f049060cb46c57c1b0670eb8cfa3b478ca0501ed5c258773b862f0eadb0991eb56a4f51aadb1287179bd7a366ac16c235d7b11d96048"),
        (sha384, 1024, "5318d9e0ec5d6f82bae244f01e3e5281e954b924d1554fee", "c0537c7929f6efe8399c8089552214a9", "38083a961d8967e11096a99d36c198b3527dfbda74c2f4e9cfc7b5a115333d2be242b192df027ba4c1f732f1c26ae94b8cd3fa2ecd59df9be5baed7c479da001798a4a623ae01fe1b1feb83f436fc4b3268bd56b17579c0d7ad0df9296db3f57f26a7de0d64b04311c81d70fdec19cd8acf0e5a03b60059172475b104aaf92cb"),
        (sha384, 1024, "d427c25cc0d5c499aa789cbd9a0f2a358596e0a586d6aaad", "b0db1a8f05b1ed0ac6594f882d61da82", "f800e7ed9cf7a632ceeda04ea75f6fd7efddcd96cf6ec03052cb4c71f52a61ea96d363f1d07704fe51765135624a55b64cefe6c7f7e653d6a404911a99ecd6f437a9e770b6c60601d6001165b37e6005548f454493429dce77ac3311f817a88f8b14a4a2bab4b2cb142f5154c9a23bf6818bcafad4b8d0fe50c1392b12196a62"),
        (sha384, 1024, "fff1206cd5e2aff982c47d5dd31c2ce50e6718f4d2126427", "74b3285de80d0c1962b6c9c6dc9cd5bf", "d8b2cc9655a2cfa338e76cdf17258501b69a04057947c4083fd76bdfd73d48a6cb9e8538317bff5e829e006661e0ab53a9dd5ff210c8b59ff6ae64220bcab7c84facd792583c34177a867c69e117688bec10d134c003f112ca600eb6c514df0be5daa73bc9b4800403f79424ff3313b95d009ff423655774487cc1465731936b"),
        (sha384, 1024, "75a43f6464c2954efd9558d2d9c76cfcafefec3f07fe14af", "6744c4a41d5bd7f4ca94ea488605c3d3", "5045a6252c9b6eb80debc67e0d11a028bf8e1f0b274d13aebcc7d565e1b73ed228c5f4195ebd1044aaf9a755c6945a729767f8f3697adb2941df0f449fdfca8f84abefc5011d4b968ad1f79b535bf124e3dcf131f8f894ee633a040c34a6470544497ae3d96c1e4bcdc5914d40c4a73f1e174b29bd5755d1aa0a3ddd3f9428d5"),
        (sha384, 1024, "09807be0ca8c534a0e2b326a845054a5389c85a1d60f84a5", "43b0be9359d0bbecb75958d566decdd3", "a00e22994f134f1a0da919fa43a779314c5e706ab3fa4c1d72912cf1109b958a141075d206a7befe467efa85ab2d1a83d1a438bda7df009e1eaf66649920d9dfb4110a36575f034ad0a63344968dc0e171ea2972fda011f66e8bda6867eb769281af23488b5166c85289ad3a68407010ae6f62227a1c1d19a6f527c735dc145d"),
        (sha512, 128, "87fc0d8c4477485bb574f5fcea264b30885dc8d90ad82782", None, "947665fbb9152153ef460238506a0245"),
        (sha512, 128, "293e901c8f43178794a9792f98861732faa4677e72b8ce1e", None, "883e84f877b05a092ada456571c58cb9"),
        (sha512, 128, "734315a823c278adb4517c952b0ae3f6fe2de6615b1c2650", None, "c8ee447ad8e7ff0a874e89b11616a824"),
        (sha512, 128, "fece4214eb02a10d11dd7dffb0bd884e4aedbf705fa3726f", None, "2491f93f072adca1c051d800b5d82dec"),
        (sha512, 128, "4ee79bcb0d621a7a0d42cd9a496b209dfd3f4276455139e0", None, "bdb3e1cf4414b0ba1829810defc94024"),
        (sha512, 128, "18447afe05107a7729661bd1b23935b30983ff614631dec8", None, "1d1c68eabdfcfdd62a42d43a3e98c772"),
        (sha512, 128, "c32dffc642ae400dfc21ade6adb936583999d5cf1379b783", None, "8a1abd901b090f808b2f1e355c6eb596"),
        (sha512, 128, "57d4d684aa3543d6097bc7c0d0430527e1937b0f936ab479", None, "33f781afd506a4206b9b3af2371a67a4"),
        (sha512, 128, "b7d969a749af87a02c0629c642bfc5e2e2aa10d015fde9ca", None, "dfbf12c462bc114997317b13c9cdda65"),
        (sha512, 128, "fb03ba6b357d26ee18a22bdab14da74ca5727ed4b69a687b", None, "8dcdf450dd810e20c472d485a78a2d5f"),
        (sha512, 1024, "00aa5bb79b33e389fa58ceadc047197f14e73712f452caa9fc4c9adb369348b81507392f1a86ddfdb7c4ff8231c4bd0f44e44a1b55b1404747a9e2e753f55ef05a2d", "e3b5b4c1b0d5cf1d2b3a2f9937895d31", "4463f869f3cc18769b52264b0112b5858f7ad32a5a2d96d8cffabf7fa733633d6e4dd2a599acceb3ea54a6217ce0b50eef4f6b40a5c30250a5a8eeee208002267089dbf351f3f5022aa9638bf1ee419dea9c4ff745a25ac27bda33ca08bd56dd1a59b4106cf2dbbc0ab2aa8e2efa7b17902d34276951ceccab87f9661c3e8816"),
        (sha512, 1024, "009dcd6ba5c8c803ca21f9996ca5dd86047d4ddc150fddace1b1ebe996c2007e3ee907c8ff03b9ef766e8ceb4dedf7489e5162e2278c0185e4be381bec17dd992cf8", "1e60e51c11a538b0ea8990d69a4c6358", "4e55036a32f32fc965046fdfbf686c108e43a69f8fc1a64ff1bd77763f2eedc8bf277d78b4ce31243e1adbe2c2d5dd59b47503b5b90b54f9d7a9a5aea49c7f0283cb64c3849a1d157000fd41ef6c1d1a5b62734e7c9a20dcfb57f2da974933f57ee619d72898d0e93d9a4254aaddf73941d6269298b4d49c0ac64a33802fe8f2"),
        (sha512, 1024, "01bbc44314f24db4d67a2a7fb5ca3f7a5022790f5875895d448050eda5611a2f39de48e394c5a3df26208eb01f804d0a1d68eece6b6fa96d6db895e133e129094f78", "433e3ee77d00e4a9634efd677e2ff21b", "f1255002293d5fbcf35ad0e532ae872171d11014616a2c52d7e5cb861b0251b9e505a77161c777bafc052b6525a6ecf34590605de72f13a1aff0a61a8a4a3364ebbe2f99224c13e043e497af8a26de749cd257e475b2f0e60e3b594901320a692a4af422f9636e4814b33f67d181a086265013b0d4efd9e1a94ea8a576afde66"),
        (sha512, 1024, "01a33032a2bf6f8e9d6972dd339536c9e248ae9881844ff1bd04af48085be4ca1834f2a94ce1019dd9620d1e3a68203a5b291f40b5f8e3238a2a036312b89061cc60", "d3297ad6b9757d1f5a9d5b0e72176d74", "63565d1d3443620fba4218c97887ff40d6d68bf56b429c22018be5d91c318187ebe8a9399c5cc6c4a849288ab784d4340714ae3fdb426c4a83db9ce2ba8aea80d448e50ad543749b47bcaae519f7f00badd8d48296e81069104dcd293c605b08159ef2ef14c7833739d0414274136ae4db05ba4fa31b29c59de46d9be539525f"),
        (sha512, 1024, "004b20a501776ea54cbdabffec2a664b7a93f8d67b17405a82bd9cbf3685a4659beb2deff1b6ecaa7ab187b6d4fd407f10db6992c65308410deb133be31a0de0c1c9", "fd5462cb37aa298e95f8e34bb49d85ca", "cafcbc117317661bf15277c2881e05e345c1720b0c1c4040c33fe4a3ecf8032802642d29828a077ca91b6fac216b7a06517740c7d633c279dd2115eb7a34fd337376247219f53da32df57070f47c2e0816710080d6492e1c3e8cac818c3cfca2a3ce5cf1515f066b1815d2d2f69fa3111a9e81570963b90a536da0376c12265b"),
        (sha512, 1024, "01fb44335b437771777f14d44e5b634c18c7f570b935228fd3073e3cbde299dfb9f4d64ad720d30e875e8c6bbe181027459c9d3f92a276a38e22faf25f208576a63f", "2359d18657243d61963ceca3fa93587d", "1544e54cd293e533959bdd893337f01ef0c7685a4d8d403d438b0223a7e18330c312a0f16bd819f4359fdd74ae85cc603d35e3d9cba896177452c8dee5214066fca420c3ab522a245af215beb7de52ebb0bdd15d0596b8b763cf7e25610a53efa726b899a1d9727b25ec673ee91ff2111f03cf761a7880d69625e784becfd4e0"),
        (sha512, 1024, "0109afa3904193690d3f2c49e42d08c8c5cd2ea907a0d699c876e418e303b485374c8d6cf5a32af1491b3ea8a3503692b4a0fd78f9b4082e2a6e72345db4532d749f", "7c19631d3cd65915fa4789cf7b1c0979", "fb60175568a66ef4202e110396663085fe2a9d6d2071e55d03c30ea499fee850c99c4e42a7227cca2eaf4d75e37dde205ae07260e84aeee6ef0819d98bd00d0ff5ba55994e7bf2a578baf2ee9aa862d94bf431fa14429010ebc30d7e602de726cdffacaeabc8541237fbc0c975abbf203c018c688ee354d07978654b90de9569"),
        (sha512, 1024, "00632e165775f3c5b6e81d4042f809e904b8167687747638874b39ffce1993f46e8fc44e2a1c3df59563003bad3e25c85b61819e9addc0fdbe173dd4115c38f62ef6", "2bf0f18b7f21c4ec9c20b84c75f66b7c", "c324fed01b75c37fc96703031403d5cc6857dc7ffa48192d9a10d5c69dd6274ecd0eb9a278f9e6b616c27bbf2e3e016635b311940390c52c61a4f4b3383ca6046961dbd2455ff6a982e8269864edd3cc1b1053da7daf9699c61b05f1acca7b79e68db655fd526fdc392bd36dcaf1c5b2fafb8975e318070d4bb948829ac41bb6"),
        (sha512, 1024, "0096172bf47d06d544ae98471490cf9e52ee59ea7a2208b33b26c52d4952bb8f41b2211d3f9ff32e77ca8cc906ba8d246ff266ddf1df8f53824ccb15b8fb39724703", "cf3a74ba86af42f1ae85477ead645583", "995d1ab8557dfeafcb347f8182583fa0ac5e6cb3912393592590989f38a0214f6cf7d6fbe23917b0966c6a870876de2a2c13a45fa7aa1715be137ed332e1ffc204ce4dcce33ece6dec7f3da61fa049780040e44142cc8a1e5121cf56b386f65b7c261a192f05e5fefae4221a602bc51c41ef175dc45fb7eab8642421b4f7e3e7"),
        (sha512, 1024, "0037cd001a0ad87f35ddf58ab355d6144ba2ed0749a7435dab548ba0bfbe723c047e2396b4eef99653412a92c8db74bb5c03063f2eb0525ae87356750ae3676faa86", "eb17da8851c41c7ac6710b1c49f324f8", "829a28b81f9e95b5f306604067499c07d5944ca034ed130d513951f7143e4e162bad8adb2833e53b8235c293cd2a809659ac7f7e392cba6a543660e5d95070c0c9e6a9cdc38123e22da61bb4cbb6ad6d1a58a069e934fc231bd9fe39a24afcbf322ccea385f0418f3b01c1edd6e7124593a1cefe3e48fcd95daaf72cfd973c59"),
    ]
    # fmt: on
    # Run every CAVP vector through the KDF and compare against the
    # expected keying data.
    for hf, length, z, shared_info, key_data in test_vectors:
        result = ansi_x9_63_kdf(
            bytes.fromhex(z),
            length // 8,  # key length is given in bits
            hf,
            None if shared_info is None else bytes.fromhex(shared_info),
        )
        assert result == bytes.fromhex(key_data)
| 122.020576 | 455 | 0.868976 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23,527 | 0.793464 |
a661816e5916f0c7a9ee3d97c0dc846186965e97 | 4,728 | py | Python | dni/mlp.py | DingKe/pytorch_workplace | 4bc60a2c3640de522d0b72262667ba70391ba16e | [
"MIT"
] | 184 | 2017-05-23T12:06:08.000Z | 2021-12-21T09:09:51.000Z | dni/mlp.py | Ewenwan/pytorch_workplace | 0ef2e905596a0b0db11b4929564175e6f7ab7423 | [
"MIT"
] | 8 | 2017-08-14T11:40:20.000Z | 2019-01-16T13:14:32.000Z | dni/mlp.py | Ewenwan/pytorch_workplace | 0ef2e905596a0b0db11b4929564175e6f7ab7423 | [
"MIT"
] | 61 | 2017-04-29T09:53:47.000Z | 2021-02-19T02:13:14.000Z | import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
# Hyper Parameters
input_size = 784  # 28x28 MNIST images, flattened
hidden_size = 256  # width of each network stage
dni_size = 1024  # hidden width of the synthetic-gradient (DNI) module
num_classes = 10  # MNIST digit classes
num_epochs = 50
batch_size = 500
learning_rate = 1e-3
use_cuda = torch.cuda.is_available()  # run on GPU when one is present
# MNIST Dataset
train_dataset = dsets.MNIST(root='../data',
                            train=True,
                            transform=transforms.ToTensor(),
                            download=True)
test_dataset = dsets.MNIST(root='../data',
                           train=False,
                           transform=transforms.ToTensor())
# Data Loader (Input Pipeline); training data is shuffled, test data is not.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
class DNI(nn.Module):
    """Synthetic-gradient module (Decoupled Neural Interface).

    Maps a hidden activation of size ``input_size`` to a predicted
    gradient of the same size through a small bottleneck MLP
    (Linear -> BatchNorm -> ReLU -> Linear).
    """

    def __init__(self, input_size, hidden_size):
        super(DNI, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.bn1 = nn.BatchNorm1d(hidden_size)
        self.act1 = nn.ReLU()
        # Output layer predicts one gradient value per input unit.
        self.fc2 = nn.Linear(hidden_size, input_size)

    def forward(self, x):
        out = self.fc1(x)
        out = self.bn1(out)
        out = self.act1(out)
        out = self.fc2(out)
        return out

    def reset_parameters(self):
        # Bug fix: nn.Module defines no reset_parameters(), so the original
        # ``super(DNI, self).reset_parameters()`` call raised AttributeError.
        # Re-initialize each child module that supports it instead.
        for module in self.children():
            if hasattr(module, "reset_parameters"):
                module.reset_parameters()
        # Zero the output layer so the module initially predicts zero
        # synthetic gradients (the intent of zeroing fc2 in the original).
        for param in self.fc2.parameters():
            param.data.zero_()
# Synthetic-gradient module operating on the 256-dim hidden activation.
dni = DNI(hidden_size, dni_size)
class Net1(nn.Module):
    """First half of the network: a single Linear -> BatchNorm -> ReLU stage."""

    def __init__(self, input_size, hidden_size):
        super(Net1, self).__init__()
        stage = [
            nn.Linear(input_size, hidden_size),
            nn.BatchNorm1d(hidden_size),
            nn.ReLU(),
        ]
        self.mlp = nn.Sequential(*stage)

    def forward(self, x):
        # Delegate straight to the sequential stack.
        return self.mlp(x)
# Lower half of the classifier (trained with synthetic gradients from dni).
net1 = Net1(input_size, hidden_size)
class Net2(nn.Module):
    """Second half of the network: one hidden layer plus the classifier head."""

    def __init__(self, input_size, hidden_size, num_classes):
        super(Net2, self).__init__()
        # Keep the original submodule names so checkpoints stay compatible.
        stages = (
            ("fc1", nn.Linear(input_size, hidden_size)),
            ("bn1", nn.BatchNorm1d(hidden_size)),
            ("act1", nn.ReLU()),
            ("fc", nn.Linear(hidden_size, num_classes)),
        )
        self.mlp = nn.Sequential()
        for name, module in stages:
            self.mlp.add_module(name, module)

    def forward(self, x):
        return self.mlp(x)
# Upper half of the classifier (produces the 10-way logits).
net2 = Net2(hidden_size, hidden_size, num_classes)
# Loss
xent = nn.CrossEntropyLoss()  # classification loss for net2's logits
mse = nn.MSELoss()  # regression loss for the synthetic gradients
# Optimizers
opt_net1 = torch.optim.Adam(net1.parameters(), lr=learning_rate)
opt_net2 = torch.optim.Adam(net2.parameters(), lr=learning_rate)
opt_dni = torch.optim.Adam(dni.parameters(), lr=learning_rate)
# NOTE(review): modules are moved to the GPU after the optimizers were
# built; this relies on .cuda() updating parameters in place — confirm.
if use_cuda:
    net1.cuda()
    net2.cuda()
    dni.cuda()
# Train the Model: net1 is updated with synthetic gradients predicted by
# dni, while net2 is trained normally and dni regresses the true gradient.
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Convert torch tensor to Variable (legacy pre-0.4 PyTorch API).
        if use_cuda:
            images = images.cuda()
            labels = labels.cuda()
        images = Variable(images.view(-1, 28 * 28))
        labels = Variable(labels)
        # Forward + Backward + Optimize
        opt_net1.zero_grad()  # zero the gradient buffer
        opt_net2.zero_grad()  # zero the gradient buffer
        opt_dni.zero_grad()  # zero the gradient buffer
        # Forward, Stage1: h1 receives the true gradient, h2 is a detached
        # copy fed to the DNI module.
        h = net1(images)
        h1 = Variable(h.data, requires_grad=True)
        h2 = Variable(h.data, requires_grad=False)
        # Forward, Stage2
        outputs = net2(h1)
        # Backward: populates h1.grad (the true gradient w.r.t. h).
        loss = xent(outputs, labels)
        loss.backward()
        # Synthetic gradient and backward: net1 is updated using the
        # DNI prediction instead of the true gradient.
        grad = dni(h2)
        h.backward(grad)
        # regress the synthetic gradient toward the true gradient
        regress_loss = mse(grad, Variable(h1.grad.data))
        regress_loss.backward()
        # optimize
        opt_net1.step()
        opt_net2.step()
        opt_dni.step()
        if (i + 1) % 100 == 0:
            print ('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'
                   % (epoch + 1, num_epochs, i + 1, len(train_dataset) // batch_size, loss.data[0]))
# Test the Model: run the composed network net2(net1(.)) on the test set
# and report top-1 accuracy.
correct = 0
total = 0
for images, labels in test_loader:
    if use_cuda:
        images = images.cuda()
        labels = labels.cuda()
    images = Variable(images.view(-1, 28 * 28))
    outputs = net2(net1(images))
    # Predicted class is the argmax over the 10 logits.
    _, predicted = torch.max(outputs.data, 1)
    total += labels.size(0)
    correct += (predicted == labels).sum()
print('Accuracy of the network on the 10000 test images: %d %%' %
      (100 * correct / total))
| 28.311377 | 100 | 0.59412 | 1,417 | 0.299704 | 0 | 0 | 0 | 0 | 0 | 0 | 489 | 0.103426 |
a66300393de948ba2295823f544984ec41706a76 | 1,505 | py | Python | dorkbot_extract_dll.py | dyussekeyev/dorkbot-c2-extractor | fad5faa0ff71d4df9fbae95155a046466c294051 | [
"MIT"
] | null | null | null | dorkbot_extract_dll.py | dyussekeyev/dorkbot-c2-extractor | fad5faa0ff71d4df9fbae95155a046466c294051 | [
"MIT"
] | null | null | null | dorkbot_extract_dll.py | dyussekeyev/dorkbot-c2-extractor | fad5faa0ff71d4df9fbae95155a046466c294051 | [
"MIT"
] | null | null | null | import pefile
import base64
from Crypto.Cipher import ARC4
# Raw PE resource payloads collected by get_offset().
datas = list()
def get_offset(resource_dir):
    """Recursively walk a PE resource directory tree.

    For every leaf entry carrying data, slice the corresponding bytes out
    of the memory-mapped image of the global ``pe`` and append them to the
    global ``datas`` list.
    """
    if not hasattr(resource_dir, 'entries'):
        return
    for node in resource_dir.entries:
        # Descend into nested resource directories.
        if hasattr(node, 'directory'):
            get_offset(node.directory)
        # Leaf entries point at the actual resource bytes.
        if hasattr(node, 'data'):
            rva = node.data.struct.OffsetToData
            size = node.data.struct.Size
            datas.append(pe.get_memory_mapped_image()[rva:rva + size])
def isBase64(sb):
    """Return True if *sb* (str or bytes) round-trips through base64.

    Raises:
        ValueError: if *sb* is neither ``str`` nor ``bytes``.
    """
    # Bug fix: in the original the explicit ``raise ValueError`` sat inside
    # the try block and was swallowed by a broad ``except Exception``,
    # silently returning False for bad argument types.  Validate the type
    # outside the try so the error actually propagates.
    if isinstance(sb, str):
        try:
            sb_bytes = bytes(sb, 'ascii')
        except UnicodeEncodeError:
            # Non-ASCII text can never be valid base64.
            return False
    elif isinstance(sb, bytes):
        sb_bytes = sb
    else:
        raise ValueError("Argument must be string or bytes")
    try:
        # Decoding then re-encoding reproduces the input only for
        # canonical base64.  binascii.Error (raised on bad padding/length)
        # subclasses ValueError, so this catch stays narrow.
        return base64.b64encode(base64.b64decode(sb_bytes)) == sb_bytes
    except ValueError:
        return False
# Parse the malware sample and gather every embedded resource payload.
pe = pefile.PE("5C55FC257423ACD1AE6382D88B1EE306.bin")
get_offset(pe.DIRECTORY_ENTRY_RESOURCE)
count = 0
for x in datas:
    # Every resource is expected to be base64; bail out otherwise.
    if not isBase64(x):
        exit(1)
    decoded = base64.b64decode(x)
    # Assumed payload layout: bytes 0-3 = DLL size (little-endian),
    # bytes 8-12 = 5-byte RC4 key, bytes 32.. = ciphertext — TODO confirm
    # against the dorkbot packer format.
    size_dll = int.from_bytes(decoded[0:4], byteorder='little')
    size_bytes = len(decoded) - 32
    crypto = ARC4.new(decoded[8:13])
    decrypted = crypto.decrypt(decoded[32:32 + size_bytes])
    # The DLL sits at the tail of the decrypted buffer.
    decrypted_dll = decrypted[size_bytes - size_dll:size_bytes]
    f = open('decrypted' + str(count) + '.bin', 'wb')
    f.write(decrypted_dll)
    f.close()
    count = count + 1
| 29.509804 | 88 | 0.613953 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 134 | 0.089037 |
a66302e7cabfe8760a8aa9340aba6ceda0934422 | 6,884 | py | Python | train-xception.py | jGsch/kaggle-dfdc | c074e4fcc2b88e478f306ddfbba7d7ceafcba7d6 | [
"MIT"
] | 124 | 2020-04-29T01:56:08.000Z | 2022-03-04T12:40:26.000Z | train-xception.py | Yolanda-Wang/Kaggle-DFDC-2nd | 72e94d8b2eb94b3aba5ac4de646b39847ac6fb19 | [
"MIT"
] | 14 | 2020-05-09T07:12:15.000Z | 2022-03-12T00:26:44.000Z | train-xception.py | Yolanda-Wang/Kaggle-DFDC-2nd | 72e94d8b2eb94b3aba5ac4de646b39847ac6fb19 | [
"MIT"
] | 38 | 2020-04-29T03:35:56.000Z | 2022-01-14T15:29:47.000Z | import os
import csv
import shutil
import random
from PIL import Image
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
import xception_conf as config
from model_def import xception
from augmentation_utils import train_transform, val_transform
def save_checkpoint(path, state_dict, epoch=0, arch="", acc1=0):
    """Serialize a training checkpoint to *path*.

    Keys prefixed with ``module.`` (added by ``nn.DataParallel``) are renamed,
    and tensors are moved to the CPU so the checkpoint loads on any device.
    """
    cleaned = {}
    for key, value in state_dict.items():
        if key.startswith("module."):
            key = key[len("module."):]
        cleaned[key] = value.cpu() if torch.is_tensor(value) else value
    payload = {
        "epoch": epoch,
        "arch": arch,
        "acc1": acc1,
        "state_dict": cleaned,
    }
    torch.save(payload, path)
class DFDCDataset(Dataset):
    """Frame-level dataset over per-video face-crop folders described by a CSV index.

    Each CSV row describes one video (name, split, label, frame count); a
    per-label *ratio* decides how many frames of each video are sampled.
    """

    def __init__(self, data_csv, required_set, data_root="",
                 ratio=(0.25, 0.05), stable=False, transform=None):
        video_info = []
        data_list = []
        with open(data_csv) as fin:
            for row in csv.DictReader(fin):
                if row["set_name"] != required_set:
                    continue
                label = int(row["is_fake"])
                n_frame = int(row["n_frame"])
                # Sample a label-dependent fraction of each video's frames.
                select_frame = round(n_frame * ratio[label])
                data_list.extend(
                    (len(video_info), sample_idx)
                    for sample_idx in range(select_frame)
                )
                video_info.append({
                    "name": row["name"],
                    "label": label,
                    "n_frame": n_frame,
                    "select_frame": select_frame,
                })
        self.stable = stable
        self.data_root = data_root
        self.video_info = video_info
        self.data_list = data_list
        self.transform = transform

    def __getitem__(self, index):
        video_idx, sample_idx = self.data_list[index]
        info = self.video_info[video_idx]
        if self.stable:
            # Deterministic, evenly spaced frames (used for validation).
            frame_idx = info["n_frame"] * sample_idx // info["select_frame"]
        else:
            frame_idx = random.randint(0, info["n_frame"] - 1)
        image_path = os.path.join(self.data_root, info["name"],
                                  "%03d.png" % frame_idx)
        try:
            img = Image.open(image_path).convert("RGB")
        except OSError:
            # Unreadable frame: substitute random noise so training continues.
            img = np.random.randint(0, 255, (320, 320, 3), dtype=np.uint8)
        if self.transform is not None:
            img = self.transform(image=np.array(img))["image"]
        return img, info["label"]

    def __len__(self):
        return len(self.data_list)
def main():
    """Train the Xception deepfake classifier; checkpoint after every epoch."""
    torch.backends.cudnn.benchmark = True

    # Training samples random frames; validation uses stable (deterministic) frames.
    train_dataset = DFDCDataset(config.data_list, "train", config.data_root,
                                transform=train_transform)
    val_dataset = DFDCDataset(config.data_list, "val", config.data_root,
                              transform=val_transform, stable=True)

    kwargs = dict(batch_size=config.batch_size, num_workers=config.num_workers,
                  shuffle=True, pin_memory=True)
    train_loader = DataLoader(train_dataset, **kwargs)
    val_loader = DataLoader(val_dataset, **kwargs)

    # Model initialization
    model = xception(num_classes=2, pretrained=None)

    # Optionally resume training from a previous checkpoint on disk.
    if hasattr(config, "resume") and os.path.isfile(config.resume):
        ckpt = torch.load(config.resume, map_location="cpu")
        start_epoch = ckpt.get("epoch", 0)
        best_acc = ckpt.get("acc1", 0.0)
        model.load_state_dict(ckpt["state_dict"])
    else:
        start_epoch = 0
        best_acc = 0.0

    model = model.cuda()
    model = nn.DataParallel(model)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          0.01, momentum=0.9, weight_decay=1e-4)
    # Decay the learning rate by 5x every 2 epochs.
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.2)

    os.makedirs(config.save_dir, exist_ok=True)

    for epoch in range(config.n_epoches):
        # Fast-forward the scheduler through epochs already covered by the
        # resumed checkpoint so the LR schedule stays aligned.
        if epoch < start_epoch:
            scheduler.step()
            continue

        print("Epoch {}".format(epoch + 1))

        # ---- training phase ----
        model.train()
        loss_record = []
        acc_record = []

        for count, (inputs, labels) in enumerate(train_loader):
            inputs = inputs.cuda(non_blocking=True)
            labels = labels.cuda(non_blocking=True)

            outputs = model(inputs)
            loss = criterion(outputs, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            iter_loss = loss.item()
            loss_record.append(iter_loss)

            # Batch accuracy from the argmax over the two logits.
            preds = torch.argmax(outputs.data, 1)
            iter_acc = torch.sum(preds == labels).item() / len(preds)
            acc_record.append(iter_acc)

            if count and count % 100 == 0:
                print("T-Iter %d: loss=%.4f, acc=%.4f"
                      % (count, iter_loss, iter_acc))

        epoch_loss = np.mean(loss_record)
        epoch_acc = np.mean(acc_record)
        print("Training: loss=%.4f, acc=%.4f" % (epoch_loss, epoch_acc))

        # ---- validation phase (no gradients) ----
        model.eval()
        loss_record = []
        acc_record = []

        with torch.no_grad():
            for count, (inputs, labels) in enumerate(val_loader):
                inputs = inputs.cuda(non_blocking=True)
                labels = labels.cuda(non_blocking=True)

                outputs = model(inputs)
                preds = torch.argmax(outputs, 1)
                loss = criterion(outputs, labels)

                iter_loss = loss.item()
                loss_record.append(iter_loss)

                preds = torch.argmax(outputs.data, 1)
                iter_acc = torch.sum(preds == labels).item() / len(preds)
                acc_record.append(iter_acc)

                if count and count % 100 == 0:
                    print("V-Iter %d: loss=%.4f, acc=%.4f"
                          % (count, iter_loss, iter_acc))

        epoch_loss = np.mean(loss_record)
        epoch_acc = np.mean(acc_record)
        print("Validation: loss=%.4f, acc=%.4f" % (epoch_loss, epoch_acc))

        scheduler.step()

        # Checkpoint every epoch; keep a copy of the best-validation-accuracy
        # model under best.pth.
        ckpt_path = os.path.join(config.save_dir, "ckpt-%d.pth" % epoch)
        save_checkpoint(
            ckpt_path,
            model.state_dict(),
            epoch=epoch + 1,
            acc1=epoch_acc)

        if epoch_acc > best_acc:
            print("Best accuracy!")
            shutil.copy(ckpt_path,
                        os.path.join(config.save_dir, "best.pth"))
            best_acc = epoch_acc

        print()
if __name__ == "__main__":
main()
| 32.319249 | 80 | 0.534863 | 2,000 | 0.290529 | 0 | 0 | 0 | 0 | 0 | 0 | 469 | 0.068129 |
a66403d16de1416c241fb45625d1702808f5e9a1 | 381 | py | Python | katas/beta/identifying_top_users_and_their_corresponding_purchases.py | the-zebulan/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 40 | 2016-03-09T12:26:20.000Z | 2022-03-23T08:44:51.000Z | katas/beta/identifying_top_users_and_their_corresponding_purchases.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | null | null | null | katas/beta/identifying_top_users_and_their_corresponding_purchases.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 36 | 2016-11-07T19:59:58.000Z | 2022-03-31T11:18:27.000Z | from collections import Counter
from itertools import chain
def id_best_users(*args):
    """Identify users present in every batch and group them by total activity.

    Each positional argument is an iterable of user names (one per purchase).
    Returns ``[[count, [names...]], ...]`` sorted by count descending, with the
    names in each group sorted alphabetically.  Only users that appear in every
    batch are included.
    """
    best_users = set.intersection(*(set(a) for a in args))
    # Total appearances of each user across all batches.
    counts = Counter(chain(*args))
    grouped = {}
    # Fixed: dict.iteritems() is Python 2 only; .items() works on Python 3.
    for name, total in counts.items():
        if name in best_users:
            grouped.setdefault(total, []).append(name)
    return [[total, sorted(names)]
            for total, names in sorted(grouped.items(), reverse=True)]
| 29.307692 | 79 | 0.64042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a6645500bd9415fcb00c5c1063997449655e7a5b | 464 | py | Python | supriya/ugens/LFNoise1.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | [
"MIT"
] | null | null | null | supriya/ugens/LFNoise1.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | [
"MIT"
] | null | null | null | supriya/ugens/LFNoise1.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | [
"MIT"
] | null | null | null | import collections
from supriya import CalculationRate
from supriya.synthdefs import UGen
class LFNoise1(UGen):
    """
    A ramp noise generator.

    ::

        >>> supriya.ugens.LFNoise1.ar()
        LFNoise1.ar()

    """

    ### CLASS VARIABLES ###

    __documentation_section__ = "Noise UGens"

    # Single input: the noise update frequency, defaulting to 500.0.
    _ordered_input_names = collections.OrderedDict([("frequency", 500.0)])

    # Usable at both audio and control calculation rates.
    _valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
| 18.56 | 79 | 0.68319 | 370 | 0.797414 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.340517 |
a66552cce516414aec7adafc8e30867833d3cf13 | 859 | py | Python | Hackerrank/Max Array Sum/Max Array Sum.py | rahil-1407/Data-Structure-and-Algorithms | ea3eb9849aeb2716ef5812a0b5621a28120b1880 | [
"MIT"
] | 51 | 2021-01-14T04:05:55.000Z | 2022-01-25T11:25:37.000Z | Hackerrank/Max Array Sum/Max Array Sum.py | rahil-1407/Data-Structure-and-Algorithms | ea3eb9849aeb2716ef5812a0b5621a28120b1880 | [
"MIT"
] | 638 | 2020-12-27T18:49:53.000Z | 2021-11-21T05:22:52.000Z | Hackerrank/Max Array Sum/Max Array Sum.py | rahil-1407/Data-Structure-and-Algorithms | ea3eb9849aeb2716ef5812a0b5621a28120b1880 | [
"MIT"
] | 124 | 2021-01-30T06:40:20.000Z | 2021-11-21T15:14:40.000Z | """
Given an array of integers, find the subset of non-adjacent elements with the maximum sum.
Calculate the sum of that subset. It is possible that the maximum sum is , the case when all elements are negative.
"""
def maxSubsetSum(arr):
n = len(arr) # n = length of the array
dp = [0]*n # create a dp array of length n & initialize its values to 0
dp[0] = arr[0] # base
dp[1] = max(arr[1], dp[0])
for i in range(2,n):
# Because of the condition. No two adjacent elements can be picked.
# Therefore we can either take one and then skip one, or skip one and run the subroutine.
dp[i] = max(arr[i], dp[i-1], arr[i] + dp[i-2])
# in the dp, we store the max sum for the subarray up till the length of the subarray.
# Hence simply return the last item in this to get the answer
return dp[-1]
| 40.904762 | 115 | 0.649593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 612 | 0.712456 |
a6661363f224a1485632726708cd49bda0d4229c | 80 | py | Python | landlab/components/lake_fill/__init__.py | saraahsimon/landlab | 1cf809b685efbccaaa149b5899a600c3ccedf30f | [
"MIT"
] | null | null | null | landlab/components/lake_fill/__init__.py | saraahsimon/landlab | 1cf809b685efbccaaa149b5899a600c3ccedf30f | [
"MIT"
] | null | null | null | landlab/components/lake_fill/__init__.py | saraahsimon/landlab | 1cf809b685efbccaaa149b5899a600c3ccedf30f | [
"MIT"
] | null | null | null | from .lake_fill_barnes import LakeMapperBarnes
__all__ = ["LakeMapperBarnes"]
| 16 | 46 | 0.8125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.225 |
a66938ab29c637408c050d9373a90dc5de5acdf6 | 4,995 | py | Python | corehq/apps/hqadmin/management/commands/static_analysis.py | andyasne/commcare-hq | c59a24e57bdd4d2536493f9ecdcc9906f4ae1b88 | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/apps/hqadmin/management/commands/static_analysis.py | andyasne/commcare-hq | c59a24e57bdd4d2536493f9ecdcc9906f4ae1b88 | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/apps/hqadmin/management/commands/static_analysis.py | andyasne/commcare-hq | c59a24e57bdd4d2536493f9ecdcc9906f4ae1b88 | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | import os
import re
import subprocess
from collections import Counter
from django.conf import settings
from django.core.management.base import BaseCommand
import datadog
from dimagi.ext.couchdbkit import Document
from corehq.feature_previews import all_previews
from corehq.toggles import all_toggles
class DatadogLogger:
    """Echo metrics to stdout and, on Travis cron builds, buffer them for Datadog."""

    def __init__(self, stdout):
        self.stdout = stdout
        # Only ship metrics to Datadog from the daily Travis cron build.
        self.datadog = os.environ.get("TRAVIS_EVENT_TYPE") == 'cron'
        if self.datadog:
            api_key = os.environ.get("DATADOG_API_KEY")
            app_key = os.environ.get("DATADOG_APP_KEY")
            assert api_key and app_key, "DATADOG_API_KEY and DATADOG_APP_KEY must both be set"
            datadog.initialize(api_key=api_key, app_key=app_key)
        self.metrics = []

    def log(self, metric, value, tags=None):
        """Print the metric; queue it as a gauge point when Datadog is enabled."""
        self.stdout.write(f"{metric}: {value} {tags or ''}")
        if not self.datadog:
            return
        build_tags = [
            "environment:travis",
            f"travis_build:{os.environ.get('TRAVIS_BUILD_ID')}",
            f"travis_number:{os.environ.get('TRAVIS_BUILD_NUMBER')}",
            f"travis_job_number:{os.environ.get('TRAVIS_JOB_NUMBER')}",
        ]
        self.metrics.append({
            'metric': metric,
            'points': value,
            'type': "gauge",
            'host': "travis-ci.org",
            'tags': build_tags + (tags or []),
        })

    def send_all(self):
        """Flush the queued metrics to Datadog (no-op outside cron builds)."""
        if self.datadog:
            datadog.api.Metric.send(self.metrics)
            self.metrics = []
class Command(BaseCommand):
    help = ("Display a variety of code-quality metrics. This is run on every travis "
            "build, but only submitted to datadog during the daily cron job.")

    def handle(self, **options):
        """Run every metric collector, then flush the buffered metrics."""
        self.stdout.write("----------> Begin Static Analysis <----------")
        self.logger = DatadogLogger(self.stdout)
        self.show_couch_model_count()
        self.show_custom_modules()
        self.show_js_dependencies()
        self.show_toggles()
        self.show_complexity()
        self.logger.send_all()
        self.stdout.write("----------> End Static Analysis <----------")

    def show_couch_model_count(self):
        """Count every (transitively) registered subclass of the couchdbkit Document."""
        def all_subclasses(cls):
            # Recursive union of direct and indirect subclasses.
            return set(cls.__subclasses__()).union([
                s for c in cls.__subclasses__() for s in all_subclasses(c)
            ])
        model_count = len(all_subclasses(Document))
        self.logger.log("commcare.static_analysis.couch_model_count", model_count)

    def show_custom_modules(self):
        """Report counts of distinct custom modules and of custom domains."""
        custom_module_count = len(set(settings.DOMAIN_MODULE_MAP.values()))
        custom_domain_count = len(settings.DOMAIN_MODULE_MAP)
        self.logger.log("commcare.static_analysis.custom_module_count", custom_module_count)
        self.logger.log("commcare.static_analysis.custom_domain_count", custom_domain_count)

    def show_js_dependencies(self):
        """Report JS migration progress via the hqDefine.sh helper script.

        The script is expected to print three space-separated counts:
        unmigrated, hqDefine-only, and fully requirejs-migrated files.
        """
        proc = subprocess.Popen(["./scripts/codechecks/hqDefine.sh", "static-analysis"], stdout=subprocess.PIPE)
        output = proc.communicate()[0].strip().decode("utf-8")
        (step1, step2, step3) = output.split(" ")

        self.logger.log("commcare.static_analysis.hqdefine_file_count", int(step1), tags=[
            'status:unmigrated',
        ])
        self.logger.log("commcare.static_analysis.hqdefine_file_count", int(step2), tags=[
            'status:hqdefine_only',
        ])
        self.logger.log("commcare.static_analysis.requirejs_file_count", int(step3), tags=[
            'status:migrated',
        ])

    def show_toggles(self):
        """Report the number of feature toggles and previews, grouped by tag."""
        counts = Counter(t.tag.name for t in all_toggles() + all_previews())
        for tag, count in counts.items():
            self.logger.log("commcare.static_analysis.toggle_count", count, [f"toggle_tag:{tag}"])

    def show_complexity(self):
        """Report cyclomatic-complexity stats by parsing radon's text output."""
        # We can use `--json` for more granularity, but it doesn't provide a summary
        output = subprocess.run([
            "radon", "cc", ".",
            "--min=C",
            "--total-average",
            "--exclude=node_modules/*,staticfiles/*",
        ], stdout=subprocess.PIPE).stdout.decode('utf-8').strip()
        # Last two lines of radon output: the block total and the average grade.
        raw_blocks, raw_complexity = output.split('\n')[-2:]

        blocks_pattern = r'^(\d+) blocks \(classes, functions, methods\) analyzed.$'
        blocks = int(re.match(blocks_pattern, raw_blocks).group(1))
        self.logger.log("commcare.static_analysis.code_blocks", blocks)

        complexity_pattern = r'^Average complexity: A \(([\d.]+)\)$'
        complexity = round(float(re.match(complexity_pattern, raw_complexity).group(1)), 3)
        self.logger.log("commcare.static_analysis.avg_complexity", complexity)

        # Count the blocks in each poor complexity grade (C through F).
        for grade in ["C", "D", "E", "F"]:
            count = len(re.findall(f" - {grade}\n", output))
            self.logger.log(
                "commcare.static_analysis.complex_block_count",
                count,
                tags=[f"complexity_grade:{grade}"],
            )
| 40.282258 | 112 | 0.614815 | 4,684 | 0.937738 | 0 | 0 | 0 | 0 | 0 | 0 | 1,513 | 0.302903 |
a66e625e6d85f06cff78fbfdfb9830d9ca9c326c | 3,056 | py | Python | dsample.py | her/dsample | 4fdbda0c7f3334266589ade8de4503b656cd2cdd | [
"MIT"
] | 3 | 2018-04-26T22:53:00.000Z | 2019-04-04T12:20:43.000Z | dsample.py | her/dsample | 4fdbda0c7f3334266589ade8de4503b656cd2cdd | [
"MIT"
] | null | null | null | dsample.py | her/dsample | 4fdbda0c7f3334266589ade8de4503b656cd2cdd | [
"MIT"
] | null | null | null | import argparse
import cv2
class DSample:
    """Downsample an image with bicubic interpolation, optionally after a Gaussian blur.

    NOTE(review): each helper method below rebinds its own name as an instance
    attribute (e.g. ``self.image()`` stores the pixel array in ``self.image``),
    so every helper is callable exactly once per instance.  __init__ relies on
    calling them in a fixed order.
    """

    # File extensions readable by OpenCV (informational; not checked at runtime).
    SUPPORTED_FORMATS = (
        ".bmp",
        ".dib",
        ".jpeg",
        ".jpg",
        ".jpe",
        ".jp2",
        ".png",
        ".pbm",
        ".pgm",
        ".ppm",
        ".sr",
        ".ras",
        ".tif",
        ".tiff",
    )

    def __init__(self, *args_dict, **kwargs):
        # Copy every key of the supplied dicts / keyword arguments onto self
        # (the CLI passes vars(argparse_namespace) here).
        for dictionary in args_dict:
            for key in dictionary:
                setattr(self, key, dictionary[key])
        for key in kwargs:
            setattr(self, key, kwargs[key])

        # Pipeline: load -> (blur) -> record dimensions -> pick scale -> resize.
        self.image()
        if self.gaussian_kernel:
            self.blur()
        self.dimensions()
        self.scale_factor()
        self.sample()

    # DSample.image
    def image(self):
        """Read image data"""
        # Rebinds self.image from method to pixel array.
        self.image = cv2.imread(self.filename)

    # DSample.blur
    def blur(self):
        """Apply Gaussian Blur"""
        self.image = cv2.GaussianBlur(
            self.image,
            ksize=(0, 0),
            sigmaX=1,
            sigmaY=1
        )

    # DSample.dimensions
    def dimensions(self):
        """Set image dimensions"""
        # (width, height) -- cv2 arrays are indexed (rows, cols) = (h, w).
        self.dimensions = (self.image.shape[1], self.image.shape[0])

    # DSample.scale_factor
    def scale_factor(self):
        """Factor for downsample, 2x, 3x, 4x"""
        scale = {
            '2': (0.5, 0.5),
            '3': (0.33, 0.33),
            '4': (0.25, 0.25),
            '5': (0.2, 0.2),
            '6': (0.12, 0.12),
        }
        # None when no (or an unsupported) downsample factor was requested.
        self.scale_factor = scale.get(self.downsample, None)

    # DSample.sample
    def sample(self):
        """Downsample the image."""
        # (1, 1) keeps the original size when no scale factor was selected.
        fx, fy = self.scale_factor or (1, 1)
        self.d_sample = cv2.resize(
            self.image,
            (0, 0),
            fx=fx,
            fy=fy,
            interpolation=cv2.INTER_CUBIC
        )
        # p_sample: the downsampled image re-upscaled to the input dimensions.
        self.p_sample = cv2.resize(
            self.d_sample,
            self.dimensions,
            interpolation=cv2.INTER_CUBIC
        )
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='downsample image using bicubic interpolation',
)
parser.add_argument(
"filename",
help="input filename to downsample"
)
parser.add_argument(
"-o",
"--output",
help="output filename for downsampled image"
)
parser.add_argument(
"-d",
"--downsample",
metavar='n',
help="downsample by a factor of 2, 3, 4, 5, 6"
)
parser.add_argument(
"-g",
"--gaussian-kernel",
help="apply a gaussian kernel, effective in reducing gaussian noise",
action="store_true"
)
parser.add_argument(
"-s",
"--save-dimensions",
help="downsampled image dimensions are the same as input dimensions",
action="store_true"
)
args = parser.parse_args()
dsample = DSample(**vars(args))
if dsample.save_dimensions:
cv2.imwrite(dsample.output, dsample.p_sample)
else:
cv2.imwrite(dsample.output, dsample.d_sample)
| 22.977444 | 77 | 0.508181 | 1,943 | 0.635798 | 0 | 0 | 0 | 0 | 0 | 0 | 732 | 0.239529 |
a66eed49a3b39d2e0d734ff021df1302bfabe786 | 1,598 | py | Python | tests/test_times.py | Invarato/sort_in_disk_project | 8e725e683999aa6cf9db52711309b6a58099c3e2 | [
"MIT"
] | 3 | 2020-11-12T16:59:04.000Z | 2021-12-03T18:57:27.000Z | tests/test_times.py | Invarato/sort_in_disk_project | 8e725e683999aa6cf9db52711309b6a58099c3e2 | [
"MIT"
] | null | null | null | tests/test_times.py | Invarato/sort_in_disk_project | 8e725e683999aa6cf9db52711309b6a58099c3e2 | [
"MIT"
] | 1 | 2021-04-11T11:21:42.000Z | 2021-04-11T11:21:42.000Z | # -*- coding: utf-8 -*-
#
# @autor: Ramón Invarato Menéndez
# @version 1.0
from datetime import datetime
"""
Several tests
"""
count = 20000000
if __name__ == "__main__":
start = datetime.now()
print("[if] start: {}".format(start))
val = True
for _ in range(1, count):
if val:
v = "aaa|bbb".split("|")
else:
v = "ccc|ddd".split("|")
finish = datetime.now()
print("[if] finish: {} | diff finish-start: {}".format(finish, finish-start))
# ===============================================
start = datetime.now()
print("[function] start: {}".format(start))
def mi_func():
"aaa|bbb".split("|")
for _ in range(1, count):
mi_func()
finish = datetime.now()
print("[function] finish: {} | diff finish-start: {}".format(finish, finish-start))
# ===============================================
start = datetime.now()
print("[function arg] start: {}".format(start))
def mi_func(ar):
del ar
"aaa|bbb".split("|")
for _ in range(1, count):
mi_func("ccc")
finish = datetime.now()
print("[function arg] finish: {} | diff finish-start: {}".format(finish, finish-start))
# ===============================================
start = datetime.now()
print("[function return] start: {}".format(start))
def mi_func(ar):
return "aaa|bbb".split("|")
for _ in range(1, count):
mi_func("ccc")
finish = datetime.now()
print("[function return] finish: {} | diff finish-start: {}".format(finish, finish-start)) | 23.850746 | 94 | 0.506258 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 607 | 0.379375 |
a66ef729e29a7b137d77c4428bb5f454b35d79aa | 330 | py | Python | backend/src/controllers/util/time_util.py | tmdt-buw/gideon-ts | b839672fcc19f13562f6da23e6407fff0b18d3ec | [
"MIT"
] | null | null | null | backend/src/controllers/util/time_util.py | tmdt-buw/gideon-ts | b839672fcc19f13562f6da23e6407fff0b18d3ec | [
"MIT"
] | null | null | null | backend/src/controllers/util/time_util.py | tmdt-buw/gideon-ts | b839672fcc19f13562f6da23e6407fff0b18d3ec | [
"MIT"
] | null | null | null | import datetime
import datetime as dt
import pytz
def current_time():
    """Return the current local wall-clock time formatted as ``HH:MM:SS``."""
    return "{:%H:%M:%S}".format(dt.datetime.now())
def time_string_to_js_timestamp(time: datetime) -> int:
    """Convert a naive datetime, interpreted as UTC, to a JavaScript timestamp.

    JavaScript timestamps are milliseconds since the epoch, hence the * 1000.
    Uses the standard library's ``datetime.timezone.utc`` instead of the former
    pytz dependency; for UTC (no DST rules) the two attach identical tzinfo,
    so the result is unchanged.
    """
    return round(time.replace(tzinfo=dt.timezone.utc).timestamp() * 1000)
| 22 | 60 | 0.715152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.187879 |
a66ef816916b38aed54eb96c747c5ba0afcaba49 | 668 | py | Python | django_auto_model/tests/utils/test_get_now.py | dipasqualew/django-auto-model | 958063b2df99e95f043b06f6a6df02fe8ccfb5de | [
"MIT"
] | null | null | null | django_auto_model/tests/utils/test_get_now.py | dipasqualew/django-auto-model | 958063b2df99e95f043b06f6a6df02fe8ccfb5de | [
"MIT"
] | 23 | 2020-10-29T07:02:00.000Z | 2021-08-02T06:03:29.000Z | django_auto_model/tests/utils/test_get_now.py | rolafium/Django-AutoModel | 958063b2df99e95f043b06f6a6df02fe8ccfb5de | [
"MIT"
] | null | null | null | """
Tests for snakelize
module: django_auto_model.utils
"""
import datetime
from django_auto_model.utils import get_now
def test_is_datetime():
    """Should be a datetime instance"""
    assert isinstance(get_now(), datetime.datetime)
def test_value_is_close_to_now():
    """Should be close enough to the test execution time"""
    lower = datetime.datetime.now()
    value = get_now()
    upper = datetime.datetime.now()
    # The returned time must fall between the two surrounding now() calls.
    assert lower <= value <= upper
def test_objects_are_not_singleton():
    """Different calls to the function return different instances"""
    first, second = get_now(), get_now()
    assert first is not second
| 23.857143 | 68 | 0.703593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 213 | 0.318862 |
a66f14c5e01cc8010c14109afa24170d23c38ccd | 2,216 | py | Python | setup.py | cid-chan/vsutil | 513576abd4bb08605f1f91ad32780ba16c7f3fea | [
"MIT"
] | 25 | 2019-03-05T21:43:41.000Z | 2022-03-17T06:18:57.000Z | setup.py | cid-chan/vsutil | 513576abd4bb08605f1f91ad32780ba16c7f3fea | [
"MIT"
] | 42 | 2019-03-05T21:49:45.000Z | 2022-03-04T17:33:21.000Z | setup.py | cid-chan/vsutil | 513576abd4bb08605f1f91ad32780ba16c7f3fea | [
"MIT"
] | 18 | 2019-03-05T20:49:49.000Z | 2022-01-12T14:24:50.000Z | from setuptools import setup, find_packages
from setuptools.command.test import test
from distutils.util import convert_path
# We can't import the submodule normally as that would "run" the main module
# code while the setup script is meant to *build* the module.
# Besides preventing a whole possible mess of issues with an un-built package,
# this also prevents the vapoursynth import which breaks the docs on RTD.
# convert_path is used here because according to the distutils docs:
# '...filenames in the setup script are always supplied in Unix
# style, and have to be converted to the local convention before we can
# actually use them in the filesystem.'
meta = {}
exec(open(convert_path('vsutil/_metadata.py')).read(), meta)
class DiscoverTest(test):
    """`python setup.py test`: discover and run tests/test_*.py with unittest."""

    def finalize_options(self):
        test.finalize_options(self)
        # Bypass setuptools' own test collection; run_tests does its own discovery.
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Imported lazily so plain builds don't need the test machinery.
        import os
        import unittest
        path = os.path.join(os.path.dirname(__file__), "tests")
        runner = unittest.TextTestRunner(verbosity=2)
        suite = unittest.TestLoader().discover(path, pattern="test_*.py")
        runner.run(suite)
setup(
    name='vsutil',
    # Version/author metadata is exec'd from vsutil/_metadata.py above.
    version=meta['__version__'],
    packages=find_packages(exclude=['tests']),
    package_data={
        # Ship the PEP 561 marker so type checkers pick up inline annotations.
        'vsutil': ['py.typed']
    },
    url='https://encode.moe/vsutil',
    license='MIT',
    # __author__ has the form "Name <email>"; split it into its two parts.
    author=meta['__author__'].split()[0],
    author_email=meta['__author__'].split()[1][1:-1],
    description='A collection of general-purpose Vapoursynth functions to be reused in modules and scripts.',
    install_requires=[
        "vapoursynth"
    ],
    cmdclass={
        # Wire `setup.py test` to the unittest discovery command above.
        'test': DiscoverTest
    },
    python_requires='>=3.8',
    project_urls={
        'Documentation': 'http://vsutil.encode.moe/en/latest/',
        'Source': 'https://github.com/Irrational-Encoding-Wizardry/vsutil',
        'Tracker': 'https://github.com/Irrational-Encoding-Wizardry/vsutil/issues',
    },
    keywords='encoding vapoursynth video',
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Topic :: Multimedia :: Video",
        "Typing :: Typed",
    ],
)
| 33.074627 | 109 | 0.670126 | 439 | 0.198105 | 0 | 0 | 0 | 0 | 0 | 0 | 1,123 | 0.506769 |
a66f5311e18930dba9aee6d5a079e0eb27871fa5 | 482 | py | Python | duffel_api/api/booking/seat_maps.py | duffelhq/duffel-api-python | 583703f34e345ac8fa185ca26441f9168d1b0dac | [
"MIT"
] | 2 | 2022-02-26T22:14:48.000Z | 2022-03-10T10:04:11.000Z | duffel_api/api/booking/seat_maps.py | duffelhq/duffel-api-python | 583703f34e345ac8fa185ca26441f9168d1b0dac | [
"MIT"
] | 29 | 2022-01-04T12:40:54.000Z | 2022-03-31T23:26:54.000Z | duffel_api/api/booking/seat_maps.py | duffelhq/duffel-api-python | 583703f34e345ac8fa185ca26441f9168d1b0dac | [
"MIT"
] | null | null | null | from ...http_client import HttpClient
from ...models import SeatMap
class SeatMapClient(HttpClient):
    """Client to interact with Seat Maps"""

    def __init__(self, **kwargs):
        self._url = "/air/seat_maps"
        super().__init__(**kwargs)

    def get(self, offer_id):
        """GET /air/seat_maps"""
        response = self.do_get(self._url, query_params={"offer_id": offer_id})
        if response is None:
            return None
        return [SeatMap.from_json(entry) for entry in response["data"]]
| 28.352941 | 73 | 0.626556 | 411 | 0.852697 | 0 | 0 | 0 | 0 | 0 | 0 | 95 | 0.197095 |
a66f853bf9f33f87146ba3858b82466747a4ba7f | 133 | py | Python | nicos_mlz/refsans/setups/elements/alphai.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_mlz/refsans/setups/elements/alphai.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_mlz/refsans/setups/elements/alphai.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | description = 'Alphai alias device'
group = 'lowlevel'
devices = dict(
alphai = device('nicos.devices.generic.DeviceAlias'),
)
| 16.625 | 57 | 0.706767 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.496241 |
a66fb86f66ae53aa1b1feda298a87c0ab06d20d0 | 268 | py | Python | src/core/migrations/0055_merge_20190305_1616.py | metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3 | c754d3b1b401906a21640b8eacb6b724a448b31c | [
"MIT"
] | null | null | null | src/core/migrations/0055_merge_20190305_1616.py | metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3 | c754d3b1b401906a21640b8eacb6b724a448b31c | [
"MIT"
] | null | null | null | src/core/migrations/0055_merge_20190305_1616.py | metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3 | c754d3b1b401906a21640b8eacb6b724a448b31c | [
"MIT"
] | null | null | null | # Generated by Django 2.1.2 on 2019-03-05 16:16
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated merge migration: reconciles two parallel 0054 branches.
    # It intentionally contains no schema operations.

    dependencies = [
        ('core', '0054_auto_20190305_1613'),
        ('core', '0054_merge_20190304_0758'),
    ]

    operations = [
    ]
| 17.866667 | 47 | 0.641791 | 183 | 0.682836 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.410448 |
a671987b4aec698f200fd1e8f711ebabd9e8fdf6 | 359 | py | Python | afwf_fts_anything/__init__.py | MacHu-GWU/afwf_fts_anything-project | 7050f12f6df9688fd553a5673ab21e10fa571cf2 | [
"MIT"
] | 20 | 2019-01-03T22:31:41.000Z | 2021-10-14T11:32:29.000Z | afwf_fts_anything/__init__.py | MacHu-GWU/afwf_fts_anything-project | 7050f12f6df9688fd553a5673ab21e10fa571cf2 | [
"MIT"
] | 2 | 2019-01-02T21:36:40.000Z | 2020-08-23T18:03:54.000Z | afwf_fts_anything/__init__.py | MacHu-GWU/afwf_fts_anything-project | 7050f12f6df9688fd553a5673ab21e10fa571cf2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Full text search workflow for Alfred.
"""
from ._version import __version__
__short_description__ = "Full text search workflow for Alfred."
__license__ = "MIT"
__author__ = "Sanhe Hu"
__author_email__ = "husanhe@gmail.com"
__maintainer__ = "Sanhe Hu"
__maintainer_email__ = "husanhe@gmail.com"
__github_username__ = "MacHu-GWU"
| 22.4375 | 63 | 0.743733 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 181 | 0.504178 |
a6736259f691ee42e01b77cfe927dab8c82e2223 | 609 | py | Python | users/home_work.py | annadokuchaeva2002/python-home-bot | 662acd3e3998d5c58a034004e0eef9e782d57447 | [
"MIT"
] | null | null | null | users/home_work.py | annadokuchaeva2002/python-home-bot | 662acd3e3998d5c58a034004e0eef9e782d57447 | [
"MIT"
] | null | null | null | users/home_work.py | annadokuchaeva2002/python-home-bot | 662acd3e3998d5c58a034004e0eef9e782d57447 | [
"MIT"
] | null | null | null | from main import dp
from aiogram import types
from aiogram.dispatcher.filters.builtin import Text
@dp.message_handler(Text(equals="Все задания 🤩"))
async def vse_zadaniya(msg: types.Message):
await msg.answer(text="<b>Ваши задания:</b>\n\nскоро наполню")
@dp.message_handler(Text(equals="Добавить 📝"))
async def dobavit(msg: types.Message):
await msg.answer(text="прикрепите ваше задание")
@dp.message_handler(Text(equals="Скрыть клавиутуру 😤"))
async def dobavit(msg: types.Message):
await msg.answer(text="Клавиатура скрыта\nДля вызова /start", reply_markup=types.ReplyKeyboardRemove())
| 29 | 107 | 0.758621 | 0 | 0 | 0 | 0 | 612 | 0.848821 | 416 | 0.576976 | 262 | 0.363384 |
a674f43bddee54cbea1107c11705b1d7ee339b2f | 583 | py | Python | vcorelib/paths/context.py | vkottler/vcorelib | 97c3b92932d5b2f8c6d9cdca55f34bf167980a21 | [
"MIT"
] | 1 | 2022-03-31T09:26:04.000Z | 2022-03-31T09:26:04.000Z | vcorelib/paths/context.py | vkottler/vcorelib | 97c3b92932d5b2f8c6d9cdca55f34bf167980a21 | [
"MIT"
] | 2 | 2022-03-31T09:35:06.000Z | 2022-03-31T09:38:07.000Z | vcorelib/paths/context.py | vkottler/vcorelib | 97c3b92932d5b2f8c6d9cdca55f34bf167980a21 | [
"MIT"
] | null | null | null | """
A module for context managers related to file-system paths.
"""
# built-in
from contextlib import contextmanager
from os import chdir as _chdir
from pathlib import Path as _Path
from typing import Iterator as _Iterator
# internal
from vcorelib.paths import Pathlike as _Pathlike
from vcorelib.paths import normalize as _normalize
@contextmanager
def in_dir(path: _Pathlike) -> _Iterator[None]:
    """Change the current working directory as a context manager."""
    previous = _Path.cwd()
    try:
        _chdir(_normalize(path))
        yield
    finally:
        # Always restore the directory we started in.
        _chdir(previous)
| 22.423077 | 68 | 0.728988 | 0 | 0 | 228 | 0.391081 | 244 | 0.418525 | 0 | 0 | 151 | 0.259005 |
a675cde92791cd0863858fcb8d5b4afd657a5c32 | 467 | py | Python | tf.py | thuyduongtt/region_based_active_learning | b3653c31a44135b5680949790549799c83a5a18b | [
"MIT"
] | null | null | null | tf.py | thuyduongtt/region_based_active_learning | b3653c31a44135b5680949790549799c83a5a18b | [
"MIT"
] | null | null | null | tf.py | thuyduongtt/region_based_active_learning | b3653c31a44135b5680949790549799c83a5a18b | [
"MIT"
] | null | null | null | def test_tf():
import tensorflow as tf
from utils import list_devices
list_devices()
gpu_available = tf.test.is_gpu_available()
print('GPU available:', gpu_available)
with tf.Session(config=tf.ConfigProto(log_device_placement=True)).as_default() as sess:
print('Session has started!')
def test_pt():
    """Report whether PyTorch can see a CUDA device."""
    import torch
    cuda_ok = torch.cuda.is_available()
    print('GPU available:', cuda_ok)
if __name__ == '__main__':
test_tf()
# test_pt()
| 22.238095 | 91 | 0.683084 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.1606 |
a6763636b40ab2fbbbb4112dcfc5086b9d872c0c | 6,529 | py | Python | smartmirror/authorization.py | not4juu/SmartMirror | 61618bcfdbcc67d7703bb501a31e9e0d247ced90 | [
"MIT"
] | null | null | null | smartmirror/authorization.py | not4juu/SmartMirror | 61618bcfdbcc67d7703bb501a31e9e0d247ced90 | [
"MIT"
] | 6 | 2021-03-18T21:17:37.000Z | 2022-03-11T23:32:24.000Z | smartmirror/authorization.py | not4juu/SmartMirror | 61618bcfdbcc67d7703bb501a31e9e0d247ced90 | [
"MIT"
] | 1 | 2019-08-25T10:01:52.000Z | 2019-08-25T10:01:52.000Z | import cv2
import os
import sys
import pickle
import face_recognition
from threading import Thread
from smartmirror.Logger import Logger
PATH = os.path.dirname(os.path.realpath(__file__))
if sys.platform != 'linux':
PATH = PATH.replace("\\", '/')
"""
Authorization Class
- authorization is based on face recognition method
- two options available :
1. opencv face lib
2. face_recognition (dlib)
- name_id is collected by a folder name where person images are located
"""
class Authorization:
    def __init__(self, camera, callback):
        """Set up face-recognition state around an opened cv2 capture device.

        :param camera: a ``cv2.VideoCapture``-like object (``.read()``/``.get()``).
        :param callback: stored as the authorized-user callback -- its exact
            call signature is not visible here; TODO confirm against callers.
        """
        self.camera = camera
        self.callback_authorized_user = callback
        self.thread_running = False
        self.authorization_process_running = False
        self.debug = False
        self.font = cv2.FONT_HERSHEY_SIMPLEX
        # Per-name detection bookkeeping (filled by add_detected_face, not shown here).
        self.detected = {}
        # Presumably the number of positive frames required before a user counts
        # as recognized -- confirm against add_detected_face().
        self.samples_confidence = 20
        # Minimum detectable face size: 10% of frame width (prop 3) / height (prop 4).
        self.min_width = 0.1 * self.camera.get(3)
        self.min_height = 0.1 * self.camera.get(4)
        try:
            self.face_cascade = cv2.CascadeClassifier(PATH + '/../cascades/haarcascade_frontal_face_default.xml')
        except Exception as exception:
            # Without the cascade file the recognizer cannot run; report and bail.
            print("Face Cascade Classifier reading file problem: {0}".format(exception))
            return
def run_opencv_face_recognition(self):
folders_name = [f for f in os.listdir(PATH + '/../dataset')]
tmp = 0
faces_dic = {}
for name_id in folders_name:
if name_id not in faces_dic.values():
faces_dic[tmp] = name_id
tmp += 1
recognizer = cv2.face.LBPHFaceRecognizer_create() # https://docs.opencv.org/3.4/d4/d48/namespacecv_1_1face.html
recognizer.read(PATH + '/../trained_data/trainer.yml')
while self.thread_running and self.authorization_process_running:
response, image = self.camera.read()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
detected_face_square = self.face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5,
minSize=(int(self.min_width), int(self.min_height)))
for (x, y, width, height) in detected_face_square:
cv2.rectangle(image, (x, y), (x + width, y + height), (0, 255, 0), 2)
name_id, confidence = recognizer.predict(gray[y:y + height, x:x + width])
recognition_name = "unknown"
if(confidence < 100):
recognition_name = faces_dic[name_id]
confidence = " {0}%".format(round(100 - confidence))
self.add_detected_face(str(recognition_name))
else:
confidence = " {0}%".format(round(100 - confidence))
cv2.putText(image, str(recognition_name), (x + 5, y - 5), self.font, 1, (255, 255, 255), 1)
cv2.putText(image, str(confidence), (x + 5, y + height - 5), self.font, 1, (255, 255, 255), 1)
if self.debug:
cv2.imshow('Authorization detected', image)
cv2.waitKey(10)
def run_dlib_face_recognition(self):
data = pickle.loads(open(PATH + "/../trained_data/encodings.pickle", "rb").read())
while self.thread_running and self.authorization_process_running:
response, image = self.camera.read()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
detected_face_square = self.face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5,
minSize=(int(self.min_width), int(self.min_height)))
boxes = [(y, x + width, y + height, x) for (x, y, width, height) in detected_face_square]
encodings = face_recognition.face_encodings(rgb, boxes)
names = []
recognition_name = "Unknown"
for encoding in encodings:
matches = face_recognition.compare_faces(data["encodings"], encoding)
if True in matches:
matched_index = [i for (i, b) in enumerate(matches) if b]
counts = {}
for i in matched_index:
recognition_name = data["names"][i]
counts[recognition_name] = counts.get(recognition_name, 0) + 1
recognition_name = max(counts, key=counts.get)
names.append(recognition_name)
for ((top, right, bottom, left), name) in zip(boxes, names):
cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
y = top - 15 if top - 15 > 15 else top + 15
cv2.putText(image, name, (left, y), self.font, 1, (255, 255, 255), 1)
if recognition_name != "Unknown":
self.add_detected_face(recognition_name)
if self.debug:
cv2.imshow('Authorization detected', image)
cv2.waitKey(10)
def add_detected_face(self, name):
Logger.debug("Detected {0}".format(name))
if name in self.detected:
self.detected[name] += 1
else:
self.detected[name] = 1
self.recognition_confidence()
def recognition_confidence(self):
Logger.debug("Authorization confidence {0}".format(self.detected))
if self.samples_confidence in self.detected.values():
Logger.debug("Authorization confidence {0}".format(self.samples_confidence))
self.authorization_process_running = False
for name, confidence in self.detected.items():
if self.samples_confidence == confidence:
self.callback_authorized_user(name)
def run(self, method='opencv_face_recognition', debug=False):
Logger.debug("Start authorization thread: {0}".format(method))
self.thread_running = True
self.authorization_process_running = True
self.debug = debug
if method is 'opencv_face_recognition':
target = self.run_opencv_face_recognition
if method is 'dlib_face_recognition':
target = self.run_dlib_face_recognition
listener_thread = Thread(target=target)
listener_thread.daemon = True
listener_thread.start()
def stop(self):
Logger.debug("Stop authorization thread")
self.thread_running = False
# Module is meant to be imported; nothing to do when run directly.
if __name__ == "__main__":
    pass
| 38.405882 | 122 | 0.593506 | 5,978 | 0.915607 | 0 | 0 | 0 | 0 | 0 | 0 | 841 | 0.12881 |
a676ce23d30440ea1040f54ebcc3301013112767 | 7,174 | py | Python | xlsx2html/core.py | waldobeest/xlsx2html | c3bbeade832e23ebed196ca59745f999931d4176 | [
"MIT"
] | null | null | null | xlsx2html/core.py | waldobeest/xlsx2html | c3bbeade832e23ebed196ca59745f999931d4176 | [
"MIT"
] | null | null | null | xlsx2html/core.py | waldobeest/xlsx2html | c3bbeade832e23ebed196ca59745f999931d4176 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import openpyxl
import six
from openpyxl.styles.colors import COLOR_INDEX, aRGB_REGEX
from xlsx2html.format import format_cell
DEFAULT_BORDER_STYLE = {
'style': 'solid',
'width': '1px',
}
BORDER_STYLES = {
'dashDot': None,
'dashDotDot': None,
'dashed': {
'style': 'dashed',
},
'dotted': {
'style': 'dotted',
},
'double': {
'style': 'double',
},
'hair': None,
'medium': {
'style': 'solid',
'width': '2px',
},
'mediumDashDot': {
'style': 'solid',
'width': '2px',
},
'mediumDashDotDot': {
'style': 'solid',
'width': '2px',
},
'mediumDashed': {
'width': '2px',
'style': 'dashed',
},
'slantDashDot': None,
'thick': {
'style': 'solid',
'width': '1px',
},
'thin': {
'style': 'solid',
'width': '1px',
},
}
def render_attrs(attrs):
    """Serialize *attrs* to an ``attr=value`` string, sorted by attribute name."""
    pairs = sorted(attrs.items(), key=lambda item: item[0])
    return ' '.join('%s=%s' % pair for pair in pairs)
def render_inline_styles(styles):
    """Serialize *styles* to ``name: value`` CSS pairs, skipping None values."""
    ordered = sorted(styles.items(), key=lambda item: item[0])
    return ';'.join('%s: %s' % pair for pair in ordered if pair[1] is not None)
def normalize_color(color):
    """Convert an openpyxl color object to a ``#RRGGBB`` CSS string.

    Handles 'rgb' and 'indexed' color types; returns None for anything
    else (e.g. theme colors, which leave ``rgb`` unset).
    """
    # TODO RGBA
    rgb = None
    if color.type == 'rgb':
        rgb = color.rgb
    if color.type == 'indexed':
        rgb = COLOR_INDEX[color.indexed]
        if not aRGB_REGEX.match(rgb):
            # TODO system fg or bg
            rgb = '00000000'
    if rgb:
        # Drop the leading alpha byte of the aRGB value.
        return '#' + rgb[2:]
    return None
def get_border_style_from_cell(cell):
    """Map the cell's openpyxl border definition to CSS border properties."""
    h_styles = {}
    for b_dir in ['right', 'left', 'top', 'bottom']:
        b_s = getattr(cell.border, b_dir)
        if not b_s:
            continue
        border_style = BORDER_STYLES.get(b_s.style)
        if border_style is None and b_s.style:
            # Style is set but has no CSS mapping: fall back to plain solid.
            border_style = DEFAULT_BORDER_STYLE
        if not border_style:
            continue
        for k, v in border_style.items():
            h_styles['border-%s-%s' % (b_dir, k)] = v
        if b_s.color:
            h_styles['border-%s-color' % (b_dir)] = normalize_color(b_s.color)
    return h_styles
def get_styles_from_cell(cell, merged_cell_map=None):
    """Collect CSS declarations for *cell*: borders, alignment, fill, font.

    :param merged_cell_map: entry from worksheet_to_data's merge map; the
        border styles of all merged member cells are folded in.
    """
    merged_cell_map = merged_cell_map or {}
    h_styles = {
        'border-collapse': 'collapse'
    }
    b_styles = get_border_style_from_cell(cell)
    if merged_cell_map:
        # TODO edged_cells
        for m_cell in merged_cell_map['cells']:
            b_styles.update(get_border_style_from_cell(m_cell))
    # Explicitly disable unset borders so table defaults do not leak through.
    for b_dir in ['border-right-style', 'border-left-style', 'border-top-style', 'border-bottom-style']:
        if b_dir not in b_styles:
            b_styles[b_dir] = 'none'
    h_styles.update(b_styles)
    if cell.alignment.horizontal:
        h_styles['text-align'] = cell.alignment.horizontal
    if cell.fill.patternType == 'solid':
        # TODO patternType != 'solid'
        h_styles['background-color'] = normalize_color(cell.fill.fgColor)
    if cell.font:
        h_styles['font-size'] = "%spx" % cell.font.sz
        if cell.font.color:
            h_styles['color'] = normalize_color(cell.font.color)
        if cell.font.b:
            h_styles['font-weight'] = 'bold'
        if cell.font.i:
            h_styles['font-style'] = 'italic'
        if cell.font.u:
            h_styles['font-decoration'] = 'underline'
    return h_styles
def worksheet_to_data(ws, locale=None):
    """Convert worksheet *ws* into the intermediate structure consumed by
    render_table: ``{'rows': [[cell_dict, ...], ...], 'cols': [...]}``.

    Handles merged ranges (emitted as colspan/rowspan on the top-left
    anchor cell), hidden rows/columns, and custom widths/heights.
    """
    merged_cell_map = {}
    # Coordinates covered by a merge, minus each range's top-left anchor,
    # are skipped when emitting rows.
    exclded_cells = set(ws.merged_cells)
    for cell_range in ws.merged_cell_ranges:
        cell_range_list = list(ws[cell_range])
        m_cell = cell_range_list[0][0]
        merged_cell_map[m_cell.coordinate] = {
            'attrs': {
                'colspan': len(cell_range_list[0]),
                'rowspan': len(cell_range_list),
            },
            'cells': [c for rows in cell_range_list for c in rows],
        }
        exclded_cells.remove(m_cell.coordinate)
    max_col_number = 0
    data_list = []
    for row_i, row in enumerate(ws.iter_rows()):
        data_row = []
        data_list.append(data_row)
        for col_i, cell in enumerate(row):
            # NOTE(review): assumes cell.column stringifies to the key used
            # by column_dimensions (a letter in older openpyxl; newer
            # versions return an int here) — confirm the pinned version.
            col_dim = ws.column_dimensions[str(cell.column)]
            row_dim = ws.row_dimensions[str(cell.row)]
            # Default column width (inches) when no custom width is set.
            width = 0.89
            if col_dim.customWidth:
                width = round(col_dim.width / 10., 2)
            col_width = 96 * width  # inches -> pixels at 96 dpi
            if cell.coordinate in exclded_cells or row_dim.hidden or col_dim.hidden:
                continue
            if col_i > max_col_number:
                max_col_number = col_i
            height = 19
            if row_dim.customHeight:
                height = round(row_dim.height, 2)
            cell_data = {
                'value': cell.value,
                'formatted_value': format_cell(cell, locale=locale),
                'attrs': {},
                'col-width': col_width,
                'style': {
                    "width": "{}in".format(width),
                    "height": "{}px".format(height),
                },
            }
            merged_cell_info = merged_cell_map.get(cell.coordinate, {})
            if merged_cell_info:
                cell_data['attrs'].update(merged_cell_info['attrs'])
            cell_data['style'].update(get_styles_from_cell(cell, merged_cell_info))
            data_row.append(cell_data)
    col_list = []
    max_col_number += 1
    column_dimensions = sorted(ws.column_dimensions.items(), key=lambda d: d[0])
    for col_i, col_dim in column_dimensions:
        if not all([col_dim.min, col_dim.max]):
            continue
        width = 0.89
        if col_dim.customWidth:
            width = round(col_dim.width / 10., 2)
        col_width = 96 * width
        # Emit one <col> entry per column in the dimension's span, capped
        # at the number of columns actually seen in the rows.
        for i in six.moves.range((col_dim.max - col_dim.min) + 1):
            max_col_number -= 1
            col_list.append({"col-width": col_width})
            if max_col_number < 0:
                break
    return {'rows': data_list, 'cols': col_list}
def render_table(data):
    """Render the ``worksheet_to_data`` structure as an HTML <table> string."""
    pieces = [
        '<table '
        'style="border-collapse: collapse" '
        'border="0" '
        'cellspacing="0" '
        'cellpadding="0">'
        '<colgroup>'
    ]
    pieces.extend('<col width="%s">' % int(col['col-width']) for col in data['cols'])
    pieces.append('</colgroup>')
    for row in data['rows']:
        cells = ['<tr>']
        for cell in row:
            cells.append('<td {attrs_str} style="{styles_str}">{formatted_value}</td>'.format(
                attrs_str=render_attrs(cell['attrs']),
                styles_str=render_inline_styles(cell['style']),
                **cell))
        cells.append('</tr>')
        pieces.append('\n'.join(cells))
    pieces.append('</table>')
    return '\n'.join(pieces)
def render_data_to_html(data):
    """Wrap the rendered table in a minimal standalone HTML document."""
    page_template = '''
    <!DOCTYPE html>
    <html lang="en">
    <head>
        <meta charset="UTF-8">
        <title>Title</title>
    </head>
    <body>
    %s
    </body>
    </html>
    '''
    return page_template % render_table(data)
def xlsx2html(ws):
    """Convert an openpyxl worksheet to a standalone HTML string."""
    return render_data_to_html(worksheet_to_data(ws))
| 27.071698 | 107 | 0.550042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,415 | 0.19724 |
a677a9d8c7dd81264820a936343f06e0dd8ec3ac | 359 | py | Python | bin/python/filterfasta.py | reid-wagner/proteomics-pipelines | 2214c2ad4c14fabcb50a3c0800e9d383ce73df3d | [
"MIT"
] | 2 | 2018-09-06T14:05:59.000Z | 2022-02-18T10:09:06.000Z | bin/python/filterfasta.py | reid-wagner/proteomics-pipelines | 2214c2ad4c14fabcb50a3c0800e9d383ce73df3d | [
"MIT"
] | 7 | 2018-09-30T00:49:04.000Z | 2022-01-27T07:55:26.000Z | bin/python/filterfasta.py | reid-wagner/proteomics-pipelines | 2214c2ad4c14fabcb50a3c0800e9d383ce73df3d | [
"MIT"
] | 3 | 2019-10-29T12:20:45.000Z | 2021-10-06T14:38:43.000Z | #!/usr/bin/env python
import Bio
from Bio import SeqIO
import sys
filt = []
seqs = list(SeqIO.parse(sys.argv[1],'fasta'))
minlen = int(sys.argv[2])
maxlen = int(sys.argv[3])
output = sys.argv[4]
for rec in seqs:
s = str(rec.seq)
l = len(s)
if ((l >= minlen) and (l <= maxlen)):
filt.append(rec)
SeqIO.write(filt, output, 'fasta')
| 14.36 | 45 | 0.607242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.097493 |
a678784dd44b43ad9384b6e430dcd31a811cb096 | 4,898 | py | Python | src/krylov/lsqr.py | nschloe/krylov | 58813233ff732111aa56f7b1d71908fda78080be | [
"MIT"
] | 36 | 2020-06-17T15:51:16.000Z | 2021-12-30T04:33:11.000Z | src/krylov/lsqr.py | nschloe/krylov | 58813233ff732111aa56f7b1d71908fda78080be | [
"MIT"
] | 26 | 2020-08-27T17:38:15.000Z | 2021-11-11T20:00:07.000Z | src/krylov/lsqr.py | nschloe/krylov | 58813233ff732111aa56f7b1d71908fda78080be | [
"MIT"
] | 5 | 2021-05-20T19:47:44.000Z | 2022-01-03T00:20:33.000Z | """
Christopher C. Paige, Michael A. Saunders,
LSQR: An Algorithm for Sparse Linear Equations and Sparse Least Squares,
ACM Transactions on Mathematical Software,
Volume 8, Issue 1, March 1982, pp 43-71,
<https://doi.org/10.1145/355984.355989>.
<https://web.stanford.edu/group/SOL/software/lsqr/>
<https://petsc.org/release/src/ksp/ksp/impls/lsqr/lsqr.c.html#KSPLSQR>
"""
from __future__ import annotations
from typing import Callable
import numpy as np
from numpy.typing import ArrayLike
from scipy.linalg import lapack
from ._helpers import (
Info,
LinearOperator,
asrlinearoperator,
clip_imag,
get_default_inner,
)
def lsqr(
    A: LinearOperator,
    b: ArrayLike,
    damp: float = 0.0,
    x0: ArrayLike | None = None,
    tol: float = 1e-5,
    atol: float = 1.0e-15,
    maxiter: int | None = None,
    callback: Callable[[int, np.ndarray, list[np.ndarray], list[np.ndarray]], None]
    | None = None,
    tol_inner_real: float = 1.0e-15,
):
    """Solve min ||A x - b||^2 + damp^2 ||x - x0||^2 with LSQR
    (Paige & Saunders, 1982) via Golub-Kahan bidiagonalization.

    Returns ``(x, info)`` where ``x`` is the solution, or None if the
    normal-equation residual did not drop below ``max(tol * ||r0||, atol)``
    within *maxiter* steps; ``info`` collects the residual histories and
    running estimates of ||A||_F, cond(A) and ||x - x0||.
    """
    def _norm(y):
        # Norm induced by the inner product; tiny imaginary parts clipped.
        return np.sqrt(clip_imag(_inner(y, y), tol_inner_real))
    A = asrlinearoperator(A)
    b = np.asarray(b)
    assert len(A.shape) == 2
    assert A.shape[0] == b.shape[0]
    N = A.shape[0]
    assert damp >= 0.0
    _inner = get_default_inner(b.shape)
    maxiter = N if maxiter is None else maxiter
    # get initial residual
    if x0 is None:
        x_shape = (A.shape[1], *b.shape[1:])
        x = np.zeros(x_shape, dtype=b.dtype)
        u = np.copy(b)
    else:
        x = np.copy(x0)
        assert x.shape[0] == A.shape[1], f"A.shape = {A.shape}, but x.shape = {x.shape}"
        u = b - A @ x
    beta = _norm(u)
    resnorms = [beta]
    u /= beta
    v = A.rmatvec(u)
    alpha = _norm(v)
    v /= alpha
    w = v.copy()
    # anorm = Frobenius norm of A
    anorm = alpha
    anorm2 = alpha ** 2
    acond = None
    arnorm = alpha * beta
    phi_ = beta
    rho_ = alpha
    c = 1.0
    s2 = 0.0
    c2 = -1.0
    z = 0.0
    xnorm = 0.0
    xxnorm = 0.0
    nresnorms = [phi_ * alpha]
    ddnorm = 0.0
    res2 = 0.0
    if callback is not None:
        callback(0, x, resnorms, nresnorms)
    # for the givens rotations
    lartg = lapack.get_lapack_funcs("lartg", (rho_, beta))
    # iterate
    k = 0
    success = False
    criterion = np.maximum(tol * resnorms[0], atol)
    while True:
        if np.all(nresnorms[-1] <= criterion):
            # Cheap estimate passed; confirm convergence with the explicitly
            # computed normal-equation residual before declaring success.
            r = b - A @ x
            xx0 = x if x0 is None else x - x0
            nresnorms[-1] = _norm(A.rmatvec(r) - (damp ** 2) * xx0)
            if np.all(nresnorms[-1] <= criterion):
                success = True
                break
        if k == maxiter:
            break
        # continue bidiagonalization
        u *= -alpha
        u += A @ v
        beta = _norm(u)
        u /= beta
        v *= -beta
        v += A.rmatvec(u)
        alpha = _norm(v)
        v /= alpha
        if damp == 0.0:
            # c1, s1, rho1_ = lartg(rho_, 0.0)
            rho1_ = rho_
            psi = 0.0
        else:
            c1, s1, rho1_ = lartg(rho_, damp)
            psi = s1 * phi_
            phi_ *= c1
        # rho = np.sqrt(rho_ ** 2 + beta ** 2); c = rho_ / rho; s = beta / rho
        c, s, rho = lartg(rho1_, beta)
        theta = s * alpha
        rho_ = -c * alpha
        phi = c * phi_
        phi_ *= s
        tau = s * phi
        dk = w / rho
        ddnorm += clip_imag(_inner(dk, dk))
        # update x, w
        x += (phi / rho) * w
        w *= -theta / rho
        w += v
        # estimate <x, x>
        delta = s2 * rho
        gamma_ = -c2 * rho
        rhs = phi - delta * z
        z_ = rhs / gamma_
        # xnorm is an approximation of ||x - x0||
        xnorm = np.sqrt(xxnorm + z_ ** 2)
        c2, s2, gamma = lartg(gamma_, theta)
        z = rhs / gamma
        xxnorm += z ** 2
        # approximation of the Frobenius-norm of A
        # this uses the old alpha
        anorm2 += beta ** 2 + damp ** 2
        anorm = np.sqrt(anorm2)
        anorm2 += alpha ** 2
        # estimate cond(A)
        acond = anorm * np.sqrt(ddnorm)
        res1 = phi_ ** 2
        res2 += psi ** 2
        # approximation of sqrt(||b - A @ x|| ** 2 + damp ** 2 * ||x - x0|| ** 2)
        resnorm = np.sqrt(res1 + res2)
        # approximation of ||A.H @ (b - A @ x) - damp ** 2 * (x - x0)||
        arnorm = alpha * np.abs(tau)
        resnorms.append(resnorm)
        nresnorms.append(arnorm)
        # The callback can override [n]resnorm with explicit values with, e.g.,
        # resnorms[-1] = 3.14
        if callback is not None:
            callback(k + 1, x, resnorms, nresnorms)
        k += 1
    return x if success else None, Info(
        success=success,
        xk=x,
        numsteps=k,
        resnorms=np.array(resnorms),
        nresnorms=np.array(nresnorms),
        acond=acond,
        anorm=anorm,
        xnorm=xnorm,
    )
| 24.737374 | 88 | 0.521233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,040 | 0.212332 |
a67b799a74d0b1d933f25d6547dc5d5f94732f54 | 3,746 | py | Python | 2020/Day 24/main.py | edgorman/advent-of-code | 374fee72dd853c60851231a672498ab2732889cf | [
"MIT"
] | null | null | null | 2020/Day 24/main.py | edgorman/advent-of-code | 374fee72dd853c60851231a672498ab2732889cf | [
"MIT"
] | null | null | null | 2020/Day 24/main.py | edgorman/advent-of-code | 374fee72dd853c60851231a672498ab2732889cf | [
"MIT"
] | null | null | null | import os
import copy
# Unit moves on the hex grid in cube coordinates (x, y, z) with x + y + z == 0.
_DIRECTIONS = {
    'ne': (1, 0, -1),
    'e': (1, -1, 0),
    'se': (0, -1, 1),
    'sw': (-1, 0, 1),
    'w': (-1, 1, 0),
    'nw': (0, 1, -1),
}


def part_one(flip_tiles):
    """Flip the tiles addressed by hex-direction paths; return black tiles.

    Each element of *flip_tiles* is a list of directions
    ('e', 'w', 'ne', 'nw', 'se', 'sw') walked from the reference tile to
    the tile to flip; flipping an already-black tile turns it white again.

    Returns the list of black tile cube coordinates, in the order they were
    (last) flipped to black — same ordering as the original remove/append
    list logic, but with O(1) membership tests instead of O(n) list scans,
    and without rebuilding the direction-lambda dict on every step.
    """
    # Dict used as an insertion-ordered set; deleting and later re-inserting
    # a key reproduces the original list remove()+append() ordering exactly.
    flipped = {}
    for tile in flip_tiles:
        x = y = z = 0
        for direction in tile:
            dx, dy, dz = _DIRECTIONS[direction]
            x, y, z = x + dx, y + dy, z + dz
        position = (x, y, z)
        if position in flipped:
            # Flip back to white.
            del flipped[position]
        else:
            # Flip to black.
            flipped[position] = True
    return list(flipped)
def part_two_helper(tile):
    """Return the six hex neighbours of *tile* in ne, e, se, sw, w, nw order."""
    x, y, z = tile
    offsets = (
        (1, 0, -1),   # ne
        (1, -1, 0),   # e
        (0, -1, 1),   # se
        (-1, 0, 1),   # sw
        (-1, 1, 0),   # w
        (0, 1, -1),   # nw
    )
    return [(x + dx, y + dy, z + dz) for dx, dy, dz in offsets]
def part_two(flip_tiles):
    """Run 100 days of the tile cellular automaton; return black tiles.

    Day rules: a black tile with zero or more than two black neighbours
    flips white; a white tile with exactly two black neighbours flips black.
    """
    black_tiles = part_one(flip_tiles)
    # Iterate n times
    # NOTE(review): progress print left in; membership tests against a list
    # make each day O(n^2) — a set of coordinates would be much faster.
    for x in range(100):
        print(x, len(black_tiles))
        new_black_tiles = copy.deepcopy(black_tiles)
        # Generate list of white tiles
        white_tiles = set()
        for tile in black_tiles:
            for surround in part_two_helper(tile):
                if surround not in black_tiles:
                    white_tiles.add(surround)
        # Calculate new white tiles
        for tile in black_tiles:
            black_count = 0
            for surround in part_two_helper(tile):
                if surround in black_tiles:
                    black_count = black_count + 1
            if black_count == 0 or black_count > 2:
                new_black_tiles.remove(tile)
        # Calculate new black tiles
        for tile in white_tiles:
            black_count = 0
            for surround in part_two_helper(tile):
                if surround in black_tiles:
                    black_count = black_count + 1
            if black_count == 2:
                new_black_tiles.append(tile)
        # Set black tiles to the new list
        black_tiles = new_black_tiles
    return black_tiles
if __name__ == "__main__":
    # Get input from txt file
    # NOTE(review): Windows-style path separators; adjust for other OSes.
    with open(os.getcwd() + '\\2020\\Day 24\\input.txt', 'r') as file_obj:
        file_input = file_obj.readlines()
    # Clean input
    # Tokenize each line into the six hex directions (e, w, ne, nw, se, sw):
    # 'e'/'w' are single characters, 'n'/'s' prefixes consume two.
    entries = []
    for entry in file_input:
        index = 0
        directions = []
        while index < len(entry.rstrip()):
            if entry.rstrip()[index] == 'e':
                directions.append('e')
                index = index + 1
            elif entry.rstrip()[index] == 'w':
                directions.append('w')
                index = index + 1
            elif entry.rstrip()[index] == 'n':
                if entry.rstrip()[index + 1] == 'e':
                    directions.append('ne')
                    index = index + 2
                else:
                    directions.append('nw')
                    index = index + 2
            elif entry.rstrip()[index] == 's':
                if entry.rstrip()[index + 1] == 'e':
                    directions.append('se')
                    index = index + 2
                else:
                    directions.append('sw')
                    index = index + 2
        entries.append(directions)
    # Part one
    print(len(part_one(entries)))
    # Part two
    print(len(part_two(entries)))
| 31.478992 | 74 | 0.485585 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 424 | 0.113187 |
a67bd7080832832bfead9a2d81306ef2cac33ab1 | 2,760 | py | Python | zabbix_alarm_script/all.py | ansemz/zabbix-wechat | 870f3676e9ce569eae01656653669b244ef2b180 | [
"Apache-2.0"
] | 81 | 2017-03-19T13:54:44.000Z | 2022-01-13T08:36:44.000Z | zabbix_alarm_script/all.py | tony163/zabbixwechat | d0d187e490ebb2b563a417c450db4fe21e7817ea | [
"Apache-2.0"
] | 2 | 2017-04-12T09:33:07.000Z | 2019-04-24T11:20:54.000Z | zabbix_alarm_script/all.py | tony163/zabbixwechat | d0d187e490ebb2b563a417c450db4fe21e7817ea | [
"Apache-2.0"
] | 42 | 2017-03-19T14:00:39.000Z | 2021-12-26T04:52:38.000Z | #!/usr/bin/python3.4
# coding: utf-8
import urllib.request
import cgi
import cgitb
import sys
import json
import time
import os
import shutil
import logging
import os
import codecs
import re
import logging
import json
import base64
import http.cookiejar
import urllib.request
from urllib import request, parse
from Crypto.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5
from Crypto.Signature import PKCS1_v1_5 as Signature_pkcs1_v1_5
from Crypto.PublicKey import RSA
zabbix_server_charturl = "http://127.0.0.1/zabbix/chart.php"
ttserver = "http://www.example.com:1978"
server_url = "http://www.example.com/getvalue"
logging.basicConfig(
filename='/var/log/zabbix/wechat.log',
format='%(levelname)s:%(asctime)s:%(message)s',
level=logging.INFO
)
def savepic(itemid, eventid):
    """Fetch the last-hour Zabbix chart for *itemid* and store it remotely.

    Reuses the session cookie from /tmp/zabbix_cookie to request the chart
    PNG, then PUTs the image to the ttserver store under ``<eventid>.png``.
    """
    cookie = http.cookiejar.MozillaCookieJar()
    cookie.load('/tmp/zabbix_cookie', ignore_discard=True, ignore_expires=True)
    data = {
        "itemids": '{0}'.format(itemid),
        "period": "3600",
        "stime": "0",
        "width": "800"}
    data = parse.urlencode(data).encode('utf-8')
    req = urllib.request.Request(zabbix_server_charturl, data=data)
    opener = urllib.request.build_opener(
        urllib.request.HTTPCookieProcessor(cookie))
    response = opener.open(req)
    req = urllib.request.Request(
        url='{1}/{0}.png'.format(eventid, ttserver), data=response.read(), method='PUT')
    urllib.request.urlopen(req)
# Alert payload arrives from Zabbix as a single "@@@"-separated argv string.
logging.info(sys.argv[1])
message = sys.argv[1]
alarm_title = message.split("@@@")[0]
triggerdescription = message.split("@@@")[1]
host_name = message.split("@@@")[2]
triggerseverity = message.split("@@@")[3]
itemid = message.split("@@@")[4]
alarm_status = message.split("@@@")[5]
hostconn = message.split("@@@")[6]
host_group = message.split("@@@")[7]
eventid = message.split("@@@")[8]
# Encrypt the alert fields with the server's RSA public key before POSTing.
with open('/etc/zabbix/pub.key') as f:
    key = f.read()
rsakey = RSA.importKey(key)
cipher = Cipher_pkcs1_v1_5.new(rsakey)
postdata = {
    "host_group": "{0}".format(host_group),
    "eventid": "{0}".format(eventid),
    "alarm_title": "{0}".format(alarm_title),
    "triggerdescription": "{0}".format(triggerdescription),
    "host_name": "{0}".format(host_name),
    "triggerseverity": "{0}".format(triggerseverity),
    "alarm_status": "{0}".format(alarm_status),
    "hostconn": "{0}".format(hostconn)}
cipher_text = base64.b64encode(cipher.encrypt(str(postdata).encode()))
logging.info(postdata)
postdata = {"data": cipher_text.decode()}
postdata = urllib.parse.urlencode(postdata)
postdata = postdata.encode('utf-8')
url = server_url
res = urllib.request.urlopen(url, postdata)
# Server responds with the event ID used as the chart storage key.
ID = res.read().decode()
# Attach a graph only for new problems that are not plain-text alerts.
if alarm_status == "PROBLEM" and "[text]" not in alarm_title:
    savepic(itemid, ID)
logging.info("ID:{0}".format(ID))
| 31.363636 | 88 | 0.697464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 544 | 0.197101 |
a67d10f27144999c6093454cdec1812be6142bc6 | 690 | py | Python | greenrocket/aio.py | kr41/GreenRocket | 4ae2658defc56cd567503045077b414d09ec2d6b | [
"BSD-2-Clause"
] | null | null | null | greenrocket/aio.py | kr41/GreenRocket | 4ae2658defc56cd567503045077b414d09ec2d6b | [
"BSD-2-Clause"
] | null | null | null | greenrocket/aio.py | kr41/GreenRocket | 4ae2658defc56cd567503045077b414d09ec2d6b | [
"BSD-2-Clause"
] | null | null | null | import asyncio
async def afire(self):
    """Fire signal asynchronously.

    Walks the signal class MRO and calls every registered handler;
    coroutine results are awaited.  Handler failures are logged and
    swallowed so one handler cannot break the others.

    Fixed: ``@asyncio.coroutine``/``yield from`` was deprecated and removed
    in Python 3.11 — rewritten as a native coroutine.  The bare ``except:``
    is narrowed to ``except Exception`` so ``asyncio.CancelledError``
    (a BaseException since 3.8) still propagates.
    """
    self.logger.debug('Fired %r', self)
    for cls in self.__class__.__mro__:
        if hasattr(cls, '__handlers__'):
            self.logger.debug('Propagate on %r', cls)
            for handler in cls.__handlers__:
                try:
                    self.logger.debug('Call %r', handler)
                    result = handler(self)
                    if asyncio.iscoroutine(result):
                        await result
                except Exception:
                    self.logger.error('Failed on processing %r by %r',
                                      self, handler, exc_info=True)
| 34.5 | 70 | 0.511594 | 0 | 0 | 653 | 0.946377 | 672 | 0.973913 | 0 | 0 | 115 | 0.166667 |
a67f5b2b69195447312a516daafb3c0a5077dd7e | 1,983 | py | Python | src/opera/executor/ansible.py | anzoman/xopera-opera | 0c9712dc6fc6bb2935e6c3e5a75364006b700390 | [
"Apache-2.0"
] | null | null | null | src/opera/executor/ansible.py | anzoman/xopera-opera | 0c9712dc6fc6bb2935e6c3e5a75364006b700390 | [
"Apache-2.0"
] | null | null | null | src/opera/executor/ansible.py | anzoman/xopera-opera | 0c9712dc6fc6bb2935e6c3e5a75364006b700390 | [
"Apache-2.0"
] | null | null | null | import json
import os
import sys
import tempfile
import yaml
from . import utils
def _get_inventory(host):
    """Build a YAML Ansible inventory with *host* as the single 'opera' host."""
    host_vars = {
        "ansible_host": host,
        "ansible_ssh_common_args": "-o StrictHostKeyChecking=no",
    }
    if host == "localhost":
        # Run locally with the current interpreter instead of SSH.
        host_vars["ansible_connection"] = "local"
        host_vars["ansible_python_interpreter"] = sys.executable
    else:
        host_vars["ansible_user"] = os.environ.get("OPERA_SSH_USER", "centos")
    return yaml.safe_dump({"all": {"hosts": {"opera": host_vars}}})
def run(host, primary, dependencies, vars):
    """Run the *primary* Ansible playbook against *host*.

    Copies the playbook and its *dependencies* into a temporary directory,
    generates an inventory and a vars file there, and invokes
    ansible-playbook with the JSON stdout callback.

    :returns: ``(success, attributes)`` where *attributes* are Ansible's
        global custom stats (empty dict on failure).
    """
    with tempfile.TemporaryDirectory() as dir_path:
        playbook = os.path.join(dir_path, os.path.basename(primary))
        utils.copy(primary, playbook)
        for d in dependencies:
            utils.copy(d, os.path.join(dir_path, os.path.basename(d)))
        inventory = utils.write(
            dir_path, _get_inventory(host), suffix=".yaml",
        )
        vars_file = utils.write(
            dir_path, yaml.safe_dump(vars), suffix=".yaml",
        )
        # Minimal ansible.cfg: no .retry files next to the playbook.
        with open("{}/ansible.cfg".format(dir_path), "w") as fd:
            fd.write("[defaults]\n")
            fd.write("retry_files_enabled = False\n")
        cmd = [
            "ansible-playbook",
            "-i", inventory,
            "-e", "@" + vars_file,
            playbook
        ]
        env = dict(
            ANSIBLE_SHOW_CUSTOM_STATS="1",
            ANSIBLE_STDOUT_CALLBACK="json",
        )
        code, out, err = utils.run_in_directory(dir_path, cmd, env)
        if code != 0:
            # On failure, dump the captured stdout/stderr files for debugging.
            with open(out) as fd:
                for l in fd:
                    print(l.rstrip())
            print("------------")
            with open(err) as fd:
                for l in fd:
                    print(l.rstrip())
            print("============")
            return False, {}
        with open(out) as fd:
            # The JSON callback output includes stats set via set_stats.
            attributes = json.load(fd)["global_custom_stats"]
        return code == 0, attributes
| 29.597015 | 78 | 0.550177 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 298 | 0.150277 |
a680ef9a14d7459d55ed4ce401528c8fd443a196 | 1,261 | py | Python | e2e/test_ghprb_log_configure_page.py | svic/jenkins-configuration | 87eb90ba4adcf25f0b779f1277bbeff0e6b3525b | [
"Apache-2.0"
] | null | null | null | e2e/test_ghprb_log_configure_page.py | svic/jenkins-configuration | 87eb90ba4adcf25f0b779f1277bbeff0e6b3525b | [
"Apache-2.0"
] | null | null | null | e2e/test_ghprb_log_configure_page.py | svic/jenkins-configuration | 87eb90ba4adcf25f0b779f1277bbeff0e6b3525b | [
"Apache-2.0"
] | null | null | null | import unittest
import yaml
import os
from bok_choy.web_app_test import WebAppTest
from pages.ghprb_log_configure_page import GhprbLogConfigurePage
class TestGhprbLogConfigurePage(WebAppTest):
    """bok-choy test for the GHPRB log recorder configuration page."""

    def setUp(self):
        # Load the expected log configuration from $CONFIG_PATH/log_config.yml.
        super(TestGhprbLogConfigurePage, self).setUp()
        config_path = os.getenv('CONFIG_PATH')
        try:
            yaml_contents = open(
                "{}/log_config.yml".format(config_path), 'r'
            ).read()
        except IOError:
            # NOTE(review): if the file is missing, yaml_contents is never
            # bound and the safe_load below raises NameError — confirm
            # whether failing here was intended.
            pass
        self.log_config = yaml.safe_load(yaml_contents)
        self.ghprb_log_configure_page = GhprbLogConfigurePage(self.browser)

    def test_ghprb_configure_page(self):
        """
        Ensure the configuration page for our test data log (GHPRB)
        has the appropriate name and includes all of the name, log_level
        pairs from the log_config.yml file.
        """
        # NOTE(review): local binding is unused; visit() is called for the
        # navigation side effect.
        ghprb_log_configure_page = self.ghprb_log_configure_page.visit()
        assert self.log_config[0]["LOG_RECORDER"] == self.ghprb_log_configure_page.get_log_recorder_name()
        ghprb_loggers = self.ghprb_log_configure_page.get_loggers_with_level()
        for log in self.log_config[0]["LOGGERS"]:
            assert (log["name"], log["log_level"]) in ghprb_loggers
| 38.212121 | 106 | 0.681205 | 1,111 | 0.881047 | 0 | 0 | 0 | 0 | 0 | 0 | 275 | 0.218081 |
a6815c7893a0d94c3e1ad438c9c59e228b6c752c | 522 | py | Python | InquirerPy/prompts/__init__.py | jfilipedias/InquirerPy | 9f67125f808cbe6a73ab3cb652f35faba3f3443e | [
"MIT"
] | null | null | null | InquirerPy/prompts/__init__.py | jfilipedias/InquirerPy | 9f67125f808cbe6a73ab3cb652f35faba3f3443e | [
"MIT"
] | null | null | null | InquirerPy/prompts/__init__.py | jfilipedias/InquirerPy | 9f67125f808cbe6a73ab3cb652f35faba3f3443e | [
"MIT"
] | null | null | null | """Module contains import of all prompts classes."""
from InquirerPy.prompts.checkbox import CheckboxPrompt
from InquirerPy.prompts.confirm import ConfirmPrompt
from InquirerPy.prompts.expand import ExpandPrompt
from InquirerPy.prompts.filepath import FilePathPrompt
from InquirerPy.prompts.fuzzy.fuzzy import FuzzyPrompt
from InquirerPy.prompts.input import InputPrompt
from InquirerPy.prompts.list import ListPrompt
from InquirerPy.prompts.rawlist import RawlistPrompt
from InquirerPy.prompts.secret import SecretPrompt
| 47.454545 | 54 | 0.867816 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.099617 |
a68185697f357240b61f536aa08af6593cfde478 | 2,357 | py | Python | MMMaker/core/highlight_extractor.py | C4Ution/MMMaker | 7e64b50abb2257c02618d5a5b3323a1e2993fe8a | [
"MIT"
] | 9 | 2020-05-23T04:50:18.000Z | 2020-12-19T05:16:39.000Z | MMMaker/core/highlight_extractor.py | C4Ution/MMMaker | 7e64b50abb2257c02618d5a5b3323a1e2993fe8a | [
"MIT"
] | 13 | 2020-05-23T12:01:08.000Z | 2022-02-10T10:31:18.000Z | MMMaker/core/highlight_extractor.py | C4Ution/MMMaker | 7e64b50abb2257c02618d5a5b3323a1e2993fe8a | [
"MIT"
] | 5 | 2020-07-02T11:44:56.000Z | 2021-07-10T03:19:17.000Z | import numpy as np
import scipy.io.wavfile as wave
from moviepy.editor import VideoFileClip
from scipy.fftpack import fft
from misc import get_random_name
HIGHLIGHT_LENGTH = 0.65
def extract_highlights(file_paths):
    """Cut a short highlight clip around the loudest and the quietest
    audio sample of each input video.

    :param file_paths: list of video file paths
    :return: ``(max_highlights, min_highlights)`` — lists of written clip
        paths for the loudest and quietest moments respectively

    Fixed: the original bound ``x = list.append(...)`` (always None) to
    unused variables; the appends are kept, the dead bindings dropped.
    """
    max_highlights = []
    min_highlights = []
    for file_path in file_paths:
        video_clip = VideoFileClip(file_path)
        audio_clip = video_clip.audio
        # Dump the audio track to a temporary wav file for analysis.
        src_audio_file_name = get_random_name('wav')
        audio_clip.write_audiofile(src_audio_file_name)
        source_wave = wave.read(src_audio_file_name)
        # Second channel of the stereo track.
        stereo_channel_wave = source_wave[1].T[1]
        # 8-bit samples normalized onto [-1, 1).
        normalize_wave = [(ele / 2 ** 8.) * 2 - 1 for ele in stereo_channel_wave]
        # fourier_transform_wave = fft(normalize_wave)  # optional frequency-domain analysis
        analysis_wave = normalize_wave
        # Samples per second of video; converts sample indices to seconds.
        normalize_time = len(analysis_wave) / video_clip.duration
        argmax_frequency = np.argmax(analysis_wave) / normalize_time
        argmin_frequency = np.argmin(analysis_wave) / normalize_time
        max_highlights.append(get_random_name('mp4'))
        min_highlights.append(get_random_name('mp4'))
        # Center a HIGHLIGHT_LENGTH window on the loudest sample, clamped
        # to the video bounds.
        start_max = argmax_frequency - (HIGHLIGHT_LENGTH / 2)
        end_max = argmax_frequency + (HIGHLIGHT_LENGTH / 2)
        if start_max < 0:
            end_max -= start_max
            start_max = 0
        elif end_max > video_clip.duration:
            start_max -= end_max - video_clip.duration
            end_max = video_clip.duration
        video_clip.subclip(start_max, end_max).write_videofile(max_highlights[-1], codec='libx264', audio_codec='aac')
        # Same window around the quietest sample.
        start_min = argmin_frequency - (HIGHLIGHT_LENGTH / 2)
        end_min = argmin_frequency + (HIGHLIGHT_LENGTH / 2)
        if start_min < 0:
            end_min -= start_min
            start_min = 0
        elif end_min > video_clip.duration:
            start_min -= end_min - video_clip.duration
            end_min = video_clip.duration
        video_clip.subclip(start_min, end_min).write_videofile(min_highlights[-1], codec='libx264', audio_codec='aac')
        video_clip.close()
    return max_highlights, min_highlights
| 35.712121 | 130 | 0.686042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 317 | 0.131809 |
a682e0a43b3f440fa929e4b88ed44df23b27afe0 | 1,755 | py | Python | fafdata/fetch.py | yaniv-aknin/fafscrape | bd1beb61241ef28edc57c9b656dd039f58c4dcf1 | [
"MIT"
] | null | null | null | fafdata/fetch.py | yaniv-aknin/fafscrape | bd1beb61241ef28edc57c9b656dd039f58c4dcf1 | [
"MIT"
] | 23 | 2022-03-12T11:51:46.000Z | 2022-03-19T19:32:52.000Z | fafdata/fetch.py | yaniv-aknin/fafscrape | bd1beb61241ef28edc57c9b656dd039f58c4dcf1 | [
"MIT"
] | null | null | null | import json
import datetime
import requests
import urlobject
from .utils import format_faf_date
API_BASE = urlobject.URLObject('https://api.faforever.com')
ENTITY_TYPE_TO_DEFAULT_DATE_FIELD = {
'game': 'startTime',
'player': 'createTime',
'map': 'createTime',
'mapVersion': 'createTime',
}
def construct_url(entity, include, date_field, page_size, start_date, end_date, page_number=1, sort='ASC', filters=(), api_base=API_BASE):
url = api_base.with_path(f'/data/{entity}')
url = url.add_query_param('page[size]', page_size)
url = url.add_query_param('page[number]', page_number)
url = url.add_query_param('page[totals]', '')
filters = list(filters)
start_date = format_faf_date(start_date)
filters.append(f'{date_field}=ge={start_date}')
end_date = format_faf_date(end_date)
filters.append(f'{date_field}=le={end_date}')
url = url.add_query_param('filter', ';'.join(filters))
if include:
url = url.add_query_param('include', ','.join(include))
url = url.add_query_param('sort', f'-{date_field}' if sort == 'DESC' else f'{date_field}')
return url
def fetch_page(url):
    """GET *url* and return the decoded JSON body, raising on HTTP errors."""
    reply = requests.get(url)
    reply.raise_for_status()
    return reply.json()
def yield_pages(url_constructor, start_page=1, max_pages=float('inf')):
    """Generate successive API pages starting at *start_page*.

    The first page is fetched eagerly to learn the server-reported total
    page count, which caps *max_pages*.
    """
    number = start_page
    first = fetch_page(url_constructor(page_number=number))
    yield first
    limit = min(max_pages, first['meta']['page']['totalPages'])
    while number < limit:
        number += 1
        yield fetch_page(url_constructor(page_number=number))
def write_json(path, doc, pretty):
    """Serialize *doc* as JSON to *path* (4-space indent when *pretty*)."""
    indent = 4 if pretty else None
    with open(path, 'w') as out:
        json.dump(doc, out, indent=indent)
| 35.1 | 138 | 0.698575 | 0 | 0 | 378 | 0.215385 | 0 | 0 | 0 | 0 | 327 | 0.186325 |
a68431761d9755f8a2a08a35a6e7019caa2ea98e | 2,395 | py | Python | main.py | sschr15/tldr-discord | 4119a643b9b5f5fa48bf01b6032dad4072c6faaa | [
"MIT"
] | 2 | 2020-10-21T17:09:23.000Z | 2021-05-02T13:42:20.000Z | main.py | sschr15/tldr-discord | 4119a643b9b5f5fa48bf01b6032dad4072c6faaa | [
"MIT"
] | null | null | null | main.py | sschr15/tldr-discord | 4119a643b9b5f5fa48bf01b6032dad4072c6faaa | [
"MIT"
] | 1 | 2020-10-21T17:09:28.000Z | 2020-10-21T17:09:28.000Z | import discord
import os
import sys
import datetime
import glob
import tldr
# noinspection SpellCheckingInspection
token: str = "revoked, accidentally made public"
# If you want a custom language, input the language as shown in the tldr pages here.
language = ""
# noinspection PyMethodMayBeStatic
class Client(discord.Client):
    """Discord client that serves tldr pages in response to ``!tldr*`` commands."""

    async def on_ready(self):
        """Called once the bot is connected; warm the tldr page cache."""
        tldr.refresh_cache()
        print("tldr-bot is started!")

    async def on_message(self, message: discord.Message):
        """Dispatch ``!tldr*`` commands found in incoming messages.

        Recognized forms:
          * ``!tldrrefresh`` -- re-download the tldr page cache
          * bare ``!tldros`` / ``!tldrlang`` / ``!tldr`` -- help/about embeds
          * ``!tldr <cmd>``, ``!tldros <os> <cmd>``, ``!tldrlang <lang> <cmd>``
        """
        content: str = message.content
        channel: discord.TextChannel = message.channel
        if content == "!tldrrefresh":
            tldr.refresh_cache()
            await channel.send("Cache refreshed!")
        elif content.startswith("!tldr"):
            if content == "!tldros":
                # Bare "!tldros": explain the OS-priority variant.
                embed = discord.Embed(
                    title="OS Command",
                    color=0x00FE4D,
                    description="Request a tldr, prioritizing an OS.\nOptions are `linux`, `windows`, `osx`, and `sunos`."
                )
            elif content == "!tldrlang":
                # Bare "!tldrlang": list the languages the cache provides.
                embed = discord.Embed(
                    title="Language Command",
                    color=0x00FE4D,
                    description="Request a tldr, in a certain language if possible.\nAvailable options are:\n`" + "`, `".join(tldr.languages()) + "`"
                )
            elif content == "!tldr":
                # Bare "!tldr": short about/credits embed.
                embed = discord.Embed(
                    title="tldr",
                    color=0xA930D9,
                    description="man pages made simple"
                )
                embed.set_footer(text="Discord bot created by sschr15")
            else:
                # Concrete lookup. split_at marks where the command name
                # starts: index 2 when an OS/language argument precedes it.
                custom_os = False
                custom_lang = False
                if content.startswith("!tldros"):
                    split_at = 2
                    custom_os = True
                elif content.startswith("!tldrlang"):
                    split_at = 2
                    custom_lang = True
                else:
                    split_at = 1
                # Multi-word page names are joined with '-' (tldr naming).
                command = "-".join(content.split(" ")[split_at:])
                # NOTE(review): assumes at least one word follows the command
                # keyword; a message like "!tldros" alone is handled above,
                # but "!tldros " (trailing space only) would raise IndexError
                # here -- confirm intent.
                first = content.split(" ")[1]
                embed = tldr.parse(command, first if custom_lang else language, first if custom_os else "common")
            await channel.send(embed=embed)
if __name__ == '__main__':
    # Script entry point: build the Discord client and start its
    # blocking event loop with the bot token.
    client = Client()
    client.run(token)
| 34.710145 | 149 | 0.534864 | 2,018 | 0.842589 | 0 | 0 | 0 | 0 | 1,978 | 0.825887 | 597 | 0.249269 |
a6844d436f3ac8eae3409198b262402806d0a4c7 | 1,022 | py | Python | class3/exercises/exercise3/exercise3.py | EndlessDynamics/Fork_nornir_course | 04bf7e3819659f481a4e04059152877b795177b2 | [
"Apache-2.0"
] | null | null | null | class3/exercises/exercise3/exercise3.py | EndlessDynamics/Fork_nornir_course | 04bf7e3819659f481a4e04059152877b795177b2 | [
"Apache-2.0"
] | null | null | null | class3/exercises/exercise3/exercise3.py | EndlessDynamics/Fork_nornir_course | 04bf7e3819659f481a4e04059152877b795177b2 | [
"Apache-2.0"
] | null | null | null | from nornir import InitNornir
from nornir.core.filter import F
def main():
    """Print inventory host subsets for nornir filtering exercises 3a-3d."""
    nr = InitNornir()
    rule = "-" * 20

    def show(title, build):
        # One titled section: header, rule, filtered hosts, rule.
        # The filter callable runs after the rule is printed, matching the
        # original statement order exactly.
        print("\n" + title)
        print(rule)
        print(build().inventory.hosts)
        print(rule)

    show("Exercise 3a (role AGG)",
         lambda: nr.filter(F(role__contains="AGG")))
    show("Exercise 3b (sea or sfo group)",
         lambda: nr.filter(F(groups__contains="sea") | F(groups__contains="sfo")))
    show("Exercise 3c (WAN-role and WIFI password 'racecar')",
         lambda: nr.filter(
             F(site_details__wifi_password__contains="racecar") & F(role="WAN")))
    show("Exercise 3d (WAN-role and not WIFI password 'racecar')",
         lambda: nr.filter(
             ~F(site_details__wifi_password__contains="racecar") & F(role="WAN")))
    print()
if __name__ == "__main__":
    # Run the filtering demo only when executed as a script.
    main()
| 25.55 | 76 | 0.620352 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 249 | 0.24364 |
a6847afb9f867644d48e0218625f0dd5bd6e0330 | 13,700 | py | Python | deprecated/version1/utility.py | kpimparkar/cloudmesh-cloud | cb5ec6c2c8e5eb8c41a697cb67e72183808adb64 | [
"Apache-2.0"
] | null | null | null | deprecated/version1/utility.py | kpimparkar/cloudmesh-cloud | cb5ec6c2c8e5eb8c41a697cb67e72183808adb64 | [
"Apache-2.0"
] | 1 | 2020-10-21T18:15:46.000Z | 2020-10-21T18:15:46.000Z | deprecated/version1/utility.py | kpimparkar/cloudmesh-cloud | cb5ec6c2c8e5eb8c41a697cb67e72183808adb64 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 23 19:15:04 2018
@author: yuluo
"""
import subprocess
class Utility(object):
    """scp/ssh helper for managing files and folders on remote instances.

    Two credential shapes are supported (behavior preserved from the
    original implementation):

    * instances with an ``address`` key: the public key path is passed as
      a bare positional argument to scp/ssh;
    * otherwise EC2-style credentials: the secret key is passed via ``-i``.

    Failure messages are normalized to ``"Fail to access the instance"``
    (fixes the ``"Faile"`` typo present in several of the original methods).
    """

    def __init__(self, debug=False):
        """
        initializes the utility class for awscm

        :param debug: enables debug information to be printed
        """
        self.debug = debug
        # Base remote directory every relative path is resolved against.
        self.default_path_aws = '/home/ubuntu/'

    def get_instance(self, instance):
        """
        get the content of the labeled or named instance

        :param instance: single-entry dict mapping the instance title to its details
        :return: the detailed value of the instance
        """
        title = list(instance.keys())[0]
        return instance.get(title)

    def _remote_args(self, instance):
        """
        Build the (key arguments, login) pair shared by every scp/ssh call.

        :param instance: unwrapped instance detail dict (see get_instance)
        :return: tuple (list of key-related CLI arguments, remote login string)
        """
        if instance.get('address'):
            # NOTE(review): 'address@username' looks reversed (convention is
            # username@address) -- preserved verbatim from the original code.
            login = instance.get('address') + "@" + instance.get('credentials').get('username')
            key_args = [instance.get('credentials').get('publickey')]
        else:
            login = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')
            key_args = ['-i', instance.get('credentials').get('EC2_SECRET_KEY')]
        return key_args, login

    def copy_file(self, instance, file, where):
        """
        copy the file from local into the instance

        :param instance: the instance that we want to access
        :param file: the local file path to copy
        :param where: destination directory relative to the remote home
        :return: "Success" or "Fail" message
        """
        instance = self.get_instance(instance)
        try:
            key_args, login = self._remote_args(instance)
            subprocess.check_output(
                ["scp"] + key_args + [file, login + ":" + self.default_path_aws + where])
            return "Success to copy the file " + file + " to " + self.default_path_aws + where
        except Exception:
            return "Fail to access the instance"

    def copy_folder(self, instance, folder, where):
        """
        copy the folder from local into the instance

        :param instance: the instance that we want to access
        :param folder: the local folder path to copy (recursively)
        :param where: destination directory relative to the remote home
        :return: "Success" or "Fail" message
        """
        instance = self.get_instance(instance)
        try:
            key_args, login = self._remote_args(instance)
            subprocess.check_output(
                ["scp"] + key_args + ["-r", folder, login + ":" + self.default_path_aws + where])
            return "Success to copy the folder " + folder + " to " + self.default_path_aws + where
        except Exception:
            return "Fail to access the instance"

    def dir_list(self, instance, where):
        """
        list objects from the instance directory

        :param instance: the instance we want to access
        :param where: the directory (relative to the remote home) to view
        :return: the listing output, or a "Fail" message
        """
        instance = self.get_instance(instance)
        try:
            key_args, login = self._remote_args(instance)
            return subprocess.check_output(
                ["ssh"] + key_args + [login, 'ls', self.default_path_aws + where]).decode("utf-8")
        except Exception:
            return "Fail to access the instance"

    def delete_file(self, instance, file, where):
        """
        delete the file from the instance

        :param instance: the instance that we want to access
        :param file: the file name that we want to delete
        :param where: the directory (relative to the remote home) holding it
        :return: "Success" or "Fail" message
        """
        instance = self.get_instance(instance)
        try:
            key_args, login = self._remote_args(instance)
            subprocess.check_output(
                ["ssh"] + key_args + [login, 'rm', self.default_path_aws + where + file])
            return "Success to delete the file " + file + " from " + self.default_path_aws + where
        except Exception:
            return "Fail to access the instance"

    def delete_folder(self, instance, folder, where):
        """
        delete the folder from the instance

        :param instance: the instance that we want to access
        :param folder: the folder name that we want to delete (recursively)
        :param where: the directory (relative to the remote home) holding it
        :return: "Success" or "Fail" message
        """
        instance = self.get_instance(instance)
        try:
            key_args, login = self._remote_args(instance)
            subprocess.check_output(
                ["ssh"] + key_args + [login, 'rm', '-r', self.default_path_aws + where + folder])
            return "Success to delete the folder " + folder + " from " + self.default_path_aws + where
        except Exception:
            return "Fail to access the instance"

    def create_folder(self, instance, folder, where):
        """
        create a folder in the instance

        :param instance: the instance that we want to access
        :param folder: the name of the folder to create
        :param where: the destination directory relative to the remote home
        :return: "Success" or "Fail" message
        """
        instance = self.get_instance(instance)
        try:
            key_args, login = self._remote_args(instance)
            subprocess.check_output(
                ["ssh"] + key_args + [login, 'mkdir', self.default_path_aws + where + folder])
            return "Success to create the folder " + folder + " in " + self.default_path_aws + where
        except Exception:
            return "Fail to access the instance"

    def read_file(self, instance, file, where):
        """
        read a file from the instance

        :param instance: the instance that we want to access
        :param file: the file name that we want to read
        :param where: the directory (relative to the remote home) holding it
        :return: the file content, or a "Fail" message
        """
        instance = self.get_instance(instance)
        try:
            key_args, login = self._remote_args(instance)
            return subprocess.check_output(
                ["ssh"] + key_args + [login, 'cat', self.default_path_aws + where + file]).decode("utf-8")
        except Exception:
            return "Fail to access the instance"

    def download_file(self, instance, file, where, local):
        """
        download a file from the instance to local

        :param instance: the instance that we want to access
        :param file: the file name that we want to download
        :param where: the remote directory (relative to the remote home)
        :param local: the local destination path
        :return: "Success" or "Fail" message
        """
        instance = self.get_instance(instance)
        try:
            key_args, login = self._remote_args(instance)
            subprocess.check_output(
                ["scp"] + key_args + [login + ":" + self.default_path_aws + where + file, local])
            return "Success to download file " + self.default_path_aws + where + file + " to " + local
        except Exception:
            return "Fail to access the instance"

    def download_folder(self, instance, folder, where, local):
        """
        download a folder from the instance to local

        :param instance: the instance that we want to access
        :param folder: the folder name that we want to download
        :param where: the remote directory (relative to the remote home)
        :param local: the local destination path
        :return: "Success" or "Fail" message
        """
        instance = self.get_instance(instance)
        try:
            key_args, login = self._remote_args(instance)
            subprocess.check_output(
                ["scp"] + key_args + ['-r', login + ":" + self.default_path_aws + where + folder, local])
            return "Success to download folder " + self.default_path_aws + where + folder + " to " + local
        except Exception:
            return "Fail to access the instance"

    def check_process(self, instance, process):
        """
        check whether the named process is running on the instance

        :param instance: the instance that we want to access
        :param process: the process name to grep for
        :return: the matching process listing, or a "Fail" message
        """
        instance = self.get_instance(instance)
        try:
            key_args, login = self._remote_args(instance)
            # ssh joins the remote command words into one shell string, so
            # the '|' is interpreted by the remote shell (ps aux | grep ...).
            return subprocess.check_output(
                ["ssh"] + key_args + [login, 'ps', 'aux', '|', 'grep', process]).decode("utf-8")
        except Exception:
            return "Fail to access the instance"
a68504f4c7d13cc8c0e0489a51095d8397a4bb6d | 1,370 | gyp | Python | binding.gyp | Ciccio99/electron-overlay-window | f25e29ec1ffe8f4ccb942051a986d217fb36a2b8 | [
"MIT"
] | null | null | null | binding.gyp | Ciccio99/electron-overlay-window | f25e29ec1ffe8f4ccb942051a986d217fb36a2b8 | [
"MIT"
] | null | null | null | binding.gyp | Ciccio99/electron-overlay-window | f25e29ec1ffe8f4ccb942051a986d217fb36a2b8 | [
"MIT"
] | null | null | null | {
'targets': [
{
'target_name': 'overlay_window',
'sources': [
'src/lib/addon.c',
'src/lib/napi_helpers.c'
],
'include_dirs': [
'src/lib'
],
'conditions': [
['OS=="win"', {
'defines': [
'WIN32_LEAN_AND_MEAN'
],
'link_settings': {
'libraries': [
'oleacc.lib'
]
},
'sources': [
'src/lib/windows.c',
]
}],
['OS=="linux"', {
'defines': [
'_GNU_SOURCE'
],
'link_settings': {
'libraries': [
'-lxcb', '-lpthread'
]
},
'cflags': ['-std=c99', '-pedantic', '-Wall', '-pthread'],
'sources': [
'src/lib/x11.c',
]
}],
['OS=="mac"', {
'link_settings': {
'libraries': [
'-lpthread', '-framework AppKit', '-framework ApplicationServices'
]
},
'xcode_settings': {
'OTHER_CFLAGS': [
'-fobjc-arc'
]
},
'cflags': ['-std=c99', '-pedantic', '-Wall', '-pthread'],
'sources': [
'src/lib/mac.mm',
'src/lib/mac/OWFullscreenObserver.mm'
]
}]
]
}
]
}
| 22.459016 | 80 | 0.350365 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 628 | 0.458394 |
a685b99254f1096694c40e8467f047e4febef123 | 5,714 | py | Python | aserializer/fields/serializer_fields.py | orderbird/aserializer | 3aeaa073f2dac7830458a1f45ffa9af6540bd315 | [
"MIT"
] | null | null | null | aserializer/fields/serializer_fields.py | orderbird/aserializer | 3aeaa073f2dac7830458a1f45ffa9af6540bd315 | [
"MIT"
] | null | null | null | aserializer/fields/serializer_fields.py | orderbird/aserializer | 3aeaa073f2dac7830458a1f45ffa9af6540bd315 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from collections import Iterable
from aserializer.utils import py2to3, registry
from aserializer.fields.fields import BaseSerializerField, SerializerFieldValueError
class SerializerObjectField(BaseSerializerField):
    """Base class for fields whose value is itself a (nested) serializer.

    Handles the shared bookkeeping: which sub-fields to include/exclude,
    extra keyword arguments forwarded to the nested serializer, and lazy
    resolution of serializer classes registered by name.
    """

    def __init__(self, fields=None, exclude=None, *args, **kwargs):
        """
        :param fields: optional iterable of field names to restrict the
            nested serializer to.
        :param exclude: optional iterable of field names to leave out.
        """
        super(SerializerObjectField, self).__init__(*args, **kwargs)
        self.only_fields = fields or []
        self.exclude = exclude or []
        self.unknown_error = None
        self.extras = {}
        # Serializer class (or registry name) set by subclasses.
        self._serializer_cls = None

    @staticmethod
    def normalize_serializer_cls(serializer_cls):
        # A string is treated as a registry name and resolved lazily; this
        # allows forward references to serializers defined later.
        if isinstance(serializer_cls, py2to3.string):
            serializer_cls = registry.get_serializer(serializer_cls)
        return serializer_cls

    def get_serializer_cls(self):
        """Return the concrete serializer class (resolving registry names)."""
        return self.normalize_serializer_cls(self._serializer_cls)

    def pre_value(self, fields=None, exclude=None, **extras):
        """Merge per-call field restrictions and extras before a value is set."""
        if isinstance(fields, (list, tuple, set)):
            self.only_fields = set(list(self.only_fields) + list(fields))
        if isinstance(exclude, (list, tuple, set)):
            self.exclude = set(list(self.exclude) + list(exclude))
        self.unknown_error = extras.pop('unknown_error', None)
        self.extras = extras

    def get_instance(self):
        """Return the nested serializer instance; overridden by subclasses."""
        return None

    def __get__(self, instance, owner):
        # Descriptor access: on an instance, return the bound field's nested
        # serializer instead of the field object itself.
        if instance is None:
            return self
        field, field_name = self._get_field_from_instance(instance=instance)
        if field:
            return field.get_instance()
        return self
class SerializerField(SerializerObjectField):
    """Field that embeds a single nested serializer."""

    def __init__(self, serializer, *args, **kwargs):
        """:param serializer: serializer class or registry name for the value."""
        super(SerializerField, self).__init__(*args, **kwargs)
        self._serializer_cls = serializer
        self._serializer = None

    def get_instance(self):
        """Return the nested serializer instance (or None if unset)."""
        return self._serializer

    def validate(self):
        """Delegate validation to the nested serializer, if one exists."""
        if not self._serializer:
            if self.required:
                raise SerializerFieldValueError(self._error_messages['required'], field_names=self.names)
            return
        if not self._serializer.is_valid():
            raise SerializerFieldValueError(self._serializer.errors, field_names=self.names)

    def set_value(self, value):
        """Feed *value* into the nested serializer, creating it on first use."""
        if value is None:
            self._serializer = None
        elif self._serializer is not None:
            # Reuse the existing serializer for subsequent values.
            self._serializer.initial(source=value)
        else:
            self._serializer_cls = self.normalize_serializer_cls(self._serializer_cls)
            self._serializer = self._serializer_cls(source=value,
                                                    fields=self.only_fields,
                                                    exclude=self.exclude,
                                                    unknown_error=self.unknown_error,
                                                    **self.extras)

    def _to_native(self):
        # Serialized (dump) representation, or None when no value is set.
        return self._serializer.dump() if self._serializer else None

    def _to_python(self):
        # Python-dict representation, or None when no value is set.
        return self._serializer.to_dict() if self._serializer else None
class ListSerializerField(SerializerObjectField):
    """Field holding a homogeneous list of nested serializers."""

    error_messages = {
        'required': 'This list is empty.',
    }

    def __init__(self, serializer, sort_by=None, *args, **kwargs):
        """
        :param serializer: serializer class (or registry name) for each item.
        :param sort_by: key name or list of key names to sort dumped items by.
        """
        super(ListSerializerField, self).__init__(*args, **kwargs)
        self._serializer_cls = serializer
        self.items = []
        # Lazily-filled caches for the two output representations.
        self._python_items = []
        self._native_items = []
        self._sort_by = None
        if sort_by:
            # Normalize a single key name to a one-element list.
            self._sort_by = [sort_by, ] if isinstance(sort_by, py2to3.string) else sort_by

    def validate(self):
        """Validate every item; raise with the collected per-item errors."""
        if self.items:
            _errors = []
            for item in self.items:
                if not item.is_valid():
                    _errors.append(item.errors)
            if _errors:
                raise SerializerFieldValueError(_errors)
        elif self.required:
            raise SerializerFieldValueError(self._error_messages['required'], field_names=self.names)

    def get_instance(self):
        """Return the list of nested serializer instances."""
        return self.items

    def add_item(self, source):
        """Wrap *source* in a new nested serializer and append it to items."""
        self._serializer_cls = self.normalize_serializer_cls(self._serializer_cls)
        _serializer = self._serializer_cls(source=source,
                                           fields=self.only_fields,
                                           exclude=self.exclude,
                                           unknown_error=self.unknown_error,
                                           **self.extras)
        self.items.append(_serializer)

    def set_value(self, value):
        """Rebuild the item list (and clear both caches) from an iterable."""
        self.items[:] = []
        self._native_items[:] = []
        self._python_items[:] = []
        if isinstance(value, Iterable):
            for item in value:
                self.add_item(source=item)

    def _to_native(self):
        # Dump each item once, cache the result, and (only on first build)
        # sort the dumps by the configured keys.
        if not self._native_items:
            for item in self.items:
                self._native_items.append(item.dump())
            if self._sort_by:
                self._native_items = sorted(self._native_items,
                                            key=lambda item: [item.get(k, None) for k in self._sort_by])
        return self._native_items

    def _to_python(self):
        # Build and cache the python-dict representation (unsorted).
        if not self._python_items:
            for item in self.items:
                self._python_items.append(item.to_dict())
        # TODO: what about deserialization? do we want/need sorting here as well or do we trust the order of items from json?
        # if self._sort_by:
        #     return sorted(unsorted,
        #                   key=lambda item: [getattr(item, k, None) for k in self._sort_by])
        return self._python_items
| 36.628205 | 129 | 0.593455 | 5,514 | 0.964998 | 0 | 0 | 216 | 0.037802 | 0 | 0 | 339 | 0.059328 |
a6891d269483be6ec34924e0b2a724eb2aa0420b | 29,297 | py | Python | autotest/ogr/ogr_gpkg.py | NathanW2/gdal | a5fc0fa500765f484b497d23ec5459176837e422 | [
"MIT"
] | 2 | 2015-07-24T16:16:34.000Z | 2015-07-24T16:16:37.000Z | autotest/ogr/ogr_gpkg.py | samalone/gdal-ios | beed159503ce550c4e09edb25c168c8344e8998c | [
"MIT"
] | null | null | null | autotest/ogr/ogr_gpkg.py | samalone/gdal-ios | beed159503ce550c4e09edb25c168c8344e8998c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test GeoPackage driver functionality.
# Author: Paul Ramsey <pramsey@boundlessgeom.com>
#
###############################################################################
# Copyright (c) 2004, Paul Ramsey <pramsey@boundlessgeom.com>
# Copyright (c) 2014, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
import string
import shutil
# Make sure we run from the directory of the script
if os.path.basename(sys.argv[0]) == os.path.basename(__file__):
if os.path.dirname(sys.argv[0]) != '':
os.chdir(os.path.dirname(sys.argv[0]))
sys.path.append( '../pymod' )
from osgeo import ogr, osr, gdal
import gdaltest
import ogrtest
###############################################################################
# Create a fresh database.
def ogr_gpkg_1():
    """Create a fresh GeoPackage database (skips if the driver is missing).

    BUGFIX: the original had an unreachable gdaltest.gpkg_ds.Destroy() after
    both return paths; the dead line has been removed.
    """
    gdaltest.gpkg_ds = None
    gdaltest.gpkg_dr = None
    try:
        gdaltest.gpkg_dr = ogr.GetDriverByName('GPKG')
    except:
        return 'skip'
    if gdaltest.gpkg_dr is None:
        return 'skip'

    try:
        os.remove('tmp/gpkg_test.gpkg')
    except OSError:
        pass

    # This is to speed-up the runtime of tests on EXT4 filesystems
    # Do not use this for production environment if you care about data safety
    # w.r.t system/OS crashes, unless you know what you are doing.
    gdal.SetConfigOption('OGR_SQLITE_SYNCHRONOUS', 'OFF')

    gdaltest.gpkg_ds = gdaltest.gpkg_dr.CreateDataSource('tmp/gpkg_test.gpkg')
    if gdaltest.gpkg_ds is None:
        return 'fail'
    return 'success'
###############################################################################
# Re-open database to test validity
def ogr_gpkg_2():
    """Re-open the database created in ogr_gpkg_1 to check its validity."""
    if gdaltest.gpkg_dr is None:
        return 'skip'
    gdaltest.gpkg_ds = gdaltest.gpkg_dr.Open('tmp/gpkg_test.gpkg', update=1)
    return 'success' if gdaltest.gpkg_ds is not None else 'fail'
###############################################################################
# Create a layer
def ogr_gpkg_3():
    """Create a point layer and verify duplicate layer names are rejected.

    CLEANUP: removed the leftover commented-out error-handler lines and the
    stale "Test invalid FORMAT" comment that did not match the code.
    """
    if gdaltest.gpkg_dr is None or gdaltest.gpkg_ds is None:
        return 'skip'

    srs4326 = osr.SpatialReference()
    srs4326.ImportFromEPSG(4326)
    lyr = gdaltest.gpkg_ds.CreateLayer('first_layer', geom_type=ogr.wkbPoint, srs=srs4326)
    if lyr is None:
        return 'fail'

    # Creating a layer whose name already exists must fail: create 'a_layer'
    # once, then try again with the same name (errors silenced).
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    lyr = gdaltest.gpkg_ds.CreateLayer('a_layer')
    lyr = gdaltest.gpkg_ds.CreateLayer('a_layer')
    gdal.PopErrorHandler()
    if lyr is not None:
        gdaltest.post_reason('layer creation should have failed')
        return 'fail'

    return 'success'
###############################################################################
# Close and re-open to test the layer registration
def ogr_gpkg_4():
    """Close and re-open the file, then check both layers were registered."""
    if gdaltest.gpkg_dr is None or gdaltest.gpkg_ds is None:
        return 'skip'

    gdaltest.gpkg_ds.Destroy()
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    gdaltest.gpkg_ds = gdaltest.gpkg_dr.Open('tmp/gpkg_test.gpkg', update=1)
    gdal.PopErrorHandler()
    if gdaltest.gpkg_ds is None:
        return 'fail'

    if gdaltest.gpkg_ds.GetLayerCount() != 2:
        gdaltest.post_reason('unexpected number of layers')
        return 'fail'

    lyr0 = gdaltest.gpkg_ds.GetLayer(0)
    lyr1 = gdaltest.gpkg_ds.GetLayer(1)
    for idx, (lyr, expected) in enumerate(((lyr0, 'first_layer'), (lyr1, 'a_layer'))):
        if lyr.GetName() != expected:
            gdaltest.post_reason('unexpected layer name for layer %d' % idx)
            return 'fail'

    return 'success'
###############################################################################
# Delete a layer
def ogr_gpkg_5():
    """Delete both layers and verify the datasource ends up empty."""
    if gdaltest.gpkg_dr is None or gdaltest.gpkg_ds is None:
        return 'skip'

    if gdaltest.gpkg_ds.GetLayerCount() != 2:
        gdaltest.post_reason('unexpected number of layers')
        return 'fail'

    # Delete from the highest index down, matching the original order.
    for idx in (1, 0):
        if gdaltest.gpkg_ds.DeleteLayer(idx) != 0:
            gdaltest.post_reason('got error code from DeleteLayer(%d)' % idx)
            return 'fail'

    if gdaltest.gpkg_ds.GetLayerCount() != 0:
        gdaltest.post_reason('unexpected number of layers (not 0)')
        return 'fail'

    return 'success'
###############################################################################
# Add fields
def ogr_gpkg_6():
    """Add a string field, re-open the file and check the field survived.

    BUGFIX: the original assigned CreateField()'s return code to an unused
    variable and never checked it; a failure there is now reported.
    """
    if gdaltest.gpkg_dr is None or gdaltest.gpkg_ds is None:
        return 'skip'

    srs4326 = osr.SpatialReference()
    srs4326.ImportFromEPSG(4326)
    lyr = gdaltest.gpkg_ds.CreateLayer('field_test_layer', geom_type=ogr.wkbPoint, srs=srs4326)
    if lyr is None:
        return 'fail'

    field_defn = ogr.FieldDefn('dummy', ogr.OFTString)
    if lyr.CreateField(field_defn) != 0:
        gdaltest.post_reason('CreateField failed')
        return 'fail'

    if lyr.GetLayerDefn().GetFieldDefn(0).GetType() != ogr.OFTString:
        gdaltest.post_reason('wrong field type')
        return 'fail'

    # Re-open the file to make sure the field definition was persisted.
    gdaltest.gpkg_ds.Destroy()
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    gdaltest.gpkg_ds = gdaltest.gpkg_dr.Open('tmp/gpkg_test.gpkg', update=1)
    gdal.PopErrorHandler()
    if gdaltest.gpkg_ds is None:
        return 'fail'
    if gdaltest.gpkg_ds.GetLayerCount() != 1:
        return 'fail'

    lyr = gdaltest.gpkg_ds.GetLayer(0)
    if lyr.GetName() != 'field_test_layer':
        return 'fail'

    field_defn_out = lyr.GetLayerDefn().GetFieldDefn(0)
    if field_defn_out.GetType() != ogr.OFTString:
        gdaltest.post_reason('wrong field type after reopen')
        return 'fail'
    if field_defn_out.GetName() != 'dummy':
        gdaltest.post_reason('wrong field name after reopen')
        return 'fail'

    return 'success'
###############################################################################
# Add a feature / read a feature / delete a feature
def ogr_gpkg_7():
    """Insert, read back, randomly read/update and delete features.

    BUGFIX: the final DeleteLayer check posted a failure reason but then
    fell through to return 'success'; it now returns 'fail'.
    """
    if gdaltest.gpkg_dr is None or gdaltest.gpkg_ds is None:
        return 'skip'

    lyr = gdaltest.gpkg_ds.GetLayerByName('field_test_layer')

    # Insert a feature.
    feat = ogr.Feature(lyr.GetLayerDefn())
    feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT(10 10)'))
    feat.SetField('dummy', 'a dummy value')
    if lyr.CreateFeature(feat) != 0:
        gdaltest.post_reason('cannot create feature')
        return 'fail'

    # Read back what we just inserted.
    lyr.ResetReading()
    feat_read = lyr.GetNextFeature()
    if feat_read.GetField('dummy') != 'a dummy value':
        gdaltest.post_reason('output does not match input')
        return 'fail'

    # Only inserted one thing, so second feature should return NULL.
    feat_read = lyr.GetNextFeature()
    if feat_read is not None:
        gdaltest.post_reason('last call should return NULL')
        return 'fail'

    # Add another feature.
    feat = ogr.Feature(lyr.GetLayerDefn())
    feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT(100 100)'))
    feat.SetField('dummy', 'who you calling a dummy?')
    if lyr.CreateFeature(feat) != 0:
        gdaltest.post_reason('cannot create feature')
        return 'fail'

    # Random read a feature.
    feat_read_random = lyr.GetFeature(feat.GetFID())
    if feat_read_random.GetField('dummy') != 'who you calling a dummy?':
        gdaltest.post_reason('random read output does not match input')
        return 'fail'

    # Random write a feature.
    feat.SetField('dummy', 'i am no dummy')
    lyr.SetFeature(feat)
    feat_read_random = lyr.GetFeature(feat.GetFID())
    if feat_read_random.GetField('dummy') != 'i am no dummy':
        gdaltest.post_reason('random read output does not match random write input')
        return 'fail'

    # Delete a feature.
    lyr.DeleteFeature(feat.GetFID())
    if lyr.GetFeatureCount() != 1:
        gdaltest.post_reason('delete feature did not delete')
        return 'fail'

    # Delete the layer.
    if gdaltest.gpkg_ds.DeleteLayer('field_test_layer') != 0:
        gdaltest.post_reason('got error code from DeleteLayer(field_test_layer)')
        return 'fail'

    return 'success'
###############################################################################
# Test a variety of geometry feature types and attribute types
def ogr_gpkg_8():
    """Exercise a variety of geometry types and attribute field types.

    Creates 'tbl_linestring' (integer/string/real/date/datetime/binary fields),
    inserts 10 features plus one empty feature inside a transaction, reopens the
    datasource and round-trips the first feature's values.  Then repeats with a
    polygon layer and a 2.5D polygon layer, comparing WKT round-trips.

    Returns 'skip', 'fail' or 'success' (gdaltest convention).
    """
    # try:
    #     os.remove( 'tmp/gpkg_test.gpkg' )
    # except:
    #     pass
    # gdaltest.gpkg_dr = ogr.GetDriverByName( 'GPKG' )
    # gdaltest.gpkg_ds = gdaltest.gpkg_dr.CreateDataSource( 'tmp/gpkg_test.gpkg' )
    if gdaltest.gpkg_dr is None or gdaltest.gpkg_ds is None:
        return 'skip'
    srs = osr.SpatialReference()
    # Test a non-default SRS
    srs.ImportFromEPSG( 32631 )
    lyr = gdaltest.gpkg_ds.CreateLayer( 'tbl_linestring', geom_type = ogr.wkbLineString, srs = srs)
    if lyr is None:
        return 'fail'
    # Batch the inserts in a single transaction.
    lyr.StartTransaction()
    # NOTE(review): the CreateField() return codes are assigned but never checked.
    ret = lyr.CreateField(ogr.FieldDefn('fld_integer', ogr.OFTInteger))
    ret = lyr.CreateField(ogr.FieldDefn('fld_string', ogr.OFTString))
    ret = lyr.CreateField(ogr.FieldDefn('fld_real', ogr.OFTReal))
    ret = lyr.CreateField(ogr.FieldDefn('fld_date', ogr.OFTDate))
    ret = lyr.CreateField(ogr.FieldDefn('fld_datetime', ogr.OFTDateTime))
    ret = lyr.CreateField(ogr.FieldDefn('fld_binary', ogr.OFTBinary))
    geom = ogr.CreateGeometryFromWkt('LINESTRING(5 5,10 5,10 10,5 10)')
    feat = ogr.Feature(lyr.GetLayerDefn())
    feat.SetGeometry(geom)
    # Reuse one feature object; each CreateFeature() call inserts a new row.
    for i in range(10):
        feat.SetField('fld_integer', 10 + i)
        feat.SetField('fld_real', 3.14159/(i+1) )
        feat.SetField('fld_string', 'test string %d test' % i)
        feat.SetField('fld_date', '2014/05/17 ' )
        feat.SetField('fld_datetime', '2014/05/17 12:34:56' )
        feat.SetFieldBinaryFromHexString('fld_binary', 'fffe' )
        if lyr.CreateFeature(feat) != 0:
            gdaltest.post_reason('cannot create feature %d' % i)
            return 'fail'
    lyr.CommitTransaction()
    # An entirely empty feature must also be insertable and updatable.
    feat = ogr.Feature(lyr.GetLayerDefn())
    if lyr.CreateFeature(feat) != 0:
        gdaltest.post_reason('cannot insert empty')
        return 'fail'
    feat.SetFID(6)
    if lyr.SetFeature(feat) != 0:
        gdaltest.post_reason('cannot update with empty')
        return 'fail'
    # Reopen the datasource so values are read back from disk, not a cache.
    gdaltest.gpkg_ds = None
    gdaltest.gpkg_ds = gdaltest.gpkg_dr.Open( 'tmp/gpkg_test.gpkg', update = 1 )
    lyr = gdaltest.gpkg_ds.GetLayerByName('tbl_linestring')
    feat = lyr.GetNextFeature()
    if feat.GetField(0) != 10 or feat.GetField(1) != 'test string 0 test' or \
       feat.GetField(2) != 3.14159 or feat.GetField(3) != '2014/05/17' or \
       feat.GetField(4) != '2014/05/17 12:34:56' or feat.GetField(5) != 'FFFE':
        gdaltest.post_reason('fail')
        feat.DumpReadable()
        return 'fail'
    lyr = gdaltest.gpkg_ds.CreateLayer( 'tbl_polygon', geom_type = ogr.wkbPolygon, srs = srs)
    if lyr is None:
        return 'fail'
    lyr.StartTransaction()
    ret = lyr.CreateField(ogr.FieldDefn('fld_datetime', ogr.OFTDateTime))
    ret = lyr.CreateField(ogr.FieldDefn('fld_string', ogr.OFTString))
    # Polygon with an interior ring to round-trip through the layer.
    geom = ogr.CreateGeometryFromWkt('POLYGON((5 5, 10 5, 10 10, 5 10, 5 5),(6 6, 6 7, 7 7, 7 6, 6 6))')
    feat = ogr.Feature(lyr.GetLayerDefn())
    feat.SetGeometry(geom)
    for i in range(10):
        feat.SetField('fld_string', 'my super string %d' % i)
        feat.SetField('fld_datetime', '2010-01-01' )
        if lyr.CreateFeature(feat) != 0:
            gdaltest.post_reason('cannot create polygon feature %d' % i)
            return 'fail'
    lyr.CommitTransaction()
    feat = lyr.GetFeature(3)
    geom_read = feat.GetGeometryRef()
    if geom.ExportToWkt() != geom_read.ExportToWkt():
        gdaltest.post_reason('geom output not equal to geom input')
        return 'fail'
    # Test out the 3D support...
    lyr = gdaltest.gpkg_ds.CreateLayer( 'tbl_polygon25d', geom_type = ogr.wkbPolygon25D, srs = srs)
    if lyr is None:
        return 'fail'
    ret = lyr.CreateField(ogr.FieldDefn('fld_string', ogr.OFTString))
    # NOTE(review): '5 104 ' looks like a typo for '5 10 4' (every other vertex
    # has 3 coordinates).  Left untouched because the test only compares the
    # WKT round-trip against this exact input, so "fixing" it would change the
    # expected value — confirm upstream before altering.
    geom = ogr.CreateGeometryFromWkt('POLYGON((5 5 1, 10 5 2, 10 10 3, 5 104 , 5 5 1),(6 6 4, 6 7 5, 7 7 6, 7 6 7, 6 6 4))')
    feat = ogr.Feature(lyr.GetLayerDefn())
    feat.SetGeometry(geom)
    lyr.CreateFeature(feat)
    lyr.ResetReading()
    feat = lyr.GetNextFeature()
    geom_read = feat.GetGeometryRef()
    if geom.ExportToWkt() != geom_read.ExportToWkt():
        gdaltest.post_reason('3d geom output not equal to geom input')
        return 'fail'
    return 'success'
###############################################################################
# Test support for extents and counts
def ogr_gpkg_9():
    """Verify the extent and feature count of 'tbl_linestring' match what the
    earlier tests inserted (11 features covering (5,10)x(5,10)).

    Returns 'skip', 'fail' or 'success' (gdaltest convention).
    """
    if gdaltest.gpkg_dr is None or gdaltest.gpkg_ds is None:
        return 'skip'
    layer = gdaltest.gpkg_ds.GetLayerByName('tbl_linestring')
    got_extent = layer.GetExtent()
    if got_extent != (5.0, 10.0, 5.0, 10.0):
        gdaltest.post_reason('got bad extent')
        print(got_extent)
        return 'fail'
    got_count = layer.GetFeatureCount()
    if got_count != 11:
        gdaltest.post_reason('got bad featurecount')
        print(got_count)
        return 'fail'
    return 'success'
###############################################################################
# Test non-SELECT SQL commands
def ogr_gpkg_11():
    """Exercise non-SELECT SQL: CREATE INDEX, ALTER TABLE ... RENAME, VACUUM.

    Renames 'tbl_linestring' to 'tbl_linestring_renamed' (later tests rely on
    the renamed table) and checks the renamed layer is still queryable.

    Returns 'skip', 'fail' or 'success' (gdaltest convention).
    """
    if gdaltest.gpkg_dr is None:
        return 'skip'
    # Reopen the datasource in update mode before issuing DDL statements.
    gdaltest.gpkg_ds = None
    gdaltest.gpkg_ds = ogr.Open('tmp/gpkg_test.gpkg', update = 1)
    for statement in ('CREATE INDEX tbl_linestring_fld_integer_idx ON tbl_linestring(fld_integer)',
                      'ALTER TABLE tbl_linestring RENAME TO tbl_linestring_renamed',
                      'VACUUM'):
        gdaltest.gpkg_ds.ExecuteSQL(statement)
    # Reopen so the renamed table is picked up in the layer list.
    gdaltest.gpkg_ds = None
    gdaltest.gpkg_ds = ogr.Open('tmp/gpkg_test.gpkg', update = 1)
    renamed_lyr = gdaltest.gpkg_ds.GetLayerByName('tbl_linestring_renamed')
    if renamed_lyr is None:
        return 'fail'
    renamed_lyr.SetAttributeFilter('fld_integer = 10')
    if renamed_lyr.GetFeatureCount() != 1:
        return 'fail'
    return 'success'
###############################################################################
# Test SELECT SQL commands
def ogr_gpkg_12():
    """Exercise SELECT SQL result layers.

    Checks FID/geometry column names, geometry type, SRS, feature count and
    field count on a plain SELECT; an empty result set; LIMIT/ORDER BY/UNION
    variants; and a geometry-less scalar query (sqlite_version()).

    Every ExecuteSQL() result must be released via ReleaseResultSet(); the
    acquire/release pairing below is part of what is being tested.

    Returns 'skip', 'fail' or 'success' (gdaltest convention).
    """
    if gdaltest.gpkg_dr is None:
        return 'skip'
    sql_lyr = gdaltest.gpkg_ds.ExecuteSQL('SELECT * FROM tbl_linestring_renamed')
    if sql_lyr.GetFIDColumn() != 'fid':
        gdaltest.post_reason('fail')
        return 'fail'
    if sql_lyr.GetGeomType() != ogr.wkbLineString:
        gdaltest.post_reason('fail')
        return 'fail'
    if sql_lyr.GetGeometryColumn() != 'geom':
        gdaltest.post_reason('fail')
        return 'fail'
    # SRS assigned in ogr_gpkg_8 was EPSG:32631; the WKT must mention it.
    if sql_lyr.GetSpatialRef().ExportToWkt().find('32631') < 0:
        gdaltest.post_reason('fail')
        return 'fail'
    feat = sql_lyr.GetNextFeature()
    if feat.GetFID() != 1:
        gdaltest.post_reason('fail')
        return 'fail'
    # 10 regular features + 1 empty one were inserted earlier.
    if sql_lyr.GetFeatureCount() != 11:
        gdaltest.post_reason('fail')
        return 'fail'
    if sql_lyr.GetLayerDefn().GetFieldCount() != 6:
        gdaltest.post_reason('fail')
        return 'fail'
    gdaltest.gpkg_ds.ReleaseResultSet(sql_lyr)
    # A query matching nothing must yield no feature.
    sql_lyr = gdaltest.gpkg_ds.ExecuteSQL('SELECT * FROM tbl_linestring_renamed WHERE 0=1')
    feat = sql_lyr.GetNextFeature()
    if feat is not None:
        gdaltest.post_reason('fail')
        return 'fail'
    gdaltest.gpkg_ds.ReleaseResultSet(sql_lyr)
    # Each of these must return exactly one feature.
    for sql in [ 'SELECT * FROM tbl_linestring_renamed LIMIT 1',
                 'SELECT * FROM tbl_linestring_renamed ORDER BY fld_integer LIMIT 1',
                 'SELECT * FROM tbl_linestring_renamed UNION ALL SELECT * FROM tbl_linestring_renamed ORDER BY fld_integer LIMIT 1' ]:
        sql_lyr = gdaltest.gpkg_ds.ExecuteSQL(sql)
        feat = sql_lyr.GetNextFeature()
        if feat is None:
            gdaltest.post_reason('fail')
            return 'fail'
        feat = sql_lyr.GetNextFeature()
        if feat is not None:
            gdaltest.post_reason('fail')
            return 'fail'
        if sql_lyr.GetFeatureCount() != 1:
            gdaltest.post_reason('fail')
            return 'fail'
        gdaltest.gpkg_ds.ReleaseResultSet(sql_lyr)
    # Scalar query: one field, no geometry field.
    sql_lyr = gdaltest.gpkg_ds.ExecuteSQL('SELECT sqlite_version()')
    feat = sql_lyr.GetNextFeature()
    if feat is None:
        gdaltest.post_reason('fail')
        return 'fail'
    if sql_lyr.GetLayerDefn().GetFieldCount() != 1:
        gdaltest.post_reason('fail')
        return 'fail'
    if sql_lyr.GetLayerDefn().GetGeomFieldCount() != 0:
        gdaltest.post_reason('fail')
        return 'fail'
    gdaltest.gpkg_ds.ReleaseResultSet(sql_lyr)
    return 'success'
###############################################################################
# Test non-spatial tables
def ogr_gpkg_13():
    """Exercise non-spatial tables (geom_type = wkbNone).

    Inserts one feature before the field exists (so its field stays unset) and
    one after with fld_integer=1, then re-checks both in-memory and after
    reopening the file.

    Returns 'skip', 'fail' or 'success' (gdaltest convention).
    """
    if gdaltest.gpkg_dr is None:
        return 'skip'
    lyr = gdaltest.gpkg_ds.CreateLayer('non_spatial', geom_type = ogr.wkbNone )
    # First feature is created before fld_integer exists.
    feat = ogr.Feature(lyr.GetLayerDefn())
    lyr.CreateFeature(feat)
    feat = None
    lyr.CreateField(ogr.FieldDefn('fld_integer', ogr.OFTInteger))
    feat = ogr.Feature(lyr.GetLayerDefn())
    feat.SetField('fld_integer', 1)
    lyr.CreateFeature(feat)
    feat = None
    lyr.ResetReading()
    # Feature 1: field must be unset; feature 2: field must read back as 1.
    feat = lyr.GetNextFeature()
    if feat.IsFieldSet('fld_integer'):
        feat.DumpReadable()
        gdaltest.post_reason('fail')
        return 'fail'
    feat = lyr.GetNextFeature()
    if feat.GetField('fld_integer') != 1:
        feat.DumpReadable()
        gdaltest.post_reason('fail')
        return 'fail'
    # Reopen and repeat the checks against what was persisted.
    gdaltest.gpkg_ds = None
    gdaltest.gpkg_ds = ogr.Open('tmp/gpkg_test.gpkg', update = 1)
    # Layer count reflects layers created by the earlier tests plus this one.
    if gdaltest.gpkg_ds.GetLayerCount() != 4:
        gdaltest.post_reason('fail')
        return 'fail'
    lyr = gdaltest.gpkg_ds.GetLayer('non_spatial')
    if lyr.GetGeomType() != ogr.wkbNone:
        gdaltest.post_reason('fail')
        return 'fail'
    feat = lyr.GetNextFeature()
    if feat.IsFieldSet('fld_integer'):
        gdaltest.post_reason('fail')
        return 'fail'
    feat = lyr.GetNextFeature()
    if feat.GetField('fld_integer') != 1:
        feat.DumpReadable()
        gdaltest.post_reason('fail')
        return 'fail'
    return 'success'
###############################################################################
# Add various geometries to test spatial filtering
def ogr_gpkg_14():
    """Add point layers used by later spatial-filtering and SQL-function tests.

    Creates two point layers — one with SPATIAL_INDEX=NO and one with the
    default spatial index (both with dashes in the name, to exercise
    identifier quoting) — and inserts the same four corner points into each.

    The original body repeated the feature-creation block eight times; it is
    factored into a nested helper with identical behavior.

    Returns 'skip' or 'success' (gdaltest convention).
    """
    if gdaltest.gpkg_dr is None:
        return 'skip'
    sr = osr.SpatialReference()
    sr.ImportFromEPSG(32631)

    # The four points inserted into each layer (same order as before).
    point_wkts = ('POINT(1000 30000000)',
                  'POINT(-1000 30000000)',
                  'POINT(1000 -30000000)',
                  'POINT(-1000 -30000000)')

    def fill_layer(lyr):
        # One feature per WKT; mirrors the original repeated blocks exactly.
        for wkt in point_wkts:
            feat = ogr.Feature(lyr.GetLayerDefn())
            feat.SetGeometry(ogr.CreateGeometryFromWkt(wkt))
            lyr.CreateFeature(feat)

    # Layer explicitly created without a spatial index.
    lyr = gdaltest.gpkg_ds.CreateLayer('point_no_spi-but-with-dashes', geom_type = ogr.wkbPoint, options = ['SPATIAL_INDEX=NO'], srs = sr )
    fill_layer(lyr)
    # Layer with the default spatial index (and no explicit SRS, as before).
    lyr = gdaltest.gpkg_ds.CreateLayer('point-with-spi-and-dashes', geom_type = ogr.wkbPoint )
    fill_layer(lyr)
    return 'success'
###############################################################################
# Test SQL functions
def ogr_gpkg_15():
    """Exercise the driver's SQL functions.

    Checks the ST_* inspection functions on a real point and on a NULL
    geometry, the GPKG_IsAssignable() type-compatibility function, and the
    CreateSpatialIndex()/DisableSpatialIndex() management functions (including
    their error cases, which are run under a quiet error handler).

    Returns 'skip', 'fail' or 'success' (gdaltest convention).
    """
    if gdaltest.gpkg_dr is None:
        return 'skip'
    # ST_* functions on the first point of the no-spatial-index layer.
    sql_lyr = gdaltest.gpkg_ds.ExecuteSQL(
        'SELECT ST_IsEmpty(geom), ST_SRID(geom), ST_GeometryType(geom), ' + \
        'ST_MinX(geom), ST_MinY(geom), ST_MaxX(geom), ST_MaxY(geom) FROM \"point_no_spi-but-with-dashes\" WHERE fid = 1')
    feat = sql_lyr.GetNextFeature()
    if feat.GetField(0) != 0 or feat.GetField(1) != 32631 or \
       feat.GetField(2) != 'POINT' or \
       feat.GetField(3) != 1000 or feat.GetField(4) != 30000000 or \
       feat.GetField(5) != 1000 or feat.GetField(6) != 30000000:
        feat.DumpReadable()
        gdaltest.post_reason('fail')
        return 'fail'
    gdaltest.gpkg_ds.ReleaseResultSet(sql_lyr)
    # The same functions applied to a NULL geometry must yield unset fields.
    sql_lyr = gdaltest.gpkg_ds.ExecuteSQL(
        'SELECT ST_IsEmpty(geom), ST_SRID(geom), ST_GeometryType(geom), ' + \
        'ST_MinX(geom), ST_MinY(geom), ST_MaxX(geom), ST_MaxY(geom) FROM tbl_linestring_renamed WHERE geom IS NULL')
    feat = sql_lyr.GetNextFeature()
    if feat.IsFieldSet(0) or feat.IsFieldSet(1) or feat.IsFieldSet(2) or \
       feat.IsFieldSet(3) or feat.IsFieldSet(4) or feat.IsFieldSet(5) or feat.IsFieldSet(6):
        feat.DumpReadable()
        gdaltest.post_reason('fail')
        return 'fail'
    gdaltest.gpkg_ds.ReleaseResultSet(sql_lyr)
    # GPKG_IsAssignable(expected, actual): 1 when 'actual' may be stored in a
    # column declared as 'expected'.
    for (expected_type, actual_type, expected_result) in [
            ('POINT', 'POINT', 1),
            ('LINESTRING', 'POINT', 0),
            ('GEOMETRY', 'POINT', 1),
            ('POINT', 'GEOMETRY', 0),
            ('GEOMETRYCOLLECTION', 'MULTIPOINT', 1),
            ('GEOMETRYCOLLECTION', 'POINT', 0) ]:
        sql_lyr = gdaltest.gpkg_ds.ExecuteSQL("SELECT GPKG_IsAssignable('%s', '%s')" % (expected_type, actual_type))
        feat = sql_lyr.GetNextFeature()
        got_result = feat.GetField(0)
        gdaltest.gpkg_ds.ReleaseResultSet(sql_lyr)
        if got_result != expected_result:
            print("expected_type=%s actual_type=%s expected_result=%d got_result=%d" % (expected_type, actual_type, expected_result, got_result))
            gdaltest.post_reason('fail')
            return 'fail'
    # Spatial-index management functions; expected_result == 0 cases emit
    # errors, so they run under the quiet error handler.
    for (sql, expected_result) in [
            ("SELECT DisableSpatialIndex('point-with-spi-and-dashes', 'geom')", 1),
            ("SELECT DisableSpatialIndex('point-with-spi-and-dashes', 'geom')", 0),
            ("SELECT CreateSpatialIndex('point-with-spi-and-dashes', 'geom')", 1),
            ("SELECT CreateSpatialIndex('point-with-spi-and-dashes', 'geom')", 0),
            ("SELECT CreateSpatialIndex('point-with-spi-and-dashes', NULL)", 0),
            ("SELECT CreateSpatialIndex(NULL, 'geom')", 0),
            ("SELECT CreateSpatialIndex('bla', 'geom')", 0),
            ("SELECT CreateSpatialIndex('point-with-spi-and-dashes', 'bla')", 0),
            ("SELECT DisableSpatialIndex('point-with-spi-and-dashes', NULL)", 0),
            ("SELECT DisableSpatialIndex(NULL, 'geom')", 0),
            ("SELECT DisableSpatialIndex('bla', 'geom')", 0),
            ("SELECT DisableSpatialIndex('point-with-spi-and-dashes', 'bla')", 0),
            ("SELECT CreateSpatialIndex('non_spatial', '')", 0),
            ("SELECT CreateSpatialIndex('point_no_spi-but-with-dashes', 'geom')", 1),
            # Final DisableSpatialIndex: will be effectively deleted at dataset closing
            ("SELECT DisableSpatialIndex('point_no_spi-but-with-dashes', 'geom')", 1),
            ]:
        if expected_result == 0:
            gdal.PushErrorHandler('CPLQuietErrorHandler')
        sql_lyr = gdaltest.gpkg_ds.ExecuteSQL(sql)
        if expected_result == 0:
            gdal.PopErrorHandler()
        feat = sql_lyr.GetNextFeature()
        got_result = feat.GetField(0)
        gdaltest.gpkg_ds.ReleaseResultSet(sql_lyr)
        if got_result != expected_result:
            print(sql)
            gdaltest.post_reason('fail')
            return 'fail'
    # Reopen so the pending index changes are flushed to disk.
    gdaltest.gpkg_ds = None
    gdaltest.gpkg_ds = gdaltest.gpkg_dr.Open( 'tmp/gpkg_test.gpkg', update = 1 )
    return 'success'
###############################################################################
# Test unknown extensions
def ogr_gpkg_16():
    """Exercise handling of unknown entries in gpkg_extensions.

    Registers a fake 'write-only' extension and checks that opening the file
    read-only is silent but opening it for update warns; then flips the scope
    to 'read-write' and checks that both open modes warn.

    Returns 'skip', 'fail' or 'success' (gdaltest convention).
    """
    if gdaltest.gpkg_dr is None:
        return 'skip'
    ds = gdaltest.gpkg_dr.CreateDataSource('/vsimem/ogr_gpk_16.gpkg')
    lyr = ds.CreateLayer('foo')
    # Register an unknown 'write-only' extension directly in gpkg_extensions.
    # NOTE(review): the result of ExecuteSQL() is assigned but never released
    # or used, here and below — presumably None for INSERT/UPDATE; confirm.
    sql_lyr = ds.ExecuteSQL("INSERT INTO gpkg_extensions ( table_name, column_name, " + \
        "extension_name, definition, scope ) VALUES ( 'foo', 'geom', 'myext', 'some ext', 'write-only' ) ")
    ds = None
    # No warning since we open as read-only
    ds = ogr.Open('/vsimem/ogr_gpk_16.gpkg')
    ds = None
    # Warning since we open as read-write
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ds = ogr.Open('/vsimem/ogr_gpk_16.gpkg', update = 1)
    gdal.PopErrorHandler()
    if gdal.GetLastErrorMsg() == '':
        gdaltest.post_reason('fail : warning expected')
        return 'fail'
    # A 'read-write' scoped unknown extension must warn in both open modes.
    sql_lyr = ds.ExecuteSQL("UPDATE gpkg_extensions SET scope = 'read-write' WHERE extension_name = 'myext'")
    ds = None
    # Warning since we open as read-only
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ds = ogr.Open('/vsimem/ogr_gpk_16.gpkg')
    gdal.PopErrorHandler()
    if gdal.GetLastErrorMsg() == '':
        gdaltest.post_reason('fail : warning expected')
        return 'fail'
    # and also as read-write
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ds = ogr.Open('/vsimem/ogr_gpk_16.gpkg', update = 1)
    gdal.PopErrorHandler()
    if gdal.GetLastErrorMsg() == '':
        gdaltest.post_reason('fail : warning expected')
        return 'fail'
    ds = None
    gdal.Unlink('/vsimem/ogr_gpk_16.gpkg')
    return 'success'
###############################################################################
# Run test_ogrsf
def ogr_gpkg_test_ogrsf():
    """Run the external test_ogrsf utility against the test GeoPackage.

    First runs a SQLite PRAGMA integrity_check on the datasource, then invokes
    test_ogrsf on the whole file and on a -sql query, expecting INFO output
    and no ERROR output.

    Fix: the original leaked the integrity-check result set (ReleaseResultSet
    was skipped) when the check failed; it is now released on every path.

    Returns 'skip', 'fail' or 'success' (gdaltest convention).
    """
    if gdaltest.gpkg_dr is None:
        return 'skip'
    # Do integrity check first
    sql_lyr = gdaltest.gpkg_ds.ExecuteSQL("PRAGMA integrity_check")
    feat = sql_lyr.GetNextFeature()
    integrity_ok = feat.GetField(0) == 'ok'
    gdaltest.gpkg_ds.ReleaseResultSet(sql_lyr)
    if not integrity_ok:
        gdaltest.post_reason('integrity check failed')
        return 'fail'
    import test_cli_utilities
    if test_cli_utilities.get_test_ogrsf_path() is None:
        return 'skip'
    # Close our handle so the external process gets exclusive access.
    gdaltest.gpkg_ds = None
    #sys.exit(0)
    ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' tmp/gpkg_test.gpkg --config OGR_SQLITE_SYNCHRONOUS OFF')
    if ret.find('INFO') == -1 or ret.find('ERROR') != -1:
        gdaltest.post_reason('fail')
        print(ret)
        return 'fail'
    ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' tmp/gpkg_test.gpkg -sql "select * from tbl_linestring_renamed" --config OGR_SQLITE_SYNCHRONOUS OFF')
    if ret.find('INFO') == -1 or ret.find('ERROR') != -1:
        gdaltest.post_reason('fail')
        print(ret)
        return 'fail'
    return 'success'
###############################################################################
# Remove the test db from the tmp directory
def ogr_gpkg_cleanup():
    """Close the datasource and remove the temporary GeoPackage file.

    Fix: the original used a bare ``except:`` (which would also swallow
    KeyboardInterrupt/SystemExit); removal failures are an OSError, so only
    that is suppressed now.

    Returns 'skip' or 'success' (gdaltest convention).
    """
    if gdaltest.gpkg_dr is None:
        return 'skip'
    gdaltest.gpkg_ds = None
    try:
        os.remove( 'tmp/gpkg_test.gpkg' )
    except OSError:
        # Best-effort cleanup: the file may be absent if earlier tests failed.
        pass
    return 'success'
###############################################################################
# Ordered list of test callables run by the gdaltest framework.  Order matters:
# later tests depend on state (layers, renamed tables) created by earlier ones.
# Note: there is no ogr_gpkg_10 — the numbering skips from 9 to 11.
gdaltest_list = [
    ogr_gpkg_1,
    ogr_gpkg_2,
    ogr_gpkg_3,
    ogr_gpkg_4,
    ogr_gpkg_5,
    ogr_gpkg_6,
    ogr_gpkg_7,
    ogr_gpkg_8,
    ogr_gpkg_9,
    ogr_gpkg_11,
    ogr_gpkg_12,
    ogr_gpkg_13,
    ogr_gpkg_14,
    ogr_gpkg_15,
    ogr_gpkg_16,
    ogr_gpkg_test_ogrsf,
    ogr_gpkg_cleanup,
    ]
if __name__ == '__main__':
    # Standard gdaltest entry point: register, run and summarize the suite.
    gdaltest.setup_run( 'ogr_gpkg' )
    gdaltest.run_tests( gdaltest_list )
    gdaltest.summarize()
| 34.225467 | 176 | 0.617708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10,787 | 0.368195 |
a689a5d6a9c1f320fa1c6a898b3f880278371c7b | 769 | py | Python | directory/models/directory_access_group.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 13 | 2015-11-29T12:19:12.000Z | 2021-02-21T15:42:11.000Z | directory/models/directory_access_group.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 23 | 2015-04-29T19:43:34.000Z | 2021-02-10T05:50:17.000Z | directory/models/directory_access_group.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 11 | 2015-09-20T18:59:00.000Z | 2020-02-07T08:47:34.000Z | from django.db import models
from django.utils.translation import ugettext_lazy as _
class DirectoryAccessGroup(models.Model):
    """
    Grants expiring group access to the personnel directory.
    """
    # Organization whose personnel directory is being exposed.
    organization = models.ForeignKey('core.Organization', on_delete=models.CASCADE)
    # Auth group whose members receive the access.
    group = models.ForeignKey('auth.Group', on_delete=models.CASCADE)
    # Optional validity window; NULL bounds presumably mean "unbounded" —
    # confirm against the code that enforces access.
    active_from = models.DateTimeField(blank=True, null=True)
    active_until = models.DateTimeField(blank=True, null=True)
    # Bookkeeping timestamps maintained automatically by Django.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    class Meta:
        verbose_name = _('directory access group')
        verbose_name_plural = _('directory access groups')
        ordering = ('organization', 'group')
| 34.954545 | 83 | 0.73212 | 681 | 0.885566 | 0 | 0 | 0 | 0 | 0 | 0 | 173 | 0.224967 |
a68a59e9017053930697ebb496bf811bd8da3fd6 | 3,219 | py | Python | @utils/parser/templating/technology.py | RogulinSV/stellaris-aoe | 120d9114059b5e744c4025966e1f4b50e1d76200 | [
"Unlicense"
] | null | null | null | @utils/parser/templating/technology.py | RogulinSV/stellaris-aoe | 120d9114059b5e744c4025966e1f4b50e1d76200 | [
"Unlicense"
] | null | null | null | @utils/parser/templating/technology.py | RogulinSV/stellaris-aoe | 120d9114059b5e744c4025966e1f4b50e1d76200 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from string import Template
from domains import Module, Technology
class GatheringTechnologyDataTemplate(object):
	"""Accumulates per-technology script snippets and compiles them into the
	'intelligence_gather_technology_data' scripted effect.

	process() is fed (Technology, Module) pairs; compile() emits the final
	text: one counter initializer per area/category/type, then per-module
	blocks incrementing those counters for each discovered technology.
	"""
	def __init__(self):
		# Names already processed, used to de-duplicate across modules.
		self.__technologies = list()
		self.__areas = set()
		self.__categories = set()
		self.__templates = dict()
		# module name -> list of compiled 'limiter' snippets.
		self.__compiled = dict()
		# Initializes one counter variable to 0.
		self.__templates['dropper'] = Template('''\
	set_variable = {
		which = "espionage_data_technology_count_$tag"
		value = 0
	}
''')
		self.__templates['prefix'] = Template('''\
	# Module $module: $count technologies discovered
''')
		# Wraps per-technology modifiers in a has_technology guard.
		self.__templates['limiter'] = Template('''\
	if = {
		limit = {
			has_technology = "$name"
		}
		$modifiers
	}
''')
		# Increments one counter variable by 1.
		self.__templates['modifier'] = Template('''\
		change_variable = {
			which = "espionage_data_technology_count_$tag"
			value = 1
		}\
''')
	def process(self, technology: Technology, module: Module):
		# Skip duplicates (same technology may appear in several modules).
		if technology.name in self.__technologies:
			logging.info('... technology %s from module %s skipped' % (technology, module))
			return
		self.__technologies.append(technology.name)
		self.__areas.add(technology.area)
		for category in technology.categories:
			self.__categories.add(category)
		# One counter increment per area, rarity type and category.
		modifiers = list()
		modifiers.append(self.__templates['modifier'].substitute(tag='area_' + technology.area))
		if technology.is_dangerous:
			modifiers.append(self.__templates['modifier'].substitute(tag='type_dangerous'))
		elif technology.is_rare:
			modifiers.append(self.__templates['modifier'].substitute(tag='type_rare'))
		for category in technology.categories:
			modifiers.append(self.__templates['modifier'].substitute(tag='category_' + category))
		if len(modifiers) > 0:
			module = str(module)
			if module not in self.__compiled:
				self.__compiled[module] = list()
			self.__compiled[module].append(
				self.__templates['limiter'].substitute(name=technology.name, modifiers="\n".join(modifiers).lstrip())
			)
	def compile(self) -> list:
		# Emit counter initializers first, then the per-module blocks.
		compiled = list()
		compiled.append('intelligence_gather_technology_data = {\n')
		for area in sorted(self.__areas):
			compiled.append(self.__templates['dropper'].substitute(tag='area_' + area))
		for category in sorted(self.__categories):
			compiled.append(self.__templates['dropper'].substitute(tag='category_' + category))
		for type in ('rare', 'dangerous'):
			compiled.append(self.__templates['dropper'].substitute(tag='type_' + type))
		for module in self.__compiled:
			compiled.append(self.__templates['prefix'].substitute(module=module, count=len(self.__compiled.get(module))))
			compiled.extend(self.__compiled.get(module))
		compiled.append('}')
		return compiled
	@staticmethod
	def supports(value):
		# This template only handles Technology domain objects.
		return isinstance(value, Technology)
	def name(self) -> str:
		# Output file name for the compiled template.
		return 'gathering_technology_data_template.txt'
| 35.373626 | 121 | 0.622864 | 3,087 | 0.958993 | 0 | 0 | 83 | 0.025784 | 0 | 0 | 789 | 0.245107 |
a68c2fae55a4172bd6d6344ce187492035975eee | 1,263 | py | Python | app/analyzers/indicators/sar.py | r15ch13/crypto-signal | d423681223124278a3942cf2e930aafe5b84a855 | [
"MIT"
] | 4 | 2021-03-03T16:39:59.000Z | 2021-08-28T21:05:34.000Z | app/analyzers/indicators/sar.py | r15ch13/crypto-signal | d423681223124278a3942cf2e930aafe5b84a855 | [
"MIT"
] | 1 | 2021-05-10T16:11:48.000Z | 2021-05-10T16:11:48.000Z | app/analyzers/indicators/sar.py | r15ch13/crypto-signal | d423681223124278a3942cf2e930aafe5b84a855 | [
"MIT"
] | 6 | 2019-03-07T10:58:45.000Z | 2021-05-08T22:18:01.000Z | """ MACD Indicator
"""
import math
import pandas
from talib import abstract
from analyzers.utils import IndicatorUtils
class SAR(IndicatorUtils):
    def analyze(self, historical_data, signal=['sar'], hot_thresh=None, cold_thresh=None):
        """Performs a SAR (parabolic stop-and-reverse) analysis on the historical data.

        NOTE(review): the original docstring said "macd analysis" / "Defaults
        to macd" — clearly copy-pasted; the code computes talib SAR and the
        default signal is ['sar'], so the text has been corrected.

        NOTE(review): ``signal=['sar']`` is a mutable default argument; it is
        never mutated here so it is harmless, but a ``None`` default would be
        safer.

        Args:
            historical_data (list): A matrix of historical OHCLV data.
            signal (list, optional): Defaults to ['sar']. The indicator line to check hot/cold
                against.
            hot_thresh (float, optional): Defaults to None. The threshold at which this might be
                good to purchase.
            cold_thresh (float, optional): Defaults to None. The threshold at which this might be
                good to sell.
        Returns:
            pandas.DataFrame: A dataframe containing the indicators and hot/cold values.
        """
        dataframe = self.convert_to_dataframe(historical_data)
        sar_values = abstract.SAR(dataframe).iloc[:]
        # Drop rows where every indicator value is NaN (SAR warm-up period).
        sar_values.dropna(how='all', inplace=True)
        # Only label hot/cold when at least one value survived the dropna.
        if sar_values[signal[0]].shape[0]:
            sar_values['is_hot'] = sar_values[signal[0]] > hot_thresh
            sar_values['is_cold'] = sar_values[signal[0]] < cold_thresh
        return sar_values
| 33.236842 | 97 | 0.648456 | 1,138 | 0.901029 | 0 | 0 | 0 | 0 | 0 | 0 | 680 | 0.538401 |
a68cf49a12bc62a074851902de8ab970dee1d0f4 | 376 | py | Python | tests/protocol/primary/a.py | gufolabs/gufo_loader | ffb4e17b2e8f36d938a145d50b7bd27d976f9fce | [
"BSD-3-Clause"
] | 4 | 2022-03-04T07:49:18.000Z | 2022-03-08T07:57:05.000Z | tests/protocol/primary/a.py | gufolabs/gufo_loader | ffb4e17b2e8f36d938a145d50b7bd27d976f9fce | [
"BSD-3-Clause"
] | null | null | null | tests/protocol/primary/a.py | gufolabs/gufo_loader | ffb4e17b2e8f36d938a145d50b7bd27d976f9fce | [
"BSD-3-Clause"
] | 1 | 2022-03-08T07:57:07.000Z | 2022-03-08T07:57:07.000Z | # ---------------------------------------------------------------------
# Gufo Labs Loader:
# a plugin
# ---------------------------------------------------------------------
# Copyright (C) 2022, Gufo Labs
# ---------------------------------------------------------------------
class APlugin(object):
    """Test plugin for the primary protocol; registered under the name ``"a"``."""

    name = "a"

    def get_name(self) -> str:
        """Return this plugin's name attribute."""
        return getattr(self, "name")
| 26.857143 | 71 | 0.260638 | 94 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 276 | 0.734043 |
a68d575600de2c24556e7348001ad2a913abac90 | 1,178 | py | Python | tools/csv2kml.py | platypii/BASElineFlightComputer | 7881febb680b32c88c7331563e6b4ed15db9a433 | [
"MIT"
] | 22 | 2016-06-28T15:18:25.000Z | 2022-03-20T01:44:13.000Z | tools/csv2kml.py | Probot9/BASElineFlightComputer | cd449d32887e82d8a187586894cbd1a44c5b668a | [
"MIT"
] | 5 | 2018-04-30T00:54:03.000Z | 2020-01-30T18:11:45.000Z | tools/csv2kml.py | Probot9/BASElineFlightComputer | cd449d32887e82d8a187586894cbd1a44c5b668a | [
"MIT"
] | 5 | 2015-05-27T03:26:49.000Z | 2021-12-11T05:49:33.000Z | #!/usr/bin/python
import sys
# NOTE(review): this is a Python 2 script (print statements throughout);
# it will not run under Python 3 without conversion.
if len(sys.argv) != 2:
    print 'Usage: csv2kml input.csv'
    exit()
# Static KML skeleton; track coordinates are printed between header and footer.
kml_header = """<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<kml xmlns=\"http://www.opengis.net/kml/2.2\" xmlns:gx=\"http://www.google.com/kml/ext/2.2\" xmlns:kml=\"http://www.opengis.net/kml/2.2\" xmlns:atom=\"http://www.w3.org/2005/Atom\">
<Document>
<Style id=\"flight\">
<LineStyle>
<color>ffff5500</color>
<width>5</width>
</LineStyle>
</Style>
<Folder>
<name>Jump</name>
<open>1</open>
<Placemark>
<name>Track</name>
<styleUrl>#flight</styleUrl>
<LineString>
<tessellate>1</tessellate>
<altitudeMode>absolute</altitudeMode>
<coordinates>"""
kml_footer = """
</coordinates>
</LineString>
</Placemark>
</Folder>
</Document>
</kml>
"""
# Write KML file
with open(sys.argv[1]) as f:
    # write kml header
    print kml_header
    # write point data
    # CSV rows with cols[1] == 'gps' carry a fix: cols[5]=lat, cols[6]=lon,
    # cols[7]=alt.  KML coordinate order is lon,lat,alt.
    for line in f:
        cols = line.split(',')
        if cols[1] == 'gps':
            lat = cols[5]
            lon = cols[6]
            alt = cols[7]
            print lon + ',' + lat + ',' + alt
    # write kml footer
    print kml_footer
| 21.418182 | 181 | 0.565365 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 881 | 0.747878 |
a6932bb49ce1656029b076c4c5e5b86a4e1586b2 | 1,741 | py | Python | scripts/artifacts/installedappsGass.py | Krypterry/ALEAPP | 970b2e62742603336fe791b7c02e12e1f1e77375 | [
"MIT"
] | 187 | 2020-02-22T23:35:32.000Z | 2022-03-31T13:46:24.000Z | scripts/artifacts/installedappsGass.py | Krypterry/ALEAPP | 970b2e62742603336fe791b7c02e12e1f1e77375 | [
"MIT"
] | 65 | 2020-02-25T18:22:47.000Z | 2022-03-27T21:41:21.000Z | scripts/artifacts/installedappsGass.py | Krypterry/ALEAPP | 970b2e62742603336fe791b7c02e12e1f1e77375 | [
"MIT"
] | 47 | 2020-02-24T22:33:35.000Z | 2022-03-11T05:19:42.000Z | import sqlite3
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, open_sqlite_db_readonly
def get_installedappsGass(files_found, report_folder, seeker, wrap_text):
for file_found in files_found:
file_found = str(file_found)
if file_found.endswith('.db'):
db = open_sqlite_db_readonly(file_found)
cursor = db.cursor()
cursor.execute('''
SELECT
distinct(package_name)
FROM
app_info
''')
if 'user' in file_found:
usernum = file_found.split("/")
usernum = '_'+str(usernum[-4])
else:
usernum = ''
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
report = ArtifactHtmlReport('Installed Apps')
report.start_artifact_report(report_folder, f'Installed Apps (GMS){usernum}')
report.add_script()
data_headers = ('Bundle ID',) # Don't remove the comma, that is required to make this a tuple as there is only 1 element
data_list = []
for row in all_rows:
data_list.append((row[0],))
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = f'installed apps - GMS{usernum}'
tsv(report_folder, data_headers, data_list, tsvname)
else:
logfunc('No Installed Apps data available{usernum}')
db.close()
| 37.847826 | 136 | 0.546238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 370 | 0.212522 |
a6932f9707d279d372f18d0e6ea0ae4bd2d7bb9b | 3,494 | py | Python | 6_google_trace/VMFuzzyPrediction/estimators/GAEstimator.py | nguyenthieu95/machine_learning | 40595a003815445a7a9fef7e8925f71d19f8fa30 | [
"MIT"
] | 1 | 2017-12-30T20:10:07.000Z | 2017-12-30T20:10:07.000Z | 6_google_trace/VMFuzzyPrediction/estimators/GAEstimator.py | ThieuNv/machine_learning | 40595a003815445a7a9fef7e8925f71d19f8fa30 | [
"MIT"
] | null | null | null | 6_google_trace/VMFuzzyPrediction/estimators/GAEstimator.py | ThieuNv/machine_learning | 40595a003815445a7a9fef7e8925f71d19f8fa30 | [
"MIT"
] | 1 | 2019-12-23T15:30:16.000Z | 2019-12-23T15:30:16.000Z | # from lasagne.nonlinearities import sigmoid
from pyevolve import Crossovers
from pyevolve import G1DList
from pyevolve import GSimpleGA
from pyevolve import Initializators, Mutators
from pyevolve import Selectors
from sklearn.base import BaseEstimator
from sklearn.metrics import mean_squared_error
from FeedFlow import FeedFlow
import numpy as np
class GAEstimator(BaseEstimator):
def __init__(self,gen_size=400,pop_size = 225,cross_rate=0.9,mutation_rate = 0.01,freq_stats=10):
# self.n_input = n_input
# self.fan_in = n_input
# self.fan_out = 15
# self.theta_shape = (self.n_input,1)
self.gen_size = self.number_of_solutions = gen_size
self.pop_size = pop_size
self.cross_rate = cross_rate
self.mutation_rate = mutation_rate
self.freq_stats = freq_stats
def get_params(self, deep=True):
return {
"gen_size": self.gen_size,
"pop_size": self.pop_size,
"cross_rate": self.cross_rate,
"mutation_rate": self.mutation_rate,
"freq_stats": self.freq_stats
}
def set_params(self, **params):
for param,value in params.items():
self.__setattr__(param,value)
return self
# def activation(self,x):
# return sigmoid(x)
def eval_score(self,chronosome):
# theta = np.zeros(self.theta_shape)
# for i in np.arange(self.theta_shape[0]):
# theta[i] = chronosome[i]
# return self.costFunction(self.X_data,self.y_data,theta)
self.score_fn.set_weights(np.array(chronosome.genomeList))
return self.score_fn.score(self.X,self.y)
def fit(self,X,y,**param):
self.neural_shape = param.get("neural_shape")
self.n_input = self.neural_shape[0]
self.n_output = self.neural_shape[-1]
self.n_hidden = self.neural_shape[1]
self.number_of_weights = self.n_hidden*(self.n_input+1)+self.n_output*(self.n_hidden+1)
self.score_fn = FeedFlow(self.neural_shape)
self.X = X
self.y = y
#setting params
self.weights = G1DList.G1DList(self.number_of_weights)
lim = np.sqrt(6)/np.sqrt((self.n_input+self.n_output))
#Khoi tao trong so
self.weights.setParams(rangemin=-lim,rangemax=lim)
# cai dat ham khoi tao
self.weights.initializator.set(Initializators.G1DListInitializatorReal)
#cai dat ham dot bien
self.weights.mutator.set(Mutators.G1DListMutatorRealGaussian)
#cai dat ham do thich nghi
self.weights.evaluator.set(self.eval_score)
# cai dat ham lai ghep
self.weights.crossover.set(Crossovers.G1DListCrossoverUniform)
#code genetic
# thiet lap he so lai ghep
self.ga = GSimpleGA.GSimpleGA(self.weights)
self.ga.selector.set(Selectors.GRouletteWheel)
self.ga.setMutationRate(self.mutation_rate)
self.ga.setCrossoverRate(self.cross_rate)
self.ga.setPopulationSize(self.pop_size)
self.ga.setGenerations(self.pop_size)
self.ga.terminationCriteria.set(GSimpleGA.ConvergenceCriteria)
self.ga.evolve(freq_stats=self.freq_stats)
self.best_archive = self.getParam()
return self
def getParam(self):
return np.array(self.ga.bestIndividual().genomeList)
def predict(self,X):
return self.score_fn.predict(X)
def score(self,X,y):
return np.sqrt(mean_squared_error(y, self.predict(X))) | 42.609756 | 101 | 0.670864 | 3,145 | 0.900114 | 0 | 0 | 0 | 0 | 0 | 0 | 600 | 0.171723 |
a69449e89ea900074212b71558e97b166df355bd | 1,388 | py | Python | skassist-docs/python/doc_definitions.py | radmerti/scikit-assist | 6ecd58608632c2ef8b52f6f71fd3695db522e22e | [
"BSD-2-Clause"
] | null | null | null | skassist-docs/python/doc_definitions.py | radmerti/scikit-assist | 6ecd58608632c2ef8b52f6f71fd3695db522e22e | [
"BSD-2-Clause"
] | null | null | null | skassist-docs/python/doc_definitions.py | radmerti/scikit-assist | 6ecd58608632c2ef8b52f6f71fd3695db522e22e | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ______________________________________________________________________________
def boolean_func(experiment):
"""Function that returns True when an experiment matches and False otherwise.
Args:
experiment (:class:`~skassist.Experiment`): Experiment that is to be tested.
"""
# ______________________________________________________________________________
def scoring_function(self, model, y_true, y_predicted_probability):
"""The scoring function takes a model, the true labels and the prediction
and calculates one or more scores. These are returned in a dictionary which
:func:`~skassist.Model.calc_results` uses to commit them to permanent storage.
Args:
scoring_function (:func:`function`):
A python function for calculating the results given the true labels
and the predictions. See :func:`~skassist.Model.scoring_function`.
skf (:obj:`numpy.ndarray`):
An array containing arrays of splits. E.g. an array with 10 arrays,
each containing 3 splits for a 10-fold cross-validation with
training, test and validation set.
df (:obj:`pandas.DataFrame`):
The DataFrame on which to evaluate the model. Must contain all
feature, "extra" feature and target columns that the model
requires.
""" | 42.060606 | 84 | 0.701009 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,276 | 0.919308 |
a69483b15fce7657ead4227c986e5b9e8717d867 | 3,327 | py | Python | CyTrack/sendData.py | M2I-HABET/ahac2020workshop | 7ebc6e2ba3bf10ef18881e96523adc5d54f8d62e | [
"MIT"
] | null | null | null | CyTrack/sendData.py | M2I-HABET/ahac2020workshop | 7ebc6e2ba3bf10ef18881e96523adc5d54f8d62e | [
"MIT"
] | null | null | null | CyTrack/sendData.py | M2I-HABET/ahac2020workshop | 7ebc6e2ba3bf10ef18881e96523adc5d54f8d62e | [
"MIT"
] | null | null | null | import rot2proG
import serial
import math
import time
import requests
from queue import Queue
from threading import Thread
def _parse_degrees(nmea_data):
# Parse a NMEA lat/long data pair 'dddmm.mmmm' into a pure degrees value.
# Where ddd is the degrees, mm.mmmm is the minutes.
if nmea_data is None or len(nmea_data) < 3:
return None
raw = float(nmea_data)
deg = raw // 100
minutes = raw % 100
return deg + minutes/60
def MainLoop():
    """Receive GPS telemetry lines over a LoRa serial link and forward them.

    Runs forever (``run`` is never cleared): every raw received line
    containing "rssi" is POSTed to the flight_data_raw endpoint, and each
    valid GPGGA sentence is parsed into decimal-degree lat/lon and POSTed
    to the flight_location endpoint.
    """
    # Identifiers and endpoints of the ground-station REST API.
    flightID = "fa0b701b-e40a-4711-94c0-09fedd0b1cac"
    scriptID = "429c4f46-140b-4db4-8cf9-6acc88f5b018"
    postURL = "http://10.29.189.44/REST/V1/flight_location"
    postURLRaw = "http://10.29.189.44/REST/V1/flight_data_raw"
    run = True
    # LoRa receiver on a Windows COM port; 2 s serial read timeout.
    lora = serial.Serial(port="COM27", baudrate=9600, bytesize=8, parity='N', stopbits=1, timeout=2)
    lora.flushInput()
    lora.flushOutput()
    while run:
        line = ""
        invalid = True
        data = ""
        latB = ''
        lonB = ''
        alt = ''
        rssi = ''
        # Spin until one line has been read and decoded from the radio.
        while invalid: # Or: while ser.inWaiting():
            if lora.in_waiting:
                print("in wating: "+str(lora.in_waiting))
                try:
                    line = lora.readline().decode("utf-8")
                    lineToSave = line
                    # Lines containing "rssi" carry signal-strength info.
                    # NOTE(review): str.strip("rssi:") strips any of the
                    # characters r/s/i/: from both ends, not the literal
                    # prefix "rssi:" -- it can also eat matching characters
                    # from the value; confirm intent.
                    if("rssi" in lineToSave):
                        rssi = lineToSave.strip("rssi:").strip("\r\n")
                        print(rssi)
                        try:
                            # Best-effort archive of the raw line; POST
                            # errors are logged and otherwise ignored.
                            params = {'scriptID': scriptID, 'flightID': flightID, 'gps':lineToSave}
                            r = requests.post(url = postURLRaw, data = params, timeout=5)
                            print(r.text)
                        except Exception as e:
                            print(e)
                    line =lineToSave.strip('\n').strip('\r')
                    invalid = False
                except:
                    # Undecodable bytes (noise / partial packet): retry.
                    invalid = True
                    print("bad Unicode")
                    continue
        #print(line)
        vals = line.split(',')
        time.sleep(.1)
        #print(line)
        # Only GPGGA fix sentences are forwarded as positions.
        if "GPGGA" not in line:
            continue
        try:
            # data = [id, lat(deg), N/S, lon(deg), E/W, altitude]
            # NOTE(review): indices 3/4/5/6/10 are shifted by one compared
            # to a standard $GPGGA sentence (latitude is normally field 2);
            # presumably the transmitter prepends an extra field -- confirm.
            data = [vals[0],_parse_degrees(vals[3]),vals[4],_parse_degrees(vals[5]),vals[6],vals[10]]
        except:
            # Malformed / truncated sentence: drop it and read the next.
            continue
        # Southern and western hemispheres become negative decimal degrees.
        if data[2] == "S":
            data[1] = -1* data[1]
        if data[4] == "W":
            data[3] = -1*data[3]
        print(data)
        try:
            latB = float(data[1])#43.02700680709
            lonB = float(data[3])#-94.6533878648
            alt = float(data[5])
            # latB == 0 presumably means no fix yet; the flag is set but
            # the position is still POSTed below -- confirm intent.
            if(latB == 0):
                invalid = True
            params = {'scriptID': scriptID, 'flightID': flightID, 'time': int(time.time()), 'lat': latB, 'lon': lonB, 'alt':alt, 'rssi': rssi}
            try:
                r = requests.post(url = postURL, data = params, timeout=5)
                print(r.text)
            except Exception as e:
                print(e)
                print("\n\n\n\n\n NOT SENT \n\n\n\n")
            invalid = False
        except Exception as e:
            print(e)
            print("bad String")
if __name__ == "__main__":
    # Run the receive-and-forward loop; blocks forever (Ctrl-C to stop).
    MainLoop()
a69566d00c0403a5c7929fe88c805f00d3a9a9e2 | 587 | py | Python | examples/rf_tmd_example.py | eltonfss/TMDLibrary | 2eaca21d61ae4d012435c2dc8b65be0b62a2afd3 | [
"MIT"
] | null | null | null | examples/rf_tmd_example.py | eltonfss/TMDLibrary | 2eaca21d61ae4d012435c2dc8b65be0b62a2afd3 | [
"MIT"
] | null | null | null | examples/rf_tmd_example.py | eltonfss/TMDLibrary | 2eaca21d61ae4d012435c2dc8b65be0b62a2afd3 | [
"MIT"
] | null | null | null | from examples.util import get_tmd_dataset
from detectors.rf_tmd import RandomForestTMD
from sklearn.metrics import accuracy_score
if __name__ == '__main__':
# load dataset
df = get_tmd_dataset()
travel_mode_column = 'target'
# train model
rf_tmd = RandomForestTMD()
rf_tmd.fit(
data_frame=df,
travel_mode_column=travel_mode_column,
)
# load model and evaluate accuracy
labeled_modes = df.pop(travel_mode_column)
detected_modes = rf_tmd.predict(df)
print('Full Data Accuracy', accuracy_score(labeled_modes, detected_modes))
| 26.681818 | 78 | 0.729131 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.168654 |
a6957adb4b1b1df4357ebf29509b3e8d10fca9bb | 357 | py | Python | lib/core/errors.py | h3ar7dump/Zeus-Scanner | e5ac6316d351851883c7a895999b92846380392c | [
"RSA-MD"
] | 1 | 2019-11-29T10:11:01.000Z | 2019-11-29T10:11:01.000Z | lib/core/errors.py | h3ar7dump/Zeus-Scanner | e5ac6316d351851883c7a895999b92846380392c | [
"RSA-MD"
] | null | null | null | lib/core/errors.py | h3ar7dump/Zeus-Scanner | e5ac6316d351851883c7a895999b92846380392c | [
"RSA-MD"
] | null | null | null | class InvalidProxyType(Exception): pass
class ApiConnectionError(Exception): pass
class ApplicationNotFound(Exception): pass
class SqlmapFailedStart(Exception): pass
class SpiderTestFailure(Exception): pass
class InvalidInputProvided(Exception): pass
class InvalidTamperProvided(Exception): pass
class PortScanTimeOutException(Exception): pass | 16.227273 | 47 | 0.829132 | 336 | 0.941176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a695aa6f1ab93783e6c03d418b1565d4a3c0d64d | 2,696 | py | Python | cli.py | hiway/adventofcode | c4b3e94ad2bc967bc684a10ddc90541d6e1657fc | [
"MIT"
] | null | null | null | cli.py | hiway/adventofcode | c4b3e94ad2bc967bc684a10ddc90541d6e1657fc | [
"MIT"
] | null | null | null | cli.py | hiway/adventofcode | c4b3e94ad2bc967bc684a10ddc90541d6e1657fc | [
"MIT"
] | null | null | null | import click
import collections
@click.group()
def cli():
    # Root command group; the dayN subgroups attach to this.
    pass
@cli.group()
def day1():
    # Subgroup holding the Day 1 puzzle commands.
    pass
@day1.command()
@click.argument('input_file', type=click.File())
def part1(input_file):
    # Import lazily so the CLI starts without loading every day's module.
    from day1 import part1_stanta_floor_positioning_system
    for directions in input_file:
        final_floor = part1_stanta_floor_positioning_system(directions)
        print('Final Floor: {0}'.format(final_floor))
@day1.command()
@click.argument('input_file', type=click.File())
@click.option('--halt', type=int)
def part2(input_file, halt):
    # Import lazily so the CLI starts without loading every day's module.
    from day1 import part2_santa_fps_halt
    for directions in input_file:
        position = part2_santa_fps_halt(directions, halt)
        if position is None:
            print('Never reached floor: {0}'.format(halt))
        else:
            print('Position: {0}'.format(position))
@cli.group()
def day2():
    # Subgroup holding the Day 2 puzzle commands.
    pass
@day2.command()
@click.argument('input_file', type=click.File())
def part1(input_file):
    # Import lazily so the CLI starts without loading every day's module.
    from day2 import part1_wrapping_paper_estimate
    # One estimate per line of box dimensions, summed over the whole file.
    total = sum(part1_wrapping_paper_estimate(dims) for dims in input_file)
    print('Total wrapping paper required: {0} sq ft'.format(total))
@day2.command()
@click.argument('input_file', type=click.File())
def part2(input_file):
    # Import lazily so the CLI starts without loading every day's module.
    from day2 import part2_ribbon_estimate
    # One estimate per line of box dimensions, summed over the whole file.
    total = sum(part2_ribbon_estimate(dims) for dims in input_file)
    print('Total ribbon required: {0} ft'.format(total))
@cli.group()
def day3():
    # Subgroup holding the Day 3 puzzle commands.
    pass
@day3.command()
@click.argument('input_file', type=click.File())
def part1(input_file):
    # Import lazily so the CLI starts without loading every day's module.
    from day3 import part1_santa_gps
    for raw_line in input_file:
        result = part1_santa_gps(raw_line.strip())
        print(result)
@day3.command()
@click.argument('input_file', type=click.File())
def part2(input_file):
    # Import lazily so the CLI starts without loading every day's module.
    from day3 import part2_santa_and_robo_gps
    for raw_line in input_file:
        result = part2_santa_and_robo_gps(raw_line.strip())
        print(result)
@cli.group()
def day4():
    # Subgroup holding the Day 4 puzzle commands.
    pass
@day4.command()
@click.argument('secret_key')
def part1(secret_key):
    # Import lazily so the CLI starts without loading every day's module.
    from day4 import part1_adventcoin_miner
    key = secret_key.strip()
    print(part1_adventcoin_miner(key))
@day4.command()
@click.argument('secret_key')
@click.argument('match', default='000000')
def part2(secret_key, match):
    # Import lazily so the CLI starts without loading every day's module.
    from day4 import part2_adventcoin_miner
    key = secret_key.strip()
    print(part2_adventcoin_miner(key, match))
@cli.group()
def day5():
    # Subgroup holding the Day 5 puzzle commands.
    pass
@day5.command()
@click.argument('input_file', type=click.File())
def part1(input_file):
    # Import lazily so the CLI starts without loading every day's module.
    from day5 import part1_is_nice_string
    # Tally nice/naughty verdicts directly from a generator of results.
    results = collections.Counter(
        part1_is_nice_string(raw_line.strip()) for raw_line in input_file
    )
    print(results[True])
| 23.241379 | 91 | 0.709941 | 0 | 0 | 0 | 0 | 2,618 | 0.971068 | 0 | 0 | 263 | 0.097552 |
a696ee2566ea61b7ea08a6739c150d86fe1efe72 | 1,526 | py | Python | bostaSDK/pickup/create/CreatePickupResponse.py | bostaapp/bosta-python | df3f48dafac49b2577669fd4d74a5e5e9d28f2c1 | [
"MIT"
] | null | null | null | bostaSDK/pickup/create/CreatePickupResponse.py | bostaapp/bosta-python | df3f48dafac49b2577669fd4d74a5e5e9d28f2c1 | [
"MIT"
] | 1 | 2020-11-18T11:01:32.000Z | 2020-11-18T11:10:52.000Z | bostaSDK/pickup/create/CreatePickupResponse.py | bostaapp/bosta-python | df3f48dafac49b2577669fd4d74a5e5e9d28f2c1 | [
"MIT"
] | null | null | null |
class CreatePickupResponse:
    """Parsed result of a Bosta "create pickup" API call.

    On a successful response the pickup fields (``_id``, ``puid``, ...)
    are populated from the payload; otherwise only ``message`` is set to
    the stringified response.
    """

    # Keys copied verbatim from the response's "data" payload onto
    # attributes of the same name.
    _DATA_FIELDS = (
        "_id",
        "puid",
        "business",
        "businessLocationId",
        "scheduledDate",
        "scheduledTimeSlot",
        "contactPerson",
        "createdAt",
        "updatedAt",
    )

    def __init__(self, res):
        """
        Initialize new instance from CreatePickupResponse class

        Parameters:
            res (dict, str): JSON response object or response text message

        Returns:
            instance from CreatePickupResponse
        """
        self.fromResponseObj(res)

    def fromResponseObj(self, res):
        """
        Extract _id, puid, business, businessLocationId,
        scheduledDate, scheduledTimeSlot, contactPerson,
        createdAt and updatedAt fields from json response object

        Parameters:
            res (dict, str): JSON response object or response text message

        Raises:
            KeyError: if a successful response is missing an expected
                "data" field.
        """
        # isinstance (rather than `type(res) is dict`) also accepts dict
        # subclasses; a response only counts as successful when it carries
        # a non-None "data" entry.
        if isinstance(res, dict) and res.get('data') is not None:
            self.message = res.get("message")
            newPickup = res["data"]
            for field in self._DATA_FIELDS:
                setattr(self, field, newPickup[field])
        else:
            self.message = str(res)

    def __str__(self):
        """Return the response message."""
        return self.message

    def get_pickupId(self):
        """Return the created pickup's id (only set on success)."""
        return self._id

    def get_message(self):
        """Return the response message."""
        return self.message
| 29.921569 | 70 | 0.609436 | 1,518 | 0.994758 | 0 | 0 | 0 | 0 | 0 | 0 | 652 | 0.427261 |
a696f0c13d70451610aab5bd60cb7ae4a08f0d3c | 671 | py | Python | tflowModel/pb2uff.py | moennen/sceneIllEst | c02358e43016c3b44059554c4e202e922656be89 | [
"Apache-2.0"
] | 3 | 2019-03-04T09:52:29.000Z | 2019-06-17T07:02:49.000Z | tflowModel/pb2uff.py | moennen/sceneIllEst | c02358e43016c3b44059554c4e202e922656be89 | [
"Apache-2.0"
] | null | null | null | tflowModel/pb2uff.py | moennen/sceneIllEst | c02358e43016c3b44059554c4e202e922656be89 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
""" Tensorflow Frozen PB model to UFF
"""
import argparse
import tensorflow as tf
import uff
if __name__ == "__main__":
#------------------------------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("inModelPath")
parser.add_argument("outModelPath")
parser.add_argument("inputNodes")
parser.add_argument("outputNodes")
args = parser.parse_args()
uff.from_tensorflow_frozen_model(
args.inModelPath, args.outputNodes.split(), text=False, list_nodes=False,
output_filename=args.outModelPath, input_nodes=args.inputNodes.split())
| 29.173913 | 101 | 0.61997 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 217 | 0.323398 |
a69723490e88c379953ed361d981759344135792 | 4,219 | py | Python | jigu/core/proposal/__init__.py | bongtrop/jigu | 448bce8ce693f3f7c530bea0f2f268e22100937a | [
"MIT"
] | 14 | 2020-03-03T06:46:39.000Z | 2021-05-01T15:29:35.000Z | jigu/core/proposal/__init__.py | bongtrop/jigu | 448bce8ce693f3f7c530bea0f2f268e22100937a | [
"MIT"
] | 9 | 2020-03-09T06:36:30.000Z | 2021-02-15T14:40:48.000Z | jigu/core/proposal/__init__.py | bongtrop/jigu | 448bce8ce693f3f7c530bea0f2f268e22100937a | [
"MIT"
] | 5 | 2020-05-30T22:38:34.000Z | 2021-02-11T00:56:20.000Z | from __future__ import annotations
import abc
from dataclasses import dataclass
from typing import Any, Dict, Type
from jigu.core import Coin, Coins, Timestamp
from jigu.core.denoms import uLuna
from jigu.util.serdes import JiguBox, JsonDeserializable, JsonSerializable
from jigu.util.validation import Schemas as S
# Public names re-exported by this module.
__all__ = [
    "ProposalStatus",
    "Content",
    "PROPOSAL_TYPES",
    "Proposal",
    "TextProposal",
    "ParameterChangeProposal",
    "CommunityPoolSpendProposal",
    "TaxRateUpdateProposal",
    "RewardWeightUpdateProposal",
]
class ProposalStatus(str):
    """String constants for a governance proposal's lifecycle status."""

    NIL = ""
    DEPOSIT_PERIOD = "DepositPeriod"
    VOTING_PERIOD = "VotingPeriod"
    PASSED = "Passed"
    REJECTED = "Rejected"
    FAILED = "Failed"
class Content(JsonSerializable, JsonDeserializable, metaclass=abc.ABCMeta):
    """Abstract base for the payload ("content") of a governance proposal."""

    __schema__ = S.OBJECT(
        type=S.STRING, value=S.OBJECT(title=S.STRING, description=S.STRING)
    )

    @property
    @abc.abstractmethod
    def type(self):
        """Type identifier string of the concrete content subclass."""
        raise NotImplementedError

    def proposal_value(self):
        """Return a shallow copy of this content's fields as a plain dict."""
        return dict(self.__dict__)

    def to_data(self) -> Dict[str, Any]:
        """Serialize as a tagged ``{"type": ..., "value": ...}`` object."""
        value = self.proposal_value()
        return {"type": self.type, "value": value}
from .distribution import CommunityPoolSpendProposal # isort:skip
from .gov import TextProposal # isort:skip
from .params import ParameterChangeProposal # isort:skip
from .treasury import RewardWeightUpdateProposal, TaxRateUpdateProposal # isort:skip
# Maps the "type" tag found in serialized proposal JSON to the Content
# subclass that deserializes it (looked up by Proposal.from_data).
PROPOSAL_TYPES = {
    "gov/TextProposal": TextProposal,
    "params/ParameterChangeProposal": ParameterChangeProposal,
    "distribution/CommunityPoolSpendProposal": CommunityPoolSpendProposal,
    "treasury/TaxRateUpdateProposal": TaxRateUpdateProposal,
    "treasury/RewardWeightUpdateProposal": RewardWeightUpdateProposal,
}
@dataclass
class Proposal(JsonSerializable, JsonDeserializable):
    """A governance proposal together with its deposit and voting state."""

    __schema__ = S.OBJECT(
        content=Content.__schema__,
        id=S.STRING_INTEGER,
        proposal_status=S.STRING,
        final_tally_result=S.OPTIONAL(
            S.OBJECT( # this gets marshalled into Coin
                yes=S.STRING_INTEGER,
                abstain=S.STRING_INTEGER,
                no=S.STRING_INTEGER,
                no_with_veto=S.STRING_INTEGER,
            )
        ),
        submit_time=Timestamp.__schema__,
        deposit_end_time=Timestamp.__schema__,
        total_deposit=Coins.__schema__,
        voting_start_time=Timestamp.__schema__,
        voting_end_time=Timestamp.__schema__,
    )

    # Typed payload of the proposal (one of the PROPOSAL_TYPES values).
    content: Type[Content]
    id: int
    proposal_status: str
    # Vote option -> Coin tally (built as uLuna coins in from_data).
    final_tally_result: JiguBox[str, Coin]
    submit_time: Timestamp
    deposit_end_time: Timestamp
    total_deposit: Coins
    voting_start_time: Timestamp
    voting_end_time: Timestamp

    def to_data(self) -> dict:
        # Serialize back toward the wire shape: id as a string, each
        # tally Coin replaced by its raw amount.
        # NOTE(review): JiguBox(self.__dict__) may wrap the live instance
        # dict, in which case replacing the tally Coins mutates this
        # Proposal -- confirm that JiguBox copies its input.
        d = JiguBox(self.__dict__)
        d.id = str(d.id)
        for x in d.final_tally_result:
            d.final_tally_result[x] = d.final_tally_result[x].amount
        return d

    @property
    def pretty_data(self):
        # Ordered (key, value) pairs for human-readable display: id and
        # the flattened content first, then the remaining fields.
        d = dict(self.__dict__)
        proposal_id = d.pop("id")
        content = d.pop("content")
        ix = [
            ("id", proposal_id),
            ("type", content.type),
            *list(content.pretty_data),
            *list(d.items()),
        ]
        return ix

    @classmethod
    def from_data(cls, data: dict) -> Proposal:
        # Deserialize from the wire format.
        # Note: mutates data["final_tally_result"] in place, replacing
        # each tally string with a uLuna Coin.
        final_tally_result = data["final_tally_result"]
        for key in final_tally_result:
            final_tally_result[key] = Coin(uLuna, int(final_tally_result[key]))
        # Dispatch on the content's type tag to pick the Content subclass.
        p_type = PROPOSAL_TYPES[data["content"]["type"]]
        content = p_type.from_data(data["content"])
        return cls(
            content=content,
            id=int(data["id"]),
            proposal_status=ProposalStatus(data["proposal_status"]),
            final_tally_result=JiguBox(final_tally_result),
            submit_time=Timestamp.from_data(data["submit_time"]),
            deposit_end_time=Timestamp.from_data(data["deposit_end_time"]),
            total_deposit=Coins.from_data(data["total_deposit"]),
            voting_start_time=Timestamp.from_data(data["voting_start_time"]),
            voting_end_time=Timestamp.from_data(data["voting_end_time"]),
        )
| 30.79562 | 85 | 0.670775 | 3,055 | 0.724105 | 0 | 0 | 2,519 | 0.597061 | 0 | 0 | 649 | 0.153828 |
a6975cc67cf3d1ce0bf1918a4452973ed9a1cb9b | 952 | py | Python | interview/amazon/utilization-checks.py | Zhenye-Na/leetcode | 95196a45f5709ccf7b970ee5ac84a4bf8fe2301e | [
"MIT"
] | 10 | 2019-09-15T00:23:57.000Z | 2022-01-05T12:53:42.000Z | interview/amazon/utilization-checks.py | Zhenye-Na/leetcode | 95196a45f5709ccf7b970ee5ac84a4bf8fe2301e | [
"MIT"
] | 3 | 2021-06-30T00:39:26.000Z | 2021-08-01T07:13:59.000Z | interview/amazon/utilization-checks.py | Zhenye-Na/leetcode | 95196a45f5709ccf7b970ee5ac84a4bf8fe2301e | [
"MIT"
] | 6 | 2020-02-08T02:55:22.000Z | 2022-01-02T22:48:18.000Z | # Utilization Checks
# https://aonecode.com/amazon-online-assessment-utilization-checks
import math
class UtilizationChecks:
    """Simulate an auto-scaler processing a series of utilization readings.

    Rules applied to each reading:
      * 25..60 (inclusive): no action, move to the next reading.
      * > 60: double the instance count, unless doubling would exceed
        2 * 10**8 (then take no action).
      * < 25: halve the instance count rounded up, unless it is already 1.
    After any resize the next 10 readings are skipped.
    """

    def solve(self, instances, averageUtil):
        """Return the final instance count after processing averageUtil.

        Args:
            instances (int): starting number of instances.
            averageUtil (list[int]): utilization readings, one per second.
        """
        i = 0
        n = len(averageUtil)
        while i < n:
            util = averageUtil[i]
            if 25 <= util <= 60:
                # In range: no action for this reading.
                i += 1
                continue
            if util > 60:
                if 2 * instances > 2 * pow(10, 8):
                    # Cannot double past the cap: take no action.
                    # (The original bare `continue` here never advanced
                    # `i`, causing an infinite loop.)
                    i += 1
                    continue
                instances *= 2
            else:
                if instances == 1:
                    # Already at the minimum: no action.
                    i += 1
                    continue
                instances = math.ceil(instances / 2)
            # A resize happened: skip ahead past the cooldown window.
            i += 10
        print(instances)
        return instances
uc = UtilizationChecks()
# Smoke-run the checker against the three sample scenarios.
for start_count, utilizations in (
    (2, [25, 23, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 76, 80]),
    (3, [5, 10, 80]),
    (5, [30, 5, 4, 8, 19, 89]),
):
    uc.solve(start_count, utilizations)
| 23.219512 | 66 | 0.456933 | 700 | 0.735294 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.138655 |
a6983bd2dae45fdbaac009e69a080431f0e315a5 | 522 | py | Python | code/bot/mapping-bringup/launch/slam_launch.py | jacobwaller/beer-bot | 60d89bee2029d87a870940081bd6ab93d05c4eca | [
"MIT"
] | null | null | null | code/bot/mapping-bringup/launch/slam_launch.py | jacobwaller/beer-bot | 60d89bee2029d87a870940081bd6ab93d05c4eca | [
"MIT"
] | null | null | null | code/bot/mapping-bringup/launch/slam_launch.py | jacobwaller/beer-bot | 60d89bee2029d87a870940081bd6ab93d05c4eca | [
"MIT"
] | null | null | null | from launch import LaunchDescription
import launch_ros.actions
from ament_index_python.packages import get_package_share_directory
def generate_launch_description():
    """Build a launch description running the slam_toolbox lifelong-SLAM node."""
    params_file = (
        get_package_share_directory("slam_toolbox") + '/config/mapper_params_lifelong.yaml'
    )
    slam_node = launch_ros.actions.Node(
        package='slam_toolbox',
        executable='lifelong_slam_toolbox_node',
        name='slam_toolbox',
        output='screen',
        parameters=[params_file],
    )
    return LaunchDescription([slam_node])