blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
20bf26f6d4991ace9ec7a0a74530718eaf4fd7c0 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/20072002.py | a5d0a92707cd731867593ec97455a57a39004f3e | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/20072002.py generated: Wed, 25 Jan 2017 15:25:32
#
# Event Type: 20072002
#
# ASCII decay Descriptor: pp => [<Xc>]cc ...
#
from Gaudi.Configuration import *
importOptions( "$DECFILESROOT/options/DiMuon_for_tau23mu.py" )
from Configurables import Generation
Generation().EventType = 20072002
Generation().SampleGenerationTool = "Inclusive"
from Configurables import Inclusive
Generation().addTool( Inclusive )
Generation().Inclusive.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/incl_c=DiMuon,tau23mu_cuts.dec"
Generation().Inclusive.CutTool = "LHCbAcceptance"
Generation().FullGenEventCutTool = "DiLeptonInAcceptance"
Generation().Inclusive.InclusivePIDList = [ 421, -421, 411, -411, 431, -431, 4122, -4122, 443, 4112, -4112, 4212, -4212, 4222, -4222, 4312, -4312, 4322, -4322, 4332, -4332, 4132, -4132, 4232, -4232, 100443, 441, 10441, 20443, 445, 4214, -4214, 4224, -4224, 4314, -4314, 4324, -4324, 4334, -4334, 4412, -4412, 4414,-4414, 4422, -4422, 4424, -4424, 4432, -4432, 4434, -4434, 4444, -4444, 14122, -14122, 14124, -14124, 100441 ]
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
80ee0b807b6645849b86857892b8f89176016c50 | 98b63e3dc79c75048163512c3d1b71d4b6987493 | /tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_sparse_mat_mul_grad_test.py | 07d1e6a2a061f98ad65a92e1259fed8bc646e0ac | [
"Apache-2.0"
] | permissive | galeone/tensorflow | 11a4e4a3f42f4f61a65b432c429ace00401c9cc4 | 1b6f13331f4d8e7fccc66bfeb0b066e77a2b7206 | refs/heads/master | 2022-11-13T11:56:56.143276 | 2020-11-10T14:35:01 | 2020-11-10T14:35:01 | 310,642,488 | 21 | 12 | Apache-2.0 | 2020-11-06T16:01:03 | 2020-11-06T16:01:02 | null | UTF-8 | Python | false | false | 5,236 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CSR sparse matrix tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_grad # pylint: disable=unused-import
from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def dense_to_csr_sparse_matrix(dense):
dense_t = ops.convert_to_tensor(dense)
locs = array_ops.stop_gradient(array_ops.where(math_ops.abs(dense_t) > 0))
return sparse_csr_matrix_ops.dense_to_csr_sparse_matrix(dense_t, locs)
def _add_test(test, op_name, testcase_name, fn): # pylint: disable=redefined-outer-name
if fn is None:
return
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
class CSRSparseMatrixGradTest(test.TestCase):
@classmethod
def setUpClass(cls):
super(CSRSparseMatrixGradTest, cls).setUpClass()
cls._gpu_available = test_util.is_gpu_available()
# TODO(penporn): Make these tests runnable on eager mode.
# (tf.gradients and gradient_checker only run in graph mode.)
@test_util.run_deprecated_v1
def _testLargeBatchSparseMatrixSparseMatMulGrad(self, datatype, transpose_a,
transpose_b, adjoint_a,
adjoint_b):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
a_mats_val = sparsify(
np.random.randn(3, 5, 11) +
1.j * np.random.randn(3, 5, 11)).astype(datatype)
if transpose_a or adjoint_a:
a_mats_val = np.transpose(a_mats_val, (0, 2, 1))
if adjoint_a:
a_mats_val = np.conj(a_mats_val)
b_mats_val = sparsify(
np.random.randn(3, 11, 13) +
1.j * np.random.randn(3, 11, 13)).astype(datatype)
if transpose_b or adjoint_b:
b_mats_val = np.transpose(b_mats_val, (0, 2, 1))
if adjoint_b:
b_mats_val = np.conj(b_mats_val)
with self.test_session(use_gpu=True):
a_mats = ops.convert_to_tensor(a_mats_val, dtype=datatype)
b_mats = ops.convert_to_tensor(b_mats_val, dtype=datatype)
a_sm = dense_to_csr_sparse_matrix(a_mats)
b_sm = dense_to_csr_sparse_matrix(b_mats)
c_sm = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
a_sm,
b_sm,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b,
type=datatype)
c_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
c_sm, type=datatype)
for ten, val, nn in [[a_mats, a_mats_val, "a"], [b_mats, b_mats_val,
"b"]]:
tf_logging.info("Testing gradients for %s" % nn)
theoretical, numerical = gradient_checker.compute_gradient(
ten,
ten.get_shape().as_list(),
c_dense,
c_dense.get_shape().as_list(),
x_init_value=val,
delta=1e-3)
self.assertAllClose(theoretical, numerical, atol=1e-3, rtol=1e-3)
# These tests are refactored from sparse_csr_matrix_grad_test to keep its size
# "medium".
for dtype in (np.float32, np.complex64):
for (t_a, t_b, adj_a, adj_b) in itertools.product(*(([False, True],) * 4)):
def create_sparse_mat_mul_test_fn(dtype_, t_a_, t_b_, adj_a_, adj_b_):
# Skip invalid cases.
if (t_a_ and adj_a_) or (t_b_ and adj_b_):
return
# Skip cases where we conjugate real matrices.
if dtype_ == np.float32 and (adj_a_ or adj_b_):
return
def test_fn(self):
self._testLargeBatchSparseMatrixSparseMatMulGrad(
dtype_, t_a_, t_b_, adj_a_, adj_b_)
return test_fn
name = (
"_testLargeBatchSparseMatrixSparseMatMulGrad_dtype_%s_t_a_%s_t_b_%s_"
"adj_a_%s_adj_b_%s" % (dtype.__name__, t_a, t_b, adj_a, adj_b))
_add_test(CSRSparseMatrixGradTest, "CSRSparseMatrixSparseGradTest", name,
create_sparse_mat_mul_test_fn(dtype, t_a, t_b, adj_a, adj_b))
if __name__ == "__main__":
test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
acec0e95d1ab761a24af1b9dc5a5772237ac0c6a | 28c517d75c71dcdefa3dfaa0dff6d5998f885e53 | /testing/regrid/testDiag.py | 6722eaa555a1b1ca4ba13982a9b03f7b2e8220ae | [] | no_license | AZed/uvcdat | 5863eba99f230c23336cc4e0772e0d406b24a919 | 58e745bbed25c7b80584fa25a29dd8239d917cd9 | refs/heads/master | 2021-01-18T10:36:45.792190 | 2013-01-25T22:29:10 | 2013-01-25T22:29:10 | 8,485,065 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,890 | py | """
$Id: testDiag.py 2389 2012-07-26 15:51:43Z dkindig $
Test diagnostics
"""
import re
import numpy
import cdms2
import regrid2
import unittest
import ESMP
from regrid2 import esmf
from matplotlib import pylab
from mpi4py import MPI
import types
from math import pi
import sys
class Test(unittest.TestCase):
def setUp(self):
self.pe = MPI.COMM_WORLD.Get_rank()
self.nprocs = MPI.COMM_WORLD.Get_size()
def Xtest1_libcf(self):
srcF = cdms2.open(sys.prefix + \
'/sample_data/so_Omon_ACCESS1-0_historical_r1i1p1_185001-185412_2timesteps.nc')
so = srcF('so')[0, 0, ...]
clt = cdms2.open(sys.prefix + '/sample_data/clt.nc')('clt')
diag = {'numValid': None, 'numDstPoints': None}
soInterp = so.regrid(clt.getGrid(),
regridTool = 'libcf',
regridMethod='linear',
diag = diag)
if self.pe == 0:
# diag = {'numDstPoints': 3312, 'numValid': 2933}
self.assertEqual(diag['numDstPoints'], 3312)
self.assertEqual(diag['numValid'], 2933)
if False:
pylab.subplot(1, 2, 1)
pylab.pcolor(so, vmin = 20, vmax = 40)
pylab.colorbar()
pylab.title('so')
pylab.subplot(1, 2, 2)
pylab.pcolor(soInterp, vmin = 20, vmax = 40)
pylab.colorbar()
pylab.title('soInterp')
def test2_varRegrid(self):
print
print 'test2_varRegrid'
srcF = cdms2.open(sys.prefix + \
'/sample_data/so_Omon_ACCESS1-0_historical_r1i1p1_185001-185412_2timesteps.nc')
so = srcF('so')[0, 0, ...]
clt = cdms2.open(sys.prefix + '/sample_data/clt.nc')('clt')
diag = {'srcAreas': None, 'dstAreas': None,
'srcAreaFractions': None, 'dstAreaFractions': None}
soInterp = so.regrid(clt.getGrid(),
regridTool = 'esmf',
regridMethod='conserve',
diag = diag)
if self.pe == 0:
totSrcArea = diag['srcAreas'].sum()
totDstArea = diag['dstAreas'].sum()
totSrcFrac = diag['srcAreaFractions'].sum()
self.assertEqual(numpy.isnan(totSrcFrac).sum(), 0)
self.assertLess(abs(totSrcArea - 4*pi)/(4*pi), 0.02)
self.assertLess(abs(totDstArea - 4*pi)/(4*pi), 0.01)
soMass = (so*diag['srcAreas']).sum()
inMass = (soInterp*diag['dstAreas']).sum()
print soMass, inMass
diff = abs(soMass - inMass)/soMass
self.assertLess(diff, 7.e-7)
if False:
pylab.subplot(1, 2, 1)
pylab.pcolor(so, vmin = 20, vmax = 40)
pylab.colorbar()
pylab.title('so')
pylab.subplot(1, 2, 2)
pylab.pcolor(soInterp, vmin = 20, vmax = 40)
pylab.colorbar()
pylab.title('soInterp')
def Xtest3_esmf(self):
print
print 'test3_esmf'
srcF = cdms2.open(sys.prefix + \
'/sample_data/so_Omon_ACCESS1-0_historical_r1i1p1_185001-185412_2timesteps.nc')
so = srcF('so')[0, 0, ...]
clt = cdms2.open(sys.prefix + '/sample_data/clt.nc')('clt')[0, ...]
diag = {'srcAreas': None, 'dstAreas': None,
'srcAreaFractions': None, 'dstAreaFractions': None}
srcCoords = [so.getGrid().getLatitude()[:], so.getGrid().getLongitude()[:]]
srcBounds = cdms2.mvCdmsRegrid.getBoundList(srcCoords)
tmp = clt.getGrid().toCurveGrid()
dstCoords = [tmp.getLatitude()[:], tmp.getLongitude()[:]]
dstBounds = cdms2.mvCdmsRegrid.getBoundList(dstCoords)
# Establish the grids
srcGrid = esmf.EsmfStructGrid(so.shape, periodicity = 1)
dstGrid = esmf.EsmfStructGrid(clt.shape, periodicity = 1)
srcGrid.setCoords(srcCoords)
dstGrid.setCoords(dstCoords)
srcGrid.setCoords(srcBounds, staggerloc = esmf.CORNER)
dstGrid.setCoords(dstBounds, staggerloc = esmf.CORNER)
# Establish the fields
srcFeld = esmf.EsmfStructField(srcGrid, 'srcFeld', so.dtype)
dstFeld = esmf.EsmfStructField(dstGrid, 'dstFeld',so.dtype)
srcFeldPtr = srcFeld.getPointer()
srcFeldPtr[:] = so.data.flat
dstFeldPtr = dstFeld.getPointer()
dstFeldPtr[:] = so.missing_value
# Fractions
srcFrac = esmf.EsmfStructField(srcGrid, 'srcFrac', so.dtype)
dstFrac = esmf.EsmfStructField(dstGrid, 'dstFrac', so.dtype)
srcFracPtr = srcFrac.getPointer()
srcFracPtr[:] = 1.0
dstFracPtr = dstFrac.getPointer()
dstFracPtr[:] = 1.0
nnan = numpy.isnan(srcFeldPtr).sum()
if nnan > 0: print "There are nan's in srcFracPtr"
# Areas
presrcArea = esmf.EsmfStructField(srcGrid, 'srcArea', so.dtype)
predstArea = esmf.EsmfStructField(dstGrid, 'dstArea', so.dtype)
presrcAreaPtr = presrcArea.getPointer()
presrcAreaPtr[:] = 1.0
predstAreaPtr = predstArea.getPointer()
predstAreaPtr[:] = 1.0
ro = esmf.EsmfRegrid(srcFeld, dstFeld,
srcFrac = srcFrac, dstFrac = dstFrac,
srcMaskValues = None, dstMaskValues = None,
regridMethod = esmf.CONSERVE,
unMappedAction = esmf.IGNORE)
ro()
srcAreas = ro.getSrcAreas(None)
dstAreas = ro.getDstAreas(None)
srcFracPtrPost = ro.getSrcAreaFractions(None)
nnanPost = numpy.isnan(srcFracPtrPost).sum()
nnan = numpy.isnan(srcFracPtr).sum()
if nnan > 0 or nnanPost > 0:
print "There are nan's in srcFracPtrPost", nnanPost
print "There are nan's in srcFracPtr", nnan
self.assertNotEqual(nnan, 0)
self.assertNotEqual(nnanPost, 0)
dstFracPtrPost = ro.getDstAreaFractions(None)
nnanPost = numpy.isnan(dstFracPtrPost).sum()
nnan = numpy.isnan(dstFracPtrPost).sum()
if nnan > 0 or nnanPost > 0:
print "There are nan's in dstFracPtrPost", nnanPost
print "There are nan's in dstFracPtr", nnan
self.assertNotEqual(nnan, 0)
self.assertNotEqual(nnanPost, 0)
srcMass = (srcFeldPtr * srcAreas.flatten() * srcFracPtr).sum()
dstMass = (dstFeldPtr * dstAreas.flatten()).sum()
dstMassPtr = (dstFeldPtr * predstAreaPtr).sum()
diff = abs(srcMass - dstMass)
self.assertLess(diff/srcMass, 1.e-7)
if __name__ == '__main__':
print ""
ESMP.ESMP_Initialize()
suite = unittest.TestLoader().loadTestsFromTestCase(Test)
unittest.TextTestRunner(verbosity = 1).run(suite)
pylab.show()
| [
"aashish.chaudhary@kitware.com"
] | aashish.chaudhary@kitware.com |
b1b4c6f3cdff8fb9c2d1c39ee1f4d1670dd9d919 | 25bcf095a803470b4be9bba394569cb218b997ed | /book/算法和数据操作-回溯法-矩阵中的路径.py | 02aeb344a4c555b22f17c04134e17a8e2968819c | [] | no_license | lcqbit11/algorithms | c8a3da6d3a9d387d54639c0a08d14ca6b9118bb3 | ca4dacda39dc12d53ed8d4448b3356a3f2936603 | refs/heads/master | 2023-06-22T06:36:22.106780 | 2023-06-11T07:10:40 | 2023-06-11T07:10:40 | 245,633,883 | 0 | 0 | null | 2023-06-11T07:10:41 | 2020-03-07T13:02:24 | Python | UTF-8 | Python | false | false | 1,403 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def matrix_path(matrix, s):
"""
:param matrix: List[List[int]]
:param s: str
:return: bool
"""
def new_matrix(m, row, col):
matrix = [[0] * len(m[0]) for k in range(len(m))]
for i in range(len(m)):
for j in range(len(m[0])):
if row == i and col == j:
matrix[i][j] = 0
else:
matrix[i][j] = m[i][j]
return matrix
def has_path(matrix, row, col, i, j, s):
if len(s) == 0:
return True
is_path = False
if i >= 0 and i < row and j >= 0 and j < col and matrix[i][j] == s[0]:
matrix = new_matrix(matrix, i, j)
is_path = has_path(matrix, row, col, i+1, j, s[1:]) or has_path(matrix, row, col, i, j+1, s[1:]) or has_path(matrix, row, col, i-1, j, s[1:]) or has_path(matrix, row, col, i, j-1, s[1:])
return is_path
row = len(matrix)
col = len(matrix[0])
length = len(s)
for i in range(row):
for j in range(col):
if matrix[i][j] == s[0]:
if has_path(matrix, row, col, i, j, s):
return True
return False
if __name__ == "__main__":
matrix = [['a', 'b', 't', 'g'],
['c', 'f', 'c', 's'],
['j', 'd', 'e', 'h']]
s = "bfce"
print(matrix_path(matrix, s)) | [
"lichangqing03@meituan.com"
] | lichangqing03@meituan.com |
eca5c374b5e021bedadf8e451f0537bea855034d | 7f24023d365e013ec0924844c1a872edfb0c75b4 | /tests/trac/test-trac-0091.py | 22249454c7a9b898e8f2d4b3e96a19e52251d7dc | [
"Python-2.0",
"MIT",
"Apache-2.0"
] | permissive | pabigot/pyxb | cd42c024607572c6363682d389e9296caf3f2857 | 5ee5ba54c9f702dc9c9efc2731ee547ecd4dae4a | refs/heads/next | 2023-05-11T03:23:19.599756 | 2023-04-29T20:38:15 | 2023-04-29T20:45:13 | 20,547,850 | 130 | 63 | Apache-2.0 | 2021-08-19T16:52:18 | 2014-06-06T01:49:03 | Python | UTF-8 | Python | false | false | 4,794 | py | # -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.binding.datatypes as xs
import pyxb.binding.basis
import pyxb.utils.domutils
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:simpleType name="f13p8">
<xs:restriction base="xs:decimal">
<xs:totalDigits value="13"/>
<xs:fractionDigits value="8"/>
</xs:restriction>
</xs:simpleType>
<xs:element name="e13p8" type="f13p8"/>
<xs:simpleType name="f15p5">
<xs:restriction base="xs:decimal">
<xs:totalDigits value="15"/>
<xs:fractionDigits value="5"/>
</xs:restriction>
</xs:simpleType>
<xs:element name="e15p5" type="f15p5"/>
</xs:schema>'''
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
#open('code.py', 'w').write(code)
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestTrac_0091 (unittest.TestCase):
def assertAlmostEqual (self, v1, v2, *args, **kw):
from decimal import Decimal
if (isinstance(v1, Decimal)
or isinstance(v2, Decimal)):
if not isinstance(v1, Decimal):
v1 = Decimal(str(v1))
if not isinstance(v2, Decimal):
v2 = Decimal(str(v2))
return super(TestTrac_0091, self).assertAlmostEqual(v1, v2, *args, **kw)
def testBasic (self):
if sys.version_info[:2] >= (2, 7):
# Prior to 2.7 float/Decimal comparisons returned invalid results.
self.assertEqual(1.0, e13p8(1.0))
self.assertEqual(1.0, e13p8('1.0'))
self.assertEqual(1234567890123.0, e13p8('1234567890123'))
self.assertEqual(1234567890123.0, CreateFromDocument('<e13p8>1234567890123</e13p8>'))
self.assertRaises(SimpleFacetValueError, e13p8, '12345678901234')
self.assertAlmostEqual(1.00000001, e13p8('1.00000001'))
self.assertRaises(SimpleFacetValueError, e13p8, '1.000000001')
def testBadCase (self):
# Prior to fix, this raised a facet violation due to rounding
self.assertAlmostEqual(0.00790287, e13p8('0.00790287'))
def test15_5 (self):
from decimal import Decimal
# For compatibility in Python 2.6 PyXB will convert the float
# to a string before invoking the underlying decimal.Decimal
fv = e15p5(1234.56789)
self.assertTrue(fv.validateBinding())
dv = Decimal('1234.56789')
self.assertEqual(fv, dv)
# In Python 2.7 decimal.Decimal will create from the float,
# which will already be in a form that won't validate
if sys.version_info[:2] >= (2, 7):
self.assertRaises(SimpleFacetValueError, e15p5, Decimal(1234.56789))
sv = e15p5('1234.56789')
self.assertEqual(sv, dv)
self.assertTrue(sv.validateBinding())
self.assertTrue(e15p5('1000000.0').validateBinding())
def testRanges (self):
from decimal import Decimal
o14o = [1] + ([0] * 12) + [1]
self.assertEqual(14, len(o14o))
o15o = [1] + ([0] * 13) + [1]
self.assertEqual(15, len(o15o))
o16o = [1] + ([0] * 14) + [1]
self.assertEqual(16, len(o16o))
self.assertTrue(e15p5(Decimal((0, o14o, 0))).validateBinding())
self.assertTrue(e15p5(Decimal((0, o15o, 0))).validateBinding())
# Negative exponents do not reduce total digit count
with self.assertRaises(pyxb.SimpleFacetValueError) as cm:
e15p5(Decimal((0, o16o, 0)))
self.assertEqual(cm.exception.facet, f15p5._CF_totalDigits)
with self.assertRaises(pyxb.SimpleFacetValueError) as cm:
e15p5(Decimal((0, o16o, -1)))
self.assertEqual(cm.exception.facet, f15p5._CF_totalDigits)
# Positive exponents add to total digit count
self.assertTrue(e15p5(Decimal((0, o14o, 1))).validateBinding())
with self.assertRaises(pyxb.SimpleFacetValueError) as cm:
e15p5(Decimal((0, o15o, 1)))
self.assertEqual(cm.exception.facet, f15p5._CF_totalDigits)
with self.assertRaises(pyxb.SimpleFacetValueError) as cm:
e15p5(Decimal((0, o14o, 2)))
self.assertEqual(cm.exception.facet, f15p5._CF_totalDigits)
# Negative exponents affect fractionDigits only
self.assertTrue(e15p5(Decimal((0, o15o, -1))).validateBinding())
self.assertTrue(e15p5(Decimal((0, o15o, -5))).validateBinding())
with self.assertRaises(pyxb.SimpleFacetValueError) as cm:
e15p5(Decimal((0, o15o, -6)))
self.assertEqual(cm.exception.facet, f15p5._CF_fractionDigits)
if __name__ == '__main__':
unittest.main()
| [
"pab@pabigot.com"
] | pab@pabigot.com |
f95738894ed4e42648865c0d36d329579bf719eb | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/132/usersdata/239/41382/submittedfiles/al14.py | 5754055aa4ef2a253321a042cdc75ce2db4c6048 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | # -*- coding: utf-8 -*-
n=int(input("Digite o número de pessoas: "))
i=0
termo=n
while termo>0:
idade=int(input("Digite a idade das pessoas: "))
i=i+idade
termo=termo-1
media=(i/n)
print("%.2f"%media)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
4ff05fb289e34cc72623f03733a105c7aa80dbf4 | 38d6a6ac271fca4d5fff422541bc59df10e66ef5 | /landlab/_info.py | 0a320b3bd55222275db2d17e22a791571245c485 | [
"MIT"
] | permissive | AndresQuichimbo/landlab | 767d3c42eaa4e6477865594666dd49eb3eb684bb | 39fee962ec962a389ae4522a55a17f53a0d37a6e | refs/heads/master | 2020-04-01T21:31:11.422998 | 2020-02-06T23:12:26 | 2020-02-06T23:12:26 | 240,261,264 | 0 | 0 | MIT | 2020-02-13T13:00:25 | 2020-02-13T13:00:24 | null | UTF-8 | Python | false | false | 584 | py | name = "landlab"
cite_as = [
"""@article{hobley2017creative,
title={Creative computing with Landlab: an open-source toolkit
for building, coupling, and exploring two-dimensional
numerical models of Earth-surface dynamics},
author={Hobley, Daniel EJ and Adams, Jordan M and Nudurupati,
Sai Siddhartha and Hutton, Eric WH and Gasparini, Nicole M and
Istanbulluoglu, Erkan and Tucker, Gregory E},
journal={Earth Surface Dynamics},
volume={5},
number={1},
pages={21},
year={2017},
publisher={Copernicus GmbH}
}"""
]
| [
"mcflugen@gmail.com"
] | mcflugen@gmail.com |
ee0b438af7cec5ea6648945f35afe8bbfd233040 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/415/usersdata/296/86500/submittedfiles/exe11.py | 3e55da52506b436ee00c29807dd6008b54ef4d53 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | # -*- coding: utf-8 -*-
n = int(input("Digite um número: "))
soma = 0
while n > 0:
if 10000000 <= n <= 90000000:
resto = n%10000000
n = (n - resto)/10000000
soma = soma + resto
print(soma)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
c22bc6d09d1dba9ee889fd0d138c6d55c329be08 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/BuildLinks1.10/test_input/CJ_16_1/16_1_2_Erisky_solve_B.py | 414d395289b8df01c731ab2d840c390f59fd2a13 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 826 | py | import math
# raw_input() reads a string with a line of input, stripping the '\n' (newline) at the end.
# This is all you need for most Google Code Jam problems.
total = int(raw_input()) # read a line with a single integer
for i in xrange(1, total + 1):
N = int(raw_input())
output = {}
for j in range(2*N-1):
S = raw_input().split()
for x in S:
if x in output:
output[x] = output[x] + 1
else:
output[x] = 1
olist = []
for any in output:
if output[any] % 2 == 1:
olist.append(int(any))
olist.sort()
# print olist
os=""
for gg in olist:
os += str(gg)
os += " "
print "Case #{}: {}".format(i, "".join(os))
# check out .format's specification for more formatting options
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
54b141b59dad195d7bb823334914e9748de22edc | 18f8abb90efece37949f5b5758c7752b1602fb12 | /py/django_tools/django-exceptional/test/example/settings.py | 91dbfbd0d5b7de0bc9be14860552db1f2cde198f | [
"Unlicense"
] | permissive | marceltoben/evandrix.github.com | caa7d4c2ef84ba8c5a9a6ace2126e8fd6db1a516 | abc3fbfb34f791f84e9a9d4dc522966421778ab2 | refs/heads/master | 2021-08-02T06:18:12.953567 | 2011-08-23T16:49:33 | 2011-08-23T16:49:33 | 2,267,457 | 3 | 5 | null | 2021-07-28T11:39:25 | 2011-08-25T11:18:56 | C | UTF-8 | Python | false | false | 2,603 | py | # Django settings for example project.
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), 'src'))
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Zachary Voase', 'z@zacharyvoase.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = 'dev.sqlite3'
DATABASE_USER = ''
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
CACHE_BACKEND = "dummy://"
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '8@+k3lm3=s+ml6_*(cnpbg1w=6k9xpk5f=irs+&j4_6i=62fy^'
EXCEPTIONAL_API_KEY = open("exceptional.key").read().strip()
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'djexceptional.ExceptionalMiddleware',
)
ROOT_URLCONF = 'example.urls'
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates'),
)
INSTALLED_APPS = (
'django.contrib.sessions',
'djexceptional', # So we can run the app tests.
)
| [
"evandrix@gmail.com"
] | evandrix@gmail.com |
612bc2e1e4649e4a7d9af7a4aabbdcc5d0eac099 | 80d50ea48e10674b1b7d3f583a1c4b7d0b01200f | /src/datadog_api_client/v2/model/opsgenie_service_response_attributes.py | 5d6473bfa2a5e8c186c521947868485560bb1cad | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] | permissive | DataDog/datadog-api-client-python | 3e01fa630278ad0b5c7005f08b7f61d07aa87345 | 392de360e7de659ee25e4a6753706820ca7c6a92 | refs/heads/master | 2023-09-01T20:32:37.718187 | 2023-09-01T14:42:04 | 2023-09-01T14:42:04 | 193,793,657 | 82 | 36 | Apache-2.0 | 2023-09-14T18:22:39 | 2019-06-25T22:52:04 | Python | UTF-8 | Python | false | false | 1,920 | py | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import annotations
from typing import Union, TYPE_CHECKING
from datadog_api_client.model_utils import (
ModelNormal,
cached_property,
none_type,
unset,
UnsetType,
)
if TYPE_CHECKING:
from datadog_api_client.v2.model.opsgenie_service_region_type import OpsgenieServiceRegionType
class OpsgenieServiceResponseAttributes(ModelNormal):
@cached_property
def openapi_types(_):
from datadog_api_client.v2.model.opsgenie_service_region_type import OpsgenieServiceRegionType
return {
"custom_url": (str, none_type),
"name": (str,),
"region": (OpsgenieServiceRegionType,),
}
attribute_map = {
"custom_url": "custom_url",
"name": "name",
"region": "region",
}
def __init__(
self_,
custom_url: Union[str, none_type, UnsetType] = unset,
name: Union[str, UnsetType] = unset,
region: Union[OpsgenieServiceRegionType, UnsetType] = unset,
**kwargs,
):
"""
The attributes from an Opsgenie service response.
:param custom_url: The custom URL for a custom region.
:type custom_url: str, none_type, optional
:param name: The name for the Opsgenie service.
:type name: str, optional
:param region: The region for the Opsgenie service.
:type region: OpsgenieServiceRegionType, optional
"""
if custom_url is not unset:
kwargs["custom_url"] = custom_url
if name is not unset:
kwargs["name"] = name
if region is not unset:
kwargs["region"] = region
super().__init__(kwargs)
| [
"noreply@github.com"
] | DataDog.noreply@github.com |
3e40c665d4a08a20bf0af0dfd2e3714d906f4c00 | 8ab61e98b8b4efa7378ad50ee12ea5ec81b8c310 | /thredo/requests.py | 6891e62a04fde54970e922350c53ce1b8bc23643 | [
"MIT"
] | permissive | RalphWalters/thredo | 5a791d0848067e2b028c3040874476d582509543 | ea109c693036764dd192527f9b6bba18d3b18042 | refs/heads/master | 2020-04-05T05:18:56.942365 | 2018-11-08T23:25:54 | 2018-11-08T23:25:54 | 156,589,958 | 1 | 0 | MIT | 2018-11-07T18:21:27 | 2018-11-07T18:21:27 | null | UTF-8 | Python | false | false | 2,746 | py | # requests.py
#
# Session adapter that allows requests to use thredo socket objects.
# This is a bit of plumbing, but it's a clean interface that doesn't
# require any monkeypatching or other low-level magic
__all__ = ['get_session']
# -- Thredo
from . import socket
# -- Requests/third party
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3 import PoolManager, HTTPConnectionPool
from requests.packages.urllib3 import HTTPSConnectionPool
from http.client import HTTPConnection, HTTPSConnection
class ThredoAdapter(HTTPAdapter):
def init_poolmanager(self, connections, maxsize, block):
self.poolmanager = ThredoPoolManager(num_pools=connections,
maxsize=maxsize,
block=block)
class ThredoPoolManager(PoolManager):
def _new_pool(self, scheme, host, port):
# Important!
if scheme == 'http':
return ThredoHTTPConnectionPool(host, port, **self.connection_pool_kw)
if scheme == 'https':
return ThredoHTTPSConnectionPool(host, port, **self.connection_pool_kw)
return super(PoolManager, self)._new_pool(self, scheme, host, port)
class ThredoHTTPConnectionPool(HTTPConnectionPool):
def _new_conn(self):
self.num_connections += 1
return ThredoHTTPConnection(host=self.host,
port=self.port)
class ThredoHTTPSConnectionPool(HTTPSConnectionPool):
def _new_conn(self):
self.num_connections += 1
return ThredoHTTPSConnection(host=self.host,
port=self.port)
class ThredoHTTPConnection(HTTPConnection):
def connect(self):
"""Connect to the host and port specified in __init__."""
# Uses thredo
self.sock = socket.create_connection((self.host, self.port),
self.timeout, self.source_address)
# Important!
if self._tunnel_host:
self._tunnel()
class ThredoHTTPSConnection(HTTPSConnection):
    """http.client.HTTPSConnection that opens its socket via thredo."""
    def connect(self):
        """Connect to the host and port specified in __init__."""
        # Uses thredo
        self.sock = socket.create_connection((self.host, self.port),
                                             self.timeout, self.source_address)
        # Important!
        if self._tunnel_host:
            server_hostname = self._tunnel_host
        else:
            server_hostname = self.host
        # Wrap with TLS; SNI uses the tunnel host when proxying.
        self.sock = self._context.wrap_socket(self.sock, server_hostname=server_hostname)
def get_session():
    """Return a requests Session that routes http/https through thredo sockets."""
    session = requests.Session()
    for scheme in ('http://', 'https://'):
        session.mount(scheme, ThredoAdapter())
    return session
| [
"dave@dabeaz.com"
] | dave@dabeaz.com |
7cbb1ee16366420627e8f8f83747002cf7163930 | f30b91db647dca1f77fffa4b7e26b6c6a68abbc6 | /8_kyu/Who is going to pay for the wall/test_who_is_paying.py | 3d95d5e8ac506893d12e7e9d460f4df00a45d1e8 | [] | no_license | estraviz/codewars | 73caf95519eaac6f34962b8ade543bf4417df5b7 | 5f8685e883cb78381c528a0988f2b5cad6c129c2 | refs/heads/master | 2023-05-13T07:57:43.165290 | 2023-05-08T21:50:39 | 2023-05-08T21:50:39 | 159,744,593 | 10 | 55 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | from who_is_paying import who_is_paying
def test_who_is_paying():
    """Exercise who_is_paying against the kata's sample cases."""
    cases = [
        ("Mexico", ["Mexico", "Me"]),
        ("Melania", ["Melania", "Me"]),
        ("Melissa", ["Melissa", "Me"]),
        ("Me", ["Me"]),
        ("", [""]),
        ("I", ["I"]),
    ]
    for name, expected in cases:
        assert who_is_paying(name) == expected
| [
"javier.estraviz@gmail.com"
] | javier.estraviz@gmail.com |
7e6b93f712239743b62500083227e02253458129 | 92993cff825da80a8ff601572a0c52b0b7d3cbde | /algorithms/Svm/ADMM/L2/ADMM_L2_m43.py | abb30b1ac1a4beb3a53b43d81d55a8428c66bbb9 | [] | no_license | yingzhuoy/MRs-of-linear-models | 06e8b1f84b08c6aa77553813824cf35c1806c5a7 | c3df8299e039a12613f2022b370b8c3e9c2dd822 | refs/heads/master | 2023-04-07T23:09:37.736952 | 2021-04-04T05:33:37 | 2021-04-04T05:33:37 | 265,124,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,061 | py | import numpy as np
from numpy import linalg
#import cvxopt
#from cvxopt import matrix,solvers
from algorithms.clf import Clf
"""
Preconditioned Conjugate Gradient Method
"""
def precond(M, r):
    """Apply the preconditioner matrix M to the residual r (plain product)."""
    return M * r
def inner_prod(A, B):
    """Frobenius inner product <A, B>: flatten both operands and dot them."""
    A_col = np.matrix(A).reshape(-1, 1)
    B_col = np.matrix(B).reshape(-1, 1)
    return np.dot(A_col.T, B_col)
def cg(A, b, x=None, tol=1.0e-6, max_iter=128):
    """QMR-flavoured preconditioned CG solve of A X = b for matrix X.

    A and b are coerced to ``np.matrix``; the preconditioner M is the
    identity, so ``precond`` is a plain multiply. Returns the (m, m)
    iterate X. Note the ``x`` parameter is ignored -- it is overwritten
    with zeros below.
    """
    # precondition
    A = np.matrix(A)
    b = np.matrix(b)
    normb = np.linalg.norm(b, 'fro')
    m = b.shape[0]
    M = np.eye(m)
    x = np.zeros((m, m))
    Aq = (A*x)
    r = b - Aq # m x m
    q = precond(M, r) # m x m
    tau_old = np.linalg.norm(q, 'fro')
    rho_old = inner_prod(r, q)
    theta_old = 0
    Ad = np.zeros((m, m))
    d = np.zeros((m, m))
    res = r.reshape(m, m)
    tiny = 1e-30
    for i in range(max_iter):
        Aq = A * q
        sigma = inner_prod(q, Aq)
        # breakdown guard: direction has (near) zero curvature
        if abs(sigma.item()) < tiny:
            break
        else:
            alpha = rho_old / sigma;
            alpha = alpha.item()
            r = r - alpha * Aq
            r = r.reshape(m, m)
            u = precond(M, r)
            theta = np.linalg.norm(u,'fro')/tau_old
            c = 1 / np.sqrt(1+theta*theta)
            #----bug----
            #tau = tau_old * theta * c
            # NOTE(review): the active line below replaces the commented-out
            # QMR update (tau = tau_old * theta * c). Given the repository
            # ("MRs-of-linear-models", file suffix _m43) this looks like an
            # intentionally seeded mutation -- confirm before "fixing".
            tau = tau_old / theta * c
            gam = c*c*theta_old*theta_old
            eta = c*c*alpha
            d = gam * d + eta * q
            x = x + d
            # stop
            Ad = gam*Ad+eta*Aq
            res = res - Ad
            if np.linalg.norm(res, 'fro') < tol*normb:
                break
            else:
                rho = inner_prod(r, u)
                beta = rho / rho_old
                beta = beta.item()
                q = u + beta * q
                rho_old = rho
                tau_old = tau
                theta_old = theta
    return x
def admm(X, y, max_iter=5000):
    """ADMM solver for the dual of the L2-SVM.

    X : (m, n) data matrix; a bias column of ones is appended internally.
    y : length-m label vector (the caller maps 0 -> -1), cast to float64.
    Returns (w, b): weight vector of length n and scalar bias, recovered
    from the dual variables as w = X^T (y * alpha).
    """
    # solve by inner point method
    m, n = X.shape
    X = np.column_stack((X, np.ones((m, 1))))
    y = y.astype(np.float64)
    data_num = len(y)
    C = 1.0
    # Gram matrix of the augmented data; the diagonal term .5/C comes from
    # the L2 (squared hinge) slack penalty.
    kernel = np.dot(X, np.transpose(X))
    p = np.matrix(np.multiply(kernel,np.outer(y, y))) + np.diag(np.ones(data_num, np.float64)) * .5/C
    e = np.matrix(np.ones([data_num, 1], np.float64))
    bounds = (0, np.inf)
    low, up = bounds
    x = np.ones((m,1))
    tau = 1.618
    sigma = 1
    # initial
    u = np.ones((m, 1))
    t = x
    # Factor once: invA approximates (p + sigma*I)^{-1} via the cg routine above.
    A = p + sigma * np.eye(m)
    I = np.eye(m)
    invA = cg(A, I)
    for it in range(max_iter):
        # update x
        b = e + u + sigma * t
        x = invA * b
        # update y
        t = x - (1/sigma)*u
        t[t < low] = low
        t[t > up] = up
        # update u
        u = u - tau*sigma*(x-t)
        # duality-gap based stopping test
        dual = -(0.5*x.T*(p*x) - e.T*x)
        dual = dual.item()
        y1 = np.reshape(y, (-1, 1))
        lambda1 = np.multiply(x, y1)
        w = np.dot(X.T, lambda1)
        w = np.matrix(w).reshape(-1, 1)
        tmp = np.maximum(1-np.multiply(y1, X*w),0)
        primal = 0.5*np.linalg.norm(w)**2 + 1 * np.sum(tmp)
        primal = primal.item()
        # stop criteria
        if np.abs(dual-primal)/(1+np.abs(dual)+np.abs(primal)) < 1e-12:
            break
        # print(t, np.linalg.norm(gradient))
        # print(np.min(x), np.max(x))
        # print(np.sum(x < -1e-4), np.sum(x>1+1e-4))
        # print(np.abs(dual-primal)/(1+np.abs(dual)+np.abs(primal)))
    # Recover primal weights; the last component is the bias (ones column).
    y1 = np.reshape(y, (-1, 1))
    alpha1 = x
    lambda1 = np.multiply(y1,alpha1)
    w = np.dot(X.T, lambda1)
    w = np.array(w).reshape(-1)
    b = w[n]
    w = w[0:n]
    return w, b
#L2-svm
class ADMM_L2_m43():
    """L2-SVM classifier trained with the ADMM solver above (mutant m43)."""
    def fit(self, X, y):
        """Fit on X with 0/1 labels y and return a Clf(w, b) wrapper.

        NOTE: mutates ``y`` in place (0 labels become -1).
        """
        y[y == 0] = -1
        # add logitR to verify the correctness
        #from sklearn.svm import LinearSVC
        #SVM = LinearSVC(loss='squared_hinge', tol=1e-6, max_iter=100000, verbose=1).fit(X, np.array(y).ravel())
        #w1 = SVM.coef_; b1 = SVM.intercept_
        #w1 = w1.reshape(-1); b1 = b1[0]
        w, b = admm(X, y)
        #print('diff', np.linalg.norm(w1-w), b, b1)
        clf = Clf(w, b)
        return clf
"yingzhuoy@qq.com"
] | yingzhuoy@qq.com |
e7e2dee5d2d5cf21363e6875097b7870703e164a | fa60618b9f68aee452b278c6efb6300e50d90dbc | /py_demo/默尼森数.py | 3ef57dd22c062c2bb03ddc9a5ffa9c3545cb59a4 | [] | no_license | ALICE5/Python | 668feaaf87e88df0d14252c0fcbc8c5b479cb7c8 | 6b2037bd3e6fa5689f355a128739343779d28fe5 | refs/heads/master | 2021-01-01T16:26:57.170646 | 2017-09-18T09:31:30 | 2017-09-18T09:31:30 | 97,832,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | # Judge a number is a prime or not
import math
def isPrime(x):
    """Return True if x is a prime number, by trial division up to sqrt(x).

    Bug fix: the original only rejected x == 1, so 0 and every negative
    number were reported as prime; now anything below 2 returns False.
    """
    if x < 2:
        return False
    k = int(math.sqrt(x))
    for j in range(2, k + 1):
        if x % j == 0:
            return False
    return True
# Generate the num Prime
def prime(num):
    """Return the num-th prime number (prime(1) == 2)."""
    if num == 1:
        return 2
    found = 1      # 2 is already accounted for
    candidate = 1  # advances over odd numbers only
    while found < num:
        candidate += 2
        if isPrime(candidate):
            found += 1
    return candidate
# Generate the no Monisen
def Monisen(no):
    """Return the no-th Mersenne prime: 2**p - 1 with p prime and the result prime."""
    found = 0
    index = 1
    mersenne = 0
    while found < no:
        p = prime(index)
        mersenne = 2 ** p - 1
        if isPrime(mersenne):
            found += 1
        index += 1
    return mersenne
if __name__ == '__main__':
    # Demo: print the 6th Mersenne prime.
    print(Monisen(6))
    # print(Monisen(int(input('Please input the number:'))))
"alice4u@bjtu.edu.cn"
] | alice4u@bjtu.edu.cn |
2b2a6d1bc2ac3e00c7327b4266870bcecb87ea3b | 6d36b40aa5a2fdfd068731b7ee657ec25279bdec | /uncluster/cluster_distributions/gnedin.py | 9821ec84c3c8a1c3e62379f581f647ecf90cdfd8 | [
"MIT"
] | permissive | adrn/uncluster | 5efee493c4319e4e0d07615fb6da6dbdcb172301 | 0e487f7cc17426bbd03ff1f02000bc3437075ab3 | refs/heads/master | 2021-04-30T18:30:48.693052 | 2017-02-21T14:08:54 | 2017-02-21T14:08:54 | 63,977,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,605 | py | from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Third-party
import astropy.units as u
import numpy as np
from scipy.special import gammainc
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.misc import derivative
from ..config import M_min, M_max
__all__ = ['gc_prob_density', 'sample_masses', 'sample_radii']
def _sersic_frac_mass_enclosed(r, n_s=2.2, R_e=4.):
"""
This was adapted from Oleg's C code (uncluster/src/gc_evolution.c).
For the Milky Way, the parameters (r_s, n_s, etc.) don't evolve and z=0.
"""
a_n = 2 * n_s
b_n = 2. * n_s - 1./3. + 0.0098765/n_s + 0.0018/n_s**2 # approximation from Ciotti ...something
# rmax = r_max.to(u.kpc).value
# argc = b_n * (rmax/R_e) ** (1./n_s)
# gamnorm = gammainc(a_n, argc)
gamnorm = 1. # r_max = infinity
arg = b_n * (r/R_e)**(1./n_s)
return gammainc(a_n, arg) / gamnorm
# HACK: I need a function to evaluate the density profile, so I do this numerically...
# - this does a calculation on import (BAD) but it's very fast (~10s of ms)
n_grid = 4096 # MAGIC NUMBER
r_grid = np.logspace(-4, 2.5, n_grid) # kpc
m_grid = _sersic_frac_mass_enclosed(r_grid)
# if necessary, remove duplicate m's where it saturates
_idx, = np.where(m_grid >= 1.)
if len(_idx) > 1:
    r_grid = r_grid[:_idx[0]+1]
    m_grid = m_grid[:_idx[0]+1]
# NOTE(review): dm_dr keeps the full n_grid length even if r_grid was
# truncated above, so the boolean mask below would then be longer than
# r_grid -- confirm the truncation branch is never taken in practice.
dm_dr = np.zeros(n_grid)
for i,r in enumerate(r_grid):
    d = derivative(_sersic_frac_mass_enclosed, r, dx=1E-3*r)
    dm_dr[i] = d
# Keep only finite, positive derivatives for the log-density spline:
# ln(rho) with rho = (dM/dr) / (4 pi r^2), linearly interpolated (k=1).
_idx = np.isfinite(dm_dr) & (dm_dr>0.)
_interp_ln_dens = InterpolatedUnivariateSpline(r_grid[_idx],
                                               np.log(dm_dr[_idx]) - np.log(4*np.pi*r_grid[_idx]**2),
                                               k=1)
def gc_prob_density(r):
    r"""
    Evaluate the **probability** density of the spatial distribution
    of globular clusters following a Hernquist profile.
    This is *not* the mass-density or number-density, but:
    .. math::
        \nu (r) = \int {\rm d}v \, f(r,v)
    .. note::
        This function computes the density numerically using linear interpolation.
        NOTE(review): the interpolation grid above is built from the *Sersic*
        enclosed-mass profile, so "Hernquist" here looks stale -- confirm.
    Parameters
    ----------
    r : float
        Radius in kpc.
    """
    return np.exp(_interp_ln_dens(r))
@u.quantity_input(M_min=u.Msun, M_max=u.Msun)
def sample_masses(M_min=M_min, M_max=M_max, size=1):
    r"""
    Draw cluster masses by inverse transform sampling from the power-law
    initial mass function :math:`p(M) = A M^{-2}` on
    :math:`[M_{\rm min}, M_{\rm max}]`.

    Parameters
    ----------
    M_min : `~astropy.units.Quantity` [mass]
        Lower bound for sampling.
    M_max : `~astropy.units.Quantity` [mass]
        Upper bound for sampling.
    size : int, tuple (optional)
        Shape of the output array.

    Returns
    -------
    masses : `~astropy.units.Quantity` [Msun]
        Masses sampled from the mass function.
    """
    inv_min = 1 / M_min
    # Normalization of the beta = -2 power law.
    norm = 1 / (inv_min - 1 / M_max)
    uniform_draws = np.random.uniform(size=size)
    # Invert the CDF of the power law at the uniform draws.
    return 1 / (inv_min - uniform_draws / norm)
def sample_radii(size=1):
    """
    Draw galactocentric radii from the Sersic mass profile (following
    Gnedin et al. 2014) by inverse-transform sampling on the precomputed
    enclosed-mass grid.

    Parameters
    ----------
    size : int, tuple (optional)
        Shape of the output array.

    Returns
    -------
    radii : `~astropy.units.Quantity` [kpc]
    """
    # Linear spline of ln(r) as a function of enclosed-mass fraction acts
    # as the inverse CDF.
    inverse_cdf = InterpolatedUnivariateSpline(m_grid, np.log(r_grid), k=1)
    draws = np.random.uniform(0, 1, size=size)
    return np.exp(inverse_cdf(draws)) * u.kpc
| [
"adrian.prw@gmail.com"
] | adrian.prw@gmail.com |
ea703a24032f314d7e01b11a64fa900fc1c2c605 | b7683c108e68ee2d28573edf55923eb34cc2f5ee | /5_Video_Analysis/3_Background_substraction/3_GMG.py | e72711d3c0643816ef3b230ff18d92f712be427c | [] | no_license | aCuissot/openVC_win_py_tutorial | cc42ab1a1fb6eaefe5a91c7e1bb1926a776b0e01 | 7186b629747cb16f2bf42a03d2339d3dc3ea77bd | refs/heads/master | 2020-05-18T12:17:04.619047 | 2019-07-10T13:45:00 | 2019-07-10T13:45:00 | 184,403,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | import numpy as np
import cv2 as cv
# Background subtraction demo using OpenCV's GMG algorithm on a sample video.
cap = cv.VideoCapture('../../Data/in/vtest.avi')
# 3x3 elliptical kernel used to clean the foreground mask.
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
fgbg = cv.bgsegm.createBackgroundSubtractorGMG()
while True:
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    # Morphological opening removes speckle noise from the mask.
    fgmask = cv.morphologyEx(fgmask, cv.MORPH_OPEN, kernel)
    cv.imshow('frame', fgmask)
    # ESC (27) quits the display loop.
    k = cv.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv.destroyAllWindows()
| [
"harrypotter9752@gmail.com"
] | harrypotter9752@gmail.com |
c74b74d06dfb216f30a2b9bb4f52d9be55005706 | 429211c01057abcd51e5120d566c7daa0a8e2f33 | /1804/二/day4/class4.py | adce94b9a462cd19775068f56485909388f5f106 | [] | no_license | LDZ-RGZN/b1804 | 2788c922a6d1a6dc11267920a90336d1df93a453 | c57f8b7cf14686036cae3c30a30f07514622b5ca | refs/heads/master | 2021-07-19T12:54:07.031858 | 2018-10-12T02:48:39 | 2018-10-12T02:48:39 | 133,500,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | import time
class cat:
    """Toy demo class (misleadingly named ``cat``) modelling a car.

    ``name``/``name2`` are not set here -- the driver script below assigns
    them on each instance before calling the print methods.
    """
    def __init__(self,lun_zi,color):
        # Number of wheels and colour of the car.
        self.lunzi = lun_zi
        self.color = color
    def pao(self):
        # "A <lunzi>-wheeled <color> <name> car is speeding down the road."
        print ('一辆%d个轮子的%s色%s汽车正在公里上飞驰'%(self.lunzi,self.color,self.name))
    def jiao(self):
        # "This <name> car is honking at the <name2> car ahead."
        print ('这辆%s汽车正在冲着前面的那辆%s汽车鸣笛'%(self.name,self.name2))
    def zw(self):
        # "The <name> car behind rear-ends the <name2> car ahead; applause."
        print ('后面的%s汽车成功的追尾了前面的%s汽车,鼓掌'%(self.name,self.name2))
    def __str__(self):
        # "Brand is <name>, colour is <color>."
        s = '车的品牌是:'+self.name+' 车的颜色是:'+self.color
        return s
bm = cat(4,'红')
bm.name = '宝马'
bm.name2 = '奥迪'
bm.pao()
bm.jiao()
print ('请稍等.即将追尾')
time.sleep(1)
bm.zw()
print (bm)
print ('内存位置是%d'%(id(bm)))
print ('下一波操作即将进行')
time.sleep(1)
bm1 = cat(8,'蓝')
bm1.name = '奥迪'
bm1.name2 = '宝马'
bm1.pao()
bm1.jiao()
print ('请稍等.即将追尾')
time.sleep(1)
bm1.zw()
print (bm1)
print ('内存位置是%d'%(id(bm1)))
| [
"2654213432@qq.com"
] | 2654213432@qq.com |
aa37b24dc0979c643a0174561ca08482a852901f | f8d2521a88e465eed01adc3981c7a173d5c2554b | /round/round0326-0350/round0335/c1.py | 0851e5dcae97aadd46d36170be48457946510b85 | [] | no_license | clarinet758/codeforces | b2a8a349bba40e7761a8ce50dd5ff9a57477b60d | d79870c47bdb109547891a0d076dd173d6d647cf | refs/heads/main | 2021-12-15T05:46:51.000160 | 2021-12-01T12:01:33 | 2021-12-01T12:01:33 | 41,968,658 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import time
import sys
import io
import re
import math
import itertools
import collections
import bisect
#sys.stdin=file('input.txt')
#sys.stdout=file('output.txt','w')
#10**9+7
# Common competitive-programming constants.
mod=1000000007
#mod=1777777777
pi=3.141592653589
IS=float('inf')
# Orthogonal (4-neighbour) grid offsets.
xy=[(1,0),(-1,0),(0,1),(0,-1)]
# Diagonal grid offsets.
bs=[(-1,-1),(-1,1),(1,1),(1,-1)]
def niten(a, b):
    # Piecewise "distance" between a and b, matching the original one-liner:
    # same sign -> difference of magnitudes, opposite signs -> sum of magnitudes.
    if a >= 0 and b >= 0:
        return abs(a - b)
    if a >= 0:
        return a + abs(b)
    if b >= 0:
        return abs(a) + b
    return abs(abs(a) - abs(b))
def gcd(a, b):
    # Greatest common divisor, iterative Euclidean algorithm.
    while b:
        a, b = b, a % b
    return a
# Least common multiple; note the true division (Python-2 style) -- on
# Python 3 this returns a float, so keep '/' as-is for behavioural parity.
def lcm(a,b): return a*b/gcd(a,b)
def euclid_dis(x1, y1, x2, y2):
    # Euclidean distance between (x1, y1) and (x2, y2).
    dx = x1 - x2
    dy = y1 - y2
    return (dx * dx + dy * dy) ** 0.5
# Returns 1 when the mixed sum (yb-ya)*(yd-yc)+(xb-xa)*(xd-xc) is ~0, else 0.
# NOTE(review): the dy*dy + dx*dx mix is unusual for a perpendicularity test
# (a dot product would be dx*dx + dy*dy) -- confirm intended geometry.
def choco(xa,ya,xb,yb,xc,yc,xd,yd): return 1 if abs((yb-ya)*(yd-yc)+(xb-xa)*(xd-xc))<1.e-10 else 0
# Python 2 script (raw_input / print statement).
n=int(raw_input())
l=map(int,raw_input().split())
a=[0]*(n+1)
# a[v] chains off a[v-1]: appears to track, in input order, the longest run
# of consecutive values v-1, v that can be kept -- confirm against the task.
for i in l:
    a[i]=a[i-1]+1
# Remove everything outside the best chain.
print n-max(a)
ans=chk=0
#end = time.clock()
#print end - start
| [
"clarinet758@gmail.com"
] | clarinet758@gmail.com |
28a3bfc7da27cf64ba42e18b5e5435e5e8ca427d | bd9d82f616f8658d8431bedaed524647f0f2d7a5 | /commands/delete_empty_dirs_fm.py | d0a52082cf2bf783c1e5d20a8dccb62bdb85588d | [] | no_license | alclass/PyMirrorFileSystemsByHash | d4331523e9948c119f98c248ca3daf3a71eb8f0b | d59d35dfef5107183309fde825ab082fbb4dc465 | refs/heads/master | 2023-02-20T15:38:24.432268 | 2023-02-17T20:58:39 | 2023-02-17T20:58:39 | 49,807,870 | 0 | 0 | null | 2021-12-12T21:31:28 | 2016-01-17T07:40:28 | Python | UTF-8 | Python | false | false | 1,232 | py | #!/usr/bin/env python3
"""
delete_empty_dirs_fm.py
Deletes / removes empty directory in the target dirtree.
Usage:
$delete_empty_dirs_fm.py <prunepath>
Example:
$delete_empty_dirs_fm.py "/Science/Physics/Einsteinian Relativity"
Explanation:
The above (hypothetical) example will remove all empty directories inside folder "Einsteinian Relativity".
"""
import os
import sys
import default_settings as defaults
import fs.dirfilefs.dir_n_file_fs_mod as df
def show_help_cli_msg_if_asked():
  """Print the module docstring and exit when -h/--help is on the command line."""
  help_flags = ('-h', '--help')
  for argument in sys.argv:
    if argument in help_flags:
      print(__doc__)
      sys.exit(0)
def prune_dirtree_from_prunepath(prunepath):
  """Remove empty folders under prunepath and print a short report.

  Raises OSError when prunepath is not an existing directory.
  """
  if not os.path.isdir(prunepath):
    raise OSError('Error: prunepath (%s) is not a directory' % prunepath)
  counts = df.prune_dirtree_deleting_empty_folders(prunepath)
  n_visited, n_removed, n_failed = counts
  print('Report delete_empty_dirs_fm.py')
  print('prune path', prunepath)
  print('n_visited =', n_visited, 'n_removed =', n_removed, 'n_failed =', n_failed)
def process():
  """
  Entry point: honour -h/--help, resolve the prune path from the defaults
  module, then prune empty directories under it.
  """
  show_help_cli_msg_if_asked()
  prunepath, _ = defaults.get_src_n_trg_mountpath_args_or_default()
  prune_dirtree_from_prunepath(prunepath)
if __name__ == '__main__':
process()
| [
"livrosetc@yahoo.com.br"
] | livrosetc@yahoo.com.br |
5d77d8ac4fa0e33cef967a4ca48b0dde1c4f790f | b3729186993105603a77016d6a911932fec06630 | /recruiters/admin.py | eaa3ca4b800202fd737efd8323b6ab526769977c | [] | no_license | MichalDrosio/grades | 05425498bbee3873ebe7128f96227491861b1395 | a6fa146ef1ac06f23f423eaf2de43c23f0faf754 | refs/heads/main | 2022-12-30T06:31:04.100722 | 2020-10-16T13:22:10 | 2020-10-16T13:22:10 | 302,673,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | from django.contrib import admin
from recruiters.models import Recruiter
# Register your models here.
@admin.register(Recruiter)
class RecruiterAdmin(admin.ModelAdmin):
list_display = ['first_name', 'last_name'] | [
"drosio.michal@gmail.com"
] | drosio.michal@gmail.com |
ddeb531997001b9fb93293cd39b1753ce9651b68 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03829/s592787585.py | ebd0d0601946a683251cf998983ed53be8aa1d91 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | #!/usr/bin/env python3
import sys
INF = float("inf")
def solve(N: int, A: int, B: int, X: "List[int]"):
    """Minimum total cost to pass through checkpoints X[0..N-1].

    Each gap between consecutive checkpoints costs the cheaper of walking
    (A per unit of distance) or teleporting (flat cost B).

    Prints the total and now also returns it (the original returned None),
    making the function directly testable; callers that ignored the return
    value are unaffected.
    """
    tot = sum(min((X[i + 1] - X[i]) * A, B) for i in range(N - 1))
    print(tot)
    return tot
def main():
    """Read N, A, B and the N checkpoint coordinates from stdin, then solve."""
    def iterate_tokens():
        # Lazily yield whitespace-separated tokens from stdin.
        for line in sys.stdin:
            for word in line.split():
                yield word
    tokens = iterate_tokens()
    N = int(next(tokens))  # type: int
    A = int(next(tokens))  # type: int
    B = int(next(tokens))  # type: int
    X = [int(next(tokens)) for _ in range(N)]  # type: "List[int]"
    solve(N, A, B, X)
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f374c7cef3de091b5e831f7f98e75da674e18e19 | 003ffcf8144565404636f3d74590a8d6b10a90a4 | /236-lowest-common-ancestor-of-a-binary-tree/236-lowest-common-ancestor-of-a-binary-tree.py | 60b7ab8908ab9f3753694aa14207fbaaac761668 | [] | no_license | congve1/leetcode | fb31edf93049e21210d73f7b3e7b9b82057e1d7a | ce1e802b5052da2cdb919d6d7e39eed860e0b61b | refs/heads/master | 2020-05-13T19:19:58.835432 | 2019-05-06T00:44:07 | 2019-05-06T00:44:07 | 181,652,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def lowestCommonAncestor(self, root, p, q):
        """
        Return the lowest common ancestor of nodes p and q in a binary tree.

        :type root: TreeNode
        :type p: TreeNode
        :type q: TreeNode
        :rtype: TreeNode

        The original block was an empty stub; this implements the standard
        recursion: a node is the LCA when p and q are found in different
        subtrees, or when the node itself is p or q.
        """
        if root is None or root is p or root is q:
            return root
        left = self.lowestCommonAncestor(root.left, p, q)
        right = self.lowestCommonAncestor(root.right, p, q)
        if left and right:
            return root
        return left if left else right
| [
"congve1@live.com"
] | congve1@live.com |
54c8b17645a160598c0851c1e30eb074dbff22f8 | 1398346d986f4b3b79a557854ec9293690a20cc7 | /Tools/BuildSlaveSupport/ews-app/ews/urls.py | 2010c72b63e80288c93f9af2c823b33a8e6abc3d | [] | no_license | feltnerm/webkit | 49607fc29ccb2082654f6a5eb97e4f51c78527d5 | f50e47081541dcf3f69bcfc36d55867441aa3325 | refs/heads/master | 2023-03-18T08:41:03.727396 | 2020-02-10T21:14:39 | 2020-02-10T21:14:39 | 239,619,580 | 1 | 0 | null | 2020-02-10T21:43:21 | 2020-02-10T21:43:20 | null | UTF-8 | Python | false | false | 2,056 | py | # Copyright (C) 2018-2019 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.conf.urls import url
from ews.views.index import Index
from ews.views.results import Results
from ews.views.statusbubble import StatusBubble
from ews.views.retrypatch import RetryPatch
from ews.views.submittoews import SubmitToEWS
app_name = 'ews'
urlpatterns = [
# ex: /
url(r'^$', Index.as_view(), name='index'),
# ex: /results/
url(r'^results/$', Results.as_view(), name='results'),
# ex: /retry/
url(r'^retry/$', RetryPatch.as_view(), name='retrypatch'),
# ex: /status-bubble/5
url(r'^status-bubble/(?P<patch_id>[0-9]+)/$', StatusBubble.as_view(), name='statusbubble'),
# ex: /submit-to-ews/
url(r'^submit-to-ews/$', SubmitToEWS.as_view(), name='submittoews'),
]
| [
"aakash_jain@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc"
] | aakash_jain@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc |
3815b2c20d699b436ba4bc74ed8c267903c0ac7b | ddcf878cca43d49f73fd673279a97e82ced521e8 | /examples/phylesystem/ott_status_report.py | 0fd8436424d6ce7f141fc38e84a759947ba49e8f | [
"BSD-2-Clause",
"Python-2.0"
] | permissive | OpenTreeOfLife/peyotl | ca5fcbc4f1754c3da7a25c93d89cfeaaad17057f | b50f7217966c18195c9b52be42454513ffa3e7f3 | refs/heads/master | 2023-08-03T14:35:46.793662 | 2023-07-26T20:30:08 | 2023-07-26T20:30:08 | 16,637,087 | 6 | 4 | BSD-2-Clause | 2023-07-24T20:02:30 | 2014-02-08T05:52:12 | Jupyter Notebook | UTF-8 | Python | false | false | 2,568 | py | #!/usr/bin/env python
"""Trying to make a report that corresponds to
https://github.com/OpenTreeOfLife/germinator/wiki/Overview-of-repository-statistics
"""
import time
start_clock = time.time()
from peyotl.phylesystem.phylesystem_umbrella import Phylesystem
from peyotl.nexson_syntax import get_nexml_el
from peyotl import gen_otu_dict, iter_node
from peyotl.manip import iter_trees
import codecs
import json
import sys
out = codecs.getwriter('utf-8')(sys.stdout)
phy = Phylesystem()
# Start all of the properties for the report at 0
report_properties = ['reported_study_count',
'study_count',
'OTU_count',
'unmapped_OTU_count',
'unique_OTU_count',
'nominated_study_count',
'nominated_study_OTU_count',
'nominated_study_unique_OTU_count',
'nominated_study_unmapped_OTU_count',
'run_time']
reported_study_count = 0
study_count = 0
OTU_count = 0
unmapped_OTU_count = 0
unique_OTU_count = 0
nominated_study_count = 0
nominated_study_OTU_count = 0
nominated_study_unique_OTU_count = 0
nominated_study_unmapped_OTU_count = 0
run_time = 0
ott_id_set = set()
nominated_ott_id_set = set()
# Tally OTU/mapping statistics across every study in the phylesystem.
for study_id, n in phy.iter_study_objs():
    reported_study_count += 1
    otu_dict = gen_otu_dict(n)
    # Skip studies with no OTUs at all.
    if not bool(otu_dict):
        continue
    nex_obj = get_nexml_el(n)
    study_count += 1
    # A study is "nominated" unless explicitly flagged as not intended
    # for synthesis.
    not_intended_for_synth = nex_obj.get('^ot:notIntendedForSynthesis')
    intended_for_synth = (not_intended_for_synth is None) or (not_intended_for_synth is False)
    if intended_for_synth:
        nominated_study_count += 1
        nominated_study_OTU_count += len(otu_dict)
    OTU_count += len(otu_dict)
    for oid, o in otu_dict.items():
        ott_id = o.get('^ot:ottId')
        if ott_id is None:
            # OTU with no OTT taxon mapping.
            unmapped_OTU_count += 1
            if intended_for_synth:
                nominated_study_unmapped_OTU_count += 1
        else:
            ott_id_set.add(ott_id)
            if intended_for_synth:
                nominated_ott_id_set.add(ott_id)
unique_OTU_count = len(ott_id_set)
nominated_study_unique_OTU_count = len(nominated_ott_id_set)
end_clock = time.time()
run_time = end_clock - start_clock
#################################################
# write variables in local scope in a JSON blob
report = {}
for prop in report_properties:
report[prop] = locals()[prop]
json.dump(report, out, sort_keys=True, indent=2, separators=(',', ': '))
out.write('\n')
| [
"mtholder@gmail.com"
] | mtholder@gmail.com |
7849d81916ebfbd27f3cf13b5e2beedef0e36485 | 2c74bb301f1ed83b79254944183ac5a18a639fdf | /homeassistant/components/uptime/const.py | bbce80214747bdfbdf17642a0a22328308f9e3e1 | [
"Apache-2.0"
] | permissive | Adminiuga/home-assistant | 5bec93007ddac1a268cc359bf7e48530c5f73b38 | dcf68d768e4f628d038f1fdd6e40bad713fbc222 | refs/heads/dev | 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 | Apache-2.0 | 2023-02-22T06:14:31 | 2018-03-05T14:11:09 | Python | UTF-8 | Python | false | false | 206 | py | """Constants for the Uptime integration."""
from typing import Final
from homeassistant.const import Platform
# Integration domain slug.
DOMAIN: Final = "uptime"
# Platforms this integration sets up.
PLATFORMS: Final = [Platform.SENSOR]
# Default entity name.
DEFAULT_NAME: Final = "Uptime"
| [
"noreply@github.com"
] | Adminiuga.noreply@github.com |
65b07c011b7740295f45d257c0828bc529be2270 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2281/60722/273569.py | 6d9e698fb32bc6a21a6cd03a7a25481fe3bf9c89 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | T=int(input())
for m in range(T):
N=int(input())
string=input().split(" ")
result=[]
for i in range(N):
string[i]=int(string[i])
for i in range(N):
index=1
for j in range(i+1,N):
if string[j]>string[i]:
index=0
if index==1:
result.append(string[i])
print(" ".join(str(i) for i in result))
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
86f0dcff18cacb525ba4a9744c6204c57e8a82bf | caf192dbc1ca90fee18bb4ce170d37eb14870ec5 | /Chapter-5/8. Caesar cipher improved.py | e8e8417f0af034fccd1069c3f4c9c7d4851dc931 | [] | no_license | Dfredude/PythonZelle | 858b00f5eacce841173c64b3cecd978dedbeb145 | 1923fe84df604968eebc5269f23b7c0f167d55f0 | refs/heads/main | 2023-08-30T21:45:57.070344 | 2021-10-17T01:32:57 | 2021-10-17T01:32:57 | 359,041,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | def main():
#Get plaintext(p_text) and key(x) from the user
p_text = str(input("Enter the message you'd like encrypted:\n"))
key = int(input("What's the key?: "))
p_text = p_text.upper()
#Create string of letters
table = "abcdefghijklmnopqrstuvwxyz"
full_table = table * 6
#Check if there are spaces
#Convert plaintext to ciphertext(c_text) using cipher loop
result = ""
for ch in p_text:
if ch == ' ':
result += ' '
else:
new_n = ord(ch) + 13
result += full_table[new_n+key]
print("Your encoded message is {0}.".format(result))
main() | [
"dominguezlucio@outlook.com"
] | dominguezlucio@outlook.com |
84db13e8ee0d75e7c968eabc624c915b95174b9a | ba054fa1ec409011444e9c6b963309745e150d6f | /ave_SR/prueba_carga/loadStateData.py | ea4caae0a5653c6b78ca817bb8466e5022a68528 | [] | no_license | berndhahnebach/XCmodels | a6500fdde253dea10ef2bb64b7ebc3dbfc2577c2 | 4acdd7747abd7cd71f5ef580f65e93359560e5a9 | refs/heads/master | 2020-04-02T23:36:36.385054 | 2018-10-20T16:49:21 | 2018-10-20T16:49:21 | 154,873,006 | 0 | 0 | null | 2018-10-26T17:52:36 | 2018-10-26T17:52:35 | null | UTF-8 | Python | false | false | 7,436 | py | # -*- coding: utf-8 -*-
'''In this script we define default data of load cases to be used (or changed)
while displaying loads or results associated to single load cases
'''
from postprocess.reports import graphical_reports
'''
Definition of record objects with these attributes:
loadCaseName: name of the load case to be depicted
loadCaseDescr: description text of the load case
loadCaseExpr: mathematical expression to define the load case (ex:
'1.0*GselfWeight+1.0*DeadLoad')
setsToDispLoads: ordered list of sets of elements to display loads
setsToDispBeamLoads: ordered list of sets of beam elements to display loads
(defaults to [])
compElLoad: component of load on beam elements to be represented
available components: 'axialComponent', 'transComponent',
'transYComponent','transZComponent'
unitsScaleLoads: factor to apply to loads if we want to change
the units (defaults to 1).
unitsLoads: text to especify the units in which loads are
represented (defaults to 'units:[m,kN]')
vectorScaleLoads: factor to apply to the vectors length in the
representation of loads (defaults to 1 -> auto-scale).
vectorScalePointLoads: factor to apply to the vectors length in the
representation of nodal loads (defaults to 1).
multByElemAreaLoads: boolean value that must be True if we want to
represent the total load on each element
(=load multiplied by element area) and False if we
are going to depict the value of the uniform load
per unit area (defaults to False)
listDspRot: ordered list of displacement or rotations to be displayed
available components: 'uX', 'uY', 'uZ', 'rotX', rotY', 'rotZ'
(defaults to ['uX', 'uY', 'uZ'])
setsToDispDspRot: ordered list of sets of elements to display displacements
or rotations
unitsScaleDispl: factor to apply to displacements if we want to change
the units (defaults to 1).
unitsDispl: text to especify the units in which displacements are
represented (defaults to '[m]'
listIntForc: ordered list of internal forces to be displayed as scalar field
over «shell» elements
available components: 'N1', 'N2', 'M1', 'M2', 'Q1', 'Q2'
(defaults to ['N1', 'N2', 'M1', 'M2', 'Q1', 'Q2'])
setsToDispIntForc: ordered list of sets of elements (of type «shell»)to
display internal forces
listBeamIntForc: ordered list of internal forces to be displayed
as diagrams on lines for «beam» elements
available components: 'N', 'My', 'Mz', 'Qy', 'Qz','T'
(defaults to ['N', 'My', 'Mz', 'Qy', 'Qz','T'])
setsToDispBeamIntForc: ordered list of sets of elements (of type «beam»)to
display internal forces (defaults to [])
scaleDispBeamIntForc: tuple (escN,escQ,escM) correponding to the scales to
apply to displays of, respectively, N Q and M beam internal
forces (defaults to (1.0,1.0,1.0))
unitsScaleForc: factor to apply to internal forces if we want to change
the units (defaults to 1).
unitsForc: text to especify the units in which forces are
represented (defaults to '[kN/m]')
unitsScaleMom: factor to apply to internal moments if we want to change
the units (defaults to 1).
unitsMom: text to especify the units in which bending moments are
represented (defaults to '[kN.m/m]')
viewName: name of the view that contains the renderer (available standard
views: "XYZPos", "XYZNeg", "XPos", "XNeg","YPos", "YNeg",
"ZPos", "ZNeg", "+X+Y+Z", "+X+Y-Z", "+X-Y+Z", "+X-Y-Z",
"-X+Y+Z", "-X+Y-Z",
"-X-Y+Z", "-X-Y-Z") (defaults to "XYZPos")
hCamFct: factor that applies to the height of the camera position
in order to change perspective of isometric views
(defaults to 1, usual values 0.1 to 10)
viewNameBeams: name of the view for beam elements displays (defaults to "XYZPos")
hCamFctBeams: factor that applies to the height of the camera position for
beam displays (defaults to 1)
'''
Q1=graphical_reports.RecordLoadCaseDisp(loadCaseName='Q1',loadCaseDescr='Q1: Prueba de carga estática',loadCaseExpr='1.0*Q1',setsToDispLoads=[overallSet],setsToDispDspRot=[dintel],setsToDispIntForc=[dintel])
Q1.unitsScaleLoads=1e-3
Q1.unitsScaleDispl=1e3
Q1.unitsDispl='[mm]'
Q1.unitsScaleMom=1e-3
Q1.unitsMom='[m.kN]'
Q1.unitsScaleForc=1e-3
Q1.unitsForc='[kN]'
Q1.viewName="XYZPos"
Q1.hCamFct=1
Q1.listDspRot=[]
Q1.listIntForc=['M2']
Q2=graphical_reports.RecordLoadCaseDisp(loadCaseName='Q2',loadCaseDescr='Q2: tren de cargas ferroviario (2 vías)',loadCaseExpr='1.00*Q2',setsToDispLoads=[overallSet],setsToDispDspRot=[dintel],setsToDispIntForc=[dintel])
Q2.unitsScaleLoads=1e-3
Q2.unitsScaleDispl=1e3
Q2.unitsDispl='[mm]'
Q2.unitsScaleMom=1e-3
Q2.unitsMom='[m.kN]'
Q2.unitsScaleForc=1e-3
Q2.unitsForc='[kN]'
Q2.viewName="XYZPos"
Q2.hCamFct=1
Q2.listDspRot=[]
Q2.listIntForc=['M2']
Q3=graphical_reports.RecordLoadCaseDisp(loadCaseName='Q3',loadCaseDescr='Q3: tren de cargas ferroviario (1 vía)',loadCaseExpr='1.00*Q3',setsToDispLoads=[dintel],setsToDispDspRot=[dintel],setsToDispIntForc=[dintel])
Q3.unitsScaleLoads=1e-3
Q3.unitsScaleDispl=1e3
Q3.unitsDispl='[mm]'
Q3.unitsScaleMom=1e-3
Q3.unitsMom='[m.kN]'
Q3.unitsScaleForc=1e-3
Q3.unitsForc='[kN]'
Q3.viewName="XYZPos"
Q3.hCamFct=1
Q3.listDspRot=[]
Q3.listIntForc=['M2']
PrueCarga=graphical_reports.RecordLoadCaseDisp(loadCaseName='PC',loadCaseDescr='PrueCarga: Prueba de carga estática',loadCaseExpr='1.0*G1+1.00*G3+1.0*Q1',setsToDispLoads=[dintel],setsToDispDspRot=[dintel],setsToDispIntForc=[dintel])
PrueCarga.unitsScaleLoads=1e-3
PrueCarga.unitsScaleDispl=1e3
PrueCarga.unitsDispl='[mm]'
PrueCarga.unitsScaleMom=1e-3
PrueCarga.unitsMom='[m.kN]'
PrueCarga.unitsScaleForc=1e-3
PrueCarga.unitsForc='[kN]'
PrueCarga.viewName="XYZPos"
PrueCarga.hCamFct=1
PrueCarga.listDspRot=[]
PrueCarga.listIntForc=['M2']
Qtren2vias=graphical_reports.RecordLoadCaseDisp(loadCaseName='TF2',loadCaseDescr='Qtren: tren de cargas ferroviario',loadCaseExpr='1.00*G1 + 1.00*G2 + 1.00*G3 + 1.00*Q2',setsToDispLoads=[dintel],setsToDispDspRot=[dintel],setsToDispIntForc=[dintel])
Qtren2vias.unitsScaleLoads=1e-3
Qtren2vias.unitsScaleDispl=1e3
Qtren2vias.unitsDispl='[mm]'
Qtren2vias.unitsScaleMom=1e-3
Qtren2vias.unitsMom='[m.kN]'
Qtren2vias.unitsScaleForc=1e-3
Qtren2vias.unitsForc='[kN]'
Qtren2vias.viewName="XYZPos"
Qtren2vias.hCamFct=1
Qtren2vias.listDspRot=[]
Qtren2vias.listIntForc=['M2']
Qtren1via=graphical_reports.RecordLoadCaseDisp(loadCaseName='TF1',loadCaseDescr='Qtren: tren de cargas ferroviario',loadCaseExpr='1.00*G1 + 1.00*G2 + 1.00*G3 + 1.00*Q3',setsToDispLoads=[dintel],setsToDispDspRot=[dintel],setsToDispIntForc=[dintel])
Qtren1via.unitsScaleLoads=1e-3
Qtren1via.unitsScaleDispl=1e3
Qtren1via.unitsDispl='[mm]'
Qtren1via.unitsScaleMom=1e-3
Qtren1via.unitsMom='[m.kN]'
Qtren1via.unitsScaleForc=1e-3
Qtren1via.unitsForc='[kN]'
Qtren1via.viewName="XYZPos"
Qtren1via.hCamFct=1
Qtren1via.listDspRot=[]
Qtren1via.listIntForc=['M2']
| [
"ana.Ortega.Ort@gmail.com"
] | ana.Ortega.Ort@gmail.com |
bedc758c9a51a568cc9c7b658fc0ecc6f2e0d95f | 9a257043e72660ba8ba64bafbd626c7cd2638fad | /demos/demo3.py | 0445986140b7e45ee5705195ce62abfbcb846da7 | [
"MIT"
] | permissive | desecho/django-tqdm | 1b38a4955b8205ca18a215eea9ea4ba969c0baaa | 053d2a259a503a574fa29960d66568feba85d25e | refs/heads/master | 2023-08-07T18:40:52.643836 | 2022-11-01T05:08:47 | 2022-11-03T04:12:35 | 88,812,309 | 15 | 0 | MIT | 2023-09-01T04:02:46 | 2017-04-20T02:33:56 | Python | UTF-8 | Python | false | false | 294 | py | """Demo 3 - Vanilla tqdm with default settings for comparison."""
import sys
from time import sleep
from tqdm import tqdm
# Drive a 50-step vanilla tqdm bar, writing one message to stdout and
# one to stderr mid-run without corrupting the bar display.
t = tqdm(total=50)
for x in range(50):
    sleep(0.02)
    t.update(1)
    if x == 25:
        t.write("info")
    if x == 40:
        t.write("error", file=sys.stderr)
| [
"desecho@gmail.com"
] | desecho@gmail.com |
975f5ad103c66ee683287f7f8c490cd1822a58f7 | cb1fb3bf87b3f7006b564a0f2acd2d68e5d5ffaa | /pyram/gsf/run.py | aa7a2ce83652d1e311fa6636ac4285f33e5b39e0 | [
"GPL-3.0-only",
"MIT"
] | permissive | Hoseung/pyRamAn | 2778f8b12ca966e7586ebf077a964aecd1654223 | f9386fa5a9f045f98590039988d3cd50bc488dc2 | refs/heads/master | 2021-06-22T18:35:06.478492 | 2021-06-05T03:26:31 | 2021-06-05T03:26:31 | 227,741,934 | 1 | 1 | MIT | 2020-03-04T12:39:06 | 2019-12-13T02:49:30 | Jupyter Notebook | UTF-8 | Python | false | false | 1,146 | py | import sys, os, numexpr
import gsf
print('This script calls gsf given a valid simulation output file, snaphot_file (required input)')
print('gsf.py is a collection of functions that cover the computation of the gravitational potential,')
print('a wrapper for the Gaussian Mixture Models of scikit-learn and various plotting options.')
print('To have an idea of the various arguments that can be set use verbose=True.')
print('If no optional argument is given, the code will look for 2 clusters in the kinematic stellar space of')
print('(jz/jc,jp/jc,binding_energy) of the most massive halo in snaphot_file.')
print('The runtime for the computation of the potential scales as ~N^2')
print('If you want to run this part of gsf in parallel, set the system variable OMP_NUM_THREADS first.')
snaphot_file = '/scratch/database/nihao/gasoline2.1/g8.26e11/g8.26e11.01024'
out_dir='./'
print('Running gsf for the sim in %s'%snaphot_file)
gsf.wgsf(snaphot_file, out_dir=out_dir, number_of_clusters=2, covariance_type='full', whiten_data=None,
halo_id=1, radius_align=0.1, align_with='baryon', n_init=1, plot=True, verbose=True) | [
"hopung@gmail.com"
] | hopung@gmail.com |
9670b3eacf994b8b048b41b1e35eb381b467e6fe | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/194.py | 52851c11321931979d7349e69ceeae6308d70d7a | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | file_in = open('B-large.in', 'r')
# Google Code Jam "Tidy Numbers": for each input N, output the largest
# number <= N whose digits are non-decreasing left to right.
file_out = open('b.out', 'w')
T = int(file_in.readline())
for t in range(1, T+1):
    n = list(file_in.readline().strip())
    n = [int(i) for i in n]
    i = 1
    k = None
    # Scan left-to-right for the first descent (a digit smaller than its
    # predecessor); decrement the digit before it and remember the spot.
    while(i < len(n)):
        if(n[i] < n[i-1]):
            k = i
            n[i-1] -= 1
            break
        i += 1
    # Every digit from the descent onward becomes 9 (the maximal tidy tail).
    while(i < len(n)):
        n[i] = 9
        i += 1
    if k is not None:
        # The decrement may have introduced a new descent further left;
        # keep propagating it towards the most significant digit.
        while(k > 1):
            k -= 1
            if(n[k] < n[k-1]):
                n[k] = 9
                n[k-1] -= 1
            else:
                break
    # Drop leading zeros created by decrementing the first digit.
    # NOTE(review): `is 0` relies on CPython small-int caching; `== 0`
    # is the safe spelling.
    while n[0] is 0:
        n.pop(0)
    n = [str(i) for i in n]
    ans = ''.join(n)
    file_out.write("Case #{}: ".format(t))
    file_out.write(ans)
    file_out.write('\n')
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
af84c590c0b37d96166d6afa222258397d069324 | d9232bb31eb01eb97014bb5c769e0dd210512769 | /python2/classes/person.py | 49b723a7627db3a17556b313fcce5f1e07d523cc | [
"MIT"
] | permissive | hygull/pyrandocs | c5d0defff6a9465678f713deed384a097bbe0e02 | 31cd2a4fc23b91c692e104f533ce3c4d21698ff5 | refs/heads/master | 2020-04-20T10:44:59.921958 | 2019-02-10T10:34:13 | 2019-02-10T10:34:13 | 168,797,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,163 | py | from datetime import date
class Person(object):
    """
    Person
    ======
    - Holds basic personal details and derives a human-readable age
      string from a 'dd-mm-yyyy' date-of-birth string.
    """

    def __init__(self, fullname, username, dob, email, profession, address):
        super(Person, self).__init__()
        self.fullname = fullname
        self.username = username
        # Parse the date first: the age string below is derived from it.
        self.dob = self.__parse_dob(dob)
        self.age = self.__age_message()
        self.email = email
        self.profession = profession
        self.address = address

    def __parse_dob(self, dob):
        """Convert a 'dd-mm-yyyy' string into a datetime.date."""
        day, month, year = (int(part) for part in dob.split('-'))
        return date(year, month, day)

    def __age_message(self):
        """Build an approximate age string relative to today's date."""
        elapsed = (date.today() - self.dob).days
        years = elapsed / 365.25
        days = elapsed % 365.25
        if years or days:
            return '{0:d} years {1:d} days (approx.)'.format(int(years), int(days))
        return 'Unborn baby'

    def details(self):
        """Print every attribute of this person, one per line."""
        for field_name, field_value in self.__dict__.items():
            print('{0:15s} - {1}'.format(field_name, field_value))
# Demo entry point: build a sample Person and print its attributes.
if __name__ == "__main__":
    person = Person(
        'Rishikesh Agrawani', 'hygull',
        '14-05-1992', 'rishikesh0014051992@gmail.com',
        'Python/Node developer', 'Banaglore, India'
    )

    person.details()
    # The string below is sample output of the call above; attribute
    # order follows the instance __dict__ and may vary by Python version.
    """
    username        - hygull
    dob             - 1992-05-14
    age             - 26 years 263 days (approx.)
    profession      - Python/Node developer
    address         - Banaglore, India
    fullname        - Rishikesh Agrawani
    email           - rishikesh0014051992@gmail.com
    """
| [
"rishikesh0014051992@gmail.com"
] | rishikesh0014051992@gmail.com |
a80ac9bcd6682244b0f36d5f03d259cfe6d63e1c | 3e06c2e64c14c3e3486cd3604268f12510fdeb56 | /chatbot/actions/actions.py | a89ec89ba87288f0c31ed895a8ea90df9b8a36f2 | [
"Apache-2.0"
] | permissive | exactpro/nostradamus | 42296e9d4762ac6d7364a665dd5cd74117caacc8 | 80df847a012374ad2b702cc9f9c9cb46c1153ee7 | refs/heads/master | 2022-09-29T08:49:14.505795 | 2021-12-21T12:43:01 | 2021-12-21T12:43:01 | 162,601,150 | 32 | 8 | Apache-2.0 | 2022-09-13T23:04:20 | 2018-12-20T15:58:05 | TypeScript | UTF-8 | Python | false | false | 5,976 | py | from typing import Dict, Text, Any, List, Union, Optional
from json import loads, JSONDecodeError
from rasa_sdk.events import EventType, SlotSet
from rasa_sdk.forms import FormAction, REQUESTED_SLOT, Form
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.events import AllSlotsReset
from .responses import RESPONSES, WORKFLOW
from rasa_sdk import Tracker, Action
from .api.report_generator import (
generate_report,
request_values,
generate_report_payload,
check_issues,
)
def get_action_with_help_intent(latest_intent: str) -> list:
    """Return the names of all workflow actions mapped to an intent.

    Parameters
    ----------
    latest_intent:
        Bot intent.

    Returns
    ----------
    Actions name list.
    """
    return [
        action
        for action, intents in WORKFLOW.items()
        for intent in intents
        if intent == latest_intent
    ]
class ReportForm(FormAction):
    """Conversational form that collects everything needed for a report.

    Gathers the project (plus an explicit selection step when the user
    asked to "Pick a project") and a reporting period, then builds and
    sends the report on submit.
    """

    def name(self) -> Text:
        return "report_form"

    def validate_period(
        self,
        value: Text,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> Dict[Text, Any]:
        """Validate period value.

        The slot is filled from free text and is expected to hold a JSON
        list (presumably sent by the calendar widget — confirm against the
        client); anything else resets the slot so the form asks again.
        """
        try:
            if isinstance(loads(value), list):
                return {"period": value}
            else:
                dispatcher.utter_message("Incorrect date. Please try again")
                return {"period": None}
        # JSONDecodeError covers malformed JSON; TypeError non-string values.
        except (JSONDecodeError, TypeError):
            dispatcher.utter_message("Incorrect date. Please try again")
            return {"period": None}

    @staticmethod
    def required_slots(tracker):
        # "Pick a project" is a sentinel meaning the user has not chosen a
        # concrete project yet, so insert an explicit selection step.
        required_slots = ["project", "period"]
        if tracker.get_slot("project") == "Pick a project":
            required_slots.insert(1, "project_selection")
        return required_slots

    def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:
        # "project" comes from NLU entity extraction; the other two slots
        # are filled verbatim from whatever the user typed or clicked.
        return {
            "project": self.from_entity(entity="project"),
            "project_selection": self.from_text(),
            "period": self.from_text(),
        }

    def request_next_slot(
        self,
        dispatcher: "CollectingDispatcher",
        tracker: "Tracker",
        domain: Dict[Text, Any],
    ) -> Optional[List[EventType]]:
        """Prompt for the first still-unfilled slot; abort if no issue data."""
        for slot in self.required_slots(tracker):
            if self._should_request_slot(tracker, slot):
                # Without uploaded issues no report can be built: deactivate
                # the form and clear any partially collected answers.
                if not check_issues():
                    dispatcher.utter_message(
                        text="Oops! Bugs haven't been uploaded yet. Please try again later"
                    )
                    return [Form(None), AllSlotsReset()]
                if slot == "project_selection":
                    # json_message with operation "filtration" — presumably
                    # rendered as a project-filter widget by the client.
                    response = request_values("Project")
                    response["operation"] = "filtration"
                    dispatcher.utter_message(
                        json_message=response, timeout=100,
                    )
                elif slot == "period":
                    # json_message with operation "calendar" — presumably
                    # rendered as a date picker by the client.
                    response = {
                        "operation": "calendar",
                        "title": "Please choose a date",
                    }
                    dispatcher.utter_message(json_message=response)
                else:
                    dispatcher.utter_message(
                        template=f"utter_ask_{slot}", **tracker.slots
                    )
                return [SlotSet(REQUESTED_SLOT, slot)]
        # no more required slots to fill
        return

    def submit(
        self,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> List[Dict]:
        """Generate the report from the collected slots and send it back."""
        payload = generate_report_payload(tracker)
        message = generate_report(payload)
        if not message.get("filename"):
            # No file produced — the chosen filters matched no data.
            message = "Oops! There is no data you’re looking for 😔"
            dispatcher.utter_message(text=message, timeout=100)
        else:
            message["operation"] = "report"
            message["filters"] = payload
            dispatcher.utter_message(json_message=message, timeout=100)
        return [AllSlotsReset()]
class ActionFAQSelector(Action):
    """Answer an FAQ intent by replaying its configured responses."""

    def name(self) -> Text:
        return "action_faq_selector"

    def run(
        self,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> List[Dict]:
        """Send every canned FAQ message registered for the latest intent."""
        faq_intent = tracker.latest_message["intent"].get("name")
        for reply in RESPONSES.get("faq").get(faq_intent):
            dispatcher.utter_message(text=reply)
        return []
class ActionCustomFallback(Action):
    """Fallback action for messages no story/intent handled directly.

    Tries to map the unhandled intent to helper actions via
    get_action_with_help_intent() before giving a generic "cannot help".
    """

    def name(self) -> Text:
        return "action_custom_fallback"

    def run(
        self,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> List[Dict]:
        latest_intent = tracker.latest_message["intent"].get("name")
        actions = get_action_with_help_intent(latest_intent)
        if actions:
            for action in actions:
                # Skip the action that just ran so the bot does not repeat itself.
                if action != tracker.latest_action_name:
                    if action == "action_faq_selector":
                        # FAQ responses need the selector's run() logic, not
                        # a plain template utterance.
                        ActionFAQSelector().run(
                            dispatcher=dispatcher,
                            tracker=tracker,
                            domain=domain,
                        )
                    else:
                        dispatcher.utter_message(template=action)
        elif (
            # NOTE(review): events[-4] assumes a fixed event layout right
            # after the "learn more?" question — fragile; confirm against
            # the actual tracker event stream.
            latest_intent == "affirm"
            and tracker.events[-4].get("text") == "Do you want to learn more?"
        ):
            dispatcher.utter_message(
                template="utter_more_details_analysis_and_training"
            )
        else:
            dispatcher.utter_message(template="utter_cannot_help")
        # Deactivate any active form and wipe slots after a fallback.
        return [Form(None), AllSlotsReset()]
| [
"litvinov.ivan44@gmail.com"
] | litvinov.ivan44@gmail.com |
761a7acc9b2a233cc75164049125415fb69cae73 | 4c5608f20fa2580774d734d94198dd10648e4339 | /test/vpp_ipip_tun_interface.py | 6e5ade6eb3df3d311f1a783d8d2216bccaf237fb | [
"Apache-2.0"
] | permissive | mojtaba-eshghie/VPP-In-Situ-IOAM | 3d1c3d01752a7934d2f060326674280e0bd93413 | efebd91195eb1b0d98a4a1f5efd962ae79c77be6 | refs/heads/master | 2022-12-10T13:37:04.644952 | 2020-05-29T11:42:36 | 2020-05-29T11:42:36 | 194,249,816 | 2 | 0 | Apache-2.0 | 2022-12-08T05:17:31 | 2019-06-28T09:50:05 | C | UTF-8 | Python | false | false | 1,159 | py | from vpp_tunnel_interface import VppTunnelInterface
from ipaddress import ip_address
class VppIpIpTunInterface(VppTunnelInterface):
    """
    VPP IP-IP tunnel interface wrapper around the ipip VPP API.
    """

    def __init__(self, test, parent_if, src, dst):
        super(VppIpIpTunInterface, self).__init__(test, parent_if)
        self.src = src
        self.dst = dst

    def add_vpp_config(self):
        """Create the tunnel in VPP and register it for test cleanup."""
        reply = self.test.vapi.ipip_add_tunnel(
            tunnel={
                'src': self.src,
                'dst': self.dst,
                'table_id': 0,
                'instance': 0xffffffff,
            })
        self.set_sw_if_index(reply.sw_if_index)
        self.test.registry.register(self, self.test.logger)

    def remove_vpp_config(self):
        """Delete the tunnel from VPP."""
        self.test.vapi.ipip_del_tunnel(sw_if_index=self._sw_if_index)

    def query_vpp_config(self):
        """Return True when VPP still reports this tunnel in its dump."""
        dump = self.test.vapi.ipip_tunnel_dump(sw_if_index=0xffffffff)
        return any(entry.tunnel.sw_if_index == self._sw_if_index
                   for entry in dump)

    def __str__(self):
        return self.object_id()

    def object_id(self):
        return "ipip-%d" % self._sw_if_index
| [
"mojtaba.eshghi@ut.ac.ir"
] | mojtaba.eshghi@ut.ac.ir |
44346aeefb47e47c8a5bb430be1212dd468b75c0 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /WLKF79mxKnhjtrFRB_15.py | ec801d798caa324b8d8c58381efa49d628ae96e0 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py |
def is_good_match(lst):
  """Sum consecutive non-overlapping pairs; odd-length lists can't pair."""
  if len(lst) % 2 != 0:
    return "bad match"
  return [lst[j] + lst[j + 1] for j in range(0, len(lst), 2)]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
b7df96df10ecc11c9aba8c67da97b71ad4c027a4 | bb150497a05203a718fb3630941231be9e3b6a32 | /models/PaddleHub/hub_all_func/all_module/all_spinalnet_res50_gemstone.py | 067d964d381f6ca7b6630d4f597a3c9537170d28 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 559 | py | """spinalnet_res50_gemstone"""
import os
import paddle
import paddlehub as hub
import cv2
# Run on GPU when this Paddle build has CUDA support, otherwise fall back
# to CPU; remember the choice in the module-level use_gpu flag.
if paddle.is_compiled_with_cuda():
    paddle.set_device("gpu")
    use_gpu = True
else:
    paddle.set_device("cpu")
    use_gpu = False
def test_spinalnet_res50_gemstone_predict():
    """Install the spinalnet_res50_gemstone module, run predict, uninstall."""
    os.system("hub install spinalnet_res50_gemstone")
    module = hub.Module(name="spinalnet_res50_gemstone")
    prediction = module.predict(["doc_img.jpeg"])
    print(prediction)
    os.system("hub uninstall spinalnet_res50_gemstone")
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
6971b11f655b673532cf1a066881ac27f4e2a1b9 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/eliben_deep-learning-samples/deep-learning-samples-master/ud730/assign5_cbow.py | d84f68e0e772e796b8949a82b3fde5dd5521b697 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 7,497 | py | from __future__ import print_function
import itertools
import math
import numpy as np
import os
import random
import tensorflow as tf
from six.moves import cPickle as pickle
from timer import Timer
from word_utils import read_data, build_dataset, report_words_distance
def generate_batch_cbow(data, batch_size, context_size):
    """
    Endlessly yield CBOW training batches from a sequence of word IDs.

    Args:
        data: List of IDs - the input sequence.
        batch_size: Number of samples to generate per batch.
        context_size:
            How many words to consider around the target word, left and
            right. With context_size=2, for "consider" as the target word
            the context is the two words before and the two words after.
    Yields:
        Pairs of (context, label) where context is an array with shape
        (batch_size, context_size * 2) and label is an array with shape
        (batch_size, 1). For each context vector, a single label is
        matched (target ID).
    """
    data_index = 0
    window_size = 2 * context_size + 1
    while True:
        context = np.zeros((batch_size, context_size * 2), dtype=np.int32)
        label = np.zeros((batch_size, 1), dtype=np.int32)
        for b in range(batch_size):
            # Build the window with explicit wrap-around. The original
            # slice data[data_index:window_end] (with a modulo-wrapped end
            # index) produced a short or empty window whenever the window
            # crossed the end of `data`, which crashed on
            # window[context_size]; indexing modulo len(data) keeps every
            # window exactly window_size long.
            window = [data[(data_index + k) % len(data)]
                      for k in range(window_size)]
            context[b, 0:context_size] = window[:context_size]
            context[b, context_size:] = window[context_size + 1:]
            label[b, 0] = window[context_size]
            data_index = (data_index + 1) % len(data)
        yield (context, label)
pickle_filename = 'textdata.pickle'
# Only the vocabulary_size most common words are retained in the dictionary.
# All others are mapped to UNK.
vocabulary_size = 50000
try:
with Timer('Loading pickle...'):
with open(pickle_filename, 'rb') as pickle_file:
save = pickle.load(pickle_file)
data = save['data']
count = save['count']
dictionary = save['dictionary']
reverse_dictionary = save['reverse_dictionary']
except:
print('No pickle... recomputing data.')
filename = 'text8.zip'
with Timer('read_data'):
words = read_data(filename)
with Timer('build_dataset'):
data, count, dictionary, reverse_dictionary = build_dataset(words)
save = {
'data': data,
'count': count,
'dictionary': dictionary,
'reverse_dictionary': reverse_dictionary,
}
with open(pickle_filename, 'wb') as pickle_file:
pickle.dump(save, pickle_file, pickle.HIGHEST_PROTOCOL)
print('First words in data:')
print(data[:50])
gen = generate_batch_cbow(data, 10, 2)
for i in range(5):
print(gen.next())
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
context_size = 2 # How many words to take for context, left and right
# Number of input words to the network
context_full_size = context_size * 2
# We pick a random validation set to sample nearest neighbors. here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.array(random.sample(range(valid_window), valid_size))
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default(), tf.device('/cpu:0'):
# Input data.
train_dataset = tf.placeholder(tf.int32,
shape=[batch_size, context_full_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Variables.
# The embeddings is a VxN matrix, where V is the vocabulary size and N
# is the embedding dimensionality.
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size
], -1.0, 1.0))
softmax_weights = tf.Variable(tf.truncated_normal(
[vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Model.
# Look up embeddings for inputs, for each input...
# The shape should be (batch_size, context_full_size, embedding_size).
# We want to average all the context vectors within each batch, so we
# reduce-mean along dimension 1.
embed = tf.nn.embedding_lookup(embeddings, train_dataset)
embed_mean = tf.reduce_mean(embed, reduction_indices=[1])
# Compute the softmax loss, using a sample of the negative labels each time.
loss = tf.reduce_mean(
tf.nn.sampled_softmax_loss(softmax_weights, softmax_biases, embed_mean,
train_labels, num_sampled, vocabulary_size))
# Optimizer.
# Note: The optimizer will optimize the softmax_weights AND the embeddings.
# This is because the embeddings are defined as a variable quantity and the
# optimizer's `minimize` method will by default modify all variable
# quantities that contribute to the tensor it is passed. See docs on
# `tf.train.Optimizer.minimize()` for more details.
optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
# Compute the similarity between minibatch examples and all embeddings.
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings,
valid_dataset)
similarity = tf.matmul(valid_embeddings,
tf.transpose(normalized_embeddings))
num_steps = 23001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print('Initialized')
print('embed shape:', embed.get_shape())
print('embed_mean shape:', embed_mean.get_shape())
initial_embeddings = embeddings.eval()
#do_report_distances(initial_embeddings)
average_loss = 0
batch_gen = generate_batch_cbow(data, batch_size, context_size)
for step, batch in itertools.izip(range(num_steps), batch_gen):
batch_data, batch_labels = batch
feed_dict = {train_dataset: batch_data, train_labels: batch_labels}
_, l = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += l
if step % 2000 == 0:
if step > 0:
average_loss = average_loss / 2000
# The average loss is an estimate of the loss over the last 2000
# batches.
print('Average loss at step %d: %f' % (step, average_loss))
average_loss = 0
# note that this is expensive (~20% slowdown if computed every 500
# steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in range(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = reverse_dictionary[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
final_embeddings = normalized_embeddings.eval()
print('final_embeddings shape:', final_embeddings.shape)
| [
"659338505@qq.com"
] | 659338505@qq.com |
357382c47b4901fd489af666983de3e30ee3f162 | f00e077c1acbed3a99d73fcc0880f9eb40b396fb | /examples/menus.py | d83b87b31b75032ef36cf99a4d07d28fd9cf35d3 | [] | no_license | rasql/cocos2d-tutorial | 089f6ddc1d2c26f2786fb78a97e1690c4ead57ff | 92bad88d51c37b30a74b32d35dac07b24a8073d6 | refs/heads/master | 2020-04-28T13:10:27.444561 | 2019-04-17T07:11:36 | 2019-04-17T07:11:36 | 175,299,934 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,215 | py | """
Raphael Holzer
16. 02. 2019
Displaying a main menu and an options menu.
"""
import cocos
import sys
from cocos.layer import *
from cocos.menu import *
from cocos.scene import *
import pyglet
pyglet.resource.path = ['../animals']
pyglet.resource.reindex()
class OptionsMenu(Menu):
    """Demo menu showing every stock cocos menu item type."""
    def __init__(self):
        super(OptionsMenu, self).__init__('Menus')
        items = []
        # Choices cycled by the MultipleMenuItem below (initial index 5 -> '50').
        self.values = ['Mute','10','20','30','40','50','60','70','80','90','100']
        # RGB tuples cycled by the ColorMenuItem below.
        self.colors = [(255, 255, 255), (255, 0, 50), (0, 255, 50), (0, 50, 255)]
        items.append(MenuItem('Menu', self.cb0))
        items.append(MultipleMenuItem('Multiple:', self.cb, self.values, 5))
        items.append(ToggleMenuItem('Toggle:', self.cb))
        items.append(EntryMenuItem('Entry:', self.cb, 'abc', 10))
        items.append(ColorMenuItem('Color:', self.cb, self.colors))
        items.append(ImageMenuItem('bird-icon.png', self.cb0))
        self.create_menu( items, zoom_in(), zoom_out() )
    def cb0(self):
        """Callback function without callback value."""
        print('cb')
    def cb(self, val):
        """Callback function with callback value."""
        print('cb value =', val)
class MainMenu(Menu):
    """Top-level menu: New / Options / Scores / Quit."""

    def __init__(self):
        super(MainMenu, self).__init__('Main')
        entries = [
            MenuItem('New', self.on_new_game),
            MenuItem('Options', self.on_options),
            MenuItem('Scores', self.on_scores),
            MenuItem('Quit', self.on_quit),
        ]
        self.create_menu(entries, shake(), shake_back())

    def on_new_game(self):
        print('new game')

    def on_options(self):
        # Child 1 of the enclosing MultiplexLayer holds the options menu.
        self.parent.switch_to(1)

    def on_scores(self):
        print('scores')

    def on_quit(self):
        print('quit')
        cocos.director.pyglet.app.exit()
        sys.exit()
def main():
    """Build the demo scene (background + stacked menus) and run it."""
    director.init(resizable=True)
    bg = cocos.layer.ColorLayer(0, 127, 127, 255)
    scene = Scene()
    # MultiplexLayer shows one child at a time; MainMenu switches to
    # OptionsMenu via switch_to(1).
    scene.add(MultiplexLayer(MainMenu(), OptionsMenu()), z=1)
    scene.add(bg, z=0 )
    director.run(scene)
if __name__ == "__main__":
    main()
"raphael.holzer@citycable.ch"
] | raphael.holzer@citycable.ch |
3134ee871bcf3dc40961cafeb278a697a4097357 | 9da79ead6d0dda0b4959c3e7f59c603c085b1f8d | /tests/conftest.py | 8e5d0ee28807793d1d9ea9ad37dc0a42e256095f | [
"MIT"
] | permissive | fariddarabi/fastapi-chameleon | 34e99385e0b66b72e30af5c2ba16d4d65e0fb3f4 | 8012037480b402d5881760c4c9f01b6c7969c086 | refs/heads/main | 2023-08-26T08:56:00.294057 | 2021-11-14T06:27:25 | 2021-11-14T06:27:25 | 427,679,374 | 0 | 0 | MIT | 2021-11-14T06:43:05 | 2021-11-13T13:56:17 | null | UTF-8 | Python | false | false | 380 | py | import pytest
from pathlib import Path
import fastapi_chameleon as fc
@pytest.fixture
def test_templates_path(pytestconfig):
    # Absolute path of the test template directory under the pytest rootdir.
    return Path(pytestconfig.rootdir, "tests", "templates")
@pytest.fixture
def setup_global_template(test_templates_path):
    # Point fastapi_chameleon at the test templates for the duration of a test.
    fc.global_init(str(test_templates_path))
    yield
    # Clear paths so as to not affect future tests
    fc.engine.clear()
| [
"antonio.feregrino@gmail.com"
] | antonio.feregrino@gmail.com |
2080b5395f84f98f67296c859811a86d994f1c2c | dac7095e7b5ad4dae993871c1ae45cbb7a5ce5f7 | /Character/23.Mayuri/Mayuri_SSS_Re.py | 2c73df441ab8236d477c19d7b2c0c843f3fb5a24 | [] | no_license | Lastation/RenewalAniChaos | d12a8423f4b83cb019495c59ed059451e67e0483 | c3edb29af58925de55c11110ccaf927d2b5d1b39 | refs/heads/master | 2023-08-24T11:28:35.614844 | 2023-08-22T21:23:14 | 2023-08-22T21:23:14 | 246,617,812 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,999 | py | import Function as f;
const s = StringBuffer();
function main(cp)
{
if (f.delay[cp] == 0)
{
if (f.count[cp] == 0)
{
if (f.loop[cp] == 0)
{
f.SquareShape(cp, 1, "Target", 50, 50);
f.SquareShape(cp, 1, " Creep. Dunkelheit", 50, 50);
MoveLocation(f.location[cp], f.heroID[cp], cp, "Anywhere");
MoveUnit(All, " Creep. Dunkelheit", cp, "[Skill]Unit_Wait_ALL", f.location[cp]);
Order(" Creep. Dunkelheit", cp, "Anywhere", Attack, f.location[cp]);
KillUnitAt(All, "Target", "Anywhere", cp);
f.SkillWait(cp, 80);
f.loop[cp] += 1;
}
else if (f.loop[cp] == 1)
{
RemoveUnitAt(All, " Creep. Dunkelheit", "Anywhere", cp);
f.SquareShape(cp, 1, "Target", 50, 50);
f.SquareShape(cp, 1, " Creep. Dunkelheit", 50, 50);
MoveLocation(f.location[cp], f.heroID[cp], cp, "Anywhere");
MoveUnit(All, " Creep. Dunkelheit", cp, "[Skill]Unit_Wait_ALL", f.location[cp]);
Order(" Creep. Dunkelheit", cp, "Anywhere", Attack, f.location[cp]);
KillUnitAt(All, "Target", "Anywhere", cp);
f.SkillWait(cp, 80);
f.loop[cp] += 1;
}
else if (f.loop[cp] == 2)
{
RemoveUnitAt(All, " Creep. Dunkelheit", "Anywhere", cp);
f.SquareShape(cp, 1, "Target", 50, 0);
f.SquareShape(cp, 1, "Target", 100, 0);
KillUnitAt(All, "Target", "Anywhere", cp);
f.SkillWait(cp, 80);
f.loop[cp] += 1;
}
else if (f.loop[cp] == 3)
{
f.EdgeShape(cp, 1, "Protoss Dark Templar", 45, 5, 100);
KillUnitAt(All, "Protoss Dark Templar", "Anywhere", cp);
f.SkillWait(cp, 80);
f.loop[cp] += 1;
}
else if (f.loop[cp] == 4)
{
f.EdgeShape(cp, 1, "40 + 1n Mutalisk", 0, 5, 100);
KillUnitAt(All, "40 + 1n Mutalisk", "Anywhere", cp);
f.EdgeShape(cp, 1, "40 + 1n Ghost", 0, 3, 100);
MoveLocation(f.location[cp], f.heroID[cp], cp, "Anywhere");
MoveUnit(All, "40 + 1n Ghost", cp, "[Skill]Unit_Wait_ALL", f.location[cp]);
Order("40 + 1n Ghost", cp, "Anywhere", Attack, f.location[cp]);
f.SkillWait(cp, 80);
f.loop[cp] += 1;
}
else if (f.loop[cp] == 5)
{
f.EdgeShape(cp, 1, "60 + 1n Danimoth", 0, 7, 150);
KillUnitAt(All, "60 + 1n Danimoth", "Anywhere", cp);
f.EdgeShape(cp, 1, "60 + 1n Archon", 0, 7, 150);
KillUnitAt(All, "60 + 1n Archon", "Anywhere", cp);
f.EdgeShape(cp, 1, "Kakaru (Twilight)", 0, 5, 100);
KillUnitAt(All, "Kakaru (Twilight)", "Anywhere", cp);
f.SkillWait(cp, 80);
f.count[cp] += 1;
f.loop[cp] = 0;
}
}
else if (f.count[cp] == 1)
{
if (f.loop[cp] < 4)
{
f.DotShape(cp, 1, "Protoss Dark Templar", 100 - 25 * f.loop[cp], 25 * f.loop[cp]);
f.DotShape(cp, 1, "Protoss Dark Templar", -100 + 25 * f.loop[cp], -25 * f.loop[cp]);
f.DotShape(cp, 1, "40 + 1n Zealot", -25 * f.loop[cp], 100 - 25 * f.loop[cp]);
f.DotShape(cp, 1, "40 + 1n Zealot", 25 * f.loop[cp], -100 + 25 * f.loop[cp]);
KillUnitAt(All, "40 + 1n Zealot", "Anywhere", cp);
KillUnitAt(All, "Protoss Dark Templar", "Anywhere", cp);
f.SkillWait(cp, 80);
f.loop[cp] += 1;
}
else if (f.loop[cp] == 4)
{
KillUnitAt(All, "40 + 1n Ghost", "Anywhere", cp);
f.SkillWait(cp, 320);
f.count[cp] += 1;
f.loop[cp] = 0;
}
}
else if (f.count[cp] == 2)
{
if (f.loop[cp] == 0)
{
f.Voice_Routine(cp, 2);
f.SquareShape(cp, 1, "50 + 1n Battlecruiser", 75, 150);
f.SquareShape(cp, 1, "60 + 1n Archon", 50, 100);
f.SquareShape(cp, 1, "50 + 1n Battlecruiser", 150, 75);
f.SquareShape(cp, 1, "60 + 1n Archon", 100, 50);
KillUnitAt(All, "50 + 1n Battlecruiser", "Anywhere", cp);
KillUnitAt(All, "60 + 1n Archon", "Anywhere", cp);
f.SkillWait(cp, 80);
f.loop[cp] += 1;
}
else if (f.loop[cp] == 1)
{
f.SquareShape(cp, 1, "Kakaru (Twilight)", 75, 150);
f.SquareShape(cp, 1, "40 + 1n Mojo", 50, 100);
f.SquareShape(cp, 1, "Kakaru (Twilight)", 150, 75);
f.SquareShape(cp, 1, "40 + 1n Mojo", 100, 50);
KillUnitAt(All, "Kakaru (Twilight)", "Anywhere", cp);
KillUnitAt(All, "40 + 1n Mojo", "Anywhere", cp);
f.SquareShape(cp, 1, "40 + 1n Ghost", 50, 100);
f.SquareShape(cp, 1, "40 + 1n Ghost", 100, 50);
MoveLocation(f.location[cp], f.heroID[cp], cp, "Anywhere");
MoveUnit(All, "40 + 1n Ghost", cp, "[Skill]Unit_Wait_ALL", f.location[cp]);
Order("40 + 1n Ghost", cp, "Anywhere", Attack, f.location[cp]);
f.SkillWait(cp, 160);
f.loop[cp] += 1;
}
else if (f.loop[cp] == 2)
{
f.SquareShape(cp, 1, "40 + 1n Mutalisk", 50, 0);
f.SquareShape(cp, 1, "40 + 1n Mutalisk", 100, 0);
KillUnitAt(All, "40 + 1n Mutalisk", "Anywhere", cp);
f.SkillWait(cp, 80);
f.loop[cp] += 1;
}
else if (f.loop[cp] == 3)
{
f.SquareShape(cp, 1, "40 + 1n Guardian", 50, 100);
f.SquareShape(cp, 1, "40 + 1n Guardian", 100, 50);
KillUnitAt(All, "40 + 1n Guardian", "Anywhere", cp);
f.SkillWait(cp, 80);
f.loop[cp] += 1;
}
else if (f.loop[cp] == 4)
{
KillUnitAt(All, "40 + 1n Ghost", "Anywhere", cp);
f.SquareShape(cp, 1, "50 + 1n Battlecruiser", 75, 150);
f.SquareShape(cp, 1, "40 + 1n Ghost", 75, 150);
f.SquareShape(cp, 1, "50 + 1n Battlecruiser", 150, 75);
f.SquareShape(cp, 1, "40 + 1n Ghost", 150, 75);
KillUnitAt(All, "50 + 1n Battlecruiser", "Anywhere", cp);
MoveLocation(f.location[cp], f.heroID[cp], cp, "Anywhere");
MoveUnit(All, "40 + 1n Ghost", cp, "[Skill]Unit_Wait_ALL", f.location[cp]);
Order("40 + 1n Ghost", cp, "Anywhere", Attack, f.location[cp]);
f.SkillWait(cp, 320);
f.count[cp] += 1;
f.loop[cp] = 0;
}
}
else if (f.count[cp] == 3)
{
KillUnitAt(All, "40 + 1n Ghost", "Anywhere", cp);
f.SkillEnd(cp);
}
}
} | [
"ghtjd000129@naver.com"
] | ghtjd000129@naver.com |
0885548cdf5bbe0cf59c4dd0eec8952cdf0b8ec4 | 0ec8af8988245d864c63d923e5524403090cd7e0 | /mitm/gifshow/addons.py | 16b3430b8b46d322369d126b417009aed7b866dc | [] | no_license | radtek/Spider | d26b685cb5e41c67c6a7ce0d632072f3cac5f061 | 5a419e8ec77915804d3e659631f09b19aa90a088 | refs/heads/master | 2022-10-30T13:13:29.736256 | 2020-06-11T03:34:58 | 2020-06-11T03:34:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | import json
import mitmproxy.http
from mitmproxy import ctx
class Download:
    """mitmproxy addon: capture gifshow hot-feed API responses to a file."""
    def response(self, flow: mitmproxy.http.HTTPFlow):
        # Only POSTs to the gifshow API are interesting.
        if flow.request.host != 'api.gifshow.com' or flow.request.method != 'POST':
            return
        url = flow.request.url
        data = flow.response.content.decode()
        # Keep only hot-feed responses that actually carry a 'feeds' key.
        if 'hot' in url and 'feeds' in json.loads(data):
            ctx.log.info('get a url:{}'.format(url))
            with open('GifShow.json', 'a', encoding='utf8')as f:
                # NOTE(review): `data` is already a JSON string, so
                # json.dumps(data) writes it double-encoded; confirm whether
                # the downstream consumer expects that or plain `data` was
                # intended.
                f.write(json.dumps(data) + '\n')
addons = [Download()]
| [
"1733776802@qq.com"
] | 1733776802@qq.com |
88419024324527e193a5c6b09a90736869273d4a | db8f0b2db2fe8c9b5d2cc2e33f669028da7830a8 | /multidimensional_lists/snake_moves.py | dea7167f02c1d7548342216f18fc2746f3691145 | [] | no_license | yordan-marinov/advanced_python | 9694184eed31a9769fa1b552b087fb7b7736af84 | 63ad860f356978c13f208f18502a39a8f23cd18b | refs/heads/master | 2023-03-29T18:04:08.252558 | 2021-03-26T13:39:39 | 2021-03-26T13:39:39 | 309,930,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | def snake_moves() -> str:
matrix = []
rows, cols = [int(i) for i in input().split()]
string = list(input())
for row_index in range(rows):
row = ""
for col_index in range(cols):
letter = string.pop(0)
row += letter
string.append(letter)
if row_index % 2 != 0:
row = row[::-1]
matrix.append(row)
return "\n".join(matrix)
print(snake_moves())
| [
"jordanmarinov8@gmail.com"
] | jordanmarinov8@gmail.com |
c01708c178cb7f4fb3e079d15cf5fe767d065105 | a4653fb6c5a7a9e6db0457480e9e860c5304b2b8 | /list/add_data.py | cd7bcc9d527b542225e807d86bed20426b453b10 | [] | no_license | pratik-iiitkalyani/Python | c315ca1f3a2446ccb871b74026edae97daec3773 | 082ae6d833f151054567d737de543898ebfe1d87 | refs/heads/master | 2020-08-07T05:59:21.930089 | 2020-01-04T17:05:27 | 2020-01-04T17:05:27 | 213,324,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # insert method(pos, element) - add data at any position in the list
fruits = ["mango","grapes"]
fruits.insert(1,"orange")
print(fruits)
# join two list
fruits1 =[1,3]
c = fruits1+fruits # fruits1 element come first
print(c)
#extend method
fruits.extend(fruits1) # data of fruits1 added in fruits
print(fruits)
fruits.append(fruits1) # list inside list
print(fruits)
| [
"ts-pratik.kumar@rakuten.com"
] | ts-pratik.kumar@rakuten.com |
76b439eb64e7052b75da1d1be31c934fb3855f54 | 71d608a5e6cc30bef2004a4008d62143de07f50c | /plots/Techinical_indicators_for_Goldman_Sachs_last_400_days.py | dd9cb186948e5a5eedbad60bf8c3673ba5bb96d3 | [] | no_license | webclinic017/quant-3 | f4fc462b3964717e2435e09dd0be35548066a967 | 98b31d92b89da10a409947b4bea653ceb4319af6 | refs/heads/master | 2022-01-27T21:06:39.345394 | 2019-07-21T10:35:12 | 2019-07-21T10:35:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,705 | py | import matplotlib.pyplot as plt
from Data import Data, get_technical_indicators
def plot_technical_indicators(dataset,
                              last_days):
    """Plot price/MAs/Bollinger bands plus MACD for the trailing window.

    Parameters
    ----------
    dataset : pandas.DataFrame
        Must contain columns 'ma7', 'ma21', 'upper_band', 'lower_band',
        'MACD', 'log_momentum' and the close-price column in position 1.
    last_days : int
        Number of trailing rows to plot.

    Side effects: saves a PNG under ../assets/ and shows the figure.
    """
    ticker = dataset.columns.values[1]
    plt.figure(figsize=(16, 10), dpi=100)
    shape_0 = dataset.shape[0]
    xmacd_ = shape_0 - last_days
    dataset = dataset.iloc[-last_days:, :]
    # x-axis values for fill_between (index of the trailing window).
    # (A dead `x_ = range(...)` assignment that was immediately overwritten
    # has been removed.)
    x_ = list(dataset.index)
    # First subplot: closing price, moving averages and Bollinger bands.
    plt.subplot(2, 1, 1)
    plt.plot(dataset['ma7'], label='MA 7', color='g', linestyle='--')
    plt.plot(dataset[ticker], label='Closing Price', color='b')
    plt.plot(dataset['ma21'], label='MA 21', color='r', linestyle='--')
    plt.plot(dataset['upper_band'], label='Upper Band', color='c')
    plt.plot(dataset['lower_band'], label='Lower Band', color='c')
    plt.fill_between(x_, dataset['lower_band'], dataset['upper_band'],
                     alpha=0.35)
    plt.title('Technical indicators for Goldman Sachs - last {} days.'.format(
        last_days))
    plt.ylabel('USD')
    plt.legend()
    # Second subplot: MACD and momentum.
    plt.subplot(2, 1, 2)
    plt.title('MACD')
    plt.plot(dataset['MACD'], label='MACD', linestyle='-.')
    plt.hlines(15, xmacd_, shape_0, colors='g', linestyles='--')
    plt.hlines(-15, xmacd_, shape_0, colors='g', linestyles='--')
    plt.plot(dataset['log_momentum'], label='Momentum', color='b',
             linestyle='-')
    # Fix: add the legend BEFORE saving -- savefig captures the figure state
    # at call time, so the legend was previously missing from the PNG.
    plt.legend()
    plt.savefig(
        '../assets/Techinical_indicators_for_Goldman_Sachs_last_400_days.png')
    plt.show()
if __name__ == "__main__":
    # Fetch Goldman Sachs (GS) closing prices, derive technical indicators,
    # then plot the last 400 trading days.
    data = Data('GS')
    df = data.get_close_data()
    df_ti = get_technical_indicators(df)
    plot_technical_indicators(df_ti, 400)
| [
"syyun@snu.ac.kr"
] | syyun@snu.ac.kr |
6ee1904358d0968c67a404a23c8686c263e0b35c | 4007632edd395d243bca022418848a2ff54409c8 | /学习/atest6.py | 54471b4561bb9f98d4319de5c82e87a0702bb3bf | [] | no_license | 549982170/python_learning | d80a9403cbe2eb8304aba50ff373b2b67df095e2 | 2c3f73718e0a6d9d4923a2e0f22ff2d4230357e9 | refs/heads/master | 2021-06-22T04:32:06.286691 | 2020-12-10T03:29:56 | 2020-12-10T03:29:56 | 101,596,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | # coding:utf-8
# !/user/bin/python
import requests
import json
headers = {'Authorization': "Token 03f7409574a55e98cd33b7cc44bcbe968fd0988f"}
host = "http://127.0.0.1:8000/"
url = "v5/discovery/search/service/"
# url = "v1/generator/test/mytest/"
url = host + url
data = {"keyword": "ss"}
re = requests.post(url)
re = requests.post(url, data=data, headers=headers)
print re.text
#
# a = "ssddda"
# print a[-1:]
| [
"549982170@qq.com"
] | 549982170@qq.com |
d5526e5e11970b9cf7389f8167209a965ef78e49 | 7bc54bae28eec4b735c05ac7bc40b1a8711bb381 | /src/misc/show_checkpoint_vars.py | d869d35e1eb9f214c813932df22cedf891373257 | [] | no_license | clover3/Chair | 755efd4abbd5f3f2fb59e9b1bc6e7bc070b8d05e | a2102ebf826a58efbc479181f1ebb5de21d1e49f | refs/heads/master | 2023-07-20T17:29:42.414170 | 2023-07-18T21:12:46 | 2023-07-18T21:12:46 | 157,024,916 | 0 | 0 | null | 2023-02-16T05:20:37 | 2018-11-10T21:55:29 | Python | UTF-8 | Python | false | false | 642 | py | from collections import OrderedDict
import tensorflow as tf
def load_checkpoint_vars(checkpoint_path):
    """Read every variable of a TF checkpoint into an OrderedDict name -> tensor."""
    reader = tf.train.load_checkpoint(checkpoint_path)
    # list_variables yields (name, shape) pairs; only the name is needed.
    variables = tf.train.list_variables(checkpoint_path)
    return OrderedDict((name, reader.get_tensor(name)) for name, _ in variables)
def load_checkpoint_vars_v2(checkpoint_path):
    """Same as load_checkpoint_vars but through the tf.compat.v1 API."""
    reader = tf.compat.v1.train.load_checkpoint(checkpoint_path)
    # Only the variable names from the (name, shape) pairs are used.
    variables = tf.compat.v1.train.list_variables(checkpoint_path)
    return OrderedDict((name, reader.get_tensor(name)) for name, _ in variables)
| [
"lesterny@gmail.com"
] | lesterny@gmail.com |
e9a860e399fac3764ecab8ebb8b31901390468c7 | 491c7a6b2eeaccf9e8421c0b1ec5a2d9d77407e2 | /ceres/consensus/all_coins_default_constants/chaingreen_default_constants.py | 7fde41f6c0352db6f6c46ba63b01b94d2a6a068e | [
"Apache-2.0"
] | permissive | zhenglcc/ceres-combineharvester | 25609627a4a16b3e3067c2e4296de82a65f3b2bd | 47b685a63fb5f38ac9e2eceb9724a562feb06ee9 | refs/heads/main | 2023-08-25T04:49:31.531840 | 2021-10-30T09:06:08 | 2021-10-30T09:06:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,781 | py | from ceres.util.ints import uint64
from ceres.consensus.constants import ConsensusConstants
# Consensus parameters for this chain, passed verbatim into ConsensusConstants.
# Despite the `testnet_` name, the values mirror chia-style mainnet defaults
# unless an inline comment says otherwise.
testnet_kwargs = {
    "SLOT_BLOCKS_TARGET": 32,
    "MIN_BLOCKS_PER_CHALLENGE_BLOCK": 16,  # Must be less than half of SLOT_BLOCKS_TARGET
    "MAX_SUB_SLOT_BLOCKS": 128,  # Must be less than half of SUB_EPOCH_BLOCKS
    "NUM_SPS_SUB_SLOT": 64,  # Must be a power of 2
    "SUB_SLOT_ITERS_STARTING": 2 ** 16,
    # DIFFICULTY_STARTING is the starting difficulty for the first epoch, which is then further
    # multiplied by another factor of DIFFICULTY_CONSTANT_FACTOR, to be used in the VDF iter calculation formula.
    "DIFFICULTY_CONSTANT_FACTOR": 2 ** 20,
    "DIFFICULTY_STARTING": 1,
    "DIFFICULTY_CHANGE_MAX_FACTOR": 81,  # The next difficulty is truncated to range [prev / FACTOR, prev * FACTOR]
    # "DIFFICULTY_CHANGE_MAX_FACTOR_v1_2_0": 81,  # This difficulty change to be applied with v1.2.0
    # "v1_2_0_ACTIVATION_BLOCK": 170496,  # activation of v1.2.0 rules height
    # These 3 constants must be changed at the same time
    "SUB_EPOCH_BLOCKS": 384,  # The number of blocks per sub-epoch, mainnet 384
    "EPOCH_BLOCKS": 4608,  # The number of blocks per epoch, mainnet 4608. Must be multiple of SUB_EPOCH_BLOCKS
    "SIGNIFICANT_BITS": 8,  # The number of bits to look at in difficulty and min iters. The rest are zeroed
    "DISCRIMINANT_SIZE_BITS": 1024,  # Max is 1024 (based on ClassGroupElement int size)
    "NUMBER_ZERO_BITS_PLOT_FILTER": 9,  # H(plot signature of the challenge) must start with these many zeroes
    "MIN_PLOT_SIZE": 32,  # 32 for mainnet
    "MAX_PLOT_SIZE": 50,
    "SUB_SLOT_TIME_TARGET": 600,  # The target number of seconds per slot, mainnet 600
    "NUM_SP_INTERVALS_EXTRA": 3,  # The number of sp intervals to add to the signage point
    "MAX_FUTURE_TIME": 5 * 60,  # The next block can have a timestamp of at most these many seconds in the future
    "NUMBER_OF_TIMESTAMPS": 11,  # Than the average of the last NUMBER_OF_TIMESTAMPS blocks
    # Used as the initial cc rc challenges, as well as first block back pointers, and first SES back pointer
    # We override this value based on the chain being run (testnet0, testnet1, mainnet, etc)
    # Default used for tests is std_hash(b'')
    "GENESIS_CHALLENGE": bytes.fromhex("2cef93a2ebf7c0546609311684e484f08555d3c0e4336a30a044e1ba7f26f691"),
    # Forks of chia should change this value to provide replay attack protection. This is set to mainnet genesis chall
    "AGG_SIG_ME_ADDITIONAL_DATA": bytes.fromhex("0235e47be80dbba72e8e105f87776fe16690838dde7f71e8a77086c0374bcaf3"),
    "GENESIS_PRE_FARM_POOL_PUZZLE_HASH": bytes.fromhex(
        "42e52e6fac97b0f409c372865989bc9443755887ab24cab3b00bcd1fdacad7f5"
    ),
    "GENESIS_PRE_FARM_FARMER_PUZZLE_HASH": bytes.fromhex(
        "427930199ca2f1d7669de76057fc215abed4cf28e33ffdf662873f6d6622e3e6"
    ),
    "MAX_VDF_WITNESS_SIZE": 64,
    # Size of mempool = 50x the size of block
    "MEMPOOL_BLOCK_BUFFER": 50,
    # Max coin amount, fits into 64 bits
    "MAX_COIN_AMOUNT": uint64((1 << 64) - 1),
    # Max block cost in clvm cost units
    "MAX_BLOCK_COST_CLVM": 11000000000,
    # The cost per byte of generator program
    "COST_PER_BYTE": 12000,
    "WEIGHT_PROOF_THRESHOLD": 2,
    "BLOCKS_CACHE_SIZE": 4608 + (128 * 4),
    "WEIGHT_PROOF_RECENT_BLOCKS": 1000,
    "MAX_BLOCK_COUNT_PER_REQUESTS": 32,  # Allow up to 32 blocks per request
    # "INITIAL_FREEZE_END_TIMESTAMP": 1621630800,  # 2021-05-21T21:00:00Z
    "NETWORK_TYPE": 0,
    "MAX_GENERATOR_SIZE": 1000000,
    "MAX_GENERATOR_REF_LIST_SIZE": 512,  # Number of references allowed in the block generator ref list
    "POOL_SUB_SLOT_ITERS": 37600000000,  # iters limit * NUM_SPS
}
# Instantiated once at import time; every consumer shares this object.
DEFAULT_CONSTANTS = ConsensusConstants(**testnet_kwargs)  # type: ignore
| [
"hulatang_eric@163.com"
] | hulatang_eric@163.com |
ec35ca8c45e83ee2346b6490fe0798b8a2c19343 | 177464b6a6199b72d5f9342423fd507630e7ef8c | /setup.py | 857b0b44d0307a6e4622d21dbb085cd8f1d2f053 | [
"BSD-2-Clause"
] | permissive | yyr93520/sne | 19f58f40840d18665fbdaff552396c0ce9ccd180 | caaa2cce9b623f5ffc411ece4c9176872a672793 | refs/heads/master | 2021-08-23T14:48:42.730451 | 2017-12-05T08:52:05 | 2017-12-05T08:52:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,358 | py | from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md')) as f:
    long_description = f.read()

setup(
    name='learning sparse network embedding for identity user retrieval',
    version='1.0.0.dev',
    url='https://github.com/Allen517/sne',
    description='',
    long_description=long_description,
    keywords='',
    author='',
    maintainer='King Wang',
    maintainer_email='wangyongqing.casia@gmail.com',
    license='BSD',
    packages=find_packages(exclude=('tests', 'tests.*')),
    package_data={
    },
    entry_points={
    },
    classifiers=[
        # NOTE(review): 'Framework :: SNE' is not a registered trove
        # classifier -- confirm whether it should be dropped.
        'Framework :: SNE',
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: Chinese (Simplified)',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP',
        # Fix: this classifier string was corrupted ('Softwamax_shinglere
        # Development ...'); restored to the valid trove classifier.
        'Topic :: Software Development :: Libraries :: Application Frameworks',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    install_requires=[
        'tensorflow==1.3.0',
        'numpy',
    ],
)
| [
"wangyongqing.casia@gmail.com"
] | wangyongqing.casia@gmail.com |
1418f12136d730e578a93b5499f6ad816057a019 | 5ce1c0ab1b6147428fc30bcd1698e4d0e53b688e | /829.py | 4fe73b828e6983e64730ae6da4385af8d2cf971f | [] | no_license | junyang10734/leetcode-python | 035b12df3f7d9fc33553140d1eb0692750b44f0a | eff322f04d22ffbc4f9b10e77f97c28aac5c7004 | refs/heads/master | 2023-07-22T11:16:38.740863 | 2023-07-14T00:22:00 | 2023-07-14T00:22:00 | 189,197,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # 829. Consecutive Numbers Sum
# Math
# https://leetcode.com/problems/consecutive-numbers-sum/solution/
# runtime: O(N**0.5)
class Solution:
    def consecutiveNumbersSum(self, N: int) -> int:
        """Count the ways to write N as a sum of one or more consecutive
        positive integers.

        A run of k consecutive integers starting at a >= 1 sums to
        k*a + k*(k+1)/2 - k, so k works iff (N - k*(k+1)//2) is a
        non-negative multiple of k.  Runs in O(sqrt(N)) since
        k*(k+1)/2 <= N bounds k.
        """
        count = 0
        k = 1
        # Pure integer arithmetic: fixes the original's use of `ceil`, which
        # was never imported, and avoids float rounding entirely.
        while k * (k + 1) // 2 <= N:
            if (N - (k + 1) * k // 2) % k == 0:
                count += 1
            k += 1
        return count
"48000364+junyang10734@users.noreply.github.com"
] | 48000364+junyang10734@users.noreply.github.com |
c14a556a8fc7a81e031a648406e986ae7db52409 | 9e5bf5e7d0bdfa4ff2aca65ac306ed801d146608 | /python-06-stdlib-review/chapter-03-Algorithms/3.2-itertools/py_01_itertools.py | ed3063a35bb616b55116f551571022cacd42e8f5 | [] | no_license | AuroraBoreas/python_advanced_tricks | 90b07967789960beec381de676459c1e84b95860 | ba0940e25eda52345a27cf9ddffed9d18fa2a031 | refs/heads/master | 2022-11-27T19:09:45.666017 | 2020-08-11T17:33:11 | 2020-08-11T17:33:11 | 275,083,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | """
P163-P176, itertools: iterator functions
! What?
! Why?
! How?
itertools
|-- merging and splitting iterators "分合"
|-- converting inputs "转"
|-- producing new values "生"
|-- filtering "滤"
|-- grouping data "群"
|-- combining inputs "主"
""" | [
"noreply@github.com"
] | AuroraBoreas.noreply@github.com |
402f8b11f4f79126a7238680b4d8b3c9291b6525 | 2d930aadf19b2ad6ea49725099d2f37475cd57f8 | /test/functional/signrawtransactions.py | 01b71fc8133e8036171530c690526d90558605d1 | [
"MIT"
] | permissive | stratton-oakcoin/oakcoin | ea83774c9f6ea64adb8832770e6219ffb31edef6 | fe53193a50bd3674211448f1dcc39c6f9f042bb2 | refs/heads/master | 2021-01-20T13:22:05.877005 | 2017-05-07T10:09:57 | 2017-05-07T10:09:57 | 90,477,972 | 1 | 2 | null | 2017-05-07T10:09:57 | 2017-05-06T16:58:05 | C++ | UTF-8 | Python | false | false | 5,919 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Oakcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction RPC."""
from test_framework.test_framework import OakcoinTestFramework
from test_framework.util import *
class SignRawTransactionsTest(OakcoinTestFramework):
    """Functional test: exercises the signrawtransaction RPC on one node."""

    def __init__(self):
        super().__init__()
        # Start from an empty chain with a single node.
        self.setup_clean_chain = True
        self.num_nodes = 1

    def successful_signing_test(self):
        """Create and sign a valid raw transaction with one input.

        Expected results:
        1) The transaction has a complete set of signatures
        2) No script verification error occurred"""
        privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N', 'cVKpPfVKSJxKqVpE9awvXNWuLHCa5j5tiE7K6zbUSptFpTEtiFrA']

        inputs = [
            # Valid pay-to-pubkey scripts
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
             'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
            {'txid': '83a4f6a6b73660e13ee6cb3c6063fa3759c50c9b7521d0536022961898f4fb02', 'vout': 0,
             'scriptPubKey': '76a914669b857c03a5ed269d5d85a1ffac9ed5d663072788ac'},
        ]

        outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}

        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)

        # 1) The transaction has a complete set of signatures
        assert 'complete' in rawTxSigned
        assert_equal(rawTxSigned['complete'], True)

        # 2) No script verification error occurred
        assert 'errors' not in rawTxSigned

        # Check that signrawtransaction doesn't blow up on garbage merge attempts
        dummyTxInconsistent = self.nodes[0].createrawtransaction([inputs[0]], outputs)
        rawTxUnsigned = self.nodes[0].signrawtransaction(rawTx + dummyTxInconsistent, inputs)
        assert 'complete' in rawTxUnsigned
        assert_equal(rawTxUnsigned['complete'], False)

        # Check that signrawtransaction properly merges unsigned and signed txn, even with garbage in the middle
        rawTxSigned2 = self.nodes[0].signrawtransaction(rawTxUnsigned["hex"] + dummyTxInconsistent + rawTxSigned["hex"], inputs)
        assert 'complete' in rawTxSigned2
        assert_equal(rawTxSigned2['complete'], True)
        assert 'errors' not in rawTxSigned2

    def script_verification_error_test(self):
        """Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.

        Expected results:
        3) The transaction has no complete set of signatures
        4) Two script verification errors occurred
        5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
        6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
        privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']

        inputs = [
            # Valid pay-to-pubkey script
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
            # Invalid script
            {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
            # Missing scriptPubKey
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
        ]

        scripts = [
            # Valid pay-to-pubkey script
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
             'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
            # Invalid script
            {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
             'scriptPubKey': 'badbadbadbad'}
        ]

        outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}

        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)

        # Make sure decoderawtransaction is at least marginally sane
        decodedRawTx = self.nodes[0].decoderawtransaction(rawTx)
        for i, inp in enumerate(inputs):
            assert_equal(decodedRawTx["vin"][i]["txid"], inp["txid"])
            assert_equal(decodedRawTx["vin"][i]["vout"], inp["vout"])

        # Make sure decoderawtransaction throws if there is extra data
        assert_raises(JSONRPCException, self.nodes[0].decoderawtransaction, rawTx + "00")

        rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)

        # 3) The transaction has no complete set of signatures
        assert 'complete' in rawTxSigned
        assert_equal(rawTxSigned['complete'], False)

        # 4) Two script verification errors occurred
        assert 'errors' in rawTxSigned
        assert_equal(len(rawTxSigned['errors']), 2)

        # 5) Script verification errors have certain properties
        assert 'txid' in rawTxSigned['errors'][0]
        assert 'vout' in rawTxSigned['errors'][0]
        assert 'scriptSig' in rawTxSigned['errors'][0]
        assert 'sequence' in rawTxSigned['errors'][0]
        assert 'error' in rawTxSigned['errors'][0]

        # 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
        assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
        assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
        assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
        assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])

    def run_test(self):
        # Run both scenarios against the freshly started node.
        self.successful_signing_test()
        self.script_verification_error_test()
if __name__ == '__main__':
    # Test-framework entry point: spins up the node(s) and calls run_test().
    SignRawTransactionsTest().main()
| [
"s.matthew.english@gmail.com"
] | s.matthew.english@gmail.com |
678efd8416616f304423b9610caa21b3b0aa6bb3 | b70f99aee135ea27a03951e75524ede3be18d316 | /test/tob_pyner/fill_fields/bin/fill/customer_bin.py | f3be56157b686a98e42d5ab2d0c2ff980456bc57 | [] | no_license | zlbl/AutoTestPytestN | 968e11b205d824eca0fcb73a166f21208ce405d5 | a86f71a4ad50796f5031cdb40c4d5a808f421383 | refs/heads/master | 2020-11-28T19:27:21.056844 | 2019-12-24T08:25:45 | 2019-12-24T08:25:45 | 229,902,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,096 | py | from datetime import datetime
from test.tob_pyner.fill_fields.bin.fill.update_cust_info_sql import *
from test.tob_pyner.fill_fields.util.MySqlHelper import select_insert, update
def generate_customer():
    """Generate customer ids and platform customer ids (DB side effects only)."""
    # ALTER TABLE pyner_customer AUTO_INCREMENT = 1000000001;
    select_insert(generateSQL)
    # Generate platform customer ids.  (Fix: this note was previously a stray
    # string literal inside the function body -- a no-op statement.)
    update(updatePlatformCredInfoUSCCSQL)
    update(updatePlatformCredInfoBLCSQL)
    select_insert(generatePlatformSQL)
    print('generate_customer Finish', datetime.now())
def update_user_customer_id():
    """Propagate customer ids into the user table."""
    update(updateUserCustomerIdSQL)
    print('update_user_customer_id Finish', datetime.now())
def update_person_customer_id():
    """Propagate customer ids into the person table."""
    update(updatePersonCustomerIdSQL)
    print('update_person_customer_id Finish', datetime.now())
def update_enterprise_customer_id():
    """Propagate customer ids into the enterprise table."""
    update(updateEnterpriseCustomerIdSQL)
    print('update_enterprise_customer_id Finish', datetime.now())
def update_platform_customer_id():
    """Propagate customer ids into the platform table."""
    update(updatePlatformCustomerIdSQL)
    print('update_platform_customer_id Finish', datetime.now())
def update_account_customer_id():
    """Propagate customer ids into both account tables."""
    # User account table
    update(updateAccountCustomerIdSQL1)
    # Platform account table
    update(updateAccountCustomerIdSQL2)
    print('update_account_customer_id Finish', datetime.now())
def update_bid_customer_id():
    """Propagate customer ids into the bid table."""
    update(updateBidCustomerIdSQL)
    print('update_bid_customer_id Finish', datetime.now())
def update_obligatory_customer_id():
    """Propagate customer ids into the obligatory table."""
    update(updateObligatoryCustomerIdSQL)
    print('update_obligatory_customer_id Finish', datetime.now())
def update_trade_pay_customer_id():
    """Propagate payer customer ids into the trade table."""
    update(updateTradePayCustomerIdSQL)
    print('update_trade_pay_customer_id Finish', datetime.now())
def update_trade_receive_customer_id():
    """Propagate payee customer ids into the trade table."""
    update(updateTradeReceiveCustomerIdSQL)
    print('update_trade_receive_customer_id Finish', datetime.now())
if __name__ == '__main__':
    # Entry point: back-fill customer ids into every dependent table.
    # (Id generation itself is commented out -- assumed to have been run
    # separately; see generate_customer_id below.)
    print('开始时间:', datetime.now())
    # generate_customer()
    update_user_customer_id()
    update_person_customer_id()
    update_enterprise_customer_id()
    update_platform_customer_id()
    update_account_customer_id()
    update_bid_customer_id()
    update_obligatory_customer_id()
    update_trade_pay_customer_id()
    update_trade_receive_customer_id()
    print('结束时间:', datetime.now())
def generate_customer_id():
    """Full pipeline: generate customer ids, then propagate them everywhere."""
    # Generate customerId
    generate_customer()
    # Propagate customerId into the User table
    update_user_customer_id()
    # Propagate customerId into the Person table
    update_person_customer_id()
    # Propagate customerId into the Enterprise table
    update_enterprise_customer_id()
    # Propagate customerId into the Platform table
    update_platform_customer_id()
    # Propagate customerId into the Account tables
    update_account_customer_id()
    # Propagate customerId into the Bid table
    update_bid_customer_id()
    # Propagate customerId into the Obligatory table
    update_obligatory_customer_id()
    # Propagate the payer customerId into the Trade table
    update_trade_pay_customer_id()
    # Propagate the payee customerId into the Trade table
    update_trade_receive_customer_id()
| [
"zhanwei@treefinance.com.cn"
] | zhanwei@treefinance.com.cn |
78f5536e736c9e86de6b0ca888abfd4a50ce2276 | 22a243d9535602e2f56b4e8d78f743b6240afc94 | /image_captioning/coco_caption/pycocoevalcap/bleu/bleu.py | 1c3a2c5a4780bbba09e046123fced9affa79d062 | [
"MIT",
"BSD-2-Clause-Views"
] | permissive | RitaRamo/memory-augmented-nn | 05bc969fa28225e95b38d6f1d5f0fa09a94b30db | 0553df245a6976ad1c15075a65df93591e2c2632 | refs/heads/main | 2023-03-08T13:59:52.530224 | 2021-02-22T19:03:25 | 2021-02-22T19:03:25 | 309,144,074 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,311 | py | #!/usr/bin/env python
#
# File Name : bleu.py
#
# Description : Wrapper for BLEU scorer.
#
# Creation Date : 06-01-2015
# Last Modified : Thu 19 Mar 2015 09:13:28 PM PDT
# Authors : Hao Fang <hfang@uw.edu> and Tsung-Yi Lin <tl483@cornell.edu>
from bleu_scorer import BleuScorer
class Bleu:
    """Thin wrapper around BleuScorer for COCO-style caption evaluation."""

    def __init__(self, n=4):
        # default compute Bleu score up to 4
        self._n = n
        self._hypo_for_image = {}
        self.ref_for_image = {}

    def compute_score(self, gts, res):
        """Score hypotheses `res` against references `gts`, both keyed by
        image id.  Returns (overall BLEU-1..n list, per-image score lists)."""
        assert(gts.keys() == res.keys())
        imgIds = gts.keys()

        bleu_scorer = BleuScorer(n=self._n)
        for img_id in imgIds:
            hypo = res[img_id]
            ref = gts[img_id]

            # Fix: removed leftover per-image debug prints that polluted
            # every evaluation run; also renamed the loop variable so it no
            # longer shadows the builtin `id`.

            # Sanity check: exactly one hypothesis, at least one reference.
            assert(type(hypo) is list)
            assert(len(hypo) == 1)
            assert(type(ref) is list)
            assert(len(ref) >= 1)

            bleu_scorer += (hypo[0], ref)

        # score, scores = bleu_scorer.compute_score(option='shortest')
        score, scores = bleu_scorer.compute_score(option='closest', verbose=1)
        # score, scores = bleu_scorer.compute_score(option='average', verbose=1)

        # return (bleu, bleu_info)
        return score, scores

    def method(self):
        return "Bleu"
| [
"rita.mparada.ramos@gmail.com"
] | rita.mparada.ramos@gmail.com |
67b81c79caf9d6e7e0ca40ffe64b68a22a98c497 | 529694a395c88a1c0c097f80456a0eb01144131f | /tensorpac/spectral.py | 994abb93382c017eed859bd9f28e43ec0298d8f4 | [
"BSD-3-Clause"
] | permissive | cjayb/tensorpac | 0d83c4dcec2b40c75c119f243070faacb3b220bf | e4ee18ca3faa8ad9f3cdd4f5233be224f12523af | refs/heads/master | 2023-01-27T12:49:32.841580 | 2020-12-08T14:44:35 | 2020-12-08T14:44:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,860 | py | """Extract spectral informations from data."""
import numpy as np
from joblib import Parallel, delayed
from scipy.signal import hilbert, filtfilt
from scipy import fftpack
from tensorpac.config import CONFIG
def hilbertm(x):
    """Hilbert transform along the last axis, padded to a fast FFT length.

    ``x`` must have a shape of (..., n_pts); the output is truncated back to
    the original number of points.
    """
    n_pts = x.shape[-1]
    fast_len = fftpack.helper.next_fast_len(n_pts)
    analytic = hilbert(x, fast_len, axis=-1)
    return analytic[..., :n_pts]
def spectral(x, sf, f, stype, dcomplex, cycle, width, n_jobs):
    """Extract spectral informations from data.

    Parameters
    ----------
    x : array_like
        Array of data
    sf : float
        Sampling frequency
    f : array_like
        Frequency vector of shape (N, 2)
    stype : string
        Spectral informations to extract (use either 'pha' or 'amp')
    dcomplex : string
        Complex decomposition type. Use either 'hilbert' or 'wavelet'
    cycle : int
        Number of cycles to use for fir1 filtering.
    width : int
        Width of the wavelet.
    n_jobs : int
        Number of jobs to use. If jobs is -1, all of them are going to be
        used.
    """
    n_freqs = f.shape[0]
    # Filtering + complex decomposition.
    # Fix: string comparisons now use `==`; the original compared with `is`,
    # which only works by accident of CPython interning (SyntaxWarning 3.8+).
    if dcomplex == 'hilbert':
        # get filtering coefficients
        b = []
        a = np.zeros((n_freqs,), dtype=float)
        forder = np.zeros((n_freqs,), dtype=int)
        for k in range(n_freqs):
            forder[k] = fir_order(sf, x.shape[-1], f[k, 0], cycle=cycle)
            _b, a[k] = fir1(forder[k], f[k, :] / (sf / 2.))
            b += [_b]
        # Filter each time series (one job per frequency band):
        xf = Parallel(n_jobs=n_jobs, **CONFIG['JOBLIB_CFG'])(delayed(filtfilt)(
            b[k], a[k], x, padlen=forder[k], axis=-1) for k in range(n_freqs))
        # Use hilbert for the complex decomposition :
        xd = np.asarray(xf)
        if stype is not None:
            xd = hilbertm(xd)
    elif dcomplex == 'wavelet':
        f = f.mean(1)  # centered frequencies
        xd = Parallel(n_jobs=n_jobs, **CONFIG['JOBLIB_CFG'])(delayed(morlet)(
            x, sf, k, width) for k in f)

    # Extract phase / amplitude :
    if stype == 'pha':
        return np.angle(xd).astype(np.float64)
    elif stype == 'amp':
        return np.abs(xd).astype(np.float64)
    elif stype is None:
        return xd.astype(np.float64)
###############################################################################
###############################################################################
# FILTERING
###############################################################################
###############################################################################
def fir_order(fs, sizevec, flow, cycle=3):
    """FIR filter order: `cycle` cycles of the low cut-off, capped by data length."""
    order = cycle * (fs // flow)
    # Never let the order exceed one third of the signal length.
    if sizevec < 3 * order:
        order = (sizevec - 1) // 3
    return int(order)
def n_odd_fcn(f, o, w, l):
    """Odd case.

    Least-squares FIR design helper for odd filter lengths (MATLAB `firls`
    port).  f: band-edge pairs, o: desired amplitudes at the edges,
    w: per-band weights, l: half-order.  Returns the impulse response h.
    """
    # Variables :
    b0 = 0
    m = np.array(range(int(l + 1)))
    k = m[1:len(m)]
    b = np.zeros(k.shape)

    # Accumulate the least-squares terms band by band (edges come in pairs).
    for s in range(0, len(f), 2):
        # Linear interpolation of the desired response over this band.
        m = (o[s + 1] - o[s]) / (f[s + 1] - f[s])
        b1 = o[s] - m * f[s]
        b0 = b0 + (b1 * (f[s + 1] - f[s]) + m / 2 * (
            f[s + 1] * f[s + 1] - f[s] * f[s])) * abs(
            np.square(w[round((s + 1) / 2)]))
        b = b + (m / (4 * np.pi * np.pi) * (
            np.cos(2 * np.pi * k * f[s + 1]) - np.cos(2 * np.pi * k * f[s])
        ) / (k * k)) * abs(np.square(w[round((s + 1) / 2)]))
        b = b + (f[s + 1] * (m * f[s + 1] + b1) * np.sinc(2 * k * f[
            s + 1]) - f[s] * (m * f[s] + b1) * np.sinc(2 * k * f[s])) * abs(
            np.square(w[round((s + 1) / 2)]))

    # Assemble the symmetric impulse response from the cosine coefficients.
    b = np.insert(b, 0, b0)
    a = (np.square(w[0])) * 4 * b
    a[0] = a[0] / 2
    aud = np.flipud(a[1:len(a)]) / 2
    a2 = np.insert(aud, len(aud), a[0])
    h = np.concatenate((a2, a[1:] / 2))

    return h
def n_even_fcn(f, o, w, l):
    """Even case.

    Counterpart of n_odd_fcn for even filter lengths: frequencies are
    sampled on the half-integer grid k + 0.5.  Same arguments and return.
    """
    # Variables :
    k = np.array(range(0, int(l) + 1, 1)) + 0.5
    b = np.zeros(k.shape)

    # Accumulate the least-squares terms band by band (edges come in pairs).
    for s in range(0, len(f), 2):
        # Linear interpolation of the desired response over this band.
        m = (o[s + 1] - o[s]) / (f[s + 1] - f[s])
        b1 = o[s] - m * f[s]
        b = b + (m / (4 * np.pi * np.pi) * (np.cos(2 * np.pi * k * f[
            s + 1]) - np.cos(2 * np.pi * k * f[s])) / (
            k * k)) * abs(np.square(w[round((s + 1) / 2)]))
        b = b + (f[s + 1] * (m * f[s + 1] + b1) * np.sinc(2 * k * f[
            s + 1]) - f[s] * (m * f[s] + b1) * np.sinc(2 * k * f[s])) * abs(
            np.square(w[round((s + 1) / 2)]))

    # The impulse response is the mirrored coefficient vector.
    a = (np.square(w[0])) * 4 * b
    h = 0.5 * np.concatenate((np.flipud(a), a))

    return h
def firls(n, f, o):
    """Least-squares linear-phase FIR design (MATLAB `firls` port).

    NOTE: ``f`` is halved in place (``f /= 2``), so an ndarray argument is
    mutated -- fir1 passes a freshly built array each call.
    """
    weights = np.ones(round(len(f) / 2))
    n += 1
    f /= 2
    half_order = (n - 1) / 2
    # Odd filter lengths use the odd-case design, even lengths the even case.
    design = n_odd_fcn if n % 2 else n_even_fcn
    return design(f, o, weights, half_order)
def fir1(n, wn):
    """Band-pass FIR coefficients (MATLAB `fir1` port): returns (b, a) with a == 1."""
    nbands = len(wn) + 1
    # Band-edge pairs [0, low][low, high][high, 1] in normalized frequency.
    edges = np.array((0, wn[0], wn[0], wn[1], wn[1], 1))
    center = np.mean(edges[2:4])
    n_taps = n + 1
    # Desired amplitude alternates 0/1/0 across the bands, repeated per edge.
    gains = np.arange(nbands).reshape(1, -1) % 2
    desired = np.ravel(np.tile(gains, (2, 1)), order='F')
    # Least-squares solution, then a Hamming window.
    coeffs = firls(n_taps - 1, edges, desired)
    coeffs = coeffs * np.hamming(n_taps)
    # Normalize so the gain at the band center is exactly one.
    phasor = np.exp(-1j * 2 * np.pi * (center / 2) * np.arange(n_taps))
    coeffs /= abs(phasor @ coeffs)
    return coeffs, 1
###############################################################################
###############################################################################
# FILTERING
###############################################################################
###############################################################################
def morlet(x, sf, f, width=7.):
    """Complex decomposition of a signal x using the morlet wavelet.

    Parameters
    ----------
    x : array_like, shape (N,)
        The signal to use for the complex decomposition. Must be
        a vector of length N.
    sf : float
        Sampling frequency
    f : array_like, shape (2,)
        Frequency vector
    width : float | 7.
        Width of the wavelet

    Returns
    -------
    xout : array_like, shape (N,)
        The complex decomposition of the signal x.
    """
    dt = 1 / sf
    # Spectral / temporal standard deviations of the wavelet.  (The original
    # rebound `sf` here; distinct names keep both quantities readable.)
    sigma_f = f / width
    sigma_t = 1 / (2 * np.pi * sigma_f)
    # Build the complex Morlet wavelet on a symmetric time grid.
    t = np.arange(-width * sigma_t / 2, width * sigma_t / 2, dt)
    norm = 1 / np.sqrt((sigma_t * np.sqrt(np.pi)))
    wavelet = norm * np.exp(-np.square(t) / (2 * np.square(sigma_t))) * np.exp(
        1j * 2 * np.pi * f * t)

    def _convolve_last(sig):
        # Full convolution, then crop back to the input length ('same' size).
        full = np.convolve(sig, wavelet)
        start = int(np.ceil(len(wavelet) / 2)) - 1
        stop = int(len(full) - np.floor(len(wavelet) / 2))
        return full[start:stop]

    return np.apply_along_axis(_convolve_last, -1, x)
| [
"e.combrisson@gmail.com"
] | e.combrisson@gmail.com |
f79f65f0b11e74e90409fdb59e7626a53d9164e7 | a0643c775515499dd9492464384b05e82da608a8 | /test_interface/CEM/marketingManagement/marketingManagement/nodeDefineRecord_test.py | 64d559fe436d1d8ca8fff826bd8e7489a6ea7554 | [] | no_license | EmmySmith/autoInterfaceTest | e4935e5fbe85324502f1c8b9ba33ede51cfee81b | 56d31476994bfc276cc53ab32c9b27b8d2447e0b | refs/heads/master | 2023-05-28T05:27:22.615719 | 2020-05-09T03:40:04 | 2020-05-09T03:40:04 | 262,480,416 | 0 | 0 | null | 2023-05-22T22:44:16 | 2020-05-09T03:26:26 | HTML | UTF-8 | Python | false | false | 1,120 | py | #!/usr/bin/python
# coding=utf-8
import requests
import unittest
import json,time,random
from common.public import *
from mysqlHandle.common_mysql import *
class ICEM_Interface(unittest.TestCase):
    """Interface test for the ICEM activity workflow 'nodeDefineRecord' API."""

    @classmethod
    def setUpClass(self):
        # `headers` and `host` come from common.public (star import above).
        self.headers = headers
        self.host = host
        self.path = "/api/icem-activity/workflow/nodeDefineRecord"
        self.random = random.randint(1000,99999)
        # The newest activity id drives the request payload below.
        self.sql = "SELECT id FROM t_activity ORDER BY id DESC LIMIT 1;"
        self.dbname = "geek_icem_activity"
        print("----------开始测试----------")

    # Node-list endpoint
    def test_nodeDefineRecord(self):
        '''Node-list endpoint: POST the latest activity id, expect error == 0.'''
        self.url = self.host + self.path
        # NOTE(review): `get_vslues` looks like a typo for `get_values`, but
        # the helper lives in mysqlHandle.common_mysql -- fix it there first.
        self.activityId = DB_ICEM_proc(self.dbname).get_vslues(self.sql)
        data = {"activityId":self.activityId}
        print(self.url)
        response = requests.post(url=self.url,data= json.dumps(data), headers=self.headers)
        print (response.text)
        assert response.json()['error'] == 0

    def tearDown(self):
        pass
if __name__ == "__main__":
sms = ICEM_Interface() | [
"renming@jiekecloud.com"
] | renming@jiekecloud.com |
d93b2b2d13b8c15e3517aaffa07d6e531fbcc33b | 39689ee725bc7183d5d59fb34f7d2ffe5fd6ad36 | /ABC_B/ABC152B.py | 35c5db735fa5cc883c21d5980d6836368cc1fc04 | [] | no_license | yu5shi8/AtCoder | b6eb920a9046bdfa98012dd3fc65f75f16214ffe | f9ca69001ece8379e3a70c993c44b540f8be2217 | refs/heads/master | 2021-06-15T17:58:07.027699 | 2021-03-20T14:04:03 | 2021-03-20T14:04:03 | 177,757,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | # -*- coding: utf-8 -*-
# B - Comparing Strings
# https://atcoder.jp/contests/abc152/tasks/abc152_b
a, b = map(int, input().split())
# The answer is the smaller number's digit repeated max(a, b) times.
lo, hi = (a, b) if a <= b else (b, a)
print(str(lo) * hi)
# 23:32 - 23:36(AC)
| [
"royal_unicorn411@hotmail.co.jp"
] | royal_unicorn411@hotmail.co.jp |
b654a1eafce5e94c9b993cdbd1877fa3a05eafcb | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03805/s743667282.py | 877dfca86e7d05d94725d8483f03ad1b21f93854 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | from collections import deque
def dfs(G, visited, s, n, ans):
    """Count Hamiltonian paths starting from vertex s via backtracking DFS.

    G : adjacency lists; visited : per-vertex flags (visited[s] must already
    be True); n : vertex count (unused, kept for interface compatibility);
    ans : running path count.  Returns the updated count.
    """
    # Every vertex visited: one complete path found.
    if all(visited):
        ans += 1
        return ans
    # Try each unvisited neighbour, restoring the flag afterwards.
    for i in G[s]:
        if not visited[i]:
            visited[i] = True
            ans = dfs(G, visited, i, n, ans)
            visited[i] = False
    return ans
# Read graph G: 1-indexed undirected edges, stored as 0-indexed
# adjacency lists.
n,m = map(int,input().split())
G = [[] for j in range(n)]
for i in range(m) :
    a,b = map(int,input().split())
    G[a-1].append(b-1)
    G[b-1].append(a-1)
visited = [False for i in range(n)]
# Paths must start at vertex 0 (vertex 1 in the problem statement).
visited[0] = True
ans = 0
print(dfs(G, visited, 0, n, ans))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
66c9edb88fbf7c5955f202577b420c7e013304bd | 8d3adc6867d80a75ac12984f52c78938221404a4 | /Bio/_utils.py | 77144e9015a72b4147ce67e6833712d851ccd8c7 | [
"LicenseRef-scancode-biopython"
] | permissive | chapmanb/biopython | 407b17f44f599d71818c5e16bd86a44cf323e062 | cade0aec7ecff7d052c42040f5cae8036e297941 | refs/heads/master | 2020-12-29T03:07:42.778627 | 2013-07-09T09:36:26 | 2013-07-09T09:36:26 | 412,030 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,568 | py | # Copyright 2010 by Eric Talevich. All rights reserved.
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Common utility functions for various Bio submodules."""
import os
def iterlen(items):
    """Count the number of items in an iterable.

    If the argument supports len(items), and some iterators do, then
    this returns len(items). Otherwise it will scan over the entries
    in order to count them.

    Exhausts a generator, but doesn't require creating a full list.

    >>> iterlen("abcde")
    5
    >>> iterlen(iter("abcde"))
    5
    """
    try:
        #e.g. Under Python 2, the xrange iterator defines __len__
        return len(items)
    except TypeError:
        # Start the counter at zero so an empty/exhausted iterator
        # correctly yields 0 (the old code only bound ``count`` inside
        # the loop and raised UnboundLocalError for empty input).
        count = 0
        for count, _ in enumerate(items, 1):
            pass
        return count
def read_forward(handle):
    """Skip whitespace-only lines; return the first real line or the EOF value."""
    while True:
        current = handle.readline()
        # Stop at EOF ('' is falsy) or at the first line with visible content.
        if not current or current.strip():
            return current
def trim_str(string, max_len, concat_char):
    """Shorten *string* to *max_len* characters, ending with *concat_char* if cut."""
    if len(string) <= max_len:
        return string
    keep = max_len - len(concat_char)
    return string[:keep] + concat_char
def getattr_str(obj, attr, fmt=None, fallback='?'):
    """Render ``obj.attr`` as a string, or *fallback* when the attribute is absent.

    When *fmt* is given it is applied with the ``%`` operator; otherwise the
    value is passed through ``str()``.
    """
    _missing = object()
    value = getattr(obj, attr, _missing)
    if value is _missing:
        return fallback
    return str(value) if fmt is None else fmt % value
def find_test_dir(start_dir=None):
    """Return the absolute path of Biopython's Tests directory.

    Arguments:
    start_dir -- Initial directory to begin lookup (default: current dir)

    Walks upward from *start_dir* until a directory containing both ``Bio``
    and ``Tests`` subdirectories is found.  Raises ValueError when the
    filesystem root is reached without a match.
    """
    if not start_dir:
        # Default to the current directory rather than __file__, so a source
        # checkout wins over the installed Biopython.
        start_dir = "."
    current = os.path.abspath(start_dir)
    while True:
        has_bio = os.path.isdir(os.path.join(current, "Bio"))
        has_tests = os.path.isdir(os.path.join(current, "Tests"))
        if has_bio and has_tests:
            # Found the Biopython root; hand back its Tests directory.
            return os.path.abspath(os.path.join(current, "Tests"))
        parent, _ = os.path.split(current)
        if parent == current:
            break  # reached the filesystem root
        current = parent
    raise ValueError("Not within Biopython source tree: %r" % os.path.abspath(start_dir))
def run_doctest(target_dir=None, *args, **kwargs):
    """Run this module's doctests from inside the Biopython Tests directory.

    Temporarily chdirs into the directory located by find_test_dir()
    (doctests may rely on test data paths) and restores the original
    working directory afterwards.  Extra *args/**kwargs are forwarded to
    doctest.testmod(), except that 'optionflags' is always overridden
    with doctest.ELLIPSIS.

    NOTE: written for Python 2 (print statements), like the whole module.
    """
    import doctest
    # default doctest options
    default_kwargs = {
        'optionflags': doctest.ELLIPSIS,
    }
    kwargs.update(default_kwargs)
    cur_dir = os.path.abspath(os.curdir)
    # NOTE(review): "Runing" is a typo in user-facing output -- should read "Running".
    print "Runing doctests..."
    try:
        os.chdir(find_test_dir(target_dir))
        doctest.testmod(*args, **kwargs)
    finally:
        # and revert back to initial directory
        os.chdir(cur_dir)
    print "Done"
if __name__ == "__main__":
    run_doctest()
| [
"p.j.a.cock@googlemail.com"
] | p.j.a.cock@googlemail.com |
ccd23976c670e92724953d4e4f62a84424828fb2 | 0bce7412d58675d6cc410fa7a81c294ede72154e | /Python3/0287. Find the Duplicate Number.py | 6fdd5f3ec43b8bdf3b456532302c0ad8a9ed6823 | [] | no_license | yang4978/LeetCode | 9ddf010b0f1dda32cddc7e94c3f987509dea3214 | 6387d05b619d403414bad273fc3a7a2c58668db7 | refs/heads/master | 2022-01-15T04:21:54.739812 | 2021-12-28T12:28:28 | 2021-12-28T12:28:28 | 182,653,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | class Solution:
def findDuplicate(self, nums: List[int]) -> int:
left = 1
right = len(nums)-1
while left<right:
mid = left + (right-left)//2
temp = 0
for i in nums:
temp += (i<=mid)
if temp<=mid:
left = mid + 1
else:
right = mid
return left
| [
"noreply@github.com"
] | yang4978.noreply@github.com |
b3621c42eac9b9c4eb104fc6c10c7e9dd8bc2186 | bf99b1b14e9ca1ad40645a7423f23ef32f4a62e6 | /AtCoder/abc/142d_2.py | aaf992c2ab19b95ac0e17785ef21ba969269e9b6 | [] | no_license | y-oksaku/Competitive-Programming | 3f9c1953956d1d1dfbf46d5a87b56550ff3ab3db | a3ff52f538329bed034d3008e051f30442aaadae | refs/heads/master | 2021-06-11T16:14:12.635947 | 2021-05-04T08:18:35 | 2021-05-04T08:18:35 | 188,639,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | A, B = map(int, input().split())
def primeCount(N):
    """Prime-factorize N by trial division; return {prime: exponent}."""
    factors = {}
    remainder = N
    divisor = 2
    limit = int(N ** 0.5) + 1  # trial divisors up to sqrt(N)
    while divisor < limit:
        power = 0
        while remainder % divisor == 0:
            remainder //= divisor
            power += 1
        if power:
            factors[divisor] = power
        divisor += 1
    if remainder > 1:
        # Whatever is left is a single prime factor larger than sqrt(N).
        factors[remainder] = 1
    return factors
# Count divisors of gcd(A, B) that are 1 or prime: start at 1 (for the
# divisor 1), then add one for each distinct prime factor of A that also
# divides B.
primeA = primeCount(A)
ans = 1
prd = 1  # product of primes already counted (guards against double counting)
for p in sorted(primeA.keys()):
    if B % p == 0 and prd % p != 0:
        prd *= p
        ans += 1
print(ans)
| [
"y.oksaku@stu.kanazawa-u.ac.jp"
] | y.oksaku@stu.kanazawa-u.ac.jp |
731ff8381159eac4f3c4c5622d279515b577b095 | 74565d83bccccfae0d57d0e947f12db2bcae2999 | /phi/tests/test_nodsl.py | a9258eb9895c9db82cb8352393e4fd3274fe940a | [
"MIT"
] | permissive | iCodeIN/phi | 2fe9eaaf21615381e1b1815853adeb76a79fb696 | 87fd7100a76f823232f4fd8360498b4b80675265 | refs/heads/master | 2023-03-21T07:50:28.218564 | 2018-08-13T14:14:07 | 2018-08-13T14:14:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | from phi.dsl import Expression
P = Expression()
class TestNoDSL(object):
    """Exercise phi's combinators (Seq, List) called directly, no DSL sugar."""
    def test_seq(self):
        # Seq composes the steps left-to-right: (3 + 2) * 2 == 10.
        f = P.Seq(
            P + 2,
            P * 2
        )
        assert 10 == f(3)
    def test_branch(self):
        # List applies every branch to the same input: [3 + 2, 3 * 2].
        f = P.List(
            P + 2,
            P * 2
        )
        assert [5, 6] == f(3)
        # A branch may itself be a sequence: [3 + 2, (3 + 1) * 2].
        f = P.List(
            P + 2,
            P.Seq(
                P + 1,
                P * 2
            )
        )
        assert [5, 8] == f(3)
| [
"cgarcia.e88@gmail.com"
] | cgarcia.e88@gmail.com |
df097ee6147b01ee82e411a12f00ca1fdff8c780 | 773e6eefd35d3fe8f86faed6f8de0470e9da722f | /tests/wire/test_msg_mempool.py | d8d4a0ef565bc4c7b0fc63cfe915eb85e3ed4c35 | [] | no_license | imnisen/pybtcd | 5a9e13b0461e41958e3d1c948100e20c92459d43 | b89ec2063348805755d9f2c474a1494f0fb6ef46 | refs/heads/master | 2020-03-28T03:11:01.877408 | 2019-11-05T07:07:26 | 2019-11-05T07:07:26 | 147,625,874 | 2 | 2 | null | 2019-11-05T07:07:27 | 2018-09-06T06:09:40 | Python | UTF-8 | Python | false | false | 2,180 | py | import unittest
import io
from wire.msg_mempool import *
class TestMsgMemPool(unittest.TestCase):
    """Wire-format tests for MsgMemPool (the BIP 35 'mempool' message)."""
    def setUp(self):
        # 'mempool' was introduced in BIP0035Version; one protocol version
        # earlier must be rejected by both encode and decode.
        pver = ProtocolVersion
        oldPver = BIP0035Version - 1
        msgMemPool = MsgMemPool()
        # The message carries no payload at all.
        msgMemPoolEncoded = bytes([])
        self.tests = [
            # Latest protocol version.
            {
                "in": msgMemPool,
                "out": msgMemPool,
                "buf": msgMemPoolEncoded,
                "pver": pver,
                "enc": BaseEncoding,
                "err": None
            },
            # Protocol version BIP0035Version -1 .
            {
                "in": msgMemPool,
                "out": msgMemPool,
                "buf": msgMemPoolEncoded,
                "pver": oldPver,
                "enc": BaseEncoding,
                "err": MemPoolVerionBelowBIP35MsgErr
            },
        ]
    def test_command(self):
        msg = MsgMemPool()
        self.assertEqual(str(msg.command()), "mempool")
    def test_max_payload_length(self):
        msg = MsgMemPool()
        want_payload = 0
        self.assertEqual(msg.max_payload_length(ProtocolVersion), want_payload)
    def test_btc_encode(self):
        # Each case either serializes to the expected bytes or raises the
        # expected error type.
        for c in self.tests:
            if c["err"]:
                try:
                    s = io.BytesIO()
                    c['in'].btc_encode(s, c['pver'], c['enc'])
                except Exception as e:
                    self.assertEqual(type(e), c['err'])
            else:
                s = io.BytesIO()
                c['in'].btc_encode(s, c['pver'], c['enc'])
                self.assertEqual(s.getvalue(), c['buf'])
    def test_btc_decode(self):
        # Mirror of test_btc_encode for the parsing direction.
        for c in self.tests:
            if c['err']:
                try:
                    s = io.BytesIO(c['buf'])
                    msg = MsgMemPool()
                    msg.btc_decode(s, c['pver'], c['enc'])
                except Exception as e:
                    self.assertEqual(type(e), c['err'])
            else:
                s = io.BytesIO(c['buf'])
                msg = MsgMemPool()
                msg.btc_decode(s, c['pver'], c['enc'])
                self.assertEqual(msg, c['out'])
| [
"imnisen@gmail.com"
] | imnisen@gmail.com |
71ce5358015438612bd7c9b6178dddfd3a3af130 | 5dfb9ca5e0c8cb4cb7a7a92d6f6a34b34a841869 | /LeetCodeSolutions/python/211_Add_and_Search_Word_Data_structure_design.py | 6fdd8ce15c0c90bf5632e2ebbd8b40ec1c9fee03 | [
"MIT"
] | permissive | ChuanleiGuo/AlgorithmsPlayground | 2f71d29e697a656562e3d2a2de783d964dc6a325 | 90b6287b742c8bfd3797540c408d679be2821a40 | refs/heads/master | 2021-01-11T18:30:43.218959 | 2018-11-19T02:20:31 | 2018-11-19T02:20:31 | 79,550,052 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | class WordDictionary(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.root = {}
def addWord(self, word):
"""
Adds a word into the data structure.
:type word: str
:rtype: void
"""
node = self.root
for c in word:
if c not in node:
node[c] = {}
node = node[c]
node['#'] = '#'
def search(self, word):
"""
Returns if the word is in the data structure. A word could
contain the dot character '.' to represent any one letter.
:type word: str
:rtype: bool
"""
def find(word, node):
if not word:
return '#' in node
c, word = word[0], word[1:]
if c != '.':
return c in node and find(word, node[c])
return any(find(word, d) for d in node.values() if d != '#')
return find(word, self.root)
| [
"chuanleiguo@gmail.com"
] | chuanleiguo@gmail.com |
a714dbeb5f32820c71f3d0aa1e6daf2db3de0648 | 8ceade7e485606cd2d2ea09a996f1e71c1b76f76 | /ejerciciosMejorados/p5.py | 92e2b94263a31a9ef3c24c00365f32c3cff6ed6a | [] | no_license | jesustr20/test | bdc9a0ffd313e5aad9e63b5df5e87abc2a7d9419 | 7439f1466995356a709066208fa1a9de21b2266b | refs/heads/master | 2023-01-05T17:58:48.601151 | 2020-11-01T19:24:35 | 2020-11-01T19:24:35 | 306,782,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,548 | py | #Pregunta 5:
# Escribir una funcion sum() y una función multip() que sumen y multipliquen respectivamente
# todos los números de una lista.
# Por ejemplo: sum([1,2,3,4]) debería devolver 10 y multip([1,2,3,4]) debería devolver 24.
# Helper that builds the list 1..rango.
def listas(rango):
    """Return the list [1, 2, ..., rango] (empty when rango < 1)."""
    return list(range(1, rango + 1))
# Sum function: adds up the generated list.
def suma(rango):
    """Return the sum of the list produced by listas(rango)."""
    return sum(listas(rango))
# Product function: multiplies together the numbers 1..rango.
def multiplicacion(rango):
    """Return the product of the integers from 1 to ``rango``.

    For ``rango`` < 1 the product is empty, so 1 is returned.  (The old
    implementation returned a variable that was only bound inside the
    loop, raising UnboundLocalError in that case.)
    """
    producto = 1
    for factor in range(1, rango + 1):
        producto *= factor
    return producto
# Ask for the upper bound n, then show the list 1..n, its sum and its product.
numero = int(input('Calcular la suma de numeros consecutivos de 1 hasta: '))
# Show the generated list.
print(f'lista: {listas(numero)}')
# Sum of the list.
print(f'Suma: {suma(numero)}')
# Product of the list.
print(f'Producto: {multiplicacion(numero)}')
| [
"jesus.tr61@gmail.com"
] | jesus.tr61@gmail.com |
9b29f05b1777653afa4489a9428e2fa3ad119fb4 | 8447b5f83be675c5de085d8824783ec0739690b0 | /mmdet/ops/roi_align/roi_align.py | 875e63d5b6432b257a7ea81289220762413d2c1b | [] | no_license | jie311/mmdetection_lite | bca7070ad88a04e4f6650292642f98f24f7ebeba | 1c151a7c44759d022fea1d85fb036a5b39409449 | refs/heads/master | 2022-04-19T06:18:42.282754 | 2020-04-13T07:41:10 | 2020-04-13T07:41:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,068 | py | from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from . import roi_align_cuda
class RoIAlignFunction(Function):
    """autograd Function bridging RoI Align to the compiled CUDA extension.

    Two kernel families are dispatched: the legacy v1 kernels
    (aligned=False) and the v2 kernels (aligned=True), which sample with a
    -0.5 pixel shift for exact alignment.  CPU tensors are not supported.
    """
    @staticmethod
    def forward(ctx,
                features,
                rois,
                out_size,
                spatial_scale,
                sample_num=0,
                aligned=True):
        out_h, out_w = _pair(out_size)
        assert isinstance(out_h, int) and isinstance(out_w, int)
        # Stash everything backward() will need on the autograd context.
        ctx.spatial_scale = spatial_scale
        ctx.sample_num = sample_num
        ctx.save_for_backward(rois)
        ctx.feature_size = features.size()
        ctx.aligned = aligned
        if features.is_cuda:
            if not aligned:
                # v1 kernel writes into a pre-allocated output tensor.
                (batch_size, num_channels, data_height,
                 data_width) = features.size()
                num_rois = rois.size(0)
                output = features.new_zeros(num_rois, num_channels, out_h,
                                            out_w)
                roi_align_cuda.forward_v1(features, rois, out_h, out_w,
                                          spatial_scale, sample_num, output)
            else:
                # v2 kernel allocates and returns the output itself.
                output = roi_align_cuda.forward_v2(features, rois,
                                                   spatial_scale, out_h, out_w,
                                                   sample_num, aligned)
        else:
            raise NotImplementedError
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        feature_size = ctx.feature_size
        spatial_scale = ctx.spatial_scale
        sample_num = ctx.sample_num
        rois = ctx.saved_tensors[0]
        aligned = ctx.aligned
        assert (feature_size is not None and grad_output.is_cuda)
        batch_size, num_channels, data_height, data_width = feature_size
        out_w = grad_output.size(3)
        out_h = grad_output.size(2)
        # Only the features receive a gradient; rois and the config
        # arguments get None.
        grad_input = grad_rois = None
        if not aligned:
            if ctx.needs_input_grad[0]:
                grad_input = rois.new_zeros(batch_size, num_channels,
                                            data_height, data_width)
                roi_align_cuda.backward_v1(grad_output.contiguous(), rois,
                                           out_h, out_w, spatial_scale,
                                           sample_num, grad_input)
        else:
            grad_input = roi_align_cuda.backward_v2(
                grad_output, rois, spatial_scale, out_h, out_w, batch_size,
                num_channels, data_height, data_width, sample_num, aligned)
        return grad_input, grad_rois, None, None, None, None
roi_align = RoIAlignFunction.apply
class RoIAlign(nn.Module):
    """RoI Align layer: pools every RoI into a fixed (h, w) feature map.

    Dispatches either to this repo's CUDA op (``roi_align``) or, when
    ``use_torchvision`` is set, to ``torchvision.ops.roi_align``.
    """

    def __init__(self,
                 out_size,
                 spatial_scale,
                 sample_num=0,
                 use_torchvision=False,
                 aligned=False):
        """
        Args:
            out_size (tuple): output (h, w) of the pooled features
            spatial_scale (float): factor mapping box coordinates onto the
                feature map (typically 1 / feature stride)
            sample_num (int): bilinear samples taken per output bin; 2 to
                take samples densely for current models
            use_torchvision (bool): delegate to torchvision's roi_align
            aligned (bool): if True, shift coordinates by -0.5 before
                sampling (Detectron2-style exact alignment); if False, keep
                the legacy MMDetection behaviour.  Not supported together
                with ``use_torchvision``.
        """
        super(RoIAlign, self).__init__()
        self.out_size = _pair(out_size)
        self.spatial_scale = float(spatial_scale)
        self.sample_num = int(sample_num)
        self.use_torchvision = use_torchvision
        self.aligned = aligned
        assert not (use_torchvision and
                    aligned), 'Torchvision does not support aligned RoIAlgin'

    def forward(self, features, rois):
        """Pool ``rois`` (Bx5: batch index + xyxy) from NCHW ``features``."""
        assert rois.dim() == 2 and rois.size(1) == 5
        if not self.use_torchvision:
            return roi_align(features, rois, self.out_size, self.spatial_scale,
                             self.sample_num, self.aligned)
        from torchvision.ops import roi_align as tv_roi_align
        return tv_roi_align(features, rois, self.out_size,
                            self.spatial_scale, self.sample_num)

    def __repr__(self):
        pieces = [
            self.__class__.__name__,
            '(out_size={}, spatial_scale={}, sample_num={}'.format(
                self.out_size, self.spatial_scale, self.sample_num),
            ', use_torchvision={}, aligned={})'.format(
                self.use_torchvision, self.aligned),
        ]
        return ''.join(pieces)
| [
"760997646@qq.com"
] | 760997646@qq.com |
e6d28bb201f92f5cbd6c7cc8c1a7695415e2e239 | 8c162bcf0576316d1ce070e4a70f257a01c7ce4c | /piwheels/logger/__init__.py | 4f9f281a79e3aaa815449c41cebba5d020ffb514 | [
"BSD-3-Clause"
] | permissive | Lauszus/piwheels | 8e4a73b0a76715b45ab3586998c78fa3beb26b9f | e51b979f11c1d7a199c9f461312f959f23b5b979 | refs/heads/master | 2020-03-23T16:14:27.888940 | 2018-05-13T17:08:45 | 2018-05-13T17:08:45 | 141,799,946 | 0 | 0 | null | 2018-07-21T09:44:31 | 2018-07-21T09:44:31 | null | UTF-8 | Python | false | false | 7,857 | py | #!/usr/bin/env python
# The piwheels project
# Copyright (c) 2017 Ben Nuttall <https://github.com/bennuttall>
# Copyright (c) 2017 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Contains the functions that implement the :program:`piw-log` script.
.. autofunction:: main
"""
import io
import sys
import gzip
import json
import logging
import datetime as dt
import ipaddress
from pathlib import PosixPath
import zmq
from lars.apache import ApacheSource, COMMON, COMMON_VHOST, COMBINED
from .. import __version__, terminal, const
# Workaround: lars bug; User-Agent instead of User-agent
COMBINED = '%h %l %u %t "%r" %>s %b "%{Referer}i" "%{User-Agent}i"'
def main(args=None):
    """
    This is the main function for the :program:`piw-logger` script. It is
    designed to be run as a `piped log script`_ under Apache, piping access
    logs to the :class:`~.lumberjack.Lumberjack` task which stores them in the
    database. However it can also be used to load pre-existing logs in to.
    .. _piped log script: https://httpd.apache.org/docs/2.4/logs.html#piped
    """
    logging.getLogger().name = 'logger'
    parser = terminal.configure_parser("""\
    The piw-logger script is intended for use as an Apache "piped log script"
    but can also be used to feed pre-existing Apache logs to the master by
    feeding logs to the script's stdin. This script must be run on the same node
    as the piw-master script.
    """)
    parser.add_argument(
        '--format', default='combined',
        help="The Apache log format that log lines will be expected to be in "
        "(default: %(default)s); the short-cuts common, combined and "
        "common_vhost can be used in addition to Apache LogFormat strings")
    parser.add_argument(
        'files', nargs='*', default=['-'],
        help="The log file(s) to load into the master; if omitted or - then "
        "stdin will be read which is the default for piped log usage")
    parser.add_argument(
        '--logger-queue', metavar='ADDR', default=const.LOGGER_QUEUE,
        help="The address of the queue used by piw-logger (default: "
        "(%(default)s); this should always be an ipc address")
    parser.add_argument(
        '--drop', action='store_true',
        help="Drop log records if unable to send them to the master after a "
        "short timeout; this should generally be specified when piw-logger "
        "is used as a piped log script")
    try:
        config = parser.parse_args(args)
        terminal.configure_logging(config.log_level, config.log_file)
        logging.info("PiWheels Logger version %s", __version__)
        # Resolve the format short-cuts to full Apache LogFormat strings;
        # anything else is passed through as a literal format.
        config.format = {
            'common': COMMON,
            'common_vhost': COMMON_VHOST,
            'combined': COMBINED,
        }.get(config.format, config.format)
        # PUSH socket over which filtered/transformed rows reach the master.
        ctx = zmq.Context.instance()
        queue = ctx.socket(zmq.PUSH)
        queue.connect(config.logger_queue)
        try:
            for filename in config.files:
                log_file = log_open(filename)
                try:
                    with ApacheSource(log_file, config.format) as src:
                        for row in src:
                            if log_filter(row):
                                # With --drop, give the master 1s then discard
                                # rather than ever blocking Apache's pipe.
                                if not config.drop or queue.poll(1000, zmq.POLLOUT):
                                    queue.send_pyobj(['LOG'] + log_transform(row))
                                else:
                                    logging.warning('dropping log entry')
                finally:
                    log_file.close()
        finally:
            # Tear down ZMQ cleanly (bounded linger) even on error.
            queue.close()
            ctx.destroy(linger=1000)
            ctx.term()
    except RuntimeError as err:
        logging.error(err)
        return 1
    except:  # pylint: disable=bare-except
        return terminal.error_handler(*sys.exc_info())
    else:
        return 0
def log_open(filename):
    """Open *filename* for reading.

    :param str filename:
        The filename to open as a read-only file.
    :returns:
        The file-like object to read.

    ``'-'`` returns stdin directly, a ``.gz`` suffix is transparently
    decompressed, and anything else is opened as a plain ASCII text file.
    """
    if filename == '-':
        logging.info('Processing log entries from stdin')
        return sys.stdin
    is_gzipped = filename.endswith('.gz')
    if is_gzipped:
        logging.info('Processing gzipped log %s', filename)
        raw = gzip.open(filename, 'rb')
        return io.TextIOWrapper(raw, encoding='ascii')
    logging.info('Processing log %s', filename)
    return io.open(filename, 'r', encoding='ascii')
def log_filter(row):
    """Keep only successful (HTTP 200) wheel downloads made by pip.

    :param row:
        A tuple containing the fields of the log entry, as returned by
        :class:`lars.apache.ApacheSource`.
    """
    if row.status != 200:
        return False
    agent = row.req_User_Agent
    if agent is None or not agent.startswith('pip/'):
        return False
    return row.request.url.path_str.endswith('.whl')
def log_transform(row, decoder=json.JSONDecoder()):
    """Flatten one parsed Apache log row into the list sent to the master.

    :param row:
        A tuple containing the fields of the log entry, as returned by
        :class:`lars.apache.ApacheSource`.

    pip embeds a JSON blob in its User-Agent header; when present it is
    parsed for platform details, otherwise those fields default to None.
    Values are converted to standard types to avoid issues with some
    database backends.
    """
    filename = PosixPath(row.request.url.path_str).name
    agent = row.req_User_Agent
    user_data = {}
    brace = agent.find('{')
    if brace != -1:
        try:
            user_data = decoder.decode(agent[brace:])
        except ValueError:
            user_data = {}
    distro = user_data.get('distro', {})
    system = user_data.get('system', {})
    implementation = user_data.get('implementation', {'name': 'CPython'})
    impl_version = user_data.get(
        'implementation', {'version': user_data.get('python')}).get('version')
    return [
        filename,
        str(row.remote_host),
        row.time.replace(),
        user_data.get('cpu'),
        distro.get('name'),
        distro.get('version'),
        system.get('name'),
        system.get('version'),
        implementation.get('name'),
        impl_version,
    ]
| [
"dave@waveform.org.uk"
] | dave@waveform.org.uk |
d15144bfccfa3fec3d6c39d294d7d9f9379b012b | 3481023b43028c5ee9520a8be0978e914bdcb548 | /manga_py/base_classes/params.py | 5d90a0761f6bf49a2ebe1be019ec60ff77f0738c | [
"MIT"
] | permissive | manga-py/manga-py | 18f6818d8efc96c3e69efee7dff3f3d6c773e32a | 0db97123acab1f2fb99e808b0ba54db08977e5c8 | refs/heads/stable_1.x | 2023-08-20T03:04:06.373108 | 2023-04-16T08:28:15 | 2023-04-16T08:28:15 | 98,638,892 | 444 | 56 | MIT | 2023-07-27T13:21:40 | 2017-07-28T10:27:43 | Python | UTF-8 | Python | false | false | 1,394 | py | from logging import error
from urllib.parse import urlparse
class ProviderParams:
    """Mixin of lazily-evaluated properties shared by manga providers.

    Raw user input is read from ``self._params`` and derived state is
    cached in ``self._storage`` (both supplied by the concrete provider).
    """

    @property
    def content(self):
        """Main page markup; fetched via get_content() until cached."""
        cached = self._storage.get('main_content', None)
        if cached is None:
            return self.get_content()
        return cached

    @property
    def manga_name(self) -> str:
        """Manga title; resolved via get_manga_name() until cached."""
        cached = self._storage.get('manga_name', None)
        if cached is None:
            return self.get_manga_name()
        return cached

    @property
    def name(self) -> str:
        """User-supplied name, falling back to the detected manga name."""
        chosen = self._params.get('name', '')
        return chosen if len(chosen) else self.manga_name

    @property
    def domain(self) -> str:
        """Scheme + host of the provider url, cached after the first parse."""
        _url = self._params['url']
        try:
            if not self._storage.get('domain_uri', None):
                pieces = urlparse(_url, 'https')
                self._storage['domain_uri'] = '{}://{}'.format(
                    pieces.scheme,
                    pieces.netloc
                )
            return self._storage.get('domain_uri', '')
        except Exception:
            # Unparseable url: report it and abort the program.
            error('url "%s" is broken!' % _url)
            exit()

    @property
    def chapter(self):
        """Chapter selected by the current ``chapter_id`` index."""
        return self.chapters[self.chapter_id]

    @property
    def chapters(self):
        """All chapters discovered for this manga."""
        return self._storage['chapters']

    @chapters.setter
    def chapters(self, chapters: list):
        self._storage['chapters'] = chapters
| [
"sttv-pc@mail.ru"
] | sttv-pc@mail.ru |
c0729505d6bf9a498b456982548aa120de9d51a6 | 4cef47ddd432f2ab814a03fd89112b1dc8b5ab27 | /flow/message.py | 4c6579ea3c4a1e94e634e4ac2e1ed647ac980500 | [
"MIT"
] | permissive | h00shi/flow | 279d621cda72a8d2392c2dbe7735ce2ab6b39380 | ef45bdd4181d385b1b01042e9ce0b48e4cdc2318 | refs/heads/master | 2020-09-24T06:50:01.605337 | 2017-07-05T17:53:50 | 2017-07-05T17:53:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | # -*- coding: utf-8 -*-
#
'''
Allows using DOLFIN's indented block messages with 'with', i.e.,
with Message('hello world'):
# do something
'''
from dolfin import begin, end
class Message(object):
    """Context manager that emits DOLFIN's indented begin/end log markers.

    Usage:
        with Message('hello world'):
            # do something
    """
    def __init__(self, string):
        # Text shown when the indented block opens.
        self.string = string
    def __enter__(self):
        begin(self.string)
    def __exit__(self, exc_type, exc_value, tb):
        # Always close the block; exceptions propagate (None is returned).
        end()
| [
"nico.schloemer@gmail.com"
] | nico.schloemer@gmail.com |
d127705f9dd239922fbb76363c5cc3ca519736c3 | 85c82274a3888fa61795bb0600ab96eaf7665b6a | /meet1/F_max3Angka.py | fa0a3546b66bbd03e24bf5cd643fb372eb6d9263 | [] | no_license | refeed/StrukturDataA | 8e5a214569f41b19c05842d003ede5941800482a | 4d3b77bbd28158f1f1e64a49b8e90da731859407 | refs/heads/master | 2023-06-03T08:22:12.442536 | 2021-07-01T03:24:29 | 2021-07-01T03:24:29 | 360,478,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | '''
Max 3 angka
Batas Run-time: 1 detik / test-case
Batas Memori: 32 MB
DESKRIPSI SOAL
Buatlah program yang menerima 3 buah input nilai, outputkan nilai paling besar
diantara ketiga input tersebut.
PETUNJUK MASUKAN
Input terdiri atas 3 angka dalam 1 baris
PETUNJUK KELUARAN
Outputkan angka terbesar dari 3 angka yang dimasukkan
CONTOH MASUKAN
10 9 11
CONTOH KELUARAN
11
'''
# Read one line of space-separated integers and report the largest one.
numbers = [int(token) for token in input().split()]
largest = numbers[0]
for candidate in numbers[1:]:
    if candidate > largest:
        largest = candidate
print(largest)
| [
"rafidteam@gmail.com"
] | rafidteam@gmail.com |
1611a857f00cc4e23f65789639b9f6eef36aefe9 | 304f6ea579c2bda6f2a0a0d93c0ffcf332082450 | /since_JungleWeek06/정확한 순위.py | 0d5fb00f7b7be52e32e728b87a77981421fde412 | [] | no_license | Jeongseo21/algorithm | 941ae18bb17f6d9a0f3190d40f5f0ae212ffe0f6 | 0fbda0e320ca6616dfe21680601521549ed019e4 | refs/heads/master | 2023-04-17T21:37:25.607255 | 2021-04-25T13:51:02 | 2021-04-25T13:51:02 | 276,335,142 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | import sys
# import numpy
# sys.stdin = open("input.txt", "r")
input = sys.stdin.readline
INF = int(1e9)  # "unreachable" sentinel for Floyd-Warshall
# N students, M known comparisons "A beats B" (A, B are 1-based).
N, M = map(int, input().split())
graph = [[INF for _ in range(N+1)] for _ in range(N+1)]
#graph = numpy.array(graph)
for i in range(1, N+1):
    for j in range(1, N+1):
        if i == j:
            graph[i][j] = 0
for i in range(M):
    A, B = map(int, input().split())
    graph[A][B] = 1
# Floyd-Warshall: graph[i][j] < INF afterwards means "i ranks above j".
for k in range(1, N+1):
    for i in range(1, N+1):
        for j in range(1, N+1):
            graph[i][j] = min(graph[i][k]+graph[k][j], graph[i][j])
# Collapse unreachable entries to 0 so reachability is simply "!= 0".
for i in range(N+1):
    for j in range(N+1):
        if graph[i][j] == INF:
            graph[i][j] = 0
result = [0 for _ in range(N+1)]
# For each comparable ordered pair, credit both endpoints.
for i in range(1, N+1):
    for j in range(1, N+1):
        if graph[i][j] != 0:
            result[i] += 1
            result[j] += 1
answer = 0
# A student's rank is exact iff they are comparable with all N-1 others.
for i in result:
    if i == N-1:
        answer += 1
print(answer)
| [
"jeongseo21@gmail.com"
] | jeongseo21@gmail.com |
6fa22ce338d3f8d57ecec002067180eaafbed94e | 4908b1d34d69c1cb652f25049552562574e1075f | /2020/Day-12/Rain_Risk/vector.py | cfe2acf5df39b6354ac84b44bc21a18195edfe11 | [
"MIT"
] | permissive | sreekesari-vangeepuram/adventofcode | 3d4ad98a25a30640182d928538b421e00ad8259d | 645531be0208affe042ac0328105b9ef3cfc9dbf | refs/heads/main | 2023-07-26T13:36:03.036721 | 2021-08-11T08:27:25 | 2021-08-11T08:27:25 | 317,850,039 | 1 | 0 | MIT | 2021-08-11T08:27:26 | 2020-12-02T12:08:13 | Go | UTF-8 | Python | false | false | 2,493 | py | class vector:
    def __init__(self, x, y, pointing_direction):
        # Ship position (x east/west, y north/south) and facing direction,
        # one of 'E', 'W', 'N', 'S'.
        self.x = x
        self.y = y
        self.direction = pointing_direction
    def get_pos(self):
        # Snapshot of the full state as an (x, y, direction) tuple.
        return (self.x, self.y, self.direction)
def change_position(self, ins):
d = self.direction
if d == 'E':
if ins[0] == 'E' or ins[0] == 'F': self.x += ins[1]
elif ins[0] == 'W': self.x -= ins[1]
elif ins[0] == 'N': self.y += ins[1]
elif ins[0] == 'S': self.y -= ins[1]
elif d == 'W':
if ins[0] == 'W' or ins[0] == 'F': self.x -= ins[1]
elif ins[0] == 'E': self.x += ins[1]
elif ins[0] == 'N': self.y += ins[1]
elif ins[0] == 'S': self.y -= ins[1]
elif d == 'N':
if ins[0] == 'N' or ins[0] == 'F': self.y += ins[1]
elif ins[0] == 'W': self.x -= ins[1]
elif ins[0] == 'E': self.x += ins[1]
elif ins[0] == 'S': self.y -= ins[1]
elif d == 'S':
if ins[0] == 'S' or ins[0] == 'F': self.y -= ins[1]
elif ins[0] == 'W': self.x -= ins[1]
elif ins[0] == 'N': self.y += ins[1]
elif ins[0] == 'E': self.x += ins[1]
def change_direction(self, ins):
d = self.direction
if ins[0] == 'R':
if d == 'E':
if ins[1] == 90:
self.direction = 'S'
elif ins[1] == 180:
self.direction = 'W'
elif ins[1] == 270:
self.direction = 'N'
elif d == 'W':
if ins[1] == 90:
self.direction = 'N'
elif ins[1] == 180:
self.direction = 'E'
elif ins[1] == 270:
self.direction = 'S'
elif d == 'N':
if ins[1] == 90:
self.direction = 'E'
elif ins[1] == 180:
self.direction = 'S'
elif ins[1] == 270:
self.direction = 'W'
elif d == 'S':
if ins[1] == 90:
self.direction = 'W'
elif ins[1] == 180:
self.direction = 'N'
elif ins[1] == 270:
self.direction = 'E'
elif ins[0] == 'L':
if d == 'E':
if ins[1] == 90:
self.direction = 'N'
elif ins[1] == 180:
self.direction = 'W'
elif ins[1] == 270:
self.direction = 'S'
elif d == 'W':
if ins[1] == 90:
self.direction = 'S'
elif ins[1] == 180:
self.direction = 'E'
elif ins[1] == 270:
self.direction = 'N'
elif d == 'N':
if ins[1] == 90:
self.direction = 'W'
elif ins[1] == 180:
self.direction = 'S'
elif ins[1] == 270:
self.direction = 'E'
elif d == 'S':
if ins[1] == 90:
self.direction = 'E'
elif ins[1] == 180:
self.direction = 'N'
elif ins[1] == 270:
self.direction = 'W'
    def manhattan_distance(self):
        # Puzzle answer metric: |x| + |y| from the origin.
        return abs(self.x)+abs(self.y)
| [
"kesari.vangeepuram@gmail.com"
] | kesari.vangeepuram@gmail.com |
980323cf75ba26bfab0963870210bfedc895fa2d | ecff4b18a49ce5952c5f9125dc027cebdecf10a8 | /azure-mgmt-resource/azure/mgmt/resource/resources/models/dependency.py | c5db8825cb5df20c1b8bb96a3551b7981d632c2e | [
"Apache-2.0"
] | permissive | jehine-MSFT/azure-sdk-for-python | a56c18020ecd5f4c245c093fd6a33e1b1d7c95e1 | 6d0f94b39406eab374906c683bd2150217132a9c | refs/heads/master | 2020-12-06T19:17:38.153819 | 2016-04-08T21:03:16 | 2016-04-08T21:03:16 | 55,809,131 | 0 | 0 | null | 2016-04-08T20:54:00 | 2016-04-08T20:54:00 | null | UTF-8 | Python | false | false | 2,007 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Dependency(Model):
    """Deployment dependency information.

    :param depends_on: Gets the list of dependencies.
    :type depends_on: list of :class:`BasicDependency
     <azure.mgmt.resource.resources.models.BasicDependency>`
    :param id: Gets or sets the ID of the dependency.
    :type id: str
    :param resource_type: Gets or sets the dependency resource type.
    :type resource_type: str
    :param resource_name: Gets or sets the dependency resource name.
    :type resource_name: str
    """

    # msrest (de)serialization map: python attribute -> wire key and type.
    _attribute_map = {
        'depends_on': {'key': 'dependsOn', 'type': '[BasicDependency]'},
        'id': {'key': 'id', 'type': 'str'},
        'resource_type': {'key': 'resourceType', 'type': 'str'},
        'resource_name': {'key': 'resourceName', 'type': 'str'},
    }

    def __init__(self, depends_on=None, id=None, resource_type=None, resource_name=None, **kwargs):
        # Plain attribute assignment; extra **kwargs are accepted and ignored,
        # matching the generated-model convention.
        self.id = id
        self.depends_on = depends_on
        self.resource_name = resource_name
        self.resource_type = resource_type
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
5002a8c9e0c3a567b44de79a2522f9cb8e1dd214 | 614cd6a5336a6fe177d13a7cfd841a344f5d7a97 | /albumy/albumy/to_test/test.py | 3511682210d8dcaa4aef6ab8852c1c93a666dd3d | [
"MIT"
] | permissive | shidashui/MyFlaskWeb | 40bb80dea6aa9cc2729ee8e767062cbc519a472a | b861c76813e15250036477fca6570cef1d957af1 | refs/heads/master | 2022-12-10T08:32:53.657838 | 2019-08-26T15:29:03 | 2019-08-26T15:29:03 | 193,836,836 | 3 | 0 | MIT | 2022-12-08T05:55:00 | 2019-06-26T05:42:12 | JavaScript | UTF-8 | Python | false | false | 1,327 | py | import os
import PIL
from PIL import Image
from itsdangerous import Serializer
def A():
    """Round-trip a value through two itsdangerous Serializers sharing one key."""
    signer = Serializer('mima')
    token = signer.dumps('test')
    print(token)
    # A second serializer with the same secret can decode the token.
    verifier = Serializer('mima')
    print(verifier.loads(token))
# Role name -> ordered list of permission names granted to that role.
roles_permissions_map = {
    'Locked': ['FOLLOW', 'COLLECT'],
    'User': ['FOLLOW', 'COLLECT', 'COMMENT', 'UPLOAD'],
    'Moderator': ['FOLLOW', 'COLLECT', 'COMMENT', 'UPLOAD', 'MODERATE'],
    'Administrator': ['FOLLOW', 'COLLECT', 'COMMENT', 'UPLOAD', 'MODERATE', 'ADMINISTER'],
}

# Print each role next to its permission list.
for role in roles_permissions_map:
    print(role, roles_permissions_map[role])
def B():
    """Demo: os.path.splitext splits 'abc.img' into ('abc', '.img')."""
    parts = os.path.splitext('abc.img')
    print(parts)
image = 'Bing_0001.jpeg'
def resize_image(image, filename, base_width):
    """Scale *image* down to *base_width* pixels wide, keeping aspect ratio.

    :param image: path (or file object) of the source image
    :param filename: name used to derive the output filename
    :param base_width: target width in pixels
    :return: the output filename ('<name>_test<ext>'), or the original
        name unchanged when the image is already narrow enough
    """
    filename, ext = os.path.splitext(filename)
    img = Image.open(image)
    if img.size[0] <= base_width:
        # Already narrow enough -- nothing written, return the input name.
        return filename + ext
    print(img.size)
    w_percent = base_width / float(img.size[0])
    print(w_percent)
    h_size = int(float(img.size[1]) * w_percent)
    print(h_size)
    # BUG FIX: PIL.Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the
    # same resampling filter (ANTIALIAS was an alias for it).
    img = img.resize((base_width, h_size), PIL.Image.LANCZOS)
    filename += '_test' + ext
    img.save(filename, optimize=True, quality=85)
    return filename
if __name__ == '__main__':
    # Demo: resize the sample image to 400px wide when run as a script.
    resize_image(image,image,400)
"164635470@qq.com"
] | 164635470@qq.com |
0c5c2c43ad0c32f185649374d6b21c1754c06ef4 | 113f803b721984992bdc8f2177056b0f60de546a | /ex32/ex32.py | 36131ac8165a1e6521e3dcd80f8cca1ae549099b | [] | no_license | Na-Young-Lee/16PFA-Na-Young-Lee | ddb215b0dc9cb0572b96aa90d8db71fbbea13c13 | 6cdcea12fd46a5218f9b6a7cd4ac5ee5e347cbb7 | refs/heads/master | 2021-01-17T15:15:27.429510 | 2016-06-01T11:37:20 | 2016-06-01T11:37:20 | 53,923,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | # -*- coding:utf8
# http://learnpythonthehardway.org/book/ex32.html
the_count = [1, 2, 3, 4, 5]
fruits = ['apples', 'oranges', 'pears', 'apricots']
change = [1, 'pennies', 2, 'dimes', 3, 'quarters']
# this first kind of for-loop goes through a list #loop
for number in the_count:
print("This is count %d" % number)
# same as above
for fruit in fruits:
print("A fruit of type: %s" % fruit)
# also we can go through mixed lists too
# notice we have to use %r since we don't know what's in it
for i in change:
print("I got %r" % i)
# we can also build lists, first start with an empty one
elements = []
# then use the range function to do 0 to 5 counts
for i in range(0,6):
print("Adding %d to the list." % i)
# append is a function that lists understand
elements.append(i)
# 또는 python 에서는 아래와 같이 할 수도 있다.
# list comprehension
elements2 = [i for i in range(0,6)]
# noe we can print them out too
for i in elements:
print ("Element was: %d" % i)
| [
"CAD Client"
] | CAD Client |
e5153619e02b587888247f69275781bbf39dd455 | a1951be7f25ad3dd7f1b6ae186d95dcaca2f0c0c | /selfpy/chap10/classvar_basic.py | a1ae05904dffbb1780c5d545b697f34855fcd116 | [] | no_license | kazscape/teach_yourself_python | 7742371c074e434ad77f4dd866f426d233b08e8b | 52802c12a99edaf9d061a14b34f09c0ff60bb190 | refs/heads/master | 2023-02-03T08:46:44.280067 | 2020-12-25T08:45:58 | 2020-12-25T08:45:58 | 323,297,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | class Area:
PI = 3.14
@classmethod
def circle(cls, radius):
return radius * radius * cls.PI
if __name__ == '__main__':
    # Demo: read the class variable directly, then call the classmethod.
    print(Area.PI)
    print(Area.circle(10))
"yamauchi.kazuharu@fixer.co.jp"
] | yamauchi.kazuharu@fixer.co.jp |
96c547b852180157735d158a6db1edc39912fd5a | 622079460ebb4ba9b80f55624622e45fcbf064d1 | /DataAnalysis/day1_5/sec01.py | a64bf9b1c39d80d6886234dff8b43729c0d6e730 | [
"MIT"
] | permissive | yunjung-lee/class_python_numpy | 60bf24442e3f72a002ef25566e6982303dc0e12d | 589817c8bbca85d70596e4097c0ece093b5353c3 | refs/heads/master | 2020-04-02T09:50:08.854798 | 2018-10-23T10:53:00 | 2018-10-23T10:53:00 | 154,311,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,206 | py | count = '23,456'
#print(int(count))
#ValueError: invalid literal for int() with base 10: '23,456' => "," 때문에 숫자로 인식 못해서 생기는 에러
import re
from bs4 import BeautifulSoup
html = """
<ul>
<li> <a href="www.naver.com">naver</a></li>
<li> <a href="https://www.naver.com">naver</a></li>
<li> <a href="https://www.daum.com">daum</a></li>
<li> <a href="http://www.naver.com">naver</a></li>
</ul>
"""
# #정규식으로 href속성이 https인 것만 추출
# #https: 오고가는 문서를 엿보지 못하게 암호화해서 보내는 통신(비교적 안전하다.)
# soup = BeautifulSoup(html, "html.parser")
# li=soup.find_all(href=re.compile("^https://"))
# #print(li)
# for e in li :
# print(e.attrs['href'])
# #urljoin : 상대 주소를 표시
# #상대 경로로 웹 주소를 지정하는 방법
# #절대 경로 : 주소를 모두 표시
# #상대 경로 : 기준(필요하다) 으로 위치 표시
# #../ 로 경로 이동
# from urllib.parse import urljoin
# base="http://example.com/html/a.html"
# print(urljoin(base,"b.html"))
# #"http://example.com/html/sub/c.html"
# print(urljoin(base,"sub/c.html"))
# #"http://example.com/index.html"
# print(urljoin(base,"../index.html"))
# #"http://example.com/img/sky.png"
# print(urljoin(base,"../img/sky.png"))
#
# print(urljoin(base,"http://other.com/test"))
# #http://를 사용하기 때문에 urljoin을 무시하고 그대로 사용하게 된다.
# #//를 사용하기 때문에 urljoin을 무시하고 그대로 사용하게 된다.
# print(urljoin(base,"//other.com/test"))
#
"""
1.http통신
-http는 통신규약
-클라이언트는 주소를 통해 URL에 접근
http://www.naver.com: naver.com 에 있는 www라는 이름의 컴퓨터(서버)
-서버는 index.html문서(홈페이지 문서)를 클라이언트에게 전송
-클라이언트는 전송된 html문서를 해석하는 프로그램(웹브라우저)이 해석을 하여 결과를 화면에 출력
-서버와 클라이언트 간 통신 과정에서 서버가 클라이언트에게 응답코드를 html문서와 함께 전송
-대표적인 응답코드 : 200(정상), 4xx(페이지 주소 잘못, 없거나..),5xx(서버 내부 오류)
-쿠키,세션 정보 생성
-쿠기의 예 :id입력 란에 자동완성 기능 체크 설정 정보 등등(클라이언트 pc에 저장되어짐)
-세션 : 쿠키 정보를 서버에 저장
-실습주소
메인페이지 :http://www.hanbit.co.kr/index.html
로그인 페이지 주소 : http://www.hanbit.co.kr/member/login.html
마이 페이지 주소 : http://www.hanbit.co.kr/myhanbit/myhanbit.html
로그인 박스 정보 : 아이디 m-id 비밀번호 m-passwd
1)아이디와 /비밀번호 입력(화면 출력,login.html)
2)로그인 단추(화면 출력)
3)로그인 처리 (화면 출력 안됨, id/pw <->db서버 조회, login_proc.php)
4)로그인 된 상태(화면 출력)
"""
#파이썬으로 사이트 로그인 -> 개인정보 추출 -> 화면출력
#info = {"id":"test","pw":"1234"}
import requests
USER = "python96"
PASS = "gg244055"
session = requests.session() #세션 객체 생성(송화기-통신선-수화기)
#세션 : 서버와 클라이언트가 연결됨
#세션 유지 : 연결 상태를 유지
#클라이언트에서 서버에 데이터를 데이터를 연결하기 위한 목저긍로 연결할때는 SESSION을 사용.
login_info = {
"m_id":USER,
"m_passwd":PASS
}
url_login="http://www.hanbit.co.kr/member/login_proc.php"
#실제 로그인 페이지 : 접근이 되지 않는 페이지(ID와PW가 같이 들어 와야 로그인됨)
#세션 연결 시도
res = session.post(url_login, data = login_info)
print(res)
url_mypage = "http://www.hanbit.co.kr/myhanbit/myhanbit.html"
res = session.get(url_mypage)
print(res)
#print(res.text)
soup = BeautifulSoup(res.text, 'html.parser')
mileage = soup.select_one("#container > div > div.sm_mymileage > dl.mileage_section1 > dd > span").string
print("마일리지 :" +mileage+"점")
soup = BeautifulSoup(res.text, 'html.parser')
ecoin = soup.select_one("#container > div > div.sm_mymileage > dl.mileage_section2 > dd > span").string
print("이코인 :" +ecoin+"원")
| [
"python966@gmail.com"
] | python966@gmail.com |
c55f0e64b63446f992db9226cc9d25f28a8f4509 | 25219f56a7958d2fdbd2c08172ef7b91e31e2b5a | /calibration_scalars/PFISR/201302/PLCal30/20130221.003/do_Cal.py | 9dad646897bf2fa7ec6ee3566a88572613e98052 | [] | no_license | amisr/overspread | d82d3adc8f6d4981b3b13f54e39e69411711e3ee | c26a736c9b356f55fd7845ad5a093504e684f434 | refs/heads/main | 2023-04-03T10:47:13.975207 | 2021-03-30T23:43:22 | 2021-03-30T23:43:22 | 431,037,982 | 1 | 0 | null | 2021-11-23T19:02:13 | 2021-11-23T09:29:47 | Python | UTF-8 | Python | false | false | 6,557 | py | import datetime
import os
import numpy
import scipy
import matplotlib.pyplot as plt
import tables
from scipy.optimize import leastsq
import scipy.io as sio
def get_BS_angle(az, el):
    """Return the angle (degrees) between the look direction and boresight.

    :param az: azimuth in radians
    :param el: elevation in radians
    :return: (1,)-shaped numpy array with the off-boresight angle in degrees
        (preserving the original column-vector layout)

    The boresight is fixed at az=15 deg, el=74 deg.  The unit pointing
    vector is rotated into the boresight frame; the off-boresight angle is
    then 90 - asin(z).
    """
    # BUG FIX: scipy.cos/sin/pi/arcsin were NumPy aliases that SciPy has
    # removed; use numpy directly.
    az_bs = 15.0 * numpy.pi / 180.0
    el_bs = 74.0 * numpy.pi / 180.0

    # Unit pointing vector as a 3x1 column.
    k = numpy.array([[numpy.cos(el) * numpy.cos(az)],
                     [numpy.cos(el) * numpy.sin(az)],
                     [numpy.sin(el)]])

    tk = rotmat(k, 3, az_bs)
    tk2 = rotmat(tk, 2, numpy.pi / 2.0 - el_bs)

    # Robustness: rounding can push |z| marginally above 1, which would make
    # arcsin return nan; clip to the valid domain.
    z = numpy.clip(tk2[2], -1.0, 1.0)
    alphaBS = 90.0 - numpy.arcsin(z) * 180.0 / numpy.pi
    return alphaBS


def rotmat(input, dir, angle):
    """Rotate column vector *input* by *angle* (radians) about axis *dir*.

    :param input: (3, 1) column vector
    :param dir: rotation axis: 1=x, 2=y, 3=z
    :param angle: rotation angle in radians
    :return: the rotated (3, 1) column vector (original sign convention kept)
    """
    c = numpy.cos(angle)
    s = numpy.sin(angle)
    if dir == 1:
        R = numpy.array([[1, 0, 0],
                         [0, c, s],
                         [0, -s, c]])
    if dir == 2:
        R = numpy.array([[c, 0, -s],
                         [0, 1, 0],
                         [s, 0, c]])
    if dir == 3:
        R = numpy.array([[c, s, 0],
                         [-s, c, 0],
                         [0, 0, 1]])
    return numpy.dot(R, input)
def func(x, a, b, c):
    """Beam-pattern model: a * cos(x*pi/180 + c)**b, with x in degrees.

    BUG FIX: scipy.power/scipy.cos/scipy.pi were NumPy aliases removed from
    SciPy; use numpy directly.
    """
    return a * numpy.power(numpy.cos(x * numpy.pi / 180.0 + c), b)


def residual(p, y, x):
    """Residual y - func(x, *p) for scipy.optimize.leastsq; p = (a, b, c)."""
    a, b, c = p
    return y - func(x, a, b, c)
if __name__ == '__main__':
    # Python 2 calibration script: fits a cos^B beam-pattern model to the
    # per-beam system constants (Ksys) of one PFISR experiment and saves the
    # fit coefficients plus a diagnostic plot.
    now = datetime.datetime.now()
    date = now.strftime("%m.%d.%Y")
    #change experiment month here
    exp = 'cal-201302'
    GL = numpy.loadtxt('GratingLimits.txt')
    read_from_file = True
    if read_from_file == True:
        # From processed file
        #fname = '/Volumes/ISR_DATA_02/processed_data/PFISR/2010/04/Lyons30/20100406.001/20100406.001_ac_2min.h5'
        # From raw file
        filelist = []
        filepath = '/Volumes/AMISR_017/Data AMISR Poker/20130221.003/'
        dirList = os.listdir(filepath)
        for fname in dirList:
            if fname.endswith('.dt0.h5'):
                filelist.append(fname)
        # Read every array in the first raw HDF5 file into a nested dict
        # keyed by group path and array name.
        h5file=tables.openFile(filepath + filelist[0])
        output={}
        for array in h5file.listNodes('/',classname = 'Array'):
            output[array.name]=array.read()
        for group in h5file.walkGroups("/"):
            output[group._v_pathname]={}
            for array in h5file.listNodes(group, classname = 'Array'):
                output[group._v_pathname][array.name]=array.read()
        h5file.close()
        # Beamcode map location differs between file generations.
        try:
            BM = numpy.array(output['BeamCodes'])
        except:
            BM = numpy.array(output['/Setup']['BeamcodeMap'])
    else:
        BM = numpy.loadtxt('BeamCodeMap.txt')
    ###Change filelist here###
    #fname = ['filelist_ac.txt']
    fname = ['filelist_lp.txt']
    #get filelist
    # First line of the filelist is the path prefix; remaining lines are
    # the per-beam calibration result files.
    FILES = []
    for files in fname:
        f = open(files)
        FPATH = f.readline().strip()
        FILES = f.readlines()
        FILES = [line.strip() for line in FILES]
        FILES = [FPATH + line for line in FILES]
    thbs = scipy.linspace(0,40,100)
    # Previous calibration coefficients, for comparison in the plot.
    A = 0.98395
    B = 3.8781
    oldksys = A * scipy.power(scipy.cos(thbs*scipy.pi/180.0),B)
    ksysbco= BM[:,0]
    ksystab= BM[:,3]*1e19
    #print ksystab
    fig = plt.figure()
    alphaBS = scipy.zeros(len(FILES))
    aGL = scipy.zeros(len(FILES))
    KSYScorr = scipy.zeros(len(FILES))
    KSYS = scipy.zeros(len(FILES))
    eKSYS = scipy.zeros(len(FILES))
    for aa in range(len(FILES)):
        # Each file: line 1 = beamcode, az, el; line 2 = Ksys correction, error.
        f = open(FILES[aa])
        TLINE1 = numpy.array([value for value in f.readline().split()],dtype='float')
        TLINE2 = numpy.array([value for value in f.readline().split()],dtype='float')
        az = TLINE1[1]*scipy.pi/180.0
        el = TLINE1[2]*scipy.pi/180.0
        alphaBS[aa] = get_BS_angle(az,el)
        a = numpy.min(scipy.absolute(az*180.0/scipy.pi-GL[:,0]))
        I = scipy.absolute(az*180.0/scipy.pi-GL[:,0]).argmin()
        aGL[aa] = GL[I,2]-alphaBS[aa]
        KSYScorr[aa] = TLINE2[0]
        I = scipy.where(TLINE1[0] == ksysbco)
        if not I:
            # NOTE(review): 'xxx' is an undefined name and would raise
            # NameError (apparently a deliberate hard stop); the next line's
            # bare 'cos' is also undefined (likely meant scipy.cos).
            xxx
            KSYS[aa] = KSYScorr[aa]*A*scipy.power(cos(alphaBS[aa]*scipy.pi/180.0), B)
        else:
            KSYS[aa] = ksystab[I]*KSYScorr[aa]
        print 'Beam: %d, %f, %f, %f, %f\n' % (TLINE1[0], az*180/scipy.pi, el*180/scipy.pi,alphaBS[aa], KSYS[aa])
        eKSYS[aa] = TLINE2[1]*KSYS[aa]
        plt.plot(alphaBS[aa],KSYS[aa],'k.', hold=True)
        plt.plot([alphaBS[aa], alphaBS[aa]], [KSYS[aa]-eKSYS[aa], KSYS[aa]+eKSYS[aa]], 'b', hold=True)
    # Overlay the tabulated Ksys values for every beam in the map.
    for i in range(len(ksysbco)):
        I = scipy.where(BM[:,0] == ksysbco[i])
        az = scipy.float64(BM[I,1]*scipy.pi/180.0)
        el = scipy.float64(BM[I,2]*scipy.pi/180.0)
        if el>0:
            tbs = get_BS_angle(az,el)
            tksys = ksystab[i]
            plt.plot(tbs,tksys,'rx',hold=True)
    plt.plot(thbs,oldksys,'r-', hold=True)
    plt.xlabel('Angle off Boresight')
    plt.ylabel('Ksys')
    # Collapse repeated off-boresight angles to their median Ksys before fitting.
    sc = scipy.ones(len(alphaBS))
    alphaBS[0] = 0
    num = numpy.unique(alphaBS)
    y = scipy.zeros(len(num))
    for x in range(len(num)):
        I = scipy.where(num[x] == alphaBS)
        y[x] = scipy.median(KSYS[I])
    #initial guess
    [a,b,c],flag = leastsq(residual,[0,0,0],args=(y,num))
    yn = a*scipy.power(scipy.cos(thbs*scipy.pi/180.0+c), b) #*.5265
    plt.plot(thbs,yn,'k')
    # +/-10% envelope around the fitted curve.
    plt.plot(thbs,yn*1.1, '--k')
    plt.plot(thbs,yn*.9,'--k')
    #load AC
    try:
        dat = sio.loadmat('cal-201302-filelist_ac.txt_2.01_3.07-03.04.2013.mat')
        acx = dat['x'][0]
        acCal = acx[0]*scipy.power(scipy.cos(thbs*scipy.pi/180.0+acx[2]),acx[1])
        plt.plot(thbs,acCal,'g')
    except:
        pass
    #load previous month
    try:
        dat = sio.loadmat('cal-201206-filelist_lp.txt_5.55_12.16-09.28.2012.mat')
        acx = dat['x'][0]
        acCal = acx[0]*scipy.power(scipy.cos(thbs*scipy.pi/180.0+acx[2]),acx[1])
        plt.plot(thbs,acCal,'m')
    except:
        pass
    plt.title('x=%2.3f %2.3f %2.3f' % (a,b,c))
    plt.xlim((numpy.min(thbs)-1, numpy.max(thbs)-1))
    # Persist the fit coefficients and the figure, tagged with today's date.
    oname = '%s-%s_%2.2f_%2.2f-%s' % (exp,fname[0], a,b,date)
    sio.savemat(oname +'.mat',{'x':[a,b,c]},oned_as='row')
    plt.savefig(oname + '.png', dpi=200)
    plt.show()
| [
"fitter@heaviside.local"
] | fitter@heaviside.local |
9933ea10946149d245387f2a9a519a90495e4858 | 155cbccc3ef3b8cba80629f2a26d7e76968a639c | /thelma/repositories/rdb/schema/tables/target.py | c21e56dfe6f9f876b42d6da2123e6a18e7e5cd8a | [
"MIT"
] | permissive | papagr/TheLMA | 1fc65f0a7d3a4b7f9bb2d201259efe5568c2bf78 | d2dc7a478ee5d24ccf3cc680888e712d482321d0 | refs/heads/master | 2022-12-24T20:05:28.229303 | 2020-09-26T13:57:48 | 2020-09-26T13:57:48 | 279,159,864 | 1 | 0 | MIT | 2020-07-12T22:40:36 | 2020-07-12T22:40:35 | null | UTF-8 | Python | false | false | 868 | py | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Target table.
"""
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import Table
__docformat__ = "reStructuredText en"
__all__ = ['create_table']
def create_table(metadata, transcript_tbl, molecule_design_tbl):
    """Create and return the 'target' table bound to *metadata*.

    The table links a molecule design to a transcript via foreign keys into
    the two given tables.
    """
    columns = [
        Column('target_id', Integer, primary_key=True),
        Column('molecule_design_id', Integer,
               ForeignKey(molecule_design_tbl.c.molecule_design_id),
               nullable=False),
        Column('transcript_id', Integer,
               ForeignKey(transcript_tbl.c.transcript_id), nullable=False),
        ]
    return Table('target', metadata, *columns)
| [
"fogathmann@gmail.com"
] | fogathmann@gmail.com |
0fcf8214c7084c69e3dd3f68532b58fcb26ac6e3 | bc54edd6c2aec23ccfe36011bae16eacc1598467 | /simscale_sdk/models/one_of_velocity_field_selection_velocity_type.py | 03a7e6c7257973e8d346a387920c8da883e7e035 | [
"MIT"
] | permissive | SimScaleGmbH/simscale-python-sdk | 4d9538d5efcadae718f12504fb2c7051bbe4b712 | 6fe410d676bf53df13c461cb0b3504278490a9bb | refs/heads/master | 2023-08-17T03:30:50.891887 | 2023-08-14T08:09:36 | 2023-08-14T08:09:36 | 331,949,105 | 17 | 5 | null | null | null | null | UTF-8 | Python | false | false | 4,118 | py | # coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class OneOfVelocityFieldSelectionVelocityType(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'type': 'str'
    }

    attribute_map = {
        'type': 'type'
    }

    # Maps the 'type' discriminator value in the payload to the name of the
    # concrete model class that should be instantiated for it.
    discriminator_value_class_map = {
        'ABSOLUTE': 'AbsoluteHarmonicVelocityFieldType',
        'RELATIVE': 'RelativeHarmonicVelocityFieldType'
    }

    def __init__(self, type='RELATIVE', local_vars_configuration=None):  # noqa: E501
        """OneOfVelocityFieldSelectionVelocityType - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._type = None
        self.discriminator = 'type'

        self.type = type

    @property
    def type(self):
        """Gets the type of this OneOfVelocityFieldSelectionVelocityType.  # noqa: E501

        Schema name: RelativeHarmonicVelocityFieldType  # noqa: E501

        :return: The type of this OneOfVelocityFieldSelectionVelocityType.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this OneOfVelocityFieldSelectionVelocityType.

        Schema name: RelativeHarmonicVelocityFieldType  # noqa: E501

        :param type: The type of this OneOfVelocityFieldSelectionVelocityType.  # noqa: E501
        :type: str
        """
        # 'type' is the discriminator and therefore must always be present.
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501

        self._type = type

    def get_real_child_model(self, data):
        """Returns the real base class specified by the discriminator"""
        # Look up the concrete class name via the payload's discriminator field.
        discriminator_key = self.attribute_map[self.discriminator]
        discriminator_value = data[discriminator_key]
        return self.discriminator_value_class_map.get(discriminator_value)

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, OneOfVelocityFieldSelectionVelocityType):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, OneOfVelocityFieldSelectionVelocityType):
            return True

        return self.to_dict() != other.to_dict()
| [
"simscale"
] | simscale |
f74f89014de6b788ad040b5d142c3e87489d3fb6 | 990b92264109dc01dbfddeb6f5e75675037fd829 | /app/api/json_responses.py | aa53ddbc8b10a312799484ff3ca05a950f83b22d | [
"Apache-2.0"
] | permissive | CitoEngine/cito_engine | 20efa189abab1b684b60b260c1ea9ed16f6ea0f2 | 95852dd109d86a344726d7b11ed1132d4e48426b | refs/heads/master | 2020-05-21T15:04:24.011603 | 2019-02-08T04:51:42 | 2019-02-08T04:51:42 | 17,123,947 | 9 | 13 | Apache-2.0 | 2019-02-08T04:51:43 | 2014-02-24T03:17:04 | Python | UTF-8 | Python | false | false | 647 | py | import json
from django.http import HttpResponseBadRequest, HttpResponse
def json_error(msg):
    """Return a 400 response whose body is {"status": "error", "reason": msg}.

    BUG FIX: the body was built with %-interpolation, which produces invalid
    JSON whenever msg contains quotes or backslashes; json.dumps escapes it.
    """
    body = json.dumps({"status": "error", "reason": msg})
    return HttpResponseBadRequest(body, content_type="application/json")
def json_ok(msg):
    """Return a 200 response whose body is {"status": "ok", "reason": msg}.

    BUG FIX: use json.dumps instead of %-interpolation so quotes or
    backslashes in msg cannot break the JSON.
    """
    body = json.dumps({"status": "ok", "reason": msg})
    return HttpResponse(body, content_type="application/json")
def json_warning(msg):
    """Return a 200 response whose body is {"status": "warning", "reason": msg}.

    BUG FIX: use json.dumps instead of %-interpolation so quotes or
    backslashes in msg cannot break the JSON.
    """
    body = json.dumps({"status": "warning", "reason": msg})
    return HttpResponse(body, content_type="application/json")
def json_response(msg_dict):
    """Serialize *msg_dict* to JSON and return it as a 200 HttpResponse."""
    payload = json.dumps(msg_dict)
    return HttpResponse(payload, content_type="application/json")
"cyrus@extremeunix.com"
] | cyrus@extremeunix.com |
3a31448503cb98d12724ebcc831f4fb83774e782 | 2f989d067213e7a1e19904d482a8f9c15590804c | /lib/python3.4/site-packages/storages/backends/mogile.py | 5a31f663af9af93674069dd8a01eb71499f442fa | [
"MIT"
] | permissive | levabd/smart4-portal | beb1cf8847134fdf169ab01c38eed7e874c66473 | 2c18ba593ce7e9a1e17c3559e6343a14a13ab88c | refs/heads/master | 2023-02-18T05:49:40.612697 | 2022-08-02T09:35:34 | 2022-08-02T09:35:34 | 116,001,098 | 0 | 1 | MIT | 2023-02-15T21:34:01 | 2018-01-02T10:00:07 | Roff | UTF-8 | Python | false | false | 3,904 | py | from __future__ import print_function
import mimetypes
from django.conf import settings
from django.core.cache import cache
from django.utils.text import force_text
from django.http import HttpResponse, HttpResponseNotFound
from django.core.exceptions import ImproperlyConfigured
from storages.compat import urlparse, BytesIO, Storage
try:
import mogilefs
except ImportError:
raise ImproperlyConfigured("Could not load mogilefs dependency.\
\nSee http://mogilefs.pbworks.com/Client-Libraries")
class MogileFSStorage(Storage):
    """MogileFS filesystem storage.

    Requires the MOGILEFS_TRACKERS and MOGILEFS_DOMAIN settings; the public
    URL prefix comes from MOGILEFS_MEDIA_URL, falling back to MEDIA_URL.
    """
    def __init__(self, base_url=settings.MEDIA_URL):

        # the MOGILEFS_MEDIA_URL overrides MEDIA_URL
        if hasattr(settings, 'MOGILEFS_MEDIA_URL'):
            self.base_url = settings.MOGILEFS_MEDIA_URL
        else:
            self.base_url = base_url

        for var in ('MOGILEFS_TRACKERS', 'MOGILEFS_DOMAIN',):
            if not hasattr(settings, var):
                raise ImproperlyConfigured("You must define %s to use the MogileFS backend." % var)

        self.trackers = settings.MOGILEFS_TRACKERS
        self.domain = settings.MOGILEFS_DOMAIN
        self.client = mogilefs.Client(self.domain, self.trackers)

    def get_mogile_paths(self, filename):
        """Return the list of storage-node URLs for *filename*."""
        return self.client.get_paths(filename)

    # The following methods define the Backend API

    def filesize(self, filename):
        """Not supported by this backend."""
        # BUG FIX: ``raise NotImplemented`` raises a TypeError in Python 3
        # (NotImplemented is a constant, not an exception class); the intended
        # exception is NotImplementedError.
        raise NotImplementedError

    def path(self, filename):
        """Return the first storage path for *filename*, or None."""
        paths = self.get_mogile_paths(filename)
        # Reuse the fetched list instead of a second tracker round-trip.
        return paths[0] if paths else None

    def url(self, filename):
        """Public URL for *filename* under the configured base URL."""
        return urlparse.urljoin(self.base_url, filename).replace('\\', '/')

    def open(self, filename, mode='rb'):
        """Not supported by this backend."""
        raise NotImplementedError

    def exists(self, filename):
        """True when *filename* is stored in MogileFS."""
        return filename in self.client

    def save(self, filename, raw_contents, max_length=None):
        """Store *raw_contents* under an available name and return that name."""
        filename = self.get_available_name(filename, max_length)

        if not hasattr(self, 'mogile_class'):
            self.mogile_class = None

        # Write the file to mogile
        success = self.client.send_file(filename, BytesIO(raw_contents), self.mogile_class)
        if success:
            print("Wrote file to key %s, %s@%s" % (filename, self.domain, self.trackers[0]))
        else:
            print("FAILURE writing file %s" % (filename))

        return force_text(filename.replace('\\', '/'))

    def delete(self, filename):
        """Remove *filename* from MogileFS."""
        self.client.delete(filename)
def serve_mogilefs_file(request, key=None):
    """
    Called when a user requests an image.
    Either reproxy the path to perlbal, or serve the image outright
    """
    # not the best way to do this, since we create a client each time
    mimetype = mimetypes.guess_type(key)[0] or "application/x-octet-stream"
    client = mogilefs.Client(settings.MOGILEFS_DOMAIN, settings.MOGILEFS_TRACKERS)
    if hasattr(settings, "SERVE_WITH_PERLBAL") and settings.SERVE_WITH_PERLBAL:
        # we're reproxying with perlbal

        # check the path cache
        path = cache.get(key)
        if not path:
            path = client.get_paths(key)
            cache.set(key, path, 60)

        if path:
            response = HttpResponse(content_type=mimetype)
            response['X-REPROXY-URL'] = path[0]
        else:
            response = HttpResponseNotFound()
    else:
        # we don't have perlbal, let's just serve the image via django
        file_data = client[key]
        if file_data:
            # BUG FIX: the ``mimetype=`` kwarg was removed from HttpResponse in
            # Django 1.7; use ``content_type=`` as the perlbal branch already does.
            response = HttpResponse(file_data, content_type=mimetype)
        else:
            response = HttpResponseNotFound()
    return response
| [
"levabd@gmail.com"
] | levabd@gmail.com |
73a03dbe897ddbbceb91c33f3e51473d20761723 | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_438/run_cfg.py | 143de067caaee38065ed63af168dcc03c9de543a | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,537 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_652.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_653.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_654.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_655.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_656.root')
)
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
26bfaaa35b9290e0391bef9a2347fed6f0d202b4 | ed6c149d5d661c034bc59a7811104a69cfd3fd0e | /version2/calculate_STD_Month.py | 0b9d07bf243d922158c81c9f58bc2e26962bffaf | [] | no_license | changshun/Wind_Python | edcc87bd4c076fadd8d77a1d3e79a84b29f35b8f | f97c33d4cbb8607c9d86f46a4990dac862dff361 | refs/heads/master | 2021-04-06T08:31:43.347857 | 2017-11-16T02:53:59 | 2017-11-16T02:53:59 | 124,359,994 | 1 | 4 | null | 2018-03-08T08:21:38 | 2018-03-08T08:21:38 | null | UTF-8 | Python | false | false | 5,937 | py | #coding=utf-8
import sys
import MySQLdb
import datetime
import pandas as pd
from deal_with_day_data import *
reload(sys)
sys.setdefaultencoding('utf8')
def calculate_daily_return_date_list(stock_code):
    """Return (daily_return_list, date_list) for *stock_code*.

    The first day's return is 0.  A day's return is (close_t - close_{t-1})
    / close_{t-1}; it is None whenever either closing price is missing.
    """
    close_prices, date_list = get_date_close_list(stock_code, table_name="table_day_data")

    daily_returns = [0]  # no return can be computed for the first trading day
    for prev_close, cur_close in zip(close_prices[:-1], close_prices[1:]):
        if prev_close is None or cur_close is None:
            daily_returns.append(None)
        else:
            daily_returns.append((cur_close - prev_close) / prev_close)
    return daily_returns, date_list
def STD_function(input_list):
    """Return the sample standard deviation (pandas default, ddof=1) of
    *input_list*."""
    return pd.Series(input_list).std()
def insert_data_into_table(table_name,insert_column,stock,date_list,data_list):
    """Write data_list[i] into *insert_column* of *table_name* for the row
    keyed by (date_list[i] 00:00:00, stock).  None/NaN entries are skipped.
    Python 2 code (print statements, old except syntax)."""
    # NOTE(review): credentials are hard-coded and the SQL is built by string
    # concatenation (no parameterization) -- acceptable only because all
    # inputs come from this research pipeline, never from untrusted users.
    db = MySQLdb.connect(host='127.0.0.1', user='root', passwd='zjz4818774', db='invest_after_calculation', port=3306,charset='utf8')
    cursor = db.cursor()
    try:
        if len(date_list)==len(data_list):
            for i in range(len(date_list)):
                # print table_name,insert_column,data_list[i],date_list[i],stock
                if data_list[i] != None and pd.isnull(data_list[i]) == False:
                    sql = "UPDATE "+table_name+" SET "+insert_column+"="+str(data_list[i])+" WHERE trade_date='"+date_list[i]+" 00:00:00'"+" AND stock_code='"+stock+"'"
                    print sql
                    cursor.execute(sql)
                else:
                    print "this data is None , we don not need to update it"
        else:
            print "ERROR: insert data into table...len(date_list)!=len(data_list)"
        # One commit for the whole batch of updates.
        db.commit()
        print "update data successfully"
    except MySQLdb.Error, e:
        print "Mysql Error %d: %s" % (e.args[0], e.args[1])
        print "but this error will not cause wrong data, everything is ok"
    cursor.close()
    db.close()
##################################### ######计算3、6个月#########################################################
############################################统一封装好的函数###############################
def calculate_std_month(stock_code,time_length):
    """Compute the rolling *time_length*-month standard deviation of daily
    returns for *stock_code* and store it in table_month_data.
    Python 2 code (print statements)."""
    # Pad with zeros: the first (time_length-1) month-ends have no full window.
    std_month_list = [0 for i in range(time_length-1) ]
    # stock_code = "000002.SZ"
    daily_return_list,all_date_list = calculate_daily_return_date_list(stock_code)
    # Get the lists of beginning-of-month and end-of-month dates (2010-2016).
    all_BOM,all_EOM = get_all_BOM_EOM()
    BOM_index_list = []
    EOM_index_list = []
    # Translate each month boundary date into its index in the daily series.
    for i in range(len(all_BOM)):
        BOM_index_list.append(all_date_list.index(all_BOM[i]))
    for i in range(len(all_EOM)):
        EOM_index_list.append(all_date_list.index(all_EOM[i]))
    # print EOM_index_list
    if len(BOM_index_list)==len(EOM_index_list):
        print "len(BOM_index_list)==len(EOM_index_list)"
        for i in range(len(BOM_index_list)-(time_length-1)):
            # EOM_index_list holds positions within all_date_list (and the
            # aligned daily return list); the loop index i addresses months.
            # The +1 is needed because Python slices exclude the end index.
            std_month = STD_function(daily_return_list[BOM_index_list[i]:(EOM_index_list[i+(time_length-1)]+1)])
            std_month_list.append(std_month)
    else:
        print "calculate_std_month执行出错error"
        return
    insert_data_into_table("table_month_data",str(time_length)+"_month_std",stock_code,all_EOM,std_month_list)
    return std_month_list
###############################################控制计算和存储#########################################################
##############################################统一封装好的函数########################################################
def calculate_and_store_std_month(symbols,time_length):
    """Add the '<time_length>_month_std' column (if missing) and fill it for
    every stock in *symbols*.  Python 2 code (print statements)."""
    db = MySQLdb.connect(host='127.0.0.1', user='root', passwd='zjz4818774', db='invest_after_calculation', port=3306,charset='utf8')
    cursor = db.cursor()
    # ALTER fails harmlessly when the column already exists; the except
    # branch below just reports it and continues.
    sql = "alter table table_month_data add "+str(time_length)+"_month_std double"
    try:
        cursor.execute(sql)
        print "add key:"+str(time_length)+"_month_std successfully"
    except MySQLdb.Error, e:
        print "Mysql Error %d: %s" % (e.args[0], e.args[1])
        print "but this error will not cause wrong data, everything is ok"
    cursor.close()
    db.close()
    # symbols=['000002.SZ','000008.SZ','000009.SZ','000060.SZ','000063.SZ','000069.SZ','000100.SZ','000156.SZ','000157.SZ']  # hard-coded stock codes for testing
    for symbol in symbols:
        calculate_std_month(symbol,time_length)
if __name__ == "__main__":
    # symbols=['000002.SZ','000008.SZ','000009.SZ','000060.SZ','000063.SZ','000069.SZ','000100.SZ','000156.SZ','000157.SZ']  # hard-coded stock codes for testing
    from stocks_pool_for_HZ300 import select_good_stocks
    symbols = select_good_stocks()
    # a_list = [5 for i in range(100)]+ [0 for i in range(100)]
    # print STD_function(a_list)
    # print pd.Series(a_list).std()
    # Compute and store 1-, 3- and 6-month rolling std for every stock.
    calculate_and_store_std_month(symbols,1)
    calculate_and_store_std_month(symbols,3)
    calculate_and_store_std_month(symbols,6)
| [
"584392383@qq.com"
] | 584392383@qq.com |
1d636b5b499388ffc28a88f1f1e0afbf042662d1 | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/ipykernel-4.3.1-py27_0/lib/python2.7/site-packages/ipykernel/parentpoller.py | 227614d44e0cf146bcb8c42edf673ea0ab86a5be | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 3,969 | py | # Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
try:
import ctypes
except:
ctypes = None
import os
import platform
import signal
import time
try:
from _thread import interrupt_main # Py 3
except ImportError:
from thread import interrupt_main # Py 2
from threading import Thread
from IPython.utils.warn import warn
class ParentPollerUnix(Thread):
    """Daemon thread that terminates the process as soon as it is orphaned.

    On Unix a process whose parent has died is re-parented to init
    (pid 1), so polling ``os.getppid()`` once a second detects the
    parent's death without needing a handle to it.
    """

    def __init__(self):
        super(ParentPollerUnix, self).__init__()
        self.daemon = True

    def run(self):
        # os.waitpid cannot be used here: it only watches child processes.
        from errno import EINTR
        while True:
            try:
                if os.getppid() == 1:
                    os._exit(1)
                time.sleep(1.0)
            except OSError as e:
                # A signal may interrupt sleep(); anything else is fatal.
                if e.errno != EINTR:
                    raise
class ParentPollerWindows(Thread):
    """ A Windows-specific daemon thread that listens for a special event that
    signals an interrupt and, optionally, terminates the program immediately
    when the parent process no longer exists.
    """

    def __init__(self, interrupt_handle=None, parent_handle=None):
        """ Create the poller. At least one of the optional parameters must be
        provided.
        Parameters
        ----------
        interrupt_handle : HANDLE (int), optional
            If provided, the program will generate a Ctrl+C event when this
            handle is signaled.
        parent_handle : HANDLE (int), optional
            If provided, the program will terminate immediately when this
            handle is signaled.
        """
        assert(interrupt_handle or parent_handle)
        super(ParentPollerWindows, self).__init__()
        # ctypes is needed for the kernel32 wait call below; it may be None
        # if the guarded import at module top failed.
        if ctypes is None:
            raise ImportError("ParentPollerWindows requires ctypes")
        self.daemon = True
        self.interrupt_handle = interrupt_handle
        self.parent_handle = parent_handle

    def run(self):
        """ Run the poll loop. This method never returns.
        """
        # _winapi is the Python 3 home of these constants; _subprocess is
        # the Python 2 fallback.
        try:
            from _winapi import WAIT_OBJECT_0, INFINITE
        except ImportError:
            from _subprocess import WAIT_OBJECT_0, INFINITE
        # Build the list of handle to listen on.
        handles = []
        if self.interrupt_handle:
            handles.append(self.interrupt_handle)
        if self.parent_handle:
            handles.append(self.parent_handle)
        # Pick a handle-array element type matching the interpreter bitness.
        arch = platform.architecture()[0]
        c_int = ctypes.c_int64 if arch.startswith('64') else ctypes.c_int
        # Listen forever.
        while True:
            result = ctypes.windll.kernel32.WaitForMultipleObjects(
                len(handles),                            # nCount
                (c_int * len(handles))(*handles),        # lpHandles
                False,                                   # bWaitAll
                INFINITE)                                # dwMilliseconds
            # WAIT_OBJECT_0 + i means handles[i] was signaled.
            if WAIT_OBJECT_0 <= result < len(handles):
                handle = handles[result - WAIT_OBJECT_0]
                if handle == self.interrupt_handle:
                    # check if signal handler is callable
                    # to avoid 'int not callable' error (Python issue #23395)
                    if callable(signal.getsignal(signal.SIGINT)):
                        interrupt_main()
                elif handle == self.parent_handle:
                    # Parent process handle signaled -> parent died: exit now.
                    os._exit(1)
            elif result < 0:
                # wait failed, just give up and stop polling.
                warn("""Parent poll failed. If the frontend dies,
                the kernel may be left running. Please let us know
                about your system (bitness, Python, etc.) at
                ipython-dev@scipy.org""")
                return
| [
"wgyumg@mgail.com"
] | wgyumg@mgail.com |
283ed7d85ec7bd24039b2553c5fb906115ec1c38 | ad212b92beac17c4d061848c1dcd443d02a168c8 | /python/1641_count_sorted_vowel_strings/dfs.py | e1bbd040e0ec74005cea31f9eda206125a247880 | [] | no_license | 21eleven/leetcode-solutions | 5ec97e4391c8ebaa77f4404a1155f3ef464953b3 | 35c91e6f5f5ed348186b8641e6fc49c825322d32 | refs/heads/master | 2023-03-03T10:22:41.726612 | 2021-02-13T21:02:13 | 2021-02-13T21:02:13 | 260,374,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py | """
1641. Count Sorted Vowel Strings
Medium
Given an integer n, return the number of strings of length n that consist only of vowels (a, e, i, o, u) and are lexicographically sorted.
A string s is lexicographically sorted if for all valid i, s[i] is the same as or comes before s[i+1] in the alphabet.
Example 1:
Input: n = 1
Output: 5
Explanation: The 5 sorted strings that consist of vowels only are ["a","e","i","o","u"].
Example 2:
Input: n = 2
Output: 15
Explanation: The 15 sorted strings that consist of vowels only are
["aa","ae","ai","ao","au","ee","ei","eo","eu","ii","io","iu","oo","ou","uu"].
Note that "ea" is not a valid string since 'e' comes after 'a' in the alphabet.
Example 3:
Input: n = 33
Output: 66045
Constraints:
1 <= n <= 50
"""
class Solution:
    def countVowelStrings(self, n: int) -> int:
        """Return the number of lexicographically sorted vowel strings of length n.

        A sorted vowel string is fully determined by how many of each of the
        five vowels it contains, i.e. by a multiset of size n drawn from 5
        symbols.  By stars-and-bars that count is C(n + 4, 4), computed here
        in O(1) instead of recursively enumerating every string as before.
        """
        # C(n + 4, 4) without importing math.comb (24 == 4!).
        return (n + 1) * (n + 2) * (n + 3) * (n + 4) // 24
| [
"noahlidell@gmail.com"
] | noahlidell@gmail.com |
from os import strerror

# Build a buffer of ord('a') == 97 bytes and fill it with 10, 11, 12, ...
data = bytearray(ord('a'))
for i in range(len(data)):
    data[i] = 10 + i

try:
    # `with` guarantees the file is closed even if the write raises
    # (the old open/close pair leaked the handle on failure).
    with open('file.bin', 'wb') as bf:
        bf.write(data)
except IOError as e:
    # Bug fix: the handler called the undefined name `strerr`, which would
    # itself raise NameError instead of reporting the I/O error.
    print("I/O error occurred: ", strerror(e.errno))
| [
"adamov.george@gmail.com"
] | adamov.george@gmail.com |
fe5794ead0dddb72d93f5b6d535c961ab22d2382 | d57c59070bb167a167b95971e32731c48587cb59 | /backend/wallet/api/v1/serializers.py | 38828a3947e6a817d0937cd1a1c5c264662471dd | [] | no_license | crowdbotics-apps/blind-school-27613 | 2ec76994185f6f15bd8ec0fe48b4a86f10e8a882 | ad3549e18f1841bdac12a558aa59fa97cf9a6691 | refs/heads/master | 2023-05-11T19:18:34.670974 | 2021-05-31T10:12:31 | 2021-05-31T10:12:31 | 372,459,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | from rest_framework import serializers
from wallet.models import (
PaymentTransaction,
PaymentMethod,
TaskerWallet,
TaskerPaymentAccount,
CustomerWallet,
)
class TaskerPaymentAccountSerializer(serializers.ModelSerializer):
    """Serializes every field of the TaskerPaymentAccount model."""

    class Meta:
        model = TaskerPaymentAccount
        fields = "__all__"
class PaymentMethodSerializer(serializers.ModelSerializer):
    """Serializes every field of the PaymentMethod model."""

    class Meta:
        model = PaymentMethod
        fields = "__all__"
class TaskerWalletSerializer(serializers.ModelSerializer):
    """Serializes every field of the TaskerWallet model."""

    class Meta:
        model = TaskerWallet
        fields = "__all__"
class CustomerWalletSerializer(serializers.ModelSerializer):
    """Serializes every field of the CustomerWallet model."""

    class Meta:
        model = CustomerWallet
        fields = "__all__"
class PaymentTransactionSerializer(serializers.ModelSerializer):
    """Serializes every field of the PaymentTransaction model."""

    class Meta:
        model = PaymentTransaction
        fields = "__all__"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
e22248db541b874c00354eccfb4666f37e0fc02e | d864fd6c3d1452aa3cffb31ecb20fc14c46a5e91 | /ml_logger/log_client.py | 842e1532bb5078d31c8913b4ce27366ccaeac714 | [] | no_license | jonasrothfuss/ml_logger | 2c9e5592eb24bc70ae80e8d0c9e08e6e7054f3d7 | 2000b38177e3c4892e4fee74d769c1fc0a659424 | refs/heads/master | 2020-03-26T20:42:15.846183 | 2019-06-03T09:09:58 | 2019-06-03T09:09:58 | 145,340,704 | 1 | 0 | null | 2018-08-19T22:03:04 | 2018-08-19T22:03:03 | null | UTF-8 | Python | false | false | 3,940 | py | import os
from concurrent.futures import ThreadPoolExecutor
from requests_futures.sessions import FuturesSession
from ml_logger.serdes import serialize, deserialize
from ml_logger.server import LogEntry, LoadEntry, PingData, LoggingServer, ALLOWED_TYPES, Signal, LogOptions, \
RemoveEntry
class LogClient:
    """Client for a ml_logger LoggingServer.

    Depending on ``url`` it either instantiates a LoggingServer in-process
    (``file://<dir>`` or an absolute path) or talks to a remote server over
    HTTP via a FuturesSession, so writes are fire-and-forget while reads
    block on ``.result()``.
    """
    local_server = None

    def __init__(self, url: str = None, max_workers=None):
        """
        :param url: ``file://<dir>``, an absolute data directory, or the
            ``http://`` endpoint of a running LoggingServer.
        :param max_workers: optional thread-pool size for the async session.
        """
        if url.startswith("file://"):
            # Bug fix: "file://" is 7 characters; the old `url[6:]` kept an
            # extra leading "/" and turned relative file:// urls absolute.
            self.local_server = LoggingServer(data_dir=url[len("file://"):])
        elif os.path.isabs(url):
            self.local_server = LoggingServer(data_dir=url)
        elif url.startswith('http://'):
            self.url = url
            # Bug fix: os.path.join would use "\\" on Windows; URLs always
            # use "/" so build the ping endpoint with plain string ops.
            self.ping_url = url.rstrip('/') + '/ping'
        else:
            # todo: add https://, and s3://
            raise TypeError('log url need to begin with `/`, `file://` or `http://`.')
        if max_workers:
            self.session = FuturesSession(ThreadPoolExecutor(max_workers=max_workers))
        else:
            self.session = FuturesSession()

    def _get(self, key, dtype):
        """Load `key` as `dtype`, locally or via a blocking HTTP GET."""
        if self.local_server:
            return self.local_server.load(key, dtype)
        else:
            json = LoadEntry(key, dtype)._asdict()
            # note: reading stuff from the server is always synchronous via the result call.
            res = self.session.get(self.url, json=json).result()
            result = deserialize(res.text)
            return result

    def _post(self, key, data, dtype, options: LogOptions = None):
        """Log `data` under `key`; remote posts are asynchronous."""
        if self.local_server:
            self.local_server.log(key, data, dtype, options)
        else:
            # todo: make the json serialization more robust. Not priority b/c this' client-side.
            json = LogEntry(key, serialize(data), dtype, options)._asdict()
            self.session.post(self.url, json=json)

    def _delete(self, key):
        """Remove `key` from the store (async when remote)."""
        if self.local_server:
            self.local_server.remove(key)
        else:
            # todo: make the json serialization more robust. Not priority b/c this' client-side.
            json = RemoveEntry(key)._asdict()
            self.session.delete(self.url, json=json)

    def ping(self, exp_key, status, _duplex=True, burn=True):
        """Report `status` for `exp_key`; with `_duplex` return any pending signals."""
        # todo: add configuration for early termination
        if self.local_server:
            signals = self.local_server.ping(exp_key, status)
            return deserialize(signals) if _duplex else None
        else:
            # todo: make the json serialization more robust. Not priority b/c this' client-side.
            ping_data = PingData(exp_key, status, burn=burn)._asdict()
            req = self.session.post(self.ping_url, json=ping_data)
            if _duplex:
                response = req.result()
                # note: I wonder if we should raise if the response is non-ok.
                return deserialize(response.text) if response.ok else None

    # send signals to the worker
    def send_signal(self, exp_key, signal=None):
        options = LogOptions(overwrite=True)
        # NOTE(review): os.path.join yields "\\" separators on Windows; the
        # server presumably expects "/" keys — confirm before changing.
        channel = os.path.join(exp_key, "__signal.pkl")
        self._post(channel, signal, dtype="log", options=options)

    # Reads binary data
    def read(self, key):
        return self._get(key, dtype="read")

    # Reads pickled data
    def read_pkl(self, key):
        return self._get(key, dtype="read_pkl")

    # Reads numpy data
    def read_np(self, key):
        return self._get(key, dtype="read_np")

    # appends data
    def log(self, key, data, **options):
        self._post(key, data, dtype="log", options=LogOptions(**options))

    # appends text
    def log_text(self, key, text):
        self._post(key, text, dtype="text")

    # sends out images
    def send_image(self, key, data):
        assert data.dtype in ALLOWED_TYPES, "image data must be one of {}".format(ALLOWED_TYPES)
        self._post(key, data, dtype="image")

    # appends raw bytes (comment fixed: this is binary, not text)
    def log_buffer(self, key, buf):
        self._post(key, buf, dtype="byte")
| [
"yangge1987@gmail.com"
] | yangge1987@gmail.com |
b8735dea132ec85a36c4534b5df4368acaafad08 | eeb469954b768095f2b8ad2376f1a114a3adb3fa | /119.py | bfe18b6ba71c7a352c3f8459e86deaca6bf97484 | [
"MIT"
class Solution:
    def getRow(self, rowIndex: int) -> list:
        """Return row `rowIndex` (0-indexed) of Pascal's triangle.

        Bug fixes versus the previous version:
        - rowIndex = 0 no longer raises IndexError (it returned res[-1]
          of an empty list);
        - the result now has rowIndex + 1 elements, matching the
          0-indexed contract of LeetCode 119 (the old code was off by one).
        Also uses O(rowIndex) extra space by updating one row in place
        instead of materializing the whole triangle.
        """
        row = [1] * (rowIndex + 1)
        for i in range(2, rowIndex + 1):
            # Sweep right-to-left so each cell still reads the previous
            # row's value on its left before it is overwritten.
            for j in range(i - 1, 0, -1):
                row[j] += row[j - 1]
        return row
| [
"rafaelhuang@163.com"
] | rafaelhuang@163.com |
class Solution:
    """
    @param A: A string
    @param B: A string
    @return: The length of longest common subsequence of A and B
    """
    def longestCommonSubsequence(self, A, B):
        rows, cols = len(A), len(B)
        # dp[i][j] = LCS length of A[:i] and B[:j]; row 0 and column 0
        # stay zero, representing an empty prefix.
        dp = [[0] * (cols + 1) for _ in range(rows + 1)]
        for i, ch_a in enumerate(A, start=1):
            for j, ch_b in enumerate(B, start=1):
                if ch_a == ch_b:
                    # Extending the diagonal match always dominates.
                    dp[i][j] = dp[i - 1][j - 1] + 1
                else:
                    dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
        return dp[rows][cols]
"yanran2012@gmail.com"
] | yanran2012@gmail.com |
2e828986bbdd0616c1f339a48799eba5fca072c6 | 422943b00b6dab14ce72e6ed9e92c1c5c02883b0 | /updateTests.py | bf89b1d620c9e6705fe1c16beba02c1e9dc8f5d3 | [] | no_license | grid-control/testsuite | 412c376325feae3c744fd4ca4c32f254ffd9e6df | 3d2a8d3234dc1c83f7e2f71aa5704f8abe0d42de | refs/heads/master | 2021-08-06T08:16:12.209352 | 2019-07-11T13:37:56 | 2019-07-11T13:37:56 | 96,886,340 | 0 | 1 | null | 2017-08-14T12:09:12 | 2017-07-11T11:33:42 | Shell | UTF-8 | Python | false | false | 943 | py | import os
# Rewrite the prolog of every TEST_*.py under the current tree so it points
# at the nearest directory containing testfwk.py.
for (root, dirs, files) in os.walk('.'):
    def findTestFwk(dn):
        # Walk upwards (via '..') until a directory containing testfwk.py
        # is found.  NOTE(review): recurses forever if none exists above dn.
        if 'testfwk.py' in os.listdir(dn):
            return dn
        return findTestFwk(os.path.join(dn, '..'))
    for fn in files:
        if fn.startswith('TEST_') and fn.endswith('.py'):
            fn = os.path.join(root, fn)
            print(fn)
            lines = open(fn).readlines()
            # Locate the marker that separates the generated prolog from
            # the hand-written test body.
            for idx, line in enumerate(lines):
                if line.startswith('# - prolog marker'):
                    break
            else:
                raise Exception('No prolog marker found in %r!' % fn)
            # Rewrite the file: fresh prolog, then the original body.
            fp = open(fn, 'w')
            fp.write("""#!/usr/bin/env python
__import__('sys').path.append(__import__('os').path.join(__import__('os').path.dirname(__file__), %r))
__import__('testfwk').setup(__file__)
""" % findTestFwk(root).replace(root, '').lstrip('/'))
            fp.write('# - prolog marker\n')
            fp.write(str.join('', lines[idx+1:]))
            # Warn (but do not fail) when the test body lacks a run_test call.
            if not lines[-1].startswith('run_test(') and not (fn.endswith('fuzz.py') or fn.endswith('scale.py')):
                print('run_test missing')
| [
"stober@cern.ch"
] | stober@cern.ch |
5e42f67ea15cac565bc2f72d4be281f11441b0a7 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R2/benchmark/startQiskit65.py | 9ea013b14becf0bac967ec4fc7786a2d686f9843 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,433 | py | # qubit number=3
# total number=10
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
    """XOR the bit strings s and t character by character.

    The per-position results are joined in reverse order, matching the
    original little-endian-style convention.
    """
    xored = [str(int(s[k]) ^ int(t[k])) for k in range(len(s))]
    return ''.join(xored[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Return the inner product of bit strings s and t modulo 2, as '0'/'1'."""
    total = sum(int(s[k]) * int(t[k]) for k in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build the oracle O_f on n control qubits plus one target:
    for every bitstring rep with f(rep) == "1", flip the target.

    X gates temporarily wrap the multi-controlled Toffoli so positions
    that must be |0> act as controls as well.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    # Enumerate all 2**n inputs; exponential, acceptable for small n.
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename=(kernel + '-oracle.png'))
    return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Assemble a Bernstein-Vazirani-style circuit for the function f on
    n input qubits plus one ancilla, with extra numbered mutation gates
    (benchmark variations) inserted before the Hadamard layer.
    """
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    b = f(zero)
    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])
    # circuit begin — the numbered gates are benchmark-specific mutations.
    prog.h(input_qubit[1]) # number=1
    prog.cx(input_qubit[0],input_qubit[2]) # number=7
    prog.x(input_qubit[2]) # number=8
    prog.cx(input_qubit[0],input_qubit[2]) # number=9
    prog.cx(input_qubit[2],input_qubit[1]) # number=6
    prog.cx(input_qubit[2],input_qubit[1]) # number=4
    prog.z(input_qubit[2]) # number=3
    prog.y(input_qubit[2]) # number=5
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    """Simulate `prog` and return its state vector as a dict mapping
    basis-state labels like "|01>" to complex amplitudes.
    """
    state_backend = Aer.get_backend('statevector_simulator')
    statevec = execute(prog, state_backend).result()
    quantum_state = statevec.get_statevector()
    # Amplitude vector length is 2**qubits; recover the qubit count.
    qubits = round(log2(len(quantum_state)))
    quantum_state = {
        "|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
        for i in range(2 ** qubits)
    }
    return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    """Simulate `prog` on the named backend and bundle the measurement
    counts, the ideal state vector, the most frequent (bit-reversed)
    outcome `a`, and the expected answer `b`.
    """
    # Q: which backend should we use?
    # get state vector
    quantum_state = get_statevector(prog)
    # get simulate results
    # provider = IBMQ.load_account()
    # backend = provider.get_backend(backend_str)
    # qobj = compile(prog, backend, shots)
    # job = backend.run(qobj)
    # job.result()
    backend = Aer.get_backend(backend_str)
    # transpile/schedule -> assemble -> backend.run
    results = execute(prog, backend, shots=shots).result()
    counts = results.get_counts()
    # Most common bitstring, reversed to match qubit ordering.
    a = Counter(counts).most_common(1)[0][0][::-1]
    return {
        "measurements": counts,
        # "state": statevec,
        "quantum_state": quantum_state,
        "a": a,
        "b": b
    }
def bernstein_test_1(rep: str):
    """011 . x + 1"""
    return bitwise_xor(bitwise_dot("011", rep), "1")
def bernstein_test_2(rep: str):
    """000 . x + 0"""
    return bitwise_xor(bitwise_dot("000", rep), "0")
def bernstein_test_3(rep: str):
    """111 . x + 1"""
    return bitwise_xor(bitwise_dot("111", rep), "1")
if __name__ == "__main__":
    # Benchmark driver: build the circuit for f(x) = (11 . x) xor 1,
    # transpile it for a fake device, and dump results to a CSV file.
    n = 2
    a = "11"
    b = "1"
    f = lambda rep: \
        bitwise_xor(bitwise_dot(a, rep), b)
    prog = build_circuit(n, f)
    sample_shot =4000
    writefile = open("../data/startQiskit65.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    backend = BasicAer.get_backend('qasm_simulator')
    # Target a mocked 5-qubit device, then apply benchmark mutations.
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.h(qubit=2)
    circuit1.x(qubit=3)
    circuit1.measure_all()
    info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
    # Record counts, circuit depth, and the circuit drawing.
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
00f0ed14558402c0d3ef771f6e79a3f3375bd9a6 | 0316925e2bad29d60f0dcccdf91277fd8f03ef09 | /q015.py | 286af0b71dec819126a696d98ce11b5eb6fe2589 | [] | no_license | nomadlife/project-euler | 426614df7b2c9368a4db59954dc1df2902a44d6b | 9bc09843637a361fa93c7abb20ac990f973b08e5 | refs/heads/master | 2021-07-03T00:52:07.760948 | 2021-06-26T14:28:32 | 2021-06-26T14:28:32 | 100,214,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | # Q015 Lattice paths
# How many such routes are there through a 20x20 grid?

memo = {}


def grid(rows, cols):
    """Count lattice routes through a rows x cols grid (memoised recursion)."""
    if rows == 1 or cols == 1:
        # A single strip of cells: rows + cols routes.
        return rows + cols
    key = (rows, cols)
    if key not in memo:
        memo[key] = grid(rows, cols - 1) + grid(rows - 1, cols)
    return memo[key]


grid(20, 20)
| [
"joonwoo3816@gmail.com"
] | joonwoo3816@gmail.com |
13c46f9d5b80299f0706ab94baace17772ea3776 | 23bdb9a3102b9aad3ec20419593bbc3b1b7b3f66 | /piecrust/constants.py | 0b2fbbcf2a285f5d0c06563f396591e71adbc8c9 | [
"BSD-3-Clause"
] | permissive | toastdriven/piecrust | ec961998627145c0a6f312137564b1737fdb8113 | 95dadd7614a2fe7fe9186243f998f66c0909eb21 | refs/heads/master | 2015-08-06T10:31:46.352977 | 2011-10-18T07:55:18 | 2011-10-18T07:55:18 | 2,465,163 | 19 | 1 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | # Enable all basic ORM filters but do not allow filtering across relationships.
# 1: enable basic ORM filters, but no filtering across relationships.
ALL = 1
# 2: enable all ORM filters, including across relationships.
ALL_WITH_RELATIONS = 2

# Ripped from Django.
# dict.fromkeys builds the same {term: None} mapping as the old
# dict([(x, None) for x in (...)]) without the throwaway list of pairs.
QUERY_TERMS = dict.fromkeys((
    'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
    'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
    'month', 'day', 'week_day', 'isnull', 'search', 'regex', 'iregex',
))

# Separator between field name and lookup type in query keys.
LOOKUP_SEP = '__'
| [
"daniel@toastdriven.com"
] | daniel@toastdriven.com |
def pianist_takahashi(S: str) -> str:
    """Return the scale name ('Do'..'Si') of the leftmost key described by S.

    S is a string of white/black key colors; each scale corresponds to a
    rotation of the 12-key octave pattern.  Returns 'Unknown' when S does
    not start with any rotation.

    Bug fix: the return annotation claimed ``int`` although the function
    has always returned a string.
    """
    scales = [
        'Do', 'Do#', 'Re', 'Re#', 'Mi',
        'Fa', 'Fa#', 'So', 'So#', 'La', 'La#', 'Si'
    ]
    keyboard = 'WBWBWWBWBWBW'
    for i, scale in enumerate(scales):
        # Rotating the pattern by i aligns key i with the left edge of S.
        if S.startswith(keyboard[i:] + keyboard[:i]):
            return scale
    return 'Unknown'
if __name__ == "__main__":
    # Read the observed key-color string and resolve the leftmost key.
    S = input()
    ans = pianist_takahashi(S)
print(ans) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
bc4cbaa20cc1ae062653619f557fe8e7c1106adc | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04000/s890347924.py | e08156f14ab959ff622a85fa9f2278635cf92fe4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | from collections import defaultdict, Counter
# For an H x W grid with N painted cells, count how many 3x3 sub-rectangles
# contain exactly 0, 1, ..., 9 painted cells.
H, W, N = map(int, input().split())
# D maps each 3x3 window (keyed by its top-left corner) to its painted count;
# only windows containing at least one painted cell ever appear in D.
D = defaultdict(int)
for _ in range(N):
    a, b = map(int, input().split())
    # Cell (a, b) lies in the 3x3 windows whose top-left corner is within
    # two rows/columns above/left of it (and inside the grid).
    for i in range(a - 2, a + 1):
        if 1 <= i <= H - 2:
            for j in range(b - 2, b + 1):
                if 1 <= j <= W - 2:
                    D[(i, j)] += 1
# Windows absent from D contain zero painted cells.
print((H - 2) * (W - 2) - len(D))
c = Counter(D.values())
for i in range(1, 10):
    print(c.get(i, 0))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c71b4a1f83067c3a098d6a37edad98bc9a120439 | a611d9ecfae0ecaed9848bf0474e8d2249764c6c | /bundestag/link_crawler.py | 25a20ec56200e4b138fa99fc4d49894fc5ca9f3a | [] | no_license | JokusPokus/TINART-finetuning | 8317ad9f1a42e10e032ab77ce998300dc605ff57 | 667ccfae1ddae1eece070bef2934160d816a9a24 | refs/heads/master | 2023-01-28T04:51:32.051599 | 2020-12-02T15:36:39 | 2020-12-02T15:36:39 | 313,454,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,176 | py | """
Crawls links to parliament proceeding protocols for the current voting period.
"""
import requests
from bs4 import BeautifulSoup
from typing import Dict, List
# metadata for the HTTP requests
# NOTE(review): the cookie below is a captured session cookie committed to
# source control; it will expire and probably should not live here.
HEADERS = {
    'authority': 'www.bundestag.de',
    'accept': '*/*',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36',
    'x-requested-with': 'XMLHttpRequest',
    'sec-fetch-site': 'same-origin',
    'sec-fetch-mode': 'cors',
    'sec-fetch-dest': 'empty',
    'referer': 'https://www.bundestag.de/services/opendata',
    'accept-language': 'de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7,nl;q=0.6,es;q=0.5',
    'cookie': 'INGRESSCOOKIE=1602572682.198.40.821660; CM_SESSIONID=4A5EDDBABCC058A39020EBCA61518DB9.cae-live-1',
    'dnt': '1',
    'sec-gpc': '1',
}

# Link to the web page where XML protocols can be accessed.
# Note that this link might change in the future.
SOURCE = 'https://www.bundestag.de/ajax/filterlist/de/services/opendata/543410-543410'

# Windows-style relative directory where the crawled link list is written.
OUTPUT_DIRECTORY = ".\\bundestag\\resource_links\\"
class Params:
    """Query parameters for one paginated request to the opendata endpoint."""

    # Fixed page size and filter settings shared by every request.
    limit = ("limit", "10")
    noFilterSet = ("noFilterSet", "true")

    def __init__(self, offset):
        # The offset selects which page of results the endpoint returns.
        self.offset = ("offset", str(offset))

    def to_tuple(self):
        """Return all parameters as a tuple of (name, value) pairs."""
        return (self.limit, self.noFilterSet, self.offset)
class HTMLParser:
    """
    Crawls the links to the XML files containing the parliament protocols
    of the current legislative period.
    Stores the links in a txt file in the output directory.
    """
    def __init__(self,
                 headers: Dict = HEADERS,
                 source: str = SOURCE,
                 output_directory: str = OUTPUT_DIRECTORY):
        """
        :param headers: metadata passed to the HTTP request
        :param source: link to the web page where the XML protocols can be accessed
        :param output_directory: directory where the list of links shall be stored
        """
        self.headers = headers
        self.source = source
        self.output_directory = output_directory

    @staticmethod
    def _get_links(doc_string: str) -> List[str]:
        """
        Takes an html string and returns a list with links to XML resources.
        :param doc_string: the HTML content of an HTTP response in string format
        :return: a list of strings, each representing a link to an XML resource
        """
        soup = BeautifulSoup(doc_string, features="html.parser")
        links = []
        for a in soup.find_all("a"):
            # hrefs in the response are site-relative, so prefix the host.
            new_link = "bundestag.de" + a.get("href")
            links.append(new_link)
        return links

    @staticmethod
    def _has_link(doc_string: str) -> bool:
        """
        Checks whether given html string contains an "a" element.
        """
        soup = BeautifulSoup(doc_string, features="html.parser")
        has_link = bool(soup.a and soup.a["href"])
        return has_link

    def _append_links(self, links: List[str]):
        """
        Takes a list of links in string format and appends them to a text file,
        each link in a new line.
        The text file is saved into the parser's output directory.
        :param links: list of links in string format
        """
        # "a+" appends, so repeated runs accumulate duplicate links.
        with open(self.output_directory + "resource_links.txt", "a+") as links_file:
            for link in links:
                links_file.write(link + "\n")

    def write_links_to_file(self):
        """
        Crawls the whole source website for links to xml resources
        and writes the links to a text file stored in the output directory.
        """
        # Page through the endpoint 10 items at a time until a page
        # without any <a> elements signals the end of the results.
        offset = 0
        while True:
            params = Params(offset).to_tuple()
            response = requests.get(self.source, headers=self.headers, params=params)
            if not self._has_link(response.content):
                break
            links = self._get_links(response.content)
            self._append_links(links)
            offset += 10
if __name__ == "__main__":
    # Crawl every results page and persist the XML links to disk.
    parser = HTMLParser()
    parser.write_links_to_file()
| [
"jakob.schmitt@code.berlin"
] | jakob.schmitt@code.berlin |
bd53a8dbbf7296ec1350fc2ed8664fd62d46a3fb | ee682ade4fcafb5648a99ceb66a2d4b7bb186cb3 | /app/members/admin.py | 3a05a99558505445f0f1ca38c8377de03a63a575 | [] | no_license | gambler1541/hw-django | a7ea4ad743cb435c2f9ecbf3ff86ce3adf036069 | 99639fb3da7639bffe8b22fe9fc7ac91260f05ed | refs/heads/master | 2020-03-21T04:19:08.601227 | 2018-06-21T01:10:35 | 2018-06-21T01:10:35 | 138,102,510 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | from django.contrib import admin
from members.models import InstagramUser, UserInfo

# Expose both member models in the Django admin with the default ModelAdmin.
admin.site.register(InstagramUser)
admin.site.register(UserInfo)
| [
"gambler1541@gmail.com"
] | gambler1541@gmail.com |
f6607a1124301e9ff2d135311f8538737bc61608 | bbf874cf4abb20e7ec5c66e808e97ae6f2043c3f | /0x0B-python-input_output/14-pascal_triangle.py | c7d2d1a3c815f704485b95a44abecae4c2d99415 | [] | no_license | imperfectskillz/holbertonschool-higher_level_programming | 105fd80c2bea8fbb60eb786ce9019b3f63188342 | 704e99b29125d6449db32b9d52ede443318df620 | refs/heads/master | 2021-09-14T10:44:22.551896 | 2018-05-12T03:38:59 | 2018-05-12T03:38:59 | 113,130,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | #!/usr/bin/python3
"""
module contains pascal traingle
"""
def pascal_triangle(n):
    """Return the first n rows of Pascal's triangle as a list of lists.

    Returns an empty list when n <= 0.
    """
    if n <= 0:
        return []
    triangle = [[1]]
    for row_len in range(2, n + 1):
        prev = triangle[-1]
        # Interior entries are pairwise sums of the previous row.
        middle = [prev[k] + prev[k + 1] for k in range(row_len - 2)]
        triangle.append([1] + middle + [1])
    return triangle
| [
"j.choi.89@gmail.com"
] | j.choi.89@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.